repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/trainers/early_stoppers/base/early_stopper_base.py | src/trainers/early_stoppers/base/early_stopper_base.py | import logging
from utils.param_checking import check_exclusive
class EarlyStopperBase:
    """Base class for early stoppers that fire on a fixed epoch/update/sample interval.

    Exactly one of the three interval arguments must be provided; subclasses
    implement the actual stopping decision in ``_should_stop``.
    """

    def __init__(
            self,
            every_n_epochs=None,
            every_n_updates=None,
            every_n_samples=None,
    ):
        self.logger = logging.getLogger(type(self).__name__)
        assert check_exclusive(every_n_epochs, every_n_updates, every_n_samples), \
            "specify only one of every_n_epochs/every_n_updates/every_n_samples"
        self.every_n_epochs = every_n_epochs
        self.every_n_updates = every_n_updates
        self.every_n_samples = every_n_samples

    def to_short_interval_string(self):
        """Compact identifier of the configured interval, e.g. ``E5`` or ``U100``."""
        parts = [
            f"{prefix}{value}"
            for prefix, value in (
                ("E", self.every_n_epochs),
                ("U", self.every_n_updates),
                ("S", self.every_n_samples),
            )
            if value is not None
        ]
        return "_".join(parts)

    def should_stop_after_sample(self, checkpoint, effective_batch_size):
        """Evaluate the stop criterion if the last update crossed a sample-interval boundary."""
        if self.every_n_samples is None:
            return False
        samples_before_update = checkpoint.sample - effective_batch_size
        # the interval was crossed when the integer quotient increased with this update
        crossed = int(checkpoint.sample / self.every_n_samples) > int(samples_before_update / self.every_n_samples)
        if crossed:
            return self._should_stop()
        return False

    def should_stop_after_update(self, checkpoint):
        """Evaluate the stop criterion if the update counter hits the interval."""
        if self.every_n_updates is not None and checkpoint.update % self.every_n_updates == 0:
            return self._should_stop()
        return False

    def should_stop_after_epoch(self, checkpoint):
        """Evaluate the stop criterion if the epoch counter hits the interval."""
        if self.every_n_epochs is not None and checkpoint.epoch % self.every_n_epochs == 0:
            return self._should_stop()
        return False

    def _should_stop(self):
        """Subclasses decide whether training should actually stop."""
        raise NotImplementedError
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/trainers/early_stoppers/base/__init__.py | src/trainers/early_stoppers/base/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/trainers/eval/__init__.py | src/trainers/eval/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/trainers/eval/single_eval_trainer.py | src/trainers/eval/single_eval_trainer.py | import torch
import torch.nn as nn
from kappadata.wrappers import ModeWrapper
from trainers.base.sgd_trainer import SgdTrainer
class SingleEvalTrainer(SgdTrainer):
    """Trainer that performs a single evaluation pass instead of training.

    Defaults disable optimization-related behavior (zero epochs, no gradient
    accumulation); the wrapped model is only run forward and the reported loss
    is a constant zero.
    """

    def __init__(
            self,
            max_epochs=0,
            precision="float32",
            effective_batch_size=2,
            disable_gradient_accumulation=True,
            **kwargs,
    ):
        super().__init__(
            max_epochs=max_epochs,
            precision=precision,
            effective_batch_size=effective_batch_size,
            disable_gradient_accumulation=disable_gradient_accumulation,
            **kwargs,
        )

    @property
    def output_shape(self):
        # dummy 2-class output shape; predictions are discarded anyway
        return 2,

    @property
    def dataset_mode(self):
        # fix: was an f-string without placeholders (lint F541); plain literal is equivalent
        return "index x"

    def get_trainer_model(self, model):
        return self.Model(model=model, trainer=self)

    class Model(nn.Module):
        """Wrapper that forwards a batch through the model and returns a zero loss."""

        def __init__(self, model, trainer):
            super().__init__()
            self.model = model
            self.trainer = trainer

        def forward(self, batch):
            batch, ctx = batch
            x = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="x", batch=batch)
            x = x.to(self.model.device, non_blocking=True)
            # output is discarded; only the forward pass itself matters here
            _ = self.model(x)
            return dict(total=torch.tensor(0.)), {}
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/dummy_model.py | src/models/dummy_model.py | import numpy as np
import torch.nn as nn
from .base.single_model_base import SingleModelBase
class DummyModel(SingleModelBase):
    """Minimal stand-in model: a single linear layer mapping flattened inputs to outputs."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        target_shape = self.output_shape or self.input_shape
        self.layer = nn.Linear(np.prod(self.input_shape), np.prod(target_shape))

    def forward(self, x, *_, **__):
        flat = x.flatten(start_dim=1)
        out_shape = self.output_shape or self.input_shape
        return self.layer(flat).reshape(len(x), *out_shape)

    def predict(self, x):
        """Return predictions under the ``main`` key."""
        return dict(main=self(x))

    def predict_binary(self, x):
        """Binary prediction uses the same head as ``predict``."""
        return dict(main=self(x))
def load_state_dict(self, state_dict, strict=True):
pass | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/__init__.py | src/models/__init__.py | import logging
from copy import deepcopy
from functools import partial
import torch.nn as nn
import yaml
from initializers import initializer_from_kwargs
from utils.factory import instantiate
def model_from_kwargs(kind=None, path_provider=None, data_container=None, **kwargs):
    """Instantiate a model from (possibly yaml-derived) kwargs, optionally merging kwargs from a checkpoint.

    Precedence: explicitly passed kwargs > nested yaml ``kwargs`` block > checkpoint kwargs.
    ``update_counter``/``static_ctx``/``dynamic_ctx`` are passed through by reference (not deep-copied)
    so model and trainer share the same objects.
    """
    # exclude update_counter from copying (otherwise model and trainer have different update_counter objects)
    update_counter = kwargs.pop("update_counter", None)
    static_ctx = kwargs.pop("static_ctx", None)
    dynamic_ctx = kwargs.pop("dynamic_ctx", None)
    kwargs = deepcopy(kwargs)
    # allow setting multiple kwargs in yaml; but allow also overwriting it
    # kind: vit.masked_encoder
    # kwargs: ${select:${vars.encoder_model_key}:${yaml:models/vit}}
    # patch_size: [128, 1] # this will overwrite the patch_size in kwargs
    kwargs_from_yaml = kwargs.pop("kwargs", {})
    kwargs = {**kwargs_from_yaml, **kwargs}
    # try to load kwargs from checkpoint
    if "initializers" in kwargs:
        # only first one can have use_checkpoint_kwargs
        initializer_kwargs = kwargs["initializers"][0]
        assert all(obj.get("use_checkpoint_kwargs", None) is None for obj in kwargs["initializers"][1:])
        use_checkpoint_kwargs = initializer_kwargs.pop("use_checkpoint_kwargs", False)
        initializer = initializer_from_kwargs(**initializer_kwargs, path_provider=path_provider)
        if use_checkpoint_kwargs:
            ckpt_kwargs = initializer.get_model_kwargs()
            # the checkpoint's kind is only used when no kind was given explicitly
            if kind is None and "kind" in ckpt_kwargs:
                kind = ckpt_kwargs.pop("kind")
            else:
                ckpt_kwargs.pop("kind", None)
            # initializer/optim/freezers shouldnt be used
            ckpt_kwargs.pop("initializers", None)
            ckpt_kwargs.pop("optim_ctor", None)
            # check if keys overlap; this can be intended
            # - vit trained with drop_path_rate but then for evaluation this should be set to 0
            # if keys overlap the explicitly specified value dominates (i.e. from yaml or from code)
            kwargs_intersection = set(kwargs.keys()).intersection(set(ckpt_kwargs.keys()))
            if len(kwargs_intersection) > 0:
                logging.info(f"checkpoint_kwargs overlap with kwargs (intersection={kwargs_intersection})")
                for intersecting_kwarg in kwargs_intersection:
                    ckpt_kwargs.pop(intersecting_kwarg)
            kwargs.update(ckpt_kwargs)
            # LEGACY start: shape was stored as torch.Size -> yaml cant parse that
            if "input_shape" in kwargs and not isinstance(kwargs["input_shape"], tuple):
                kwargs["input_shape"] = tuple(kwargs["input_shape"])
            # LEGACY end
            logging.info(f"postprocessed checkpoint kwargs:\n{yaml.safe_dump(kwargs, sort_keys=False)[:-1]}")
        else:
            logging.info(f"not loading checkpoint kwargs")
    else:
        logging.info(f"model has no initializers -> not loading a checkpoint or an optimizer state")
    assert kind is not None, "model has no kind (maybe use_checkpoint_kwargs=True is missing in the initializer?)"
    # rename optim to optim_ctor (in yaml it is intuitive to call it optim as the yaml should not bother with the
    # implementation details but the implementation passes a ctor so it should also be called like it)
    optim = kwargs.pop("optim", None)
    # model doesn't need to have an optimizer
    if optim is not None:
        kwargs["optim_ctor"] = optim
    # filter out modules passed to ctor
    ctor_kwargs_filtered = {k: v for k, v in kwargs.items() if not isinstance(v, nn.Module)}
    # ctor_kwargs are kept for later reproduction of the model (e.g. stored in checkpoints)
    ctor_kwargs = deepcopy(ctor_kwargs_filtered)
    ctor_kwargs["kind"] = kind
    ctor_kwargs.pop("input_shape", None)
    ctor_kwargs.pop("output_shape", None)
    ctor_kwargs.pop("optim_ctor", None)
    return instantiate(
        module_names=[
            f"models.{kind}",
            f"models.composite.{kind}",
        ],
        type_names=[kind.split(".")[-1]],
        update_counter=update_counter,
        path_provider=path_provider,
        data_container=data_container,
        static_ctx=static_ctx,
        dynamic_ctx=dynamic_ctx,
        ctor_kwargs=ctor_kwargs,
        **kwargs,
    )
def prepare_momentum_kwargs(kwargs):
    """Return a deep copy of ``kwargs`` with optimizer/freezer/initializer entries removed.

    Used e.g. to build an EMA/momentum copy of a model that must not carry its
    own optimizer state. The input is left untouched.
    """
    copied = deepcopy(kwargs)
    _prepare_momentum_kwargs(copied)
    return copied


def _prepare_momentum_kwargs(node):
    """Recursively strip optimizer-related keys from dicts and ``functools.partial`` keywords in place."""
    if isinstance(node, dict):
        for key in ("optim", "freezers", "initializers", "is_frozen"):
            node.pop(key, None)
        children = node.values()
    elif isinstance(node, partial):
        # partials carry the ctor-style key name ("optim_ctor" instead of "optim")
        for key in ("optim_ctor", "freezers", "initializers", "is_frozen"):
            node.keywords.pop(key, None)
        children = node.keywords.values()
    else:
        return
    for child in children:
        _prepare_momentum_kwargs(child)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/composite/cfd_hybrid_model.py | src/models/composite/cfd_hybrid_model.py | import torch
from models import model_from_kwargs
from models.base.composite_model_base import CompositeModelBase
from utils.factory import create
class CfdHybridModel(CompositeModelBase):
    """Encoder / latent / decoder composite for CFD timestepping with an optional conditioner.

    The latent propagation can be forced to run in fp32 (``force_latent_fp32``) even
    under autocast, which stabilizes long rollouts.
    """

    def __init__(
            self,
            encoder,
            latent,
            decoder,
            conditioner=None,
            force_latent_fp32=True,
            **kwargs,
    ):
        super().__init__(**kwargs)
        self.force_latent_fp32 = force_latent_fp32
        # kwargs shared by all submodel factory calls
        common_kwargs = dict(
            update_counter=self.update_counter,
            path_provider=self.path_provider,
            dynamic_ctx=self.dynamic_ctx,
            static_ctx=self.static_ctx,
            data_container=self.data_container,
        )
        # conditioner (e.g. timestep/velocity embedding); optional
        self.conditioner = create(
            conditioner,
            model_from_kwargs,
            **common_kwargs,
        )
        # encoder
        self.encoder = create(
            encoder,
            model_from_kwargs,
            input_shape=self.input_shape,
            **common_kwargs,
        )
        # latent
        self.latent = create(
            latent,
            model_from_kwargs,
            input_shape=self.encoder.output_shape,
            **common_kwargs,
        )
        # decoder
        self.decoder = create(
            decoder,
            model_from_kwargs,
            **common_kwargs,
            input_shape=self.latent.output_shape,
            output_shape=self.output_shape,
        )

    @property
    def submodels(self):
        # conditioner entry is omitted when no conditioner was configured
        return dict(
            **(dict(conditioner=self.conditioner) if self.conditioner is not None else {}),
            encoder=self.encoder,
            latent=self.latent,
            decoder=self.decoder,
        )

    # noinspection PyMethodOverriding
    def forward(
            self,
            x,
            mesh_pos,
            grid_pos,
            query_pos,
            mesh_to_grid_edges,
            unbatch_idx,
            unbatch_select,
            timestep=None,
            velocity=None,
    ):
        """Single-step prediction; returns ``{"x_hat": ...}``."""
        outputs = {}
        if self.conditioner is not None:
            condition = self.conditioner(timestep=timestep, velocity=velocity)
        else:
            condition = None
        # encode data ((x_{t-2}, x_{t-1} -> dynamic_{t-1})
        prev_dynamics = self.encoder(
            x,
            mesh_pos=mesh_pos,
            grid_pos=grid_pos,
            mesh_to_grid_edges=mesh_to_grid_edges,
            condition=condition,
        )
        # predict current latent (dynamic_{t-1} -> dynamic_t)
        if self.force_latent_fp32:
            # disable autocast so the latent runs in full precision
            with torch.autocast(device_type=str(x.device).split(":")[0], enabled=False):
                prev_dynamics = prev_dynamics.float()
                # NOTE(review): this assumes a conditioner is configured; with
                # conditioner=None this raises AttributeError — confirm intended usage
                condition = condition.float()
                dynamics = self.latent(prev_dynamics, condition=condition)
        else:
            dynamics = self.latent(prev_dynamics, condition=condition)
        # decode next_latent to next_data (dynamic_t -> x_t)
        x_hat = self.decoder(
            dynamics,
            query_pos=query_pos,
            unbatch_idx=unbatch_idx,
            unbatch_select=unbatch_select,
            condition=condition,
        )
        outputs["x_hat"] = x_hat
        return outputs

    @torch.no_grad()
    def rollout(
            self,
            x,
            mesh_pos,
            grid_pos,
            query_pos,
            mesh_to_grid_edges,
            unbatch_idx,
            unbatch_select,
            velocity=None,
            num_rollout_timesteps=None,
    ):
        """Autoregressive rollout: feed each prediction back as input for the next step."""
        # check num_rollout_timesteps
        max_timesteps = self.data_container.get_dataset().getdim_timestep()
        num_rollout_timesteps = num_rollout_timesteps or max_timesteps
        assert 0 < num_rollout_timesteps <= max_timesteps
        # setup
        x_hats = []
        timestep = torch.zeros(1, device=x.device, dtype=torch.long)
        for _ in range(num_rollout_timesteps):
            # predict next timestep
            outputs = self(
                x,
                mesh_pos=mesh_pos,
                grid_pos=grid_pos,
                query_pos=query_pos,
                mesh_to_grid_edges=mesh_to_grid_edges,
                unbatch_idx=unbatch_idx,
                unbatch_select=unbatch_select,
                timestep=timestep,
                velocity=velocity,
            )
            x_hat = outputs["x_hat"]
            x_hats.append(x_hat)
            # shift last prediction into history
            x = torch.concat([x[:, x_hat.size(1):], x_hat], dim=1)
            # increase timestep
            timestep.add_(1)
        # num_rollout_timesteps * (batch_size * num_points, num_channels)
        # -> (batch_size * num_points, num_channels, num_rollout_timesteps)
        return torch.stack(x_hats, dim=2)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/composite/rans_interpolated_model.py | src/models/composite/rans_interpolated_model.py | from models import model_from_kwargs
from models.base.composite_model_base import CompositeModelBase
from utils.factory import create
class RansInterpolatedModel(CompositeModelBase):
    """Latent propagation + decoder for RANS data interpolated onto a regular grid."""

    def __init__(
            self,
            latent,
            decoder,
            grid_resolution=None,
            **kwargs,
    ):
        super().__init__(**kwargs)
        shared_kwargs = {
            "update_counter": self.update_counter,
            "path_provider": self.path_provider,
            "dynamic_ctx": self.dynamic_ctx,
            "static_ctx": self.static_ctx,
            "data_container": self.data_container,
        }
        # default to the dataset's grid resolution and expose it via the static context
        grid_resolution = grid_resolution or self.data_container.get_dataset().grid_resolution
        self.static_ctx["grid_resolution"] = grid_resolution
        self.static_ctx["ndim"] = len(grid_resolution)
        # latent propagation model
        self.latent = create(
            latent,
            model_from_kwargs,
            input_shape=self.input_shape,
            **shared_kwargs,
        )
        # decoder from latent space to query positions
        self.decoder = create(
            decoder,
            model_from_kwargs,
            input_shape=self.latent.output_shape,
            output_shape=self.output_shape,
            **shared_kwargs,
        )

    @property
    def submodels(self):
        return {"latent": self.latent, "decoder": self.decoder}

    # noinspection PyMethodOverriding
    def forward(self, x, query_pos):
        # propagate in latent space, then decode at the query positions
        propagated = self.latent(x)
        return {"x_hat": self.decoder(propagated, query_pos=query_pos)}
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/composite/rans_gino_encdec_sdf_model.py | src/models/composite/rans_gino_encdec_sdf_model.py | from models import model_from_kwargs
from models.base.composite_model_base import CompositeModelBase
from utils.factory import create
class RansGinoEncdecSdfModel(CompositeModelBase):
    """GINO-style encoder/latent/decoder pipeline for RANS data with SDF inputs."""

    def __init__(
            self,
            encoder,
            latent,
            decoder,
            **kwargs,
    ):
        super().__init__(**kwargs)
        shared_kwargs = {
            "update_counter": self.update_counter,
            "path_provider": self.path_provider,
            "dynamic_ctx": self.dynamic_ctx,
            "static_ctx": self.static_ctx,
            "data_container": self.data_container,
        }
        # mesh/SDF -> grid encoder
        self.encoder = create(
            encoder,
            model_from_kwargs,
            input_shape=self.input_shape,
            **shared_kwargs,
        )
        # latent propagation on top of the encoder output
        self.latent = create(
            latent,
            model_from_kwargs,
            input_shape=self.encoder.output_shape,
            **shared_kwargs,
        )
        # latent -> query-position decoder
        self.decoder = create(
            decoder,
            model_from_kwargs,
            input_shape=self.latent.output_shape,
            output_shape=self.output_shape,
            **shared_kwargs,
        )

    @property
    def submodels(self):
        return {
            "encoder": self.encoder,
            "latent": self.latent,
            "decoder": self.decoder,
        }

    # noinspection PyMethodOverriding
    def forward(self, mesh_pos, sdf, grid_pos, query_pos, mesh_to_grid_edges, grid_to_query_edges):
        # encode mesh + SDF onto the grid
        encoded = self.encoder(
            mesh_pos=mesh_pos,
            sdf=sdf,
            grid_pos=grid_pos,
            mesh_to_grid_edges=mesh_to_grid_edges,
        )
        # propagate in latent space, then decode at the query positions
        propagated = self.latent(encoded)
        x_hat = self.decoder(propagated, query_pos=query_pos, grid_to_query_edges=grid_to_query_edges)
        return {"x_hat": x_hat}
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/composite/rans_simformer_nognn_sdf_model.py | src/models/composite/rans_simformer_nognn_sdf_model.py | import torch
from models import model_from_kwargs
from models.base.composite_model_base import CompositeModelBase
from utils.factory import create
class RansSimformerNognnSdfModel(CompositeModelBase):
    """Simformer variant without a GNN: separate grid (SDF) and mesh encoders whose
    token sequences are concatenated before latent propagation and decoding."""

    def __init__(
            self,
            grid_encoder,
            mesh_encoder,
            latent,
            decoder,
            **kwargs,
    ):
        super().__init__(**kwargs)
        shared_kwargs = {
            "update_counter": self.update_counter,
            "path_provider": self.path_provider,
            "dynamic_ctx": self.dynamic_ctx,
            "static_ctx": self.static_ctx,
            "data_container": self.data_container,
        }
        # grid encoder consumes the SDF volume
        self.grid_encoder = create(
            grid_encoder,
            model_from_kwargs,
            **shared_kwargs,
        )
        # mesh encoder consumes mesh positions
        self.mesh_encoder = create(
            mesh_encoder,
            model_from_kwargs,
            input_shape=self.input_shape,
            **shared_kwargs,
        )
        # latent propagation (input shape taken from the mesh encoder)
        self.latent = create(
            latent,
            model_from_kwargs,
            input_shape=self.mesh_encoder.output_shape,
            **shared_kwargs,
        )
        # decoder from latent space to query positions
        self.decoder = create(
            decoder,
            model_from_kwargs,
            input_shape=self.latent.output_shape,
            output_shape=self.output_shape,
            **shared_kwargs,
        )

    @property
    def submodels(self):
        return {
            "grid_encoder": self.grid_encoder,
            "mesh_encoder": self.mesh_encoder,
            "latent": self.latent,
            "decoder": self.decoder,
        }

    # noinspection PyMethodOverriding
    def forward(self, mesh_pos, sdf, query_pos, batch_idx, unbatch_idx, unbatch_select):
        # encode SDF grid and mesh positions separately, then concatenate along the token dim
        grid_tokens = self.grid_encoder(sdf)
        mesh_tokens = self.mesh_encoder(mesh_pos=mesh_pos, batch_idx=batch_idx)
        tokens = torch.concat([grid_tokens, mesh_tokens], dim=1)
        # propagate in latent space, then decode at the query positions
        propagated = self.latent(tokens)
        x_hat = self.decoder(
            propagated,
            query_pos=query_pos,
            unbatch_idx=unbatch_idx,
            unbatch_select=unbatch_select,
        )
        return {"x_hat": x_hat}
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/composite/rans_baseline_model.py | src/models/composite/rans_baseline_model.py | from models import model_from_kwargs
from models.base.composite_model_base import CompositeModelBase
from utils.factory import create
class RansBaselineModel(CompositeModelBase):
    """Baseline encoder/latent/decoder composite for RANS data.

    NOTE(review): ``encoder`` defaults to None and ``submodels`` guards against a
    missing encoder, but ``__init__`` (``self.encoder.output_shape``) and
    ``forward`` (``self.encoder(...)``) dereference it unconditionally — passing
    encoder=None would crash; confirm whether that default is ever exercised.
    """

    def __init__(
            self,
            latent,
            decoder,
            encoder=None,
            **kwargs,
    ):
        super().__init__(**kwargs)
        # kwargs shared by all submodel factory calls
        common_kwargs = dict(
            update_counter=self.update_counter,
            path_provider=self.path_provider,
            dynamic_ctx=self.dynamic_ctx,
            static_ctx=self.static_ctx,
            data_container=self.data_container,
        )
        # encoder
        self.encoder = create(
            encoder,
            model_from_kwargs,
            input_shape=self.input_shape,
            **common_kwargs,
        )
        # latent
        self.latent = create(
            latent,
            model_from_kwargs,
            input_shape=self.encoder.output_shape,
            **common_kwargs,
        )
        # decoder
        self.decoder = create(
            decoder,
            model_from_kwargs,
            **common_kwargs,
            input_shape=self.latent.output_shape,
            output_shape=self.output_shape,
        )

    @property
    def submodels(self):
        # encoder entry is omitted when no encoder was created
        return dict(
            **(dict(encoder=self.encoder) if self.encoder is not None else {}),
            latent=self.latent,
            decoder=self.decoder,
        )

    # noinspection PyMethodOverriding
    def forward(self, mesh_pos, grid_pos, query_pos, mesh_to_grid_edges, grid_to_query_edges):
        """Encode mesh onto the grid, propagate in latent space, decode at query positions."""
        outputs = {}
        # encode data
        encoded = self.encoder(
            mesh_pos=mesh_pos,
            grid_pos=grid_pos,
            mesh_to_grid_edges=mesh_to_grid_edges,
        )
        # propagate
        propagated = self.latent(encoded)
        # decode
        x_hat = self.decoder(propagated, query_pos=query_pos, grid_to_query_edges=grid_to_query_edges)
        outputs["x_hat"] = x_hat
        return outputs
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/composite/cfd_simformer_model.py | src/models/composite/cfd_simformer_model.py | import einops
import torch
import torch.nn.functional as F
from models import model_from_kwargs
from models.base.composite_model_base import CompositeModelBase
from utils.factory import create
from utils.amp_utils import NoopContext
class CfdSimformerModel(CompositeModelBase):
    """Simformer composite for CFD timestepping: conditioner + encoder + latent + decoder.

    The decoder can be forced to fp32 (``force_decoder_fp32``) even under autocast.
    ``geometry_encoder`` (static tokens) is kept for checkpoint compatibility but
    deprecated — forward raises if one is configured.
    """

    def __init__(
            self,
            encoder,
            latent,
            decoder,
            force_decoder_fp32=True,
            conditioner=None,
            geometry_encoder=None,
            **kwargs,
    ):
        super().__init__(**kwargs)
        self.force_decoder_fp32 = force_decoder_fp32
        # kwargs shared by all submodel factory calls
        common_kwargs = dict(
            update_counter=self.update_counter,
            path_provider=self.path_provider,
            dynamic_ctx=self.dynamic_ctx,
            static_ctx=self.static_ctx,
            data_container=self.data_container,
        )
        # timestep embed
        self.conditioner = create(
            conditioner,
            model_from_kwargs,
            **common_kwargs,
            input_shape=self.input_shape,
        )
        # desc2latent
        self.geometry_encoder = create(
            geometry_encoder,
            model_from_kwargs,
            **common_kwargs,
        )
        # set static_ctx["num_static_tokens"]
        if self.geometry_encoder is not None:
            assert self.geometry_encoder.output_shape is not None and len(self.geometry_encoder.output_shape) == 2
            self.static_ctx["num_static_tokens"] = self.geometry_encoder.output_shape[0]
        else:
            self.static_ctx["num_static_tokens"] = 0
        # set static_ctx["dim"] (priority: conditioner dim > geometry encoder dim > latent yaml dim)
        if self.conditioner is not None:
            self.static_ctx["dim"] = self.conditioner.dim
        elif self.geometry_encoder is not None:
            self.static_ctx["dim"] = self.geometry_encoder.output_shape[1]
        else:
            self.static_ctx["dim"] = latent["kwargs"]["dim"]
        # encoder
        self.encoder = create(
            encoder,
            model_from_kwargs,
            input_shape=self.input_shape,
            **common_kwargs,
        )
        assert self.encoder.output_shape is not None
        # dynamics
        self.latent = create(
            latent,
            model_from_kwargs,
            input_shape=self.encoder.output_shape,
            **common_kwargs,
        )
        # decoder
        self.decoder = create(
            decoder,
            model_from_kwargs,
            **common_kwargs,
            input_shape=self.latent.output_shape,
            output_shape=self.output_shape,
        )

    @property
    def submodels(self):
        # optional submodels are omitted when not configured
        return dict(
            **(dict(conditioner=self.conditioner) if self.conditioner is not None else {}),
            **(dict(geometry_encoder=self.geometry_encoder) if self.geometry_encoder is not None else {}),
            encoder=self.encoder,
            latent=self.latent,
            decoder=self.decoder,
        )

    # noinspection PyMethodOverriding
    def forward(
            self,
            x,
            geometry2d,
            timestep,
            velocity,
            mesh_pos,
            query_pos,
            mesh_edges,
            batch_idx,
            unbatch_idx,
            unbatch_select,
            target=None,
            detach_reconstructions=True,
            reconstruct_prev_x=False,
            reconstruct_dynamics=False,
    ):
        """Single-step prediction with optional reconstruction auxiliaries.

        Returns a dict with ``prev_dynamics``, ``dynamics``, ``x_hat`` and, when
        requested, ``dynamics_hat`` / ``prev_x_hat``.
        """
        outputs = {}
        # encode timestep t
        if self.conditioner is not None:
            condition = self.conditioner(timestep=timestep, velocity=velocity)
        else:
            condition = None
        # encode geometry
        if self.geometry_encoder is not None:
            static_tokens = self.geometry_encoder(geometry2d)
            outputs["static_tokens"] = static_tokens
            raise NotImplementedError("static tokens are deprecated")
        else:
            static_tokens = None
        # encode data ((x_{t-2}, x_{t-1} -> dynamic_{t-1})
        prev_dynamics = self.encoder(
            x,
            mesh_pos=mesh_pos,
            mesh_edges=mesh_edges,
            batch_idx=batch_idx,
            condition=condition,
            static_tokens=static_tokens,
        )
        outputs["prev_dynamics"] = prev_dynamics
        # predict current latent (dynamic_{t-1} -> dynamic_t)
        dynamics = self.latent(
            prev_dynamics,
            condition=condition,
            static_tokens=static_tokens,
        )
        outputs["dynamics"] = dynamics
        # decode next_latent to next_data (dynamic_t -> x_t)
        if self.force_decoder_fp32:
            # NOTE(review): condition.float() assumes a conditioner is configured — confirm
            with torch.autocast(device_type=str(dynamics.device).split(":")[0], enabled=False):
                x_hat = self.decoder(
                    dynamics.float(),
                    query_pos=query_pos.float(),
                    unbatch_idx=unbatch_idx,
                    unbatch_select=unbatch_select,
                    condition=condition.float(),
                )
        else:
            x_hat = self.decoder(
                dynamics,
                query_pos=query_pos,
                unbatch_idx=unbatch_idx,
                unbatch_select=unbatch_select,
                condition=condition,
            )
        outputs["x_hat"] = x_hat
        # reconstruct dynamics_t from (x_{t-1}, \hat{x}_t)
        if reconstruct_dynamics:
            # calculate t+1 (clamped to the last valid timestep)
            next_timestep = torch.clamp_max(timestep + 1, max=self.conditioner.num_total_timesteps - 1)
            next_condition = self.conditioner(timestep=next_timestep, velocity=velocity)
            # reconstruct dynamics_t
            num_output_channels = x_hat.size(1)
            if target is None:
                # use prediction as encoder input for reconstruction
                # this could lead to instabilities if the decoder predicts badly incorrect values
                x_hat_or_gt = x_hat
                if detach_reconstructions:
                    x_hat_or_gt = x_hat_or_gt.detach()
            else:
                x_hat_or_gt = target
            dynamics_hat = self.encoder(
                torch.concat([x[:, num_output_channels:], x_hat_or_gt], dim=1),
                mesh_pos=mesh_pos,
                mesh_edges=mesh_edges,
                batch_idx=batch_idx,
                condition=next_condition,
            )
            outputs["dynamics_hat"] = dynamics_hat
        # reconstruct x_{t-1} from dynamic_{t-1}
        if reconstruct_prev_x:
            # calculate t-1 (clamped at 0 via relu)
            prev_timestep = F.relu(timestep - 1)
            prev_condition = self.conditioner(timestep=prev_timestep, velocity=velocity)
            # reconstruct prev_x_hat
            if self.force_decoder_fp32:
                with torch.autocast(device_type=str(x.device).split(":")[0], enabled=False):
                    prev_x_hat = self.decoder(
                        prev_dynamics.detach().float() if detach_reconstructions else prev_dynamics.float(),
                        query_pos=query_pos.float(),
                        unbatch_idx=unbatch_idx,
                        unbatch_select=unbatch_select,
                        condition=prev_condition.float(),
                    )
            else:
                prev_x_hat = self.decoder(
                    prev_dynamics.detach() if detach_reconstructions else prev_dynamics,
                    query_pos=query_pos,
                    unbatch_idx=unbatch_idx,
                    unbatch_select=unbatch_select,
                    condition=prev_condition,
                )
            outputs["prev_x_hat"] = prev_x_hat
        return outputs

    @torch.no_grad()
    def rollout(
            self,
            x,
            geometry2d,
            velocity,
            mesh_pos,
            query_pos,
            mesh_edges,
            batch_idx,
            unbatch_idx,
            unbatch_select,
            num_rollout_timesteps=None,
            mode="image",
            intermediate_results=True,
            clip=None,
    ):
        """Autoregressive rollout either purely in latent space (``mode="latent"``)
        or by feeding decoded predictions back as input (``mode="image"``)."""
        # check num_rollout_timesteps
        max_timesteps = self.data_container.get_dataset().getdim_timestep()
        num_rollout_timesteps = num_rollout_timesteps or max_timesteps
        assert 0 < num_rollout_timesteps <= max_timesteps
        # setup
        x_hats = []
        timestep = torch.zeros(1, device=x.device, dtype=torch.long)
        condition = None
        if mode == "latent":
            # rollout via latent (depending on dynamics_transformer, encoder is either not used at all or only for t0)
            # initial forward
            if self.conditioner is not None:
                condition = self.conditioner(timestep=timestep, velocity=velocity)
            # encode mesh
            dynamics = self.encoder(
                x,
                mesh_pos=mesh_pos,
                mesh_edges=mesh_edges,
                batch_idx=batch_idx,
                condition=condition,
            )
            # predict initial latent
            dynamics = self.latent(
                dynamics,
                condition=condition,
            )
            if intermediate_results:
                if self.force_decoder_fp32:
                    with torch.autocast(device_type=str(x.device).split(":")[0], enabled=False):
                        x_hat = self.decoder(
                            dynamics.float(),
                            query_pos=query_pos.float(),
                            unbatch_idx=unbatch_idx,
                            unbatch_select=unbatch_select,
                            condition=condition.float(),
                        )
                else:
                    x_hat = self.decoder(
                        dynamics,
                        query_pos=query_pos,
                        unbatch_idx=unbatch_idx,
                        unbatch_select=unbatch_select,
                        condition=condition,
                    )
                x_hats.append(x_hat)
            # rollout
            for i in range(num_rollout_timesteps - 1):
                # encode timestep
                if self.conditioner is not None:
                    # increase timestep
                    timestep.add_(1)
                    condition = self.conditioner(timestep=timestep, velocity=velocity)
                # predict next latent
                dynamics = self.latent(
                    dynamics,
                    condition=condition,
                )
                # decode every step, or only the last one if intermediates are not needed
                if intermediate_results or i == num_rollout_timesteps - 2:
                    # decode dynamic to data
                    if self.force_decoder_fp32:
                        with torch.autocast(device_type=str(x.device).split(":")[0], enabled=False):
                            x_hat = self.decoder(
                                dynamics.float(),
                                query_pos=query_pos.float(),
                                unbatch_idx=unbatch_idx,
                                unbatch_select=unbatch_select,
                                condition=condition.float(),
                            )
                    else:
                        x_hat = self.decoder(
                            dynamics,
                            query_pos=query_pos,
                            unbatch_idx=unbatch_idx,
                            unbatch_select=unbatch_select,
                            condition=condition,
                        )
                    if clip is not None:
                        x_hat = x_hat.clip(-clip, clip)
                    x_hats.append(x_hat)
        elif mode == "image":
            assert intermediate_results
            # initial forward pass (to get static_tokens)
            outputs = self(
                x,
                geometry2d=geometry2d,
                velocity=velocity,
                timestep=timestep,
                mesh_pos=mesh_pos,
                query_pos=query_pos,
                mesh_edges=mesh_edges,
                batch_idx=batch_idx,
                unbatch_idx=unbatch_idx,
                unbatch_select=unbatch_select,
            )
            x_hat = outputs["x_hat"]
            x_hats.append(x_hat)
            for _ in range(num_rollout_timesteps - 1):
                # shift last prediction into history
                x = torch.concat([x[:, x_hat.size(1):], x_hat], dim=1)
                # increase timestep
                timestep.add_(1)
                # predict next timestep
                outputs = self(
                    x,
                    geometry2d=geometry2d,
                    velocity=velocity,
                    timestep=timestep,
                    mesh_pos=mesh_pos,
                    query_pos=query_pos,
                    mesh_edges=mesh_edges,
                    batch_idx=batch_idx,
                    unbatch_idx=unbatch_idx,
                    unbatch_select=unbatch_select,
                )
                x_hat = outputs["x_hat"]
                if clip is not None:
                    x_hat = x_hat.clip(-clip, clip)
                x_hats.append(x_hat)
        else:
            raise NotImplementedError
        if not intermediate_results:
            assert len(x_hats) == 1
        # num_rollout_timesteps * (batch_size * num_points, num_channels)
        # -> (batch_size * num_points, num_channels, num_rollout_timesteps)
        return torch.stack(x_hats, dim=2)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/composite/__init__.py | src/models/composite/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/composite/cfd_interpolated_model.py | src/models/composite/cfd_interpolated_model.py | import scipy
import einops
import torch
from models import model_from_kwargs
from models.base.composite_model_base import CompositeModelBase
from utils.factory import create
class CfdInterpolatedModel(CompositeModelBase):
    """CFD model operating on data interpolated to a regular grid (no mesh encoder).

    Rollout interpolates each point-cloud prediction back onto the grid with scipy
    before feeding it into the next step.
    """

    def __init__(
            self,
            latent,
            decoder,
            conditioner=None,
            **kwargs,
    ):
        super().__init__(**kwargs)
        # kwargs shared by all submodel factory calls
        common_kwargs = dict(
            update_counter=self.update_counter,
            path_provider=self.path_provider,
            dynamic_ctx=self.dynamic_ctx,
            static_ctx=self.static_ctx,
            data_container=self.data_container,
        )
        # expose the dataset's grid resolution via the static context
        self.static_ctx["grid_resolution"] = self.data_container.get_dataset().grid_resolution
        self.static_ctx["ndim"] = len(self.data_container.get_dataset().grid_resolution)
        # conditioner
        self.conditioner = create(
            conditioner,
            model_from_kwargs,
            **common_kwargs,
        )
        # latent
        self.latent = create(
            latent,
            model_from_kwargs,
            input_shape=self.input_shape,
            **common_kwargs,
        )
        # decoder
        self.decoder = create(
            decoder,
            model_from_kwargs,
            **common_kwargs,
            input_shape=self.latent.output_shape,
            output_shape=self.output_shape,
        )

    @property
    def submodels(self):
        # conditioner entry is omitted when no conditioner was configured
        return dict(
            **(dict(conditioner=self.conditioner) if self.conditioner is not None else {}),
            latent=self.latent,
            decoder=self.decoder,
        )

    # noinspection PyMethodOverriding
    def forward(self, x, query_pos, timestep=None, velocity=None):
        """Single-step prediction; returns ``{"x_hat": ...}``."""
        outputs = {}
        if self.conditioner is not None:
            condition = self.conditioner(timestep=timestep, velocity=velocity)
        else:
            condition = None
        # predict current latent (dynamic_{t-1} -> dynamic_t)
        dynamics = self.latent(x, condition=condition)
        # decode next_latent to next_data (dynamic_t -> x_t)
        x_hat = self.decoder(dynamics, query_pos=query_pos)
        outputs["x_hat"] = x_hat
        return outputs

    @torch.no_grad()
    def rollout(self, x, query_pos, velocity=None, num_rollout_timesteps=None):
        """Autoregressive rollout; each prediction is re-gridded via scipy griddata."""
        # check num_rollout_timesteps
        max_timesteps = self.data_container.get_dataset().getdim_timestep()
        num_rollout_timesteps = num_rollout_timesteps or max_timesteps
        assert 0 < num_rollout_timesteps <= max_timesteps
        # create interpolation grid
        dataset = self.data_container.get_dataset()
        x_linspace = torch.linspace(0, dataset.max_x_pos, dataset.grid_resolution[1])
        y_linspace = torch.linspace(0, dataset.max_y_pos, dataset.grid_resolution[0])
        grid_pos = torch.meshgrid(x_linspace, y_linspace, indexing="xy")
        #
        x_hats = []
        timestep = torch.zeros(1, device=x.device, dtype=torch.long)
        for _ in range(num_rollout_timesteps):
            # predict next timestep
            outputs = self(
                x,
                query_pos=query_pos,
                timestep=timestep,
                velocity=velocity,
            )
            x_hat = outputs["x_hat"]
            x_hats.append(x_hat)
            # interpolate back to image for input in next timestep
            x_hat = einops.rearrange(
                x_hat,
                "(batch_size num_query_pos) dim -> batch_size num_query_pos dim",
                batch_size=len(query_pos),
            )
            # per-sample linear re-gridding on CPU; outside points are filled with 0
            interpolated = []
            for i in range(len(x_hat)):
                grid = torch.from_numpy(
                    scipy.interpolate.griddata(
                        query_pos[i].cpu().unbind(1),
                        x_hat[i].cpu(),
                        grid_pos,
                        method="linear",
                        fill_value=0.,
                    ),
                ).float()
                interpolated.append(grid)
            x_hat = torch.stack(interpolated).to(x.device)
            # shift last prediction into history
            x = torch.concat([x[..., x_hat.size(-1):], x_hat], dim=-1)
            # increase timestep
            timestep.add_(1)
        # num_rollout_timesteps * (batch_size * num_points, num_channels)
        # -> (batch_size * num_points, num_channels, num_rollout_timesteps)
        return torch.stack(x_hats, dim=2)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/composite/cfd_baseline_model.py | src/models/composite/cfd_baseline_model.py | import torch
from models import model_from_kwargs
from models.base.composite_model_base import CompositeModelBase
from utils.factory import create
class CfdBaselineModel(CompositeModelBase):
    """Composite CFD surrogate: encoder -> latent propagator -> decoder.

    The encoder maps mesh data onto a regular grid representation, the latent model
    advances the dynamics by one timestep, and the decoder maps the latent state to
    values at query positions. An optional conditioner embeds timestep/velocity.

    Args:
        encoder: kwargs/ctor for the encoder submodel.
        latent: kwargs/ctor for the latent (dynamics) submodel.
        decoder: kwargs/ctor for the decoder submodel.
        conditioner: optional kwargs/ctor for a timestep/velocity conditioner.
        force_latent_fp32: if True, run the latent propagator in float32 even when
            the surrounding code uses autocast (numerical stability).
    """

    def __init__(
        self,
        encoder,
        latent,
        decoder,
        conditioner=None,
        force_latent_fp32=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.force_latent_fp32 = force_latent_fp32
        # kwargs shared by all submodels
        common_kwargs = dict(
            update_counter=self.update_counter,
            path_provider=self.path_provider,
            dynamic_ctx=self.dynamic_ctx,
            static_ctx=self.static_ctx,
            data_container=self.data_container,
        )
        # conditioner
        self.conditioner = create(
            conditioner,
            model_from_kwargs,
            **common_kwargs,
        )
        # encoder
        self.encoder = create(
            encoder,
            model_from_kwargs,
            input_shape=self.input_shape,
            **common_kwargs,
        )
        # latent
        self.latent = create(
            latent,
            model_from_kwargs,
            input_shape=self.encoder.output_shape,
            **common_kwargs,
        )
        # decoder
        self.decoder = create(
            decoder,
            model_from_kwargs,
            **common_kwargs,
            input_shape=self.latent.output_shape,
            output_shape=self.output_shape,
        )

    @property
    def submodels(self):
        """Mapping of submodel name -> module (conditioner only if configured)."""
        return dict(
            **(dict(conditioner=self.conditioner) if self.conditioner is not None else {}),
            encoder=self.encoder,
            latent=self.latent,
            decoder=self.decoder,
        )

    # noinspection PyMethodOverriding
    def forward(
        self,
        x,
        mesh_pos,
        grid_pos,
        query_pos,
        mesh_to_grid_edges,
        grid_to_query_edges,
        batch_idx,
        timestep=None,
        velocity=None,
    ):
        """Single-step prediction: encode history, advance latent, decode at query_pos.

        Returns:
            dict with key "x_hat": predicted values at query_pos.
        """
        outputs = {}
        if self.conditioner is not None:
            condition = self.conditioner(timestep=timestep, velocity=velocity)
        else:
            condition = None
        # encode data ((x_{t-2}, x_{t-1} -> dynamic_{t-1})
        prev_dynamics = self.encoder(
            x,
            mesh_pos=mesh_pos,
            grid_pos=grid_pos,
            mesh_to_grid_edges=mesh_to_grid_edges,
            batch_idx=batch_idx,
        )
        # predict current latent (dynamic_{t-1} -> dynamic_t)
        if self.force_latent_fp32:
            # disable autocast so the latent propagator runs in full precision
            with torch.autocast(device_type=str(x.device).split(":")[0], enabled=False):
                prev_dynamics = prev_dynamics.float()
                # BUGFIX: condition is None when no conditioner is configured;
                # calling .float() on None raised an AttributeError
                if condition is not None:
                    condition = condition.float()
                dynamics = self.latent(prev_dynamics, condition=condition)
        else:
            dynamics = self.latent(prev_dynamics, condition=condition)
        # decode next_latent to next_data (dynamic_t -> x_t)
        x_hat = self.decoder(dynamics, grid_pos=grid_pos, query_pos=query_pos, grid_to_query_edges=grid_to_query_edges)
        outputs["x_hat"] = x_hat
        return outputs

    @torch.no_grad()
    def rollout(
        self,
        x,
        mesh_pos,
        grid_pos,
        query_pos,
        mesh_to_grid_edges,
        grid_to_query_edges,
        batch_idx,
        velocity=None,
        num_rollout_timesteps=None,
    ):
        """Autoregressive rollout: repeatedly predict and shift predictions into the history.

        Returns:
            Stacked predictions of shape
            (batch_size * num_points, num_channels, num_rollout_timesteps).
        """
        # check num_rollout_timesteps
        max_timesteps = self.data_container.get_dataset().getdim_timestep()
        num_rollout_timesteps = num_rollout_timesteps or max_timesteps
        assert 0 < num_rollout_timesteps <= max_timesteps
        #
        x_hats = []
        timestep = torch.zeros(1, device=x.device, dtype=torch.long)
        for _ in range(num_rollout_timesteps):
            # predict next timestep
            outputs = self(
                x,
                mesh_pos=mesh_pos,
                grid_pos=grid_pos,
                query_pos=query_pos,
                mesh_to_grid_edges=mesh_to_grid_edges,
                grid_to_query_edges=grid_to_query_edges,
                timestep=timestep,
                velocity=velocity,
                batch_idx=batch_idx,
            )
            x_hat = outputs["x_hat"]
            x_hats.append(x_hat)
            # shift last prediction into history
            x = torch.concat([x[:, x_hat.size(1):], x_hat], dim=1)
            # increase timestep
            timestep.add_(1)
        # num_rollout_timesteps * (batch_size * num_points, num_channels)
        # -> (batch_size * num_points, num_channels, num_rollout_timesteps)
        return torch.stack(x_hats, dim=2)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/composite/lagrangian_simformer_model.py | src/models/composite/lagrangian_simformer_model.py | import einops
import torch
import torch.nn.functional as F
from models import model_from_kwargs
from models.base.composite_model_base import CompositeModelBase
from utils.factory import create
class LagrangianSimformerModel(CompositeModelBase):
    """Composite model for Lagrangian (particle-based) simulations.

    Pipeline: encoder (particle velocities + positions -> latent), latent propagator
    (advance dynamics one step), decoder (latent -> per-particle target, typically
    acceleration or velocity). An optional conditioner embeds the timestep; the
    velocity argument of the conditioner is always zero for Lagrangian data.
    Positions are wrapped with periodic boundary conditions using the dataset box.
    """

    def __init__(
        self,
        encoder,
        latent,
        decoder,
        conditioner=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # kwargs shared by all submodels
        common_kwargs = dict(
            update_counter=self.update_counter,
            path_provider=self.path_provider,
            dynamic_ctx=self.dynamic_ctx,
            static_ctx=self.static_ctx,
            data_container=self.data_container,
        )
        # timestep embed
        self.conditioner = create(
            conditioner,
            model_from_kwargs,
            **common_kwargs,
            input_shape=self.input_shape,
        )
        # set static_ctx["dim"]
        if self.conditioner is not None:
            self.static_ctx["dim"] = self.conditioner.dim
        else:
            self.static_ctx["dim"] = latent["kwargs"]["dim"]
        # encoder
        self.encoder = create(
            encoder,
            model_from_kwargs,
            input_shape=self.input_shape,
            **common_kwargs,
        )
        assert self.encoder.output_shape is not None
        # dynamics
        self.latent = create(
            latent,
            model_from_kwargs,
            input_shape=self.encoder.output_shape,
            **common_kwargs,
        )
        # decoder
        self.decoder = create(
            decoder,
            model_from_kwargs,
            **common_kwargs,
            input_shape=self.latent.output_shape,
            output_shape=self.output_shape,
        )
        # Box for PBC
        self.box = self.data_container.get_dataset().box

    @property
    def submodels(self):
        """Mapping of submodel name -> module (conditioner only if configured)."""
        return dict(
            **(dict(conditioner_encoder=self.conditioner) if self.conditioner is not None else {}),
            encoder=self.encoder,
            latent=self.latent,
            decoder=self.decoder,
        )

    # noinspection PyMethodOverriding
    def forward(
        self,
        x,
        timestep,
        curr_pos,
        curr_pos_decode,
        prev_pos_decode,
        edge_index,
        batch_idx,
        edge_index_target=None,
        target_pos_encode=None,
        perm_batch=None,
        unbatch_idx=None,
        unbatch_select=None,
        reconstruct_prev_target=False,
        encode_target=False,
        predict_velocity=False
    ):
        """Single training step forward pass.

        Always produces "prev_dynamics", "dynamics" and "target"; optionally also
        "prev_target" (decode the pre-propagation latent at prev_pos_decode) and
        "pred_dynamics" (re-encode the integrated prediction, used for latent
        consistency losses).

        If predict_velocity is True the decoder output is interpreted as velocity,
        otherwise as acceleration which is integrated onto the last input velocity.
        """
        outputs = {}
        # encode timestep t
        if self.conditioner is not None:
            # No velocity for lagrangian simulations -> set to 0
            timestep_embed = self.conditioner(timestep=timestep, velocity=torch.zeros_like(timestep))
        else:
            timestep_embed = None
        # encode data (v_{t-T}, ..., v{t} -> dynamic_{t-1})
        prev_dynamics = self.encoder(
            x,
            mesh_pos=curr_pos,
            mesh_edges=edge_index,
            batch_idx=batch_idx,
            condition=timestep_embed
        )
        outputs["prev_dynamics"] = prev_dynamics
        # predict current latent (dynamic_{t-1} -> dynamic_t)
        dynamics = self.latent(
            prev_dynamics,
            condition=timestep_embed
        )
        outputs["dynamics"] = dynamics
        # decode next_latent to next_data (dynamic_t -> target)
        target = self.decoder(
            dynamics,
            query_pos=curr_pos_decode,
            unbatch_idx=unbatch_idx,
            unbatch_select=unbatch_select,
            condition=timestep_embed
        )
        outputs["target"] = target
        # reconstruct a_{t} from dynamic_{t-1}
        if reconstruct_prev_target:
            if self.conditioner is not None:
                prev_timestep_embed = self.conditioner(timestep=timestep-1, velocity=torch.zeros_like(timestep))
            else:
                prev_timestep_embed = None
            # reconstruct prev_target
            prev_target = self.decoder(
                prev_dynamics,
                query_pos=prev_pos_decode,
                unbatch_idx=unbatch_idx,
                unbatch_select=unbatch_select,
                condition=prev_timestep_embed
            )
            outputs["prev_target"] = prev_target
        # Reconstruct input from dynamic
        if encode_target:
            if self.conditioner is not None:
                next_timestep_embed = self.conditioner(timestep=timestep+1, velocity=torch.zeros_like(timestep))
            else:
                next_timestep_embed = None
            # split the flattened input back into (timesteps, dim) per particle
            vels = einops.rearrange(x,
                "bs_times_n_particles (timesteps dim) -> bs_times_n_particles timesteps dim",
                dim=target.shape[1]
            )
            if predict_velocity:
                # decoder output is already the (normalized) next velocity
                current_vel = target[perm_batch]
            else:
                # Get last velocity
                current_vel = vels[:,-1,:]
                # Unnormalize for integration later to get positions
                current_vel = self.data_container.get_dataset().unnormalize_vel(current_vel)
                # Get acceleration -> target only at perm_batch and unnormalized
                a = self.data_container.get_dataset().unnormalize_acc(target[perm_batch])
                # Integrate
                current_vel = current_vel + a
                # Normalize velocity
                current_vel = self.data_container.get_dataset().normalize_vel(current_vel)
            # Add new velocity to input of the encoder
            new_vels = torch.concat((vels[:,1:,:], current_vel.unsqueeze(dim=1)), dim=1)
            new_vels = einops.rearrange(new_vels,
                "bs_times_n_particles timesteps dim -> bs_times_n_particles (timesteps dim)")
            pred_dynamics = self.encoder(
                new_vels,
                mesh_pos=target_pos_encode,
                mesh_edges=edge_index_target,
                batch_idx=batch_idx,
                condition=next_timestep_embed
            )
            outputs["pred_dynamics"] = pred_dynamics
        return outputs

    # noinspection PyMethodOverriding
    def forward_large_t(
        self,
        x,
        timestep,
        curr_pos,
        curr_pos_decode,
        prev_pos_decode,
        edge_index,
        batch_idx,
        edge_index_target=None,
        target_pos_encode=None,
        perm_batch=None,
        unbatch_idx=None,
        unbatch_select=None,
        reconstruct_prev_target=False,
        encode_target=False,
        const_timestep=False
    ):
        """Forward pass where one latent step spans multiple simulation timesteps.

        The latent advances by n_pushforward_timesteps + 1 simulation steps at once;
        the decoder is conditioned on the corresponding future timestep embedding.
        With const_timestep=True, a constant (zero) timestep embedding is used
        throughout.
        """
        outputs = {}
        # encode timestep t
        # No velocity for lagrangian simulations -> set to 0
        if self.conditioner is not None:
            if const_timestep:
                # Constant timestep over all time
                timestep = torch.tensor([0]).to(x.device)
                timestep_embed = self.conditioner(timestep=timestep, velocity=torch.zeros_like(timestep))
                next_timestep_embed = timestep_embed
            else:
                timestep_embed = self.conditioner(timestep=timestep, velocity=torch.zeros_like(timestep))
                next_timestep = timestep + self.data_container.get_dataset().n_pushforward_timesteps + 1
                next_timestep_embed = self.conditioner(timestep=next_timestep, velocity=torch.zeros_like(timestep))
        else:
            timestep_embed = None
            next_timestep_embed = None
        # encode data (v_{t-1}, v{t} -> dynamic_{t-1})
        prev_dynamics = self.encoder(
            x,
            mesh_pos=curr_pos,
            mesh_edges=edge_index,
            batch_idx=batch_idx,
            condition=timestep_embed
        )
        outputs["prev_dynamics"] = prev_dynamics
        # predict current latent (dynamic_{t-1} -> dynamic_t)
        dynamics = self.latent(
            prev_dynamics,
            condition=timestep_embed
        )
        outputs["dynamics"] = dynamics
        # decode next_latent to next_data (dynamic_t -> target)
        target = self.decoder(
            dynamics,
            query_pos=curr_pos_decode,
            unbatch_idx=unbatch_idx,
            unbatch_select=unbatch_select,
            condition=next_timestep_embed
        )
        outputs["target"] = target
        # reconstruct prev_target from dynamic_{t-1}
        if reconstruct_prev_target:
            # reconstruct prev_x_hat
            prev_target = self.decoder(
                prev_dynamics,
                query_pos=prev_pos_decode,
                unbatch_idx=unbatch_idx,
                unbatch_select=unbatch_select,
                condition=timestep_embed
            )
            outputs["prev_target"] = prev_target
        if encode_target:
            # re-encode the prediction for latent consistency losses
            pred_dynamics = self.encoder(
                target[perm_batch],
                mesh_pos=target_pos_encode,
                mesh_edges=edge_index_target,
                batch_idx=batch_idx,
                condition=next_timestep_embed
            )
            outputs["pred_dynamics"] = pred_dynamics
        return outputs

    @torch.no_grad()
    def rollout(
        self,
        x,
        timestep,
        curr_pos,
        edge_index,
        batch_idx,
        unbatch_idx=None,
        unbatch_select=None,
        full_rollout=False,
        rollout_length=20,
        predict_velocity=False
    ):
        """Autoregressive rollout in latent space, integrating positions with PBC.

        With full_rollout=True the encoder is re-run every step on the updated
        velocities/positions; otherwise the latent is propagated directly.
        NOTE(review): in the full_rollout branch the original edge_index is reused
        for all steps even though particle positions change — TODO confirm this is
        intended.

        Returns:
            (all_predictions, all_velocities), each of shape
            (bs, rollout_length, n_particles, dim).
        """
        if self.conditioner is not None:
            timestep_embed = self.conditioner(timestep=timestep, velocity=torch.zeros_like(timestep))
        else:
            timestep_embed = None
        # encode data (v_{t-T}, ..., v{t} -> dynamic_{t-1})
        dynamics = self.encoder(
            x,
            mesh_pos=curr_pos,
            mesh_edges=edge_index,
            batch_idx=batch_idx,
            condition=timestep_embed
        )
        vels = einops.rearrange(x,
            "bs_times_n_particles (timesteps dim) -> bs_times_n_particles timesteps dim",
            dim=curr_pos.shape[1]
        )
        # Get last velocity
        current_vel = vels[:,-1,:]
        # Unnormalize for integration later to get positions
        current_vel = self.data_container.get_dataset().unnormalize_vel(current_vel)
        all_predictions = []
        all_velocities = []
        curr_pos = einops.rearrange(
            curr_pos,
            "(bs n_particles) dim -> bs n_particles dim",
            bs=len(unbatch_select)
        )
        for i in range(rollout_length):
            # predict current latent (dynamic_{t-1} -> dynamic_t)
            dynamics = self.latent(
                dynamics,
                condition=timestep_embed
            )
            # decode next_latent to next_data (dynamic_t -> a_{t+1})
            target = self.decoder(
                dynamics,
                query_pos=curr_pos,
                unbatch_idx=unbatch_idx,
                unbatch_select=unbatch_select,
                condition=timestep_embed
            )
            if predict_velocity:
                current_vel = self.data_container.get_dataset().unnormalize_vel(target)
            else:
                # Unnormalize a_hat to calculate next velocity
                a_hat = self.data_container.get_dataset().unnormalize_acc(target)
                # Calculate new velocity
                current_vel = current_vel + a_hat
            # Unscale curr_pos to be in original scale
            curr_pos = self.data_container.get_dataset().unscale_pos(curr_pos)
            curr_pos = einops.rearrange(
                curr_pos,
                "bs n_particles n_dim -> (bs n_particles) n_dim",
            )
            # Integrate (modulo box size -> periodic boundary conditions)
            curr_pos = (curr_pos + current_vel) % self.box.to(curr_pos.device)
            # New position
            curr_pos = einops.rearrange(
                curr_pos,
                "(bs n_particles) n_dim -> bs n_particles n_dim", bs=len(unbatch_select)
            )
            curr_vel_reshaped = einops.rearrange(
                current_vel,
                "(bs n_particles) n_dim -> bs n_particles n_dim", bs=len(unbatch_select)
            )
            all_predictions.append(curr_pos)
            all_velocities.append(curr_vel_reshaped)
            # Scale new position for decoder
            curr_pos = self.data_container.get_dataset().scale_pos(curr_pos)
            # New timestep embedding
            timestep = timestep + 1
            if self.conditioner is not None:
                timestep_embed = self.conditioner(timestep=timestep, velocity=torch.zeros_like(timestep))
            else:
                timestep_embed = None
            if full_rollout:
                # Normalize current vel to be used as input for the encoder
                current_vel_normalized = self.data_container.get_dataset().normalize_vel(current_vel)
                x = torch.concat([vels[:,1:,:], current_vel_normalized.unsqueeze(1)],dim=1)
                x = einops.rearrange(
                    x,
                    "bs num_input_timesteps num_points -> bs (num_input_timesteps num_points)",
                )
                mesh_pos = einops.rearrange(
                    curr_pos,
                    "bs n_particles dim -> (bs n_particles) dim",
                )
                dynamics = self.encoder(
                    x,
                    mesh_pos=mesh_pos,
                    mesh_edges=edge_index,
                    batch_idx=batch_idx,
                    condition=timestep_embed
                )
        all_predictions = torch.stack(all_predictions, dim=1)
        all_velocities = torch.stack(all_velocities, dim=1)
        return all_predictions, all_velocities

    @torch.no_grad()
    def rollout_large_t(
        self,
        x,
        all_pos,
        timestep,
        edge_index,
        batch_idx,
        unbatch_idx=None,
        unbatch_select=None
    ):
        """Rollout with large latent steps, decoding velocities at ground-truth positions.

        Each latent step spans n_pushforward_timesteps + 1 simulation steps; the
        decoder queries the ground-truth positions from all_pos at the corresponding
        simulation time index. Stops when the trajectory (all_pos) runs out.

        Returns:
            Concatenated velocity predictions with a time dimension.
        """
        pos_idx = self.data_container.get_dataset().n_input_timesteps - 1
        large_t = self.data_container.get_dataset().n_pushforward_timesteps + 1
        # timestep = torch.tensor([pos_idx])
        if self.conditioner is not None:
            if timestep is not None:
                timestep_embed = self.conditioner(timestep=timestep, velocity=torch.zeros_like(timestep))
                const_timestep = False
            else:
                # timestep=None -> use a constant zero timestep embedding
                timestep = torch.tensor([0]).to(x.device)
                timestep_embed = self.conditioner(timestep=timestep, velocity=torch.zeros_like(timestep))
                const_timestep = True
        else:
            timestep_embed = None
        curr_pos = all_pos[:,pos_idx,:,:]
        curr_pos = einops.rearrange(
            curr_pos,
            "bs n_particles dim -> (bs n_particles) dim",
        )
        # encode data
        dynamics = self.encoder(
            x,
            mesh_pos=curr_pos,
            mesh_edges=edge_index,
            batch_idx=batch_idx,
            condition=timestep_embed
        )
        vel_predictions = []
        while True:
            # predict current latent
            dynamics = self.latent(
                dynamics,
                condition=timestep_embed
            )
            # Next position index
            pos_idx = pos_idx + large_t
            # Check if still in trajectory
            if pos_idx >= all_pos.shape[1]:
                break
            curr_pos = all_pos[:,pos_idx,:,:]
            # New timestep embedding
            if self.conditioner is not None and not const_timestep:
                timestep = timestep + large_t
                timestep_embed = self.conditioner(timestep=timestep, velocity=torch.zeros_like(timestep))
            # decode next_latent to next_data (dynamic_t -> a_{t+1})
            v_hat = self.decoder(
                dynamics,
                query_pos=curr_pos,
                unbatch_idx=unbatch_idx,
                unbatch_select=unbatch_select,
                condition=timestep_embed
            )
            # Reshape to get time dim
            v_hat = einops.rearrange(
                v_hat,
                "a (time dim) -> a time dim",
                dim=curr_pos.shape[-1]
            )
            vel_predictions.append(v_hat)
        vel_predictions = torch.concat(vel_predictions, dim=1)
        return vel_predictions
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/composite/cfd_simformer_efficient_reconstruct_model.py | src/models/composite/cfd_simformer_efficient_reconstruct_model.py | import einops
import torch
import torch.nn.functional as F
from models import model_from_kwargs
from models.base.composite_model_base import CompositeModelBase
from utils.factory import create
class CfdSimformerEfficientReconstructModel(CompositeModelBase):
    """Composite CFD simformer with optional latent/decoder reconstruction objectives.

    Pipeline: encoder (mesh data -> latent), latent propagator (advance one step),
    decoder (latent -> values at query positions). Optional submodels: a conditioner
    embedding timestep/velocity and a geometry encoder producing static tokens that
    are injected into every stage.
    """

    def __init__(
        self,
        encoder,
        latent,
        decoder,
        conditioner=None,
        geometry_encoder=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # kwargs shared by all submodels
        common_kwargs = dict(
            update_counter=self.update_counter,
            path_provider=self.path_provider,
            dynamic_ctx=self.dynamic_ctx,
            static_ctx=self.static_ctx,
            data_container=self.data_container,
        )
        # timestep embed
        self.conditioner = create(
            conditioner,
            model_from_kwargs,
            **common_kwargs,
            input_shape=self.input_shape,
        )
        # desc2latent
        self.geometry_encoder = create(
            geometry_encoder,
            model_from_kwargs,
            **common_kwargs,
        )
        # set static_ctx["num_static_tokens"]
        if self.geometry_encoder is not None:
            assert self.geometry_encoder.output_shape is not None and len(self.geometry_encoder.output_shape) == 2
            self.static_ctx["num_static_tokens"] = self.geometry_encoder.output_shape[0]
        else:
            self.static_ctx["num_static_tokens"] = 0
        # set static_ctx["dim"]
        if self.conditioner is not None:
            self.static_ctx["dim"] = self.conditioner.dim
        elif self.geometry_encoder is not None:
            self.static_ctx["dim"] = self.geometry_encoder.output_shape[1]
        else:
            self.static_ctx["dim"] = latent["kwargs"]["dim"]
        # encoder
        self.encoder = create(
            encoder,
            model_from_kwargs,
            input_shape=self.input_shape,
            **common_kwargs,
        )
        assert self.encoder.output_shape is not None
        # dynamics
        self.latent = create(
            latent,
            model_from_kwargs,
            input_shape=self.encoder.output_shape,
            **common_kwargs,
        )
        # decoder
        self.decoder = create(
            decoder,
            model_from_kwargs,
            **common_kwargs,
            input_shape=self.latent.output_shape,
            output_shape=self.output_shape,
        )

    @property
    def submodels(self):
        """Mapping of submodel name -> module (optional submodels only if configured)."""
        return dict(
            **(dict(conditioner=self.conditioner) if self.conditioner is not None else {}),
            **(dict(geometry_encoder=self.geometry_encoder) if self.geometry_encoder is not None else {}),
            encoder=self.encoder,
            latent=self.latent,
            decoder=self.decoder,
        )

    # noinspection PyMethodOverriding
    def forward(
        self,
        x,
        geometry2d,
        timestep,
        velocity,
        mesh_pos,
        query_pos,
        mesh_edges,
        batch_idx,
        unbatch_idx,
        unbatch_select,
        reconstruction_input,
        reconstruction_pos,
        detach_reconstructions=True,
        reconstruct_prev_x=False,
        reconstruct_dynamics=False,
    ):
        """Single training step forward pass.

        Always produces "prev_dynamics", "dynamics" and "x_hat" (plus "static_tokens"
        when a geometry encoder is configured). Optional extras:
        - reconstruct_dynamics: re-encode (shifted history, reconstruction_input) at
          timestep t+1 into "dynamics_hat" (latent consistency target).
          NOTE(review): this branch accesses self.conditioner unconditionally, so it
          requires a configured conditioner — confirm callers guarantee this.
        - reconstruct_prev_x: decode the pre-propagation latent at timestep t-1 into
          "prev_x_hat" (optionally detached via detach_reconstructions).
        """
        outputs = {}
        # encode timestep t
        if self.conditioner is not None:
            condition = self.conditioner(timestep=timestep, velocity=velocity)
        else:
            condition = None
        # encode geometry
        if self.geometry_encoder is not None:
            static_tokens = self.geometry_encoder(geometry2d)
            outputs["static_tokens"] = static_tokens
        else:
            static_tokens = None
        # encode data ((x_{t-2}, x_{t-1} -> dynamic_{t-1})
        prev_dynamics = self.encoder(
            x,
            mesh_pos=mesh_pos,
            mesh_edges=mesh_edges,
            batch_idx=batch_idx,
            condition=condition,
            static_tokens=static_tokens,
        )
        outputs["prev_dynamics"] = prev_dynamics
        # predict current latent (dynamic_{t-1} -> dynamic_t)
        dynamics = self.latent(
            prev_dynamics,
            condition=condition,
            static_tokens=static_tokens,
        )
        outputs["dynamics"] = dynamics
        # decode next_latent to next_data (dynamic_t -> x_t)
        x_hat = self.decoder(
            dynamics,
            query_pos=query_pos,
            unbatch_idx=unbatch_idx,
            unbatch_select=unbatch_select,
            condition=condition,
            static_tokens=static_tokens,
        )
        outputs["x_hat"] = x_hat
        # reconstruct dynamics_t from (x_{t-1}, \hat{x}_t)
        if reconstruct_dynamics:
            # calculate t+1 (clamped to the last valid timestep)
            next_timestep = torch.clamp_max(timestep + 1, max=self.conditioner.num_total_timesteps - 1)
            next_condition = self.conditioner(timestep=next_timestep, velocity=velocity)
            # reconstruct dynamics_t
            num_output_channels = x_hat.size(1)
            dynamics_hat = self.encoder(
                torch.concat([x[:, num_output_channels:], reconstruction_input], dim=1),
                mesh_pos=mesh_pos,
                mesh_edges=mesh_edges,
                batch_idx=batch_idx,
                condition=next_condition,
                static_tokens=static_tokens,
            )
            outputs["dynamics_hat"] = dynamics_hat
        # reconstruct x_{t-1} from dynamic_{t-1}
        if reconstruct_prev_x:
            # calculate t-1 (relu clamps at 0)
            prev_timestep = F.relu(timestep - 1)
            prev_condition = self.conditioner(timestep=prev_timestep, velocity=velocity)
            # reconstruct prev_x_hat
            prev_x_hat = self.decoder(
                prev_dynamics.detach() if detach_reconstructions else prev_dynamics,
                query_pos=query_pos,
                unbatch_idx=unbatch_idx,
                unbatch_select=unbatch_select,
                condition=prev_condition,
                static_tokens=static_tokens,
            )
            outputs["prev_x_hat"] = prev_x_hat
        return outputs

    @torch.no_grad()
    def rollout(
        self,
        x,
        geometry2d,
        velocity,
        mesh_pos,
        query_pos,
        mesh_edges,
        batch_idx,
        unbatch_idx,
        unbatch_select,
        num_rollout_timesteps=None,
        mode="image",
        clip=None,
    ):
        """Autoregressive rollout in either "latent" or "image" space.

        mode="latent": propagate the latent directly and only decode per step.
        mode="image": decode each step and feed the prediction back through the
        full encoder like during training. Optional clip symmetrically clamps
        predictions to [-clip, clip].

        Returns:
            Stacked predictions of shape
            (batch_size * num_points, num_channels, num_rollout_timesteps).
        """
        # check num_rollout_timesteps
        max_timesteps = self.data_container.get_dataset().getdim_timestep()
        num_rollout_timesteps = num_rollout_timesteps or max_timesteps
        assert 0 < num_rollout_timesteps <= max_timesteps
        # setup
        x_hats = []
        timestep = torch.zeros(1, device=x.device, dtype=torch.long)
        condition = None
        # initial forward pass (to get static_tokens)
        outputs = self(
            x,
            geometry2d=geometry2d,
            velocity=velocity,
            timestep=timestep,
            mesh_pos=mesh_pos,
            query_pos=query_pos,
            mesh_edges=mesh_edges,
            batch_idx=batch_idx,
            unbatch_idx=unbatch_idx,
            unbatch_select=unbatch_select,
        )
        x_hat = outputs["x_hat"]
        x_hats.append(x_hat)
        static_tokens = outputs.get("static_tokens", None)
        if mode == "latent":
            # rollout via latent (depending on dynamics_transformer, encoder is either not used at all or only for t0)
            dynamics = outputs["dynamics"]
            for _ in range(num_rollout_timesteps - 1):
                # encode timestep
                if self.conditioner is not None:
                    # increase timestep
                    timestep.add_(1)
                    condition = self.conditioner(timestep=timestep, velocity=velocity)
                # predict next latent
                dynamics = self.latent(
                    dynamics,
                    condition=condition,
                    static_tokens=static_tokens,
                )
                # decode dynamic to data
                x_hat = self.decoder(
                    dynamics,
                    query_pos=query_pos,
                    unbatch_idx=unbatch_idx,
                    unbatch_select=unbatch_select,
                    condition=condition,
                    static_tokens=static_tokens,
                )
                if clip is not None:
                    x_hat = x_hat.clip(-clip, clip)
                x_hats.append(x_hat)
        elif mode == "image":
            for _ in range(num_rollout_timesteps - 1):
                # shift last prediction into history
                x = torch.concat([x[:, x_hat.size(1):], x_hat], dim=1)
                # increase timestep
                timestep.add_(1)
                # predict next timestep
                outputs = self(
                    x,
                    geometry2d=geometry2d,
                    velocity=velocity,
                    timestep=timestep,
                    mesh_pos=mesh_pos,
                    query_pos=query_pos,
                    mesh_edges=mesh_edges,
                    batch_idx=batch_idx,
                    unbatch_idx=unbatch_idx,
                    unbatch_select=unbatch_select,
                )
                x_hat = outputs["x_hat"]
                if clip is not None:
                    x_hat = x_hat.clip(-clip, clip)
                x_hats.append(x_hat)
        else:
            raise NotImplementedError
        # num_rollout_timesteps * (batch_size * num_points, num_channels)
        # -> (batch_size * num_points, num_channels, num_rollout_timesteps)
        return torch.stack(x_hats, dim=2)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/composite/rans_simformer_nognn_model.py | src/models/composite/rans_simformer_nognn_model.py | from models import model_from_kwargs
from models.base.composite_model_base import CompositeModelBase
from utils.factory import create
class RansSimformerNognnModel(CompositeModelBase):
    """Encoder -> latent -> decoder composite for RANS surrogates without a GNN stage.

    The encoder consumes only mesh positions, the latent model propagates the
    encoded tokens, and the decoder produces values at the query positions.
    """

    def __init__(
        self,
        encoder,
        latent,
        decoder,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # kwargs every submodel receives
        shared = {
            "update_counter": self.update_counter,
            "path_provider": self.path_provider,
            "dynamic_ctx": self.dynamic_ctx,
            "static_ctx": self.static_ctx,
            "data_container": self.data_container,
        }
        # instantiate the pipeline; each stage's input shape is the previous stage's output shape
        self.encoder = create(
            encoder,
            model_from_kwargs,
            input_shape=self.input_shape,
            **shared,
        )
        self.latent = create(
            latent,
            model_from_kwargs,
            input_shape=self.encoder.output_shape,
            **shared,
        )
        self.decoder = create(
            decoder,
            model_from_kwargs,
            input_shape=self.latent.output_shape,
            output_shape=self.output_shape,
            **shared,
        )

    @property
    def submodels(self):
        """Mapping of submodel name -> module."""
        return {
            "encoder": self.encoder,
            "latent": self.latent,
            "decoder": self.decoder,
        }

    # noinspection PyMethodOverriding
    def forward(self, mesh_pos, query_pos, batch_idx, unbatch_idx, unbatch_select):
        """Encode mesh positions, propagate the latent, decode at query positions.

        Returns:
            dict with key "x_hat": predicted values at query_pos.
        """
        latent_tokens = self.latent(self.encoder(mesh_pos=mesh_pos, batch_idx=batch_idx))
        prediction = self.decoder(
            latent_tokens,
            query_pos=query_pos,
            unbatch_idx=unbatch_idx,
            unbatch_select=unbatch_select,
        )
        return {"x_hat": prediction}
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/base/single_model_base.py | src/models/base/single_model_base.py | import torch
from freezers import freezer_from_kwargs
from initializers.functional import ALL_BATCHNORMS
from optimizers import optim_ctor_from_kwargs
from utils.factory import create, create_collection
from utils.model_utils import get_trainable_param_count
from .model_base import ModelBase
class SingleModelBase(ModelBase):
    """Base class for models trained as a single unit with (at most) one optimizer.

    Handles optimizer construction/stepping, freezing (whole-model via is_frozen or
    partial via freezers), device tracking, and weight/optimizer initialization.
    """

    def __init__(
        self,
        optim_ctor=None,
        freezers=None,
        is_frozen=False,
        update_counter=None,
        **kwargs
    ):
        super().__init__(update_counter=update_counter, **kwargs)
        # tracked manually because torch modules don't expose a device property
        self._device = torch.device("cpu")
        self.optim_ctor = create(
            optim_ctor,
            optim_ctor_from_kwargs,
            instantiate_if_ctor=False,
        )
        self.freezers = create_collection(freezers, freezer_from_kwargs, update_counter=update_counter)
        self.is_frozen = is_frozen
        # lazily computed in is_batch_size_dependent
        self._is_batch_size_dependent = None
        # check parameter combinations
        if self.is_frozen:
            assert self.optim_ctor is None, "model.is_frozen=True but model.optim_ctor is not None"
        # check base methods were not overwritten
        assert type(self).before_accumulation_step == SingleModelBase.before_accumulation_step

    def clear_buffers(self):
        """Hook to release cached buffers; no-op by default."""
        pass

    @property
    def is_batch_size_dependent(self):
        """True if the model contains batchnorm layers (statistics depend on batch size)."""
        if self._is_batch_size_dependent is None:
            for m in self.modules():
                if isinstance(m, ALL_BATCHNORMS):
                    self._is_batch_size_dependent = True
                    break
            else:
                self._is_batch_size_dependent = False
        return self._is_batch_size_dependent

    def forward(self, *args, **kwargs):
        """ all computations for training have to be within the forward method (otherwise DDP doesn't sync grads) """
        raise NotImplementedError

    @property
    def submodels(self):
        """A single model is its own (only) submodel."""
        return {self.name: self}

    def optim_step(self, grad_scaler):
        """Step the optimizer (if any), then run post-update hooks (e.g. EMA)."""
        if self._optim is not None:
            self._optim.step(grad_scaler)
        # after step (e.g. for EMA)
        self.after_update_step()

    def optim_schedule_step(self):
        """Step the optimizer's schedule (if any)."""
        if self._optim is not None:
            self._optim.schedule_step()

    def optim_zero_grad(self, set_to_none=True):
        """Zero the optimizer's gradients (if any)."""
        if self._optim is not None:
            self._optim.zero_grad(set_to_none)

    @property
    def device(self):
        return self._device

    def before_accumulation_step(self):
        """Apply freezers before each accumulation step."""
        for freezer in self.freezers:
            freezer.before_accumulation_step(self)

    @staticmethod
    def get_model_specific_param_group_modifiers():
        """Hook for model-specific optimizer param-group modifiers; none by default."""
        return []

    def initialize_weights(self):
        """Run model-specific init (if overridden), then apply freezing.

        Returns self for chaining.
        """
        # model specific initialization
        # BUGFIX: the previous check compared the bound method
        # self.model_specific_initialization against the plain function
        # ModelBase.model_specific_initialization, which is never equal -> the
        # branch was always taken. Comparing via type(self) yields plain functions
        # on both sides, so it is only true when a subclass actually overrides.
        if type(self).model_specific_initialization is not ModelBase.model_specific_initialization:
            self.logger.info(f"{self.name} applying model specific initialization")
            self.model_specific_initialization()
        else:
            # BUGFIX: Logger objects are not callable; self.logger(...) raised TypeError
            self.logger.info(f"{self.name} no model specific initialization")
        # freeze all parameters (and put into eval mode)
        if self.is_frozen:
            self.eval()
            for param in self.parameters():
                param.requires_grad = False
        # freeze some parameters
        for freezer in self.freezers:
            freezer.after_weight_init(self)
        return self

    def initialize_optim(self, lr_scale_factor=None):
        """Build the optimizer, or freeze the model if it has nothing to train."""
        if self.optim_ctor is not None:
            self.logger.info(f"{self.name} initialize optimizer")
            self._optim = self.optim_ctor(self, update_counter=self.update_counter, lr_scale_factor=lr_scale_factor)
        elif not self.is_frozen:
            if get_trainable_param_count(self) == 0:
                self.logger.info(f"{self.name} has no trainable parameters -> freeze and put into eval mode")
                self.is_frozen = True
                self.eval()
            else:
                raise RuntimeError(f"no optimizer for {self.name} and it's also not frozen")
        else:
            self.logger.info(f"{self.name} is frozen -> no optimizer to initialize")

    def apply_initializers(self):
        """Apply all configured initializers to weights and optimizer; returns self."""
        for initializer in self.initializers:
            initializer.init_weights(self)
            initializer.init_optim(self)
        return self

    def train(self, mode=True):
        # avoid setting mode to train if whole network is frozen
        # this prevents the training behavior of e.g. the following components
        # - Dropout/StochasticDepth dropping during
        # - BatchNorm (in train mode the statistics are tracked)
        if self.is_frozen and mode is True:
            return
        return super().train(mode=mode)

    def to(self, device, *args, **kwargs):
        """Move the model to device, remembering it for the device property."""
        if isinstance(device, str):
            device = torch.device(device)
        assert isinstance(device, torch.device)
        self._device = device
        return super().to(*args, **kwargs, device=device)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/base/composite_model_base.py | src/models/base/composite_model_base.py | import torch
from .model_base import ModelBase
from .single_model_base import SingleModelBase
class CompositeModelBase(ModelBase):
    """Base class for models composed of multiple submodels.

    Delegates optimizer stepping, freezing, device handling, and initialization to
    the submodels returned by the (abstract) submodels property.
    """

    def forward(self, *args, **kwargs):
        """ all computations for training have to be within the forward method (otherwise DDP doesn't sync grads) """
        raise NotImplementedError

    @property
    def submodels(self):
        """Mapping of submodel name -> module; must be provided by subclasses."""
        raise NotImplementedError

    def optim_step(self, grad_scaler):
        """Step each submodel's optimizer, then run post-update hooks (e.g. EMA)."""
        for submodel in self.submodels.values():
            if isinstance(submodel, SingleModelBase) and submodel.optim is None:
                continue
            submodel.optim_step(grad_scaler)
        # after step (e.g. for EMA)
        self.after_update_step()

    def optim_schedule_step(self):
        """Step each submodel's optimizer schedule."""
        for submodel in self.submodels.values():
            if isinstance(submodel, SingleModelBase) and submodel.optim is None:
                continue
            submodel.optim_schedule_step()

    def optim_zero_grad(self, set_to_none=True):
        """Zero each submodel's optimizer gradients."""
        for submodel in self.submodels.values():
            if isinstance(submodel, SingleModelBase) and submodel.optim is None:
                continue
            submodel.optim_zero_grad(set_to_none)

    @property
    def is_frozen(self):
        """A composite is frozen iff all submodels are frozen."""
        return all(m.is_frozen for m in self.submodels.values())

    @is_frozen.setter
    def is_frozen(self, value):
        for m in self.submodels.values():
            m.is_frozen = value

    @property
    def device(self):
        """Common device of all submodels (asserts they agree)."""
        devices = [sub_model.device for sub_model in self.submodels.values()]
        assert all(device == devices[0] for device in devices[1:])
        return devices[0]

    def clear_buffers(self):
        for submodel in self.submodels.values():
            submodel.clear_buffers()

    @property
    def is_batch_size_dependent(self):
        """True if any submodel is batch-size dependent (e.g. contains batchnorm)."""
        return any(m.is_batch_size_dependent for m in self.submodels.values())

    def initialize_weights(self):
        """Initialize all submodels, then run model-specific init if overridden.

        Returns self for chaining.
        """
        for sub_model in self.submodels.values():
            sub_model.initialize_weights()
        # BUGFIX: the previous check compared the bound method
        # self.model_specific_initialization against the plain function
        # ModelBase.model_specific_initialization, which is never equal -> the
        # branch was always taken. Comparing via type(self) yields plain functions
        # on both sides, so it is only true when a subclass actually overrides.
        if type(self).model_specific_initialization is not ModelBase.model_specific_initialization:
            self.logger.info(f"applying model specific initialization")
            self.model_specific_initialization()
        else:
            # BUGFIX: Logger objects are not callable; self.logger(...) raised TypeError
            self.logger.info(f"no model specific initialization")
        return self

    def apply_initializers(self):
        """Apply submodel initializers first, then this model's own initializers."""
        for sub_model in self.submodels.values():
            sub_model.apply_initializers()
        for initializer in self.initializers:
            initializer.init_weights(self)
            initializer.init_optim(self)
        return self

    def initialize_optim(self, lr_scale_factor=None):
        """Initialize all submodel optimizers; eval mode if everything is frozen."""
        for submodel in self.submodels.values():
            submodel.initialize_optim(lr_scale_factor=lr_scale_factor)
        if self.is_frozen:
            self.logger.info(f"{self.name} has only frozen submodels -> put into eval mode")
            self.eval()

    def train(self, mode=True):
        for sub_model in self.submodels.values():
            sub_model.train(mode=mode)
        # avoid setting mode to train if whole network is frozen
        if self.is_frozen and mode is True:
            return
        return super().train(mode=mode)

    def to(self, device, *args, **kwargs):
        """Move all submodels (and self) to device."""
        if isinstance(device, str):
            device = torch.device(device)
        assert isinstance(device, torch.device)
        for sub_model in self.submodels.values():
            sub_model.to(*args, **kwargs, device=device)
        return super().to(*args, **kwargs, device=device)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/base/model_base.py | src/models/base/model_base.py | import logging
import torch.nn
from initializers import initializer_from_kwargs
from providers.path_provider import PathProvider
from utils.factory import create_collection
from utils.naming_util import snake_type_name
from utils.data_container import DataContainer
class ModelBase(torch.nn.Module):
def __init__(
self,
input_shape=None,
name=None,
output_shape=None,
ctor_kwargs=None,
update_counter=None,
path_provider: PathProvider = None,
data_container: DataContainer = None,
initializers=None,
dynamic_ctx: dict = None,
static_ctx: dict = None,
):
super().__init__()
self.logger = logging.getLogger(type(self).__name__)
self.input_shape = input_shape
self.output_shape = output_shape
self.update_counter = update_counter
self.path_provider = path_provider
self.data_container = data_container
self._optim = None
self.initializers = create_collection(initializers, initializer_from_kwargs, path_provider=self.path_provider)
# a context allows extractors to store activations for later pooling (e.g. use features from last 4 layers)
# the context has to be cleared manually after every call (e.g. model.features) to avoid memory leaks
# "self.outputs = outputs or {}" does not work here as an empty dictionary evaluates to false
if dynamic_ctx is None:
self.dynamic_ctx = {}
else:
self.dynamic_ctx = dynamic_ctx
# a static context allows models to propagate information to poolings (e.g. patch_size, num_aux_tokens)
if static_ctx is None:
self.static_ctx = {}
if self.input_shape is not None:
self.static_ctx["input_shape"] = tuple(self.input_shape)
else:
self.static_ctx = static_ctx
if self.input_shape is None and "input_shape" in self.static_ctx:
self.input_shape = self.static_ctx["input_shape"]
# allow setting name of model manually (useful if a standalone model is trained in multiple stages
# then the checkpoint from the previous stage is only the name; if the typename is used for this,
# the checkpoint loader would have to be changed when the model type changes; if the name is set for this case
# it doesn't have to be changed)
self.name = name or snake_type_name(self)
# store the kwargs that are relevant
self.ctor_kwargs = ctor_kwargs
# don't save update_counter in ctor_kwargs
if self.ctor_kwargs is not None and "update_counter" in self.ctor_kwargs:
self.ctor_kwargs.pop("update_counter")
# flag to make sure the model was initialized before wrapping into DDP
# (parameters/buffers are synced in __init__ of DDP, so if model is not initialized before that,
# different ranks will have diffefent parameters because the seed is different for every rank)
# different seeds per rank are needed to avoid stochastic processes being the same across devices
# (e.g. if seeds are equal, all masks for MAE are the same per batch)
self.is_initialized = False
def forward(self, *args, **kwargs):
raise NotImplementedError
@property
def submodels(self):
raise NotImplementedError
def clear_buffers(self):
raise NotImplementedError
@property
def is_batch_size_dependent(self):
raise NotImplementedError
def initialize(self, lr_scale_factor=None):
self.initialize_weights()
self.initialize_optim(lr_scale_factor=lr_scale_factor)
self.apply_initializers()
self.is_initialized = True
return self
def initialize_weights(self):
raise NotImplementedError
def apply_initializers(self):
raise NotImplementedError
def initialize_optim(self, lr_scale_factor=None):
raise NotImplementedError
def model_specific_initialization(self):
pass
@property
def optim(self):
return self._optim
def optim_step(self, grad_scaler):
raise NotImplementedError
def optim_schedule_step(self):
raise NotImplementedError
def optim_zero_grad(self, set_to_none=True):
raise NotImplementedError
@property
def device(self):
raise NotImplementedError
def before_accumulation_step(self):
""" before_accumulation_step hook (e.g. for freezers) """
for model in self.submodels.values():
model.before_accumulation_step()
def after_update_step(self):
""" after_update_step hook (e.g. for EMA) """
pass
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/base/__init__.py | src/models/base/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/latent/fno_gino_model.py | src/models/latent/fno_gino_model.py | import einops
from kappamodules.layers import LinearProjection
from kappautils.param_checking import to_ntuple
from neuralop.models import FNO
from models.base.single_model_base import SingleModelBase
class FnoGinoModel(SingleModelBase):
""" FNO model from GINO """
def __init__(
self,
modes=32,
dim=86,
norm="group_norm",
factorization="tucker",
rank=0.4,
domain_padding=0,
**kwargs,
):
super().__init__(**kwargs)
self.modes = to_ntuple(modes, n=self.static_ctx["ndim"])
self.dim = dim
self.norm = norm
self.factorization = factorization
self.rank = rank
self.domain_padding = domain_padding
# propagate output_shape
seqlen, input_dim = self.input_shape
self.output_shape = (seqlen, dim)
# GINO uses domain_padding=0.125 but this interfers with the requirement that
# torch.fft.rfftn with fp16 only supports powers of two
self.proj_in = LinearProjection(input_dim, dim)
self.fno = FNO(
self.modes,
in_channels=dim,
hidden_channels=dim,
out_channels=dim,
use_mlp=True,
mlp_expansion=1.0,
factorization=factorization,
domain_padding=domain_padding,
norm=norm,
rank=rank,
)
def forward(self, x, condition=None):
assert condition is None
# input projection
x = self.proj_in(x)
# dim last without spatial -> dim first with spatial
x = x.reshape(len(x), *self.static_ctx["grid_resolution"], -1)
x = einops.rearrange(x, "batch_size ... dim -> batch_size dim ...")
# fno
x = self.fno(x)
# dim first with spatial -> dim last without spatial
x = einops.rearrange(x, "batch_size dim ... -> batch_size (...) dim")
return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/latent/fno_cond_model.py | src/models/latent/fno_cond_model.py | from functools import partial
import einops
from models.base.single_model_base import SingleModelBase
from modules.pdearena.conditional_twod_resnet import ResNet, FourierBasicBlock
class FnoCondModel(SingleModelBase):
""" FNO model from PDEArena with conditioning """
def __init__(self, dim, modes, depth=4, **kwargs):
super().__init__(**kwargs)
self.dim = dim
self.modes = modes
self.depth = depth
# propagate output_shape
seqlen, input_dim = self.input_shape
self.output_shape = (seqlen, dim)
# "FNO-128-16m": dim=128
self.model = ResNet(
input_dim=input_dim,
hidden_dim=dim,
cond_dim=self.static_ctx["condition_dim"],
num_blocks=[1] * depth,
block=partial(FourierBasicBlock, modes1=modes, modes2=modes),
norm=False,
)
def forward(self, x, condition):
# dim last without spatial -> dim first with spatial
x = x.reshape(len(x), *self.static_ctx["grid_resolution"], -1)
x = einops.rearrange(x, "batch_size ... dim -> batch_size dim ...")
# model
x = self.model(x, condition)
# dim first with spatial -> dim last without spatial
x = einops.rearrange(x, "batch_size dim ... -> batch_size (...) dim")
return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/latent/transformer_model.py | src/models/latent/transformer_model.py | from functools import partial
import torch
from kappamodules.layers import LinearProjection
from kappamodules.transformer import DitBlock, PrenormBlock
from torch import nn
from models.base.single_model_base import SingleModelBase
class TransformerModel(SingleModelBase):
def __init__(
self,
dim,
depth,
num_attn_heads,
drop_path_rate=0.0,
drop_path_decay=True,
init_weights="xavier_uniform",
init_last_proj_zero=False,
**kwargs,
):
super().__init__(**kwargs)
self.dim = dim
self.depth = depth
self.num_attn_heads = num_attn_heads
self.drop_path_rate = drop_path_rate
self.drop_path_decay = drop_path_decay
self.init_weights = init_weights
self.init_last_proj_zero = init_last_proj_zero
# input/output shape
assert len(self.input_shape) == 2
seqlen, input_dim = self.input_shape
self.output_shape = (seqlen, dim)
self.input_proj = LinearProjection(input_dim, dim, init_weights=init_weights)
# blocks
if "condition_dim" in self.static_ctx:
block_ctor = partial(DitBlock, cond_dim=self.static_ctx["condition_dim"])
else:
block_ctor = PrenormBlock
if drop_path_decay:
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
else:
dpr = [drop_path_rate] * depth
self.blocks = nn.ModuleList([
block_ctor(
dim=dim,
num_heads=num_attn_heads,
drop_path=dpr[i],
init_weights=init_weights,
init_last_proj_zero=init_last_proj_zero,
)
for i in range(self.depth)
])
def forward(self, x, condition=None, static_tokens=None):
assert x.ndim == 3
# concat static tokens
if static_tokens is not None:
x = torch.cat([static_tokens, x], dim=1)
# input projection
x = self.input_proj(x)
# apply blocks
blk_kwargs = dict(cond=condition) if condition is not None else dict()
for blk in self.blocks:
x = blk(x, **blk_kwargs)
# remove static tokens
if static_tokens is not None:
num_static_tokens = static_tokens.size(1)
x = x[:, num_static_tokens:]
return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/latent/unet_gino_model.py | src/models/latent/unet_gino_model.py | import einops
from kappamodules.layers import LinearProjection
from kappautils.param_checking import to_ntuple
from kappamodules.unet import UnetGino
from models.base.single_model_base import SingleModelBase
class UnetGinoModel(SingleModelBase):
""" Unet model from GINO """
def __init__(self, dim, depth=4, **kwargs):
super().__init__(**kwargs)
self.dim = dim
self.depth = depth
# propagate output_shape
seqlen, input_dim = self.input_shape
self.output_shape = (seqlen, dim)
self.unet = UnetGino(
input_dim=input_dim,
hidden_dim=dim,
depth=depth,
)
def forward(self, x, condition=None):
assert condition is None
# dim last without spatial -> dim first with spatial
x = x.reshape(len(x), *self.static_ctx["grid_resolution"], -1)
x = einops.rearrange(x, "batch_size ... dim -> batch_size dim ...")
# unet
x = self.unet(x)
# dim first with spatial -> dim last without spatial
x = einops.rearrange(x, "batch_size dim ... -> batch_size (...) dim")
return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/latent/unet_pdearena_model.py | src/models/latent/unet_pdearena_model.py | import einops
from kappamodules.unet import UnetPdearena
from models.base.single_model_base import SingleModelBase
class UnetPdearenaModel(SingleModelBase):
def __init__(self, dim, **kwargs):
super().__init__(**kwargs)
self.dim = dim
# propagate output shape
seqlen, input_dim = self.input_shape
self.output_shape = (seqlen, dim)
assert self.static_ctx["ndim"] == 2
# Unetmod-64
self.model = UnetPdearena(
hidden_channels=dim,
input_dim=input_dim,
output_dim=dim,
norm=False, # dont use norm because it uses a hardcoded num_groups=8
cond_dim=self.static_ctx.get("condition_dim", None),
)
def forward(self, x, condition=None):
# dim last without spatial -> dim first with spatial
x = x.reshape(len(x), *self.static_ctx["grid_resolution"], -1)
x = einops.rearrange(x, "batch_size ... dim -> batch_size dim ...")
# unet
x = self.model(x, emb=condition)
# dim first with spatial -> dim last without spatial
x = einops.rearrange(x, "batch_size dim ... -> batch_size (...) dim")
return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/latent/unet_denoising_diffusion_model.py | src/models/latent/unet_denoising_diffusion_model.py | import einops
from kappamodules.unet import UnetDenoisingDiffusion
from models.base.single_model_base import SingleModelBase
class UnetDenoisingDiffusionModel(SingleModelBase):
def __init__(self, dim, depth, num_attn_heads=None, **kwargs):
super().__init__(**kwargs)
self.dim = dim
self.depth = depth
self.num_attn_heads = num_attn_heads
# propagate output shape
seqlen, input_dim = self.input_shape
self.output_shape = (seqlen, dim)
self.model = UnetDenoisingDiffusion(
dim=dim,
dim_in=input_dim,
ndim=self.static_ctx["ndim"],
num_heads=num_attn_heads,
depth=depth,
dim_cond=self.static_ctx.get("condition_dim", None),
)
def forward(self, x, condition=None):
# dim last without spatial -> dim first with spatial
x = x.reshape(len(x), *self.static_ctx["grid_resolution"], -1)
x = einops.rearrange(x, "batch_size ... dim -> batch_size dim ...")
# unet
x = self.model(x, cond=condition)
# dim first with spatial -> dim last without spatial
x = einops.rearrange(x, "batch_size dim ... -> batch_size (...) dim")
return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/latent/__init__.py | src/models/latent/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/encoders/cfd_pool_transformer_perceiver.py | src/models/encoders/cfd_pool_transformer_perceiver.py | from functools import partial
import torch
from kappamodules.layers import LinearProjection
from kappamodules.transformer import PerceiverPoolingBlock, PrenormBlock, DitPerceiverPoolingBlock, DitBlock
from torch import nn
from torch_geometric.utils import to_dense_batch
from models.base.single_model_base import SingleModelBase
from modules.gno.cfd_pool import CfdPool
from optimizers.param_group_modifiers.exclude_from_wd_by_name_modifier import ExcludeFromWdByNameModifier
class CfdPoolTransformerPerceiver(SingleModelBase):
def __init__(
self,
gnn_dim,
enc_dim,
perc_dim,
enc_depth,
enc_num_attn_heads,
perc_num_attn_heads,
num_latent_tokens=None,
use_enc_norm=False,
drop_path_rate=0.0,
init_weights="xavier_uniform",
gnn_init_weights=None,
**kwargs,
):
super().__init__(**kwargs)
self.gnn_dim = gnn_dim
self.enc_dim = enc_dim
self.perc_dim = perc_dim
self.enc_depth = enc_depth
self.enc_num_attn_heads = enc_num_attn_heads
self.perc_num_attn_heads = perc_num_attn_heads
self.num_latent_tokens = num_latent_tokens
self.use_enc_norm = use_enc_norm
self.drop_path_rate = drop_path_rate
self.init_weights = init_weights
gnn_init_weights = gnn_init_weights or init_weights
self.gnn_init_weights = gnn_init_weights
# input_shape is (None, input_dim)
_, input_dim = self.input_shape
self.mesh_embed = CfdPool(
input_dim=input_dim,
hidden_dim=gnn_dim,
init_weights=gnn_init_weights,
)
# blocks
self.enc_norm = nn.LayerNorm(gnn_dim, eps=1e-6) if use_enc_norm else nn.Identity()
self.enc_proj = LinearProjection(gnn_dim, enc_dim)
if "condition_dim" in self.static_ctx:
block_ctor = partial(DitBlock, cond_dim=self.static_ctx["condition_dim"])
else:
block_ctor = PrenormBlock
self.blocks = nn.ModuleList([
block_ctor(dim=enc_dim, num_heads=enc_num_attn_heads, init_weights=init_weights, drop_path=drop_path_rate)
for _ in range(enc_depth)
])
# perceiver pooling
self.perc_proj = LinearProjection(enc_dim, perc_dim)
if "condition_dim" in self.static_ctx:
block_ctor = partial(
DitPerceiverPoolingBlock,
perceiver_kwargs=dict(
cond_dim=self.static_ctx["condition_dim"],
init_weights=init_weights,
),
)
else:
block_ctor = partial(
PerceiverPoolingBlock,
perceiver_kwargs=dict(init_weights=init_weights),
)
self.perceiver = block_ctor(
dim=perc_dim,
num_heads=perc_num_attn_heads,
num_query_tokens=num_latent_tokens,
)
# output shape
self.output_shape = (num_latent_tokens, perc_dim)
def get_model_specific_param_group_modifiers(self):
return [ExcludeFromWdByNameModifier(name="perceiver.query")]
def forward(self, x, mesh_pos, mesh_edges, batch_idx, condition=None, static_tokens=None):
# embed mesh
x = self.mesh_embed(x, mesh_pos=mesh_pos, mesh_edges=mesh_edges, batch_idx=batch_idx)
# project static_tokens to encoder dim
# static_tokens = self.static_token_proj(static_tokens)
# concat static tokens
# x = torch.cat([static_tokens, x], dim=1)
# apply blocks
block_kwargs = {}
if condition is not None:
block_kwargs["cond"] = condition
x = self.enc_norm(x)
x = self.enc_proj(x)
for blk in self.blocks:
x = blk(x, **block_kwargs)
# perceiver
x = self.perc_proj(x)
x = self.perceiver(kv=x, **block_kwargs)
return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/encoders/cfd_gino.py | src/models/encoders/cfd_gino.py | import numpy as np
from models.base.single_model_base import SingleModelBase
from modules.gno.cfd_gino_mesh_to_grid import CfdGinoMeshToGrid
class CfdGino(SingleModelBase):
def __init__(self, dim, **kwargs):
super().__init__(**kwargs)
self.dim = dim
self.resolution = self.data_container.get_dataset().grid_resolution
# input_shape is (None, input_dim)
_, input_dim = self.input_shape
self.embed = CfdGinoMeshToGrid(
input_dim=input_dim,
hidden_dim=dim,
resolution=self.resolution,
)
self.static_ctx["grid_resolution"] = self.resolution
self.static_ctx["ndim"] = len(self.resolution)
self.output_shape = (int(np.prod(self.resolution)), self.embed.output_dim)
def forward(self, x, mesh_pos, grid_pos, mesh_to_grid_edges, batch_idx):
return self.embed(x=x, mesh_pos=mesh_pos, grid_pos=grid_pos, mesh_to_grid_edges=mesh_to_grid_edges)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/encoders/cfd_interpolate.py | src/models/encoders/cfd_interpolate.py | import einops
import numpy as np
from torch import nn
from models.base.single_model_base import SingleModelBase
from modules.gno.cfd_interpolate_mesh_to_grid import CfdInterpolateMeshToGrid
class CfdInterpolate(SingleModelBase):
def __init__(self, dim, **kwargs):
super().__init__(**kwargs)
self.dim = dim
self.resolution = self.data_container.get_dataset().grid_resolution
# input_shape is (None, input_dim)
_, input_dim = self.input_shape
self.embed = CfdInterpolateMeshToGrid()
self.proj = nn.Linear(input_dim, dim)
self.static_ctx["grid_resolution"] = self.resolution
self.static_ctx["ndim"] = len(self.resolution)
self.output_shape = (int(np.prod(self.resolution)), dim)
def forward(self, x, mesh_pos, grid_pos, mesh_to_grid_edges, batch_idx):
x = self.embed(x=x, mesh_pos=mesh_pos, grid_pos=grid_pos, batch_idx=batch_idx)
# convert to dense tensor (dim last)
x = x.reshape(-1, *self.resolution, x.size(1))
x = einops.rearrange(x, "batch_size ... dim -> batch_size (...) dim")
x = self.proj(x)
return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/encoders/lagrangian_perceiver.py | src/models/encoders/lagrangian_perceiver.py | import einops
import torch
from kappamodules.layers import ContinuousSincosEmbed
from kappamodules.transformer import PerceiverPoolingBlock, Mlp
from torch_geometric.utils import to_dense_batch
from models.base.single_model_base import SingleModelBase
from optimizers.param_group_modifiers.exclude_from_wd_by_name_modifier import ExcludeFromWdByNameModifier
class LagrangianPerceiver(SingleModelBase):
def __init__(self, dim, num_attn_heads, num_output_tokens, **kwargs):
super().__init__(**kwargs)
self.dim = dim
self.num_attn_heads = num_attn_heads
self.num_output_tokens = num_output_tokens
ndim = self.data_container.get_dataset().metadata["dim"]
self.static_ctx["ndim"] = ndim
# input_embed
self.embed = Mlp(in_dim=self.input_shape[0], hidden_dim=dim, out_dim=dim)
# pos_embed
self.pos_embed = ContinuousSincosEmbed(dim=dim, ndim=ndim)
# perceiver
self.mlp = Mlp(in_dim=dim, hidden_dim=dim * 4)
self.block = PerceiverPoolingBlock(
dim=dim,
num_heads=num_attn_heads,
num_query_tokens=num_output_tokens,
)
# output shape
self.output_shape = (num_output_tokens, dim)
def get_model_specific_param_group_modifiers(self):
return [ExcludeFromWdByNameModifier(name="block.query")]
def forward(self, x, pos, batch_idx):
x = self.embed(x) + self.pos_embed(pos)
x, mask = to_dense_batch(x, batch_idx)
if torch.all(mask):
mask = None
else:
# add dimensions for num_heads and query (keys are masked)
mask = einops.rearrange(mask, "batchsize num_nodes -> batchsize 1 1 num_nodes")
# perceiver
x = self.mlp(x)
x = self.block(kv=x, attn_mask=mask)
return x | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/encoders/rans_gino.py | src/models/encoders/rans_gino.py | import numpy as np
from models.base.single_model_base import SingleModelBase
from modules.gno.rans_gino_mesh_to_grid import RansGinoMeshToGrid
class RansGino(SingleModelBase):
def __init__(self, dim, **kwargs):
super().__init__(**kwargs)
self.dim = dim
self.resolution = self.data_container.get_dataset().grid_resolution
self.embed = RansGinoMeshToGrid(dim=dim, resolution=self.resolution)
self.static_ctx["grid_resolution"] = self.resolution
self.static_ctx["ndim"] = len(self.resolution)
self.output_shape = (int(np.prod(self.resolution)), self.embed.output_dim)
def forward(self, mesh_pos, grid_pos, mesh_to_grid_edges):
return self.embed(mesh_pos=mesh_pos, grid_pos=grid_pos, mesh_to_grid_edges=mesh_to_grid_edges)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/encoders/rans_grid_simple_cnn.py | src/models/encoders/rans_grid_simple_cnn.py | import einops
import torch
from torch import nn
from models.base.single_model_base import SingleModelBase
from optimizers.param_group_modifiers.exclude_from_wd_by_name_modifier import ExcludeFromWdByNameModifier
class RansGridSimpleCnn(SingleModelBase):
def __init__(
self,
dim,
depth=4,
num_poolings=3,
pooling_kernel_sizes=None,
num_groups=8,
add_type_token=False,
**kwargs,
):
super().__init__(**kwargs)
self.dim = dim
self.depth = depth
self.num_poolings = num_poolings
self.pooling_kernel_sizes = pooling_kernel_sizes
self.num_groups = num_groups
self.add_type_token = add_type_token
self.resolution = self.data_container.get_dataset().grid_resolution
self.ndim = len(self.resolution)
assert num_poolings <= depth - 1
assert dim % 2 ** (depth - 1) == 0
assert all(self.resolution[0] == resolution for resolution in self.resolution[1:])
assert self.resolution[0] % (2 ** (depth - 1)) == 0
dim_per_block = [dim // (2 ** (depth - i - 1)) for i in range(depth)]
# sdf + grid_pos
if self.data_container.get_dataset().concat_pos_to_sdf:
input_dim = 4
else:
input_dim = 1
# down path of latent.unet_gino_model
self.blocks = nn.ModuleList()
for i in range(depth):
if i == 0:
# first block has no pooling and goes from input_dim -> dim_per_block[0] -> dim_per_block[1]
block = nn.Sequential(
# pooling
nn.Identity(),
# conv1
nn.GroupNorm(num_groups=1, num_channels=input_dim),
nn.Conv3d(input_dim, dim_per_block[0] // 2, kernel_size=3, padding=1, bias=False),
nn.GELU(),
# conv2
nn.GroupNorm(num_groups=num_groups, num_channels=dim_per_block[0] // 2),
nn.Conv3d(dim_per_block[0] // 2, dim_per_block[0], kernel_size=3, padding=1, bias=False),
nn.GELU(),
)
else:
if depth - num_poolings <= i:
if pooling_kernel_sizes is None:
kernel_size = 2
else:
kernel_size = pooling_kernel_sizes[i - (depth - num_poolings)]
pooling = nn.MaxPool3d(kernel_size=kernel_size, stride=kernel_size)
else:
pooling = nn.Identity()
block = nn.Sequential(
# pooling
pooling,
# conv1
nn.GroupNorm(num_groups=num_groups, num_channels=dim_per_block[i] // 2),
nn.Conv3d(dim_per_block[i] // 2, dim_per_block[i] // 2, kernel_size=3, padding=1, bias=False),
nn.GELU(),
# conv2
nn.GroupNorm(num_groups=num_groups, num_channels=dim_per_block[i] // 2),
nn.Conv3d(dim_per_block[i] // 2, dim_per_block[i], kernel_size=3, padding=1, bias=False),
nn.GELU(),
)
self.blocks.append(block)
if add_type_token:
self.type_token = nn.Parameter(torch.empty(size=(1, 1, dim,)))
else:
self.type_token = None
self.static_ctx["grid_resolution"] = self.resolution
self.static_ctx["ndim"] = self.ndim
self.output_shape = (int(self.resolution[0] // (2 ** (depth - 1)) ** self.ndim), dim)
def model_specific_initialization(self):
if self.add_type_token:
nn.init.trunc_normal_(self.type_token)
def get_model_specific_param_group_modifiers(self):
modifiers = []
if self.add_type_token:
modifiers += [ExcludeFromWdByNameModifier(name="type_token")]
return modifiers
def forward(self, x):
# sdf is passed as dim-last with spatial -> convert to dim-first with spatial
x = einops.rearrange(x, "batch_size height width depth dim -> batch_size dim height width depth")
# embed
for block in self.blocks:
x = block(x)
# flatten to tokens
x = einops.rearrange(x, "batch_size dim height width depth -> batch_size (height width depth) dim")
if self.add_type_token:
x = x + self.type_token
return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/encoders/rans_gnn_transformer_perceiver.py | src/models/encoders/rans_gnn_transformer_perceiver.py | import torch
from functools import partial
import einops
from kappamodules.layers import LinearProjection
from kappamodules.transformer import PerceiverPoolingBlock, PrenormBlock
from kappamodules.vit import DitBlock
from torch import nn
from models.base.single_model_base import SingleModelBase
from modules.gno.rans_posembed_message import RansPosembedMessage
from optimizers.param_group_modifiers.exclude_from_wd_by_name_modifier import ExcludeFromWdByNameModifier
from torch_geometric.utils import to_dense_batch
class RansGnnTransformerPerceiver(SingleModelBase):
def __init__(
self,
gnn_dim,
enc_dim,
perc_dim,
enc_depth,
enc_num_attn_heads,
perc_num_attn_heads,
num_output_tokens,
**kwargs,
):
super().__init__(**kwargs)
self.gnn_dim = gnn_dim
self.enc_dim = enc_dim
self.perc_dim = perc_dim
self.enc_depth = enc_depth
self.enc_num_attn_heads = enc_num_attn_heads
self.perc_num_attn_heads = perc_num_attn_heads
self.num_output_tokens = num_output_tokens
# set ndim
_, ndim = self.input_shape
self.static_ctx["ndim"] = ndim
# gnn
self.gnn = RansPosembedMessage(dim=gnn_dim, ndim=ndim)
# transformer
self.transformer_proj = LinearProjection(gnn_dim, enc_dim)
if "condition_dim" in self.static_ctx:
block_ctor = partial(DitBlock, cond_dim=self.static_ctx["condition_dim"])
else:
block_ctor = PrenormBlock
self.transformer_blocks = nn.ModuleList([
block_ctor(dim=enc_dim, num_heads=enc_num_attn_heads)
for _ in range(enc_depth)
])
# perceiver
self.perceiver_proj = LinearProjection(enc_dim, perc_dim)
self.perceiver_pooling = PerceiverPoolingBlock(
dim=perc_dim,
num_heads=perc_num_attn_heads,
num_query_tokens=num_output_tokens,
)
# output shape
self.output_shape = (num_output_tokens, perc_dim)
def get_model_specific_param_group_modifiers(self):
return [ExcludeFromWdByNameModifier(name="perceiver_pooling.query")]
def forward(self, mesh_pos, mesh_edges, batch_idx):
# gnn
x = self.gnn(mesh_pos=mesh_pos, mesh_edges=mesh_edges)
x, mask = to_dense_batch(x, batch_idx)
if torch.all(mask):
mask = None
else:
# add dimensions for num_heads and query (keys are masked)
mask = einops.rearrange(mask, "batchsize num_nodes -> batchsize 1 1 num_nodes")
# transformer
x = self.transformer_proj(x)
for transformer_block in self.transformer_blocks:
x = transformer_block(x, attn_mask=mask)
# perceiver
x = self.perceiver_proj(x)
x = self.perceiver_pooling(kv=x, attn_mask=mask)
return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/encoders/lagrangian_pool_transformer_perceiver.py | src/models/encoders/lagrangian_pool_transformer_perceiver.py | from functools import partial
import torch
from kappamodules.layers import LinearProjection
from kappamodules.transformer import PerceiverPoolingBlock, PrenormBlock, DitPerceiverPoolingBlock
from kappamodules.vit import DitBlock
from torch import nn
from torch_geometric.utils import to_dense_batch
from models.base.single_model_base import SingleModelBase
from modules.gno.cfd_pool import CfdPool
from modules.gno.cfd_pool_gaussian_sincos_pos import CfdPoolGaussianSincosPos
from optimizers.param_group_modifiers.exclude_from_wd_by_name_modifier import ExcludeFromWdByNameModifier
class LagrangianPoolTransformerPerceiver(SingleModelBase):
def __init__(
self,
gnn_dim,
enc_dim,
perc_dim,
enc_depth,
enc_num_attn_heads,
perc_num_attn_heads,
num_latent_tokens=None,
use_enc_norm=False,
init_weights="xavier_uniform",
gnn_init_weights=None,
positional_std=None,
**kwargs,
):
super().__init__(**kwargs)
self.gnn_dim = gnn_dim
self.enc_dim = enc_dim
self.perc_dim = perc_dim
self.enc_depth = enc_depth
self.enc_num_attn_heads = enc_num_attn_heads
self.perc_num_attn_heads = perc_num_attn_heads
self.num_latent_tokens = num_latent_tokens
self.use_enc_norm = use_enc_norm
self.init_weights = init_weights
gnn_init_weights = gnn_init_weights or init_weights
self.gnn_init_weights = gnn_init_weights
# input_shape is (None, input_dim)
input_dim, _ = self.input_shape
ndim = self.data_container.get_dataset().metadata["dim"]
if positional_std is not None:
self.mesh_embed = CfdPoolGaussianSincosPos(
input_dim=input_dim,
hidden_dim=gnn_dim,
init_weights=gnn_init_weights,
ndim=ndim,
positional_std=positional_std
)
else:
self.mesh_embed = CfdPool(
input_dim=input_dim,
hidden_dim=gnn_dim,
init_weights=gnn_init_weights,
ndim=ndim
)
# blocks
self.enc_norm = nn.LayerNorm(gnn_dim, eps=1e-6) if use_enc_norm else nn.Identity()
self.enc_proj = LinearProjection(gnn_dim, enc_dim)
if "condition_dim" in self.static_ctx:
block_ctor = partial(DitBlock, cond_dim=self.static_ctx["condition_dim"])
else:
block_ctor = PrenormBlock
self.blocks = nn.ModuleList([
block_ctor(dim=enc_dim, num_heads=enc_num_attn_heads, init_weights=init_weights)
for _ in range(enc_depth)
])
# perceiver pooling
self.perc_proj = LinearProjection(enc_dim, perc_dim)
if "condition_dim" in self.static_ctx:
block_ctor = partial(
DitPerceiverPoolingBlock,
perceiver_kwargs=dict(
cond_dim=self.static_ctx["condition_dim"],
init_weights=init_weights,
),
)
else:
block_ctor = partial(
PerceiverPoolingBlock,
perceiver_kwargs=dict(init_weights=init_weights),
)
self.perceiver = block_ctor(
dim=perc_dim,
num_heads=perc_num_attn_heads,
num_query_tokens=num_latent_tokens,
)
# output shape
self.output_shape = (num_latent_tokens, perc_dim)
def get_model_specific_param_group_modifiers(self):
return [ExcludeFromWdByNameModifier(name="perceiver.query")]
def forward(self, x, mesh_pos, mesh_edges, batch_idx, condition=None, static_tokens=None):
# embed mesh
x = self.mesh_embed(x, mesh_pos=mesh_pos, mesh_edges=mesh_edges, batch_idx=batch_idx)
# project static_tokens to encoder dim
# static_tokens = self.static_token_proj(static_tokens)
# concat static tokens
# x = torch.cat([static_tokens, x], dim=1)
# apply blocks
block_kwargs = {}
if condition is not None:
block_kwargs["cond"] = condition
x = self.enc_norm(x)
x = self.enc_proj(x)
for blk in self.blocks:
x = blk(x, **block_kwargs)
# perceiver
x = self.perc_proj(x)
x = self.perceiver(kv=x, **block_kwargs)
return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/encoders/rans_sdf.py | src/models/encoders/rans_sdf.py | import numpy as np
from models.base.single_model_base import SingleModelBase
from modules.gno.rans_gino_mesh_to_grid_sdf import RansGinoMeshToGridSdf
from kappamodules.layers import ContinuousSincosEmbed
from torch import nn
from kappamodules.layers.continuous_sincos_embed import ContinuousSincosEmbed
class RansSdf(SingleModelBase):
    """Embeds a dense SDF grid by summing an MLP embedding of the scalar SDF
    value with a continuous sincos embedding of the grid positions."""

    def __init__(self, dim, **kwargs):
        """dim: embedding dimension per grid point."""
        super().__init__(**kwargs)
        self.dim = dim
        # grid resolution (one entry per spatial dim), taken from the dataset
        self.resolution = self.data_container.get_dataset().grid_resolution
        # 2-layer MLP that embeds the scalar SDF value
        self.sdf_embed = nn.Sequential(
            nn.Linear(1, dim),
            nn.GELU(),
            nn.Linear(dim, dim),
        )
        self.pos_embed = ContinuousSincosEmbed(dim=dim, ndim=len(self.resolution))
        # publish grid geometry for downstream models
        self.static_ctx["grid_resolution"] = self.resolution
        self.static_ctx["ndim"] = len(self.resolution)
        self.output_shape = (int(np.prod(self.resolution)), dim)

    def forward(self, sdf, grid_pos):
        """sdf: per-grid-point signed distance values, last dim must be 1;
        grid_pos: grid point coordinates for the positional embedding."""
        assert sdf.size(-1) == 1
        # embed SDF values and positions, then sum the two embeddings
        sdf_embed = self.sdf_embed(sdf.view(-1, 1))
        grid_pos_embed = self.pos_embed(grid_pos)
        embed = sdf_embed + grid_pos_embed
        # reshape to dim-last WITH the spatial resolution:
        # (batch_size, *grid_resolution, dim)
        embed = embed.view(len(sdf), *self.resolution, -1)
        return embed
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/encoders/lagrangian_gnn_pool_transformer_perceiver.py | src/models/encoders/lagrangian_gnn_pool_transformer_perceiver.py | from functools import partial
import einops
import torch
from kappamodules.init import init_xavier_uniform_zero_bias
from kappamodules.transformer import PerceiverPoolingBlock, PrenormBlock
from kappamodules.layers import LinearProjection
from kappamodules.vit import DitBlock
from torch import nn
from torch_geometric.utils import to_dense_batch
from models.base.single_model_base import SingleModelBase
from optimizers.param_group_modifiers.exclude_from_wd_by_name_modifier import ExcludeFromWdByNameModifier
from modules.gno.cfd_gnn_pool import CfdGnnPool
class LagrangianGnnPoolTransformerPerceiver(SingleModelBase):
    """Encoder for Lagrangian (particle) data: a GNN pools the particle graph
    down to num_supernodes nodes, a transformer processes the supernodes, and
    a perceiver pools them into num_latent_tokens latent tokens."""

    def __init__(
        self,
        gnn_dim,  # hidden dim of the pooling GNN
        enc_dim,  # dim of the transformer encoder blocks
        perc_dim,  # dim of the perceiver pooling stage
        gnn_depth,  # number of GNN layers
        enc_depth,  # number of transformer blocks
        enc_num_attn_heads,
        perc_num_attn_heads,
        num_supernodes,  # nodes remaining after GNN pooling
        num_latent_tokens=None,
        gnn_norm="none",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.gnn_dim = gnn_dim
        self.enc_dim = enc_dim
        self.perc_dim = perc_dim
        self.gnn_depth = gnn_depth
        self.enc_depth = enc_depth
        self.enc_num_attn_heads = enc_num_attn_heads
        self.perc_num_attn_heads = perc_num_attn_heads
        self.num_supernodes = num_supernodes
        self.num_latent_tokens = num_latent_tokens
        # input_shape is (input_dim, None)
        input_dim, _ = self.input_shape
        # number of spatial dims comes from the dataset metadata
        ndim = self.data_container.get_dataset().metadata["dim"]
        self.mesh_embed = CfdGnnPool(
            input_dim=input_dim,
            hidden_dim=gnn_dim,
            depth=gnn_depth,
            num_output_nodes=num_supernodes,
            norm=gnn_norm,
            ndim=ndim
        )
        # transformer blocks (DiT-style when a condition is present)
        self.enc_proj = LinearProjection(gnn_dim, enc_dim)
        if "condition_dim" in self.static_ctx:
            block_ctor = partial(DitBlock, cond_dim=self.static_ctx["condition_dim"])
        else:
            block_ctor = PrenormBlock
        self.blocks = nn.ModuleList([
            block_ctor(dim=enc_dim, num_heads=enc_num_attn_heads)
            for _ in range(enc_depth)
        ])
        # perceiver pooling to a fixed number of latent tokens
        # NOTE(review): unlike CfdGnnPoolTransformerPerceiver this is always a
        # plain PerceiverPoolingBlock, so a condition never reaches the
        # pooling stage -- confirm whether this is intentional
        self.perc_proj = LinearProjection(enc_dim, perc_dim)
        self.perceiver = PerceiverPoolingBlock(
            dim=perc_dim,
            num_heads=perc_num_attn_heads,
            num_query_tokens=num_latent_tokens,
        )
        # output shape
        self.output_shape = (num_latent_tokens, perc_dim)

    def get_model_specific_param_group_modifiers(self):
        """Exclude the perceiver's learnable query tokens from weight decay."""
        return [ExcludeFromWdByNameModifier(name="perceiver.query")]

    def forward(self, x, mesh_pos, mesh_edges, batch_idx, condition=None, static_tokens=None):
        """x: per-particle features; mesh_pos/mesh_edges: particle graph;
        batch_idx: sample index per particle; condition: optional vector for
        the DiT blocks; static_tokens: currently unused."""
        # pool the particle graph down to num_supernodes nodes per sample
        x, batch_idx_pooled = self.mesh_embed(x, mesh_pos=mesh_pos, mesh_edges=mesh_edges, batch_idx=batch_idx)
        # densify; every sample must have exactly num_supernodes nodes
        x, mask = to_dense_batch(x, batch_idx_pooled)
        assert torch.all(mask)
        # project static_tokens to encoder dim
        # static_tokens = self.static_token_proj(static_tokens)
        # concat static tokens
        # x = torch.cat([static_tokens, x], dim=1)
        # apply blocks
        block_kwargs = {}
        if condition is not None:
            block_kwargs["cond"] = condition
        x = self.enc_proj(x)
        for blk in self.blocks:
            x = blk(x, **block_kwargs)
        # perceiver (NOTE(review): block_kwargs/condition is not passed here)
        x = self.perc_proj(x)
        x = self.perceiver(kv=x)
        return x
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/encoders/rans_hierarchical_perceiver.py | src/models/encoders/rans_hierarchical_perceiver.py | from functools import partial
import einops
import torch
from kappamodules.layers import ContinuousSincosEmbed, LinearProjection
from kappamodules.transformer import PerceiverPoolingBlock, PrenormBlock
from kappamodules.vit import DitBlock
from kappautils.param_checking import to_ntuple
from torch import nn
from torch_geometric.utils import to_dense_batch
from models.base.single_model_base import SingleModelBase
from optimizers.param_group_modifiers.exclude_from_wd_by_name_modifier import ExcludeFromWdByNameModifier
class RansHierarchicalPerceiver(SingleModelBase):
    """Hierarchical perceiver encoder: num_stages stages, each consisting of
    (projection, transformer blocks, projection, perceiver pooling) with
    per-stage dims, head counts and query-token counts."""

    def __init__(
        self,
        num_stages,
        dim,
        depth,
        num_attn_heads,
        num_query_tokens,
        **kwargs,
    ):
        """Scalar arguments are broadcast via to_ntuple: dim/num_attn_heads
        get 2 entries per stage (transformer part, pooling part); depth and
        num_query_tokens get 1 entry per stage."""
        super().__init__(**kwargs)
        self.num_stages = num_stages
        self.dim = to_ntuple(dim, n=num_stages * 2)
        self.depth = to_ntuple(depth, n=num_stages)
        self.num_attn_heads = to_ntuple(num_attn_heads, n=num_stages * 2)
        self.num_query_tokens = to_ntuple(num_query_tokens, n=num_stages)
        # set ndim (input_shape is (None, ndim))
        _, ndim = self.input_shape
        self.static_ctx["ndim"] = ndim
        # pos_embed
        self.pos_embed = ContinuousSincosEmbed(dim=self.dim[0], ndim=ndim)
        # ctors
        # NOTE(review): when "condition_dim" is set the blocks are DitBlocks,
        # but forward never supplies a cond argument -- confirm this path
        if "condition_dim" in self.static_ctx:
            block_ctor = partial(DitBlock, cond_dim=self.static_ctx["condition_dim"])
        else:
            block_ctor = PrenormBlock
        self.models = nn.ModuleList()
        for i in range(num_stages):
            stage_models = nn.ModuleList()
            # projection (identity for stage 0: pos_embed already yields dim[0])
            if i == 0:
                stage_models.append(nn.Identity())
            else:
                stage_models.append(LinearProjection(self.dim[(i - 1) * 2 + 1], self.dim[i * 2]))
            # transformer
            stage_models.append(
                nn.ModuleList([
                    block_ctor(dim=self.dim[i * 2], num_heads=self.num_attn_heads[i * 2])
                    for _ in range(self.depth[i])
                ]),
            )
            # projection
            stage_models.append(LinearProjection(self.dim[i * 2], self.dim[i * 2 + 1]))
            # perceiver
            stage_models.append(
                PerceiverPoolingBlock(
                    dim=self.dim[i * 2 + 1],
                    num_heads=self.num_attn_heads[i * 2 + 1],
                    num_query_tokens=self.num_query_tokens[i],
                ),
            )
            self.models.append(stage_models)
        # output shape
        self.output_shape = (self.num_query_tokens[-1], self.dim[-1])

    def get_model_specific_param_group_modifiers(self):
        """Exclude every stage's perceiver query tokens from weight decay
        (index 3 = the PerceiverPoolingBlock in each stage's ModuleList)."""
        return [ExcludeFromWdByNameModifier(name=f"models.{i}.3.query") for i in range(self.num_stages)]

    def forward(self, mesh_pos, mesh_edges, batch_idx):
        """Embed mesh positions and run through all stages. mesh_edges is
        accepted for interface compatibility but unused."""
        x = self.pos_embed(mesh_pos)
        x, mask = to_dense_batch(x, batch_idx)
        if torch.all(mask):
            mask = None
        else:
            # add dimensions for num_heads and query (keys are masked)
            mask = einops.rearrange(mask, "batchsize num_nodes -> batchsize 1 1 num_nodes")
        for i, (proj1, blocks, proj2, pooling) in enumerate(self.models):
            # the padding mask is only valid before the first pooling; after
            # stage 0 every sample has a fixed number of query tokens
            block_kwargs = dict(attn_mask=mask) if i == 0 else dict()
            x = proj1(x)
            for block in blocks:
                x = block(x, **block_kwargs)
            x = proj2(x)
            x = pooling(x, **block_kwargs)
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/encoders/rans_perceiver.py | src/models/encoders/rans_perceiver.py | import einops
import torch
from kappamodules.layers import ContinuousSincosEmbed
from kappamodules.transformer import PerceiverPoolingBlock, Mlp
from torch import nn
from torch_geometric.utils import to_dense_batch
from models.base.single_model_base import SingleModelBase
from optimizers.param_group_modifiers.exclude_from_wd_by_name_modifier import ExcludeFromWdByNameModifier
class RansPerceiver(SingleModelBase):
    """Single-stage perceiver encoder: sincos-embeds the mesh positions, runs
    an MLP, then pools to num_output_tokens tokens via (optionally masked)
    perceiver cross-attention."""

    def __init__(
        self,
        dim,
        num_attn_heads,
        num_output_tokens,
        add_type_token=False,
        init_weights="xavier_uniform",
        init_last_proj_zero=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.dim = dim
        self.num_attn_heads = num_attn_heads
        self.num_output_tokens = num_output_tokens
        self.add_type_token = add_type_token
        # set ndim (input_shape is (None, ndim))
        _, ndim = self.input_shape
        self.static_ctx["ndim"] = ndim
        # pos_embed
        self.pos_embed = ContinuousSincosEmbed(dim=dim, ndim=ndim)
        # perceiver
        self.mlp = Mlp(in_dim=dim, hidden_dim=dim * 4, init_weights=init_weights)
        self.block = PerceiverPoolingBlock(
            dim=dim,
            num_heads=num_attn_heads,
            num_query_tokens=num_output_tokens,
            perceiver_kwargs=dict(
                init_weights=init_weights,
                init_last_proj_zero=init_last_proj_zero,
            ),
        )
        # optional learnable token marking the modality of these outputs
        if add_type_token:
            self.type_token = nn.Parameter(torch.empty(size=(1, 1, dim,)))
        else:
            self.type_token = None
        # output shape
        self.output_shape = (num_output_tokens, dim)

    def model_specific_initialization(self):
        # type_token is created with empty() above -> must be initialized here
        if self.add_type_token:
            nn.init.trunc_normal_(self.type_token)

    def get_model_specific_param_group_modifiers(self):
        """Exclude query tokens (and type token, if any) from weight decay."""
        modifiers = [ExcludeFromWdByNameModifier(name="block.query")]
        if self.add_type_token:
            modifiers += [ExcludeFromWdByNameModifier(name="type_token")]
        return modifiers

    def forward(self, mesh_pos, batch_idx, mesh_edges=None):
        """mesh_pos: node coordinates; batch_idx: sample index per node;
        mesh_edges: accepted for interface compatibility, unused."""
        x = self.pos_embed(mesh_pos)
        x, mask = to_dense_batch(x, batch_idx)
        if torch.all(mask):
            mask = None
        else:
            # add dimensions for num_heads and query (keys are masked)
            mask = einops.rearrange(mask, "batchsize num_nodes -> batchsize 1 1 num_nodes")
        # perceiver
        x = self.mlp(x)
        x = self.block(kv=x, attn_mask=mask)
        if self.add_type_token:
            x = x + self.type_token
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/encoders/cfd_gnn_pool_transformer_perceiver.py | src/models/encoders/cfd_gnn_pool_transformer_perceiver.py | from functools import partial
import torch
from kappamodules.layers import LinearProjection
from kappamodules.transformer import PerceiverPoolingBlock, PrenormBlock, DitPerceiverPoolingBlock
from kappamodules.vit import DitBlock
from torch import nn
from torch_geometric.utils import to_dense_batch
from models.base.single_model_base import SingleModelBase
from modules.gno.cfd_gnn_pool import CfdGnnPool
from optimizers.param_group_modifiers.exclude_from_wd_by_name_modifier import ExcludeFromWdByNameModifier
class CfdGnnPoolTransformerPerceiver(SingleModelBase):
    """CFD encoder: GNN pooling to num_supernodes supernodes, a transformer
    over the supernodes, then (optionally DiT-conditioned) perceiver pooling
    to num_latent_tokens latent tokens."""

    def __init__(
        self,
        gnn_dim,  # hidden dim of the pooling GNN
        enc_dim,  # dim of the transformer encoder blocks
        perc_dim,  # dim of the perceiver pooling stage
        gnn_depth,  # number of GNN layers
        enc_depth,  # number of transformer blocks
        enc_num_attn_heads,
        perc_num_attn_heads,
        num_supernodes,  # nodes remaining after GNN pooling
        num_latent_tokens=None,
        init_weights="xavier_uniform",
        gnn_init_weights=None,  # defaults to init_weights
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.gnn_dim = gnn_dim
        self.enc_dim = enc_dim
        self.perc_dim = perc_dim
        self.gnn_depth = gnn_depth
        self.enc_depth = enc_depth
        self.enc_num_attn_heads = enc_num_attn_heads
        self.perc_num_attn_heads = perc_num_attn_heads
        self.num_supernodes = num_supernodes
        self.num_latent_tokens = num_latent_tokens
        self.init_weights = init_weights
        # GNN init scheme falls back to the global init scheme
        gnn_init_weights = gnn_init_weights or init_weights
        self.gnn_init_weights = gnn_init_weights
        # input_shape is (None, input_dim)
        _, input_dim = self.input_shape
        self.mesh_embed = CfdGnnPool(
            input_dim=input_dim,
            hidden_dim=gnn_dim,
            depth=gnn_depth,
            num_output_nodes=num_supernodes,
            init_weights=gnn_init_weights,
        )
        # transformer blocks (DiT-style when a condition is present)
        self.enc_proj = LinearProjection(gnn_dim, enc_dim)
        if "condition_dim" in self.static_ctx:
            block_ctor = partial(DitBlock, cond_dim=self.static_ctx["condition_dim"])
        else:
            block_ctor = PrenormBlock
        self.blocks = nn.ModuleList([
            block_ctor(dim=enc_dim, num_heads=enc_num_attn_heads, init_weights=init_weights)
            for _ in range(enc_depth)
        ])
        # perceiver pooling (conditioned variant when a condition is present)
        self.perc_proj = LinearProjection(enc_dim, perc_dim)
        if "condition_dim" in self.static_ctx:
            block_ctor = partial(
                DitPerceiverPoolingBlock,
                perceiver_kwargs=dict(
                    cond_dim=self.static_ctx["condition_dim"],
                    init_weights=init_weights,
                ),
            )
        else:
            block_ctor = partial(
                PerceiverPoolingBlock,
                perceiver_kwargs=dict(init_weights=init_weights),
            )
        self.perceiver = block_ctor(
            dim=perc_dim,
            num_heads=perc_num_attn_heads,
            num_query_tokens=num_latent_tokens,
        )
        # output shape
        self.output_shape = (num_latent_tokens, perc_dim)

    def get_model_specific_param_group_modifiers(self):
        """Exclude the perceiver's learnable query tokens from weight decay."""
        return [ExcludeFromWdByNameModifier(name="perceiver.query")]

    def forward(self, x, mesh_pos, mesh_edges, batch_idx, condition=None, static_tokens=None):
        """x: per-node features; mesh_pos/mesh_edges: mesh geometry;
        batch_idx: sample index per node; condition: optional vector for the
        DiT blocks and DiT perceiver; static_tokens: currently unused."""
        # pool the mesh down to num_supernodes nodes per sample
        x, batch_idx_pooled = self.mesh_embed(x, mesh_pos=mesh_pos, mesh_edges=mesh_edges, batch_idx=batch_idx)
        # densify; every sample must have exactly num_supernodes nodes
        x, mask = to_dense_batch(x, batch_idx_pooled)
        assert torch.all(mask)
        # project static_tokens to encoder dim
        # static_tokens = self.static_token_proj(static_tokens)
        # concat static tokens
        # x = torch.cat([static_tokens, x], dim=1)
        # apply blocks
        block_kwargs = {}
        if condition is not None:
            block_kwargs["cond"] = condition
        x = self.enc_proj(x)
        for blk in self.blocks:
            x = blk(x, **block_kwargs)
        # perceiver
        x = self.perc_proj(x)
        x = self.perceiver(kv=x, **block_kwargs)
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/encoders/rans_gino_sdf.py | src/models/encoders/rans_gino_sdf.py | import numpy as np
from models.base.single_model_base import SingleModelBase
from modules.gno.rans_gino_mesh_to_grid_sdf import RansGinoMeshToGridSdf
from kappamodules.layers import ContinuousSincosEmbed
from torch import nn
class RansGinoSdf(SingleModelBase):
    """Thin wrapper around a GINO mesh-to-grid module that transfers SDF
    features from mesh nodes onto a regular grid."""

    def __init__(self, dim, resolution=None, **kwargs):
        """dim: feature dimension; resolution: grid resolution, defaults to
        the dataset's grid_resolution when not given (or falsy)."""
        super().__init__(**kwargs)
        self.dim = dim
        # fall back to the dataset-provided resolution
        self.resolution = resolution or self.data_container.get_dataset().grid_resolution
        self.mesh_to_grid = RansGinoMeshToGridSdf(dim=dim, resolution=self.resolution)
        # publish grid geometry for downstream models
        self.static_ctx["grid_resolution"] = self.resolution
        self.static_ctx["ndim"] = len(self.resolution)
        num_grid_points = int(np.prod(self.resolution))
        self.output_shape = (num_grid_points, dim)

    def forward(self, mesh_pos, sdf, grid_pos, mesh_to_grid_edges):
        """Delegate entirely to the mesh-to-grid module."""
        return self.mesh_to_grid(
            mesh_pos=mesh_pos,
            sdf=sdf,
            grid_pos=grid_pos,
            mesh_to_grid_edges=mesh_to_grid_edges,
        )
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/encoders/__init__.py | src/models/encoders/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/encoders/rans_pool_transformer.py | src/models/encoders/rans_pool_transformer.py | from functools import partial
import torch
from kappamodules.layers import LinearProjection
from kappamodules.transformer import PerceiverPoolingBlock, PrenormBlock, DitPerceiverPoolingBlock
from kappamodules.vit import DitBlock
from torch import nn
from torch_geometric.utils import to_dense_batch
from models.base.single_model_base import SingleModelBase
from modules.gno.rans_pool import RansPool
from optimizers.param_group_modifiers.exclude_from_wd_by_name_modifier import ExcludeFromWdByNameModifier
class RansPoolTransformer(SingleModelBase):
    """Encoder that pools the mesh with RansPool and processes the result
    with a plain transformer; keeps a variable token count (no perceiver)."""

    def __init__(
        self,
        gnn_dim,  # hidden dim of the pooling module
        enc_dim,  # dim of the transformer encoder blocks
        enc_depth,  # number of transformer blocks
        enc_num_attn_heads,
        add_type_token=False,
        init_weights="xavier_uniform",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.gnn_dim = gnn_dim
        self.enc_dim = enc_dim
        self.enc_depth = enc_depth
        self.enc_num_attn_heads = enc_num_attn_heads
        self.add_type_token = add_type_token
        self.init_weights = init_weights
        # input_shape is (None, input_dim)
        self.mesh_embed = RansPool(
            hidden_dim=gnn_dim,
            init_weights=init_weights,
            ndim=self.static_ctx["ndim"],
        )
        # blocks
        self.enc_proj = LinearProjection(gnn_dim, enc_dim)
        self.blocks = nn.ModuleList([
            PrenormBlock(dim=enc_dim, num_heads=enc_num_attn_heads, init_weights=init_weights)
            for _ in range(enc_depth)
        ])
        # optional learnable token marking the modality of these outputs
        if add_type_token:
            self.type_token = nn.Parameter(torch.empty(size=(1, 1, enc_dim,)))
        else:
            self.type_token = None
        # output shape (token count is data dependent -> None)
        self.output_shape = (None, enc_dim)

    def model_specific_initialization(self):
        # type_token is created with empty() above -> must be initialized here
        if self.add_type_token:
            nn.init.trunc_normal_(self.type_token)

    def get_model_specific_param_group_modifiers(self):
        """Exclude the type token (if any) from weight decay."""
        modifiers = []
        if self.add_type_token:
            modifiers += [ExcludeFromWdByNameModifier(name="type_token")]
        return modifiers

    def forward(self, mesh_pos, mesh_edges, batch_idx):
        """mesh_pos/mesh_edges: mesh geometry; batch_idx: sample per node."""
        # embed mesh
        x = self.mesh_embed(mesh_pos=mesh_pos, mesh_edges=mesh_edges, batch_idx=batch_idx)
        # apply blocks
        x = self.enc_proj(x)
        for blk in self.blocks:
            x = blk(x)
        # add type token
        if self.add_type_token:
            x = x + self.type_token
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/encoders/cfd_pool_transformer.py | src/models/encoders/cfd_pool_transformer.py | from functools import partial
import torch
from kappamodules.layers import LinearProjection
from kappamodules.transformer import PerceiverPoolingBlock, PrenormBlock, DitPerceiverPoolingBlock
from kappamodules.vit import DitBlock
from torch import nn
from torch_geometric.utils import to_dense_batch
from models.base.single_model_base import SingleModelBase
from modules.gno.cfd_pool import CfdPool
from optimizers.param_group_modifiers.exclude_from_wd_by_name_modifier import ExcludeFromWdByNameModifier
class CfdPoolTransformer(SingleModelBase):
    """CFD encoder: CfdPool embedding followed by a transformer; keeps a
    variable token count (no perceiver pooling)."""

    def __init__(
        self,
        gnn_dim,  # hidden dim of the pooling module
        enc_dim,  # dim of the transformer encoder blocks
        enc_depth,  # number of transformer blocks
        enc_num_attn_heads,
        init_weights="xavier_uniform",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.gnn_dim = gnn_dim
        self.enc_dim = enc_dim
        self.enc_depth = enc_depth
        self.enc_num_attn_heads = enc_num_attn_heads
        self.init_weights = init_weights
        # input_shape is (None, input_dim)
        _, input_dim = self.input_shape
        self.mesh_embed = CfdPool(
            input_dim=input_dim,
            hidden_dim=gnn_dim,
            init_weights=init_weights,
        )
        # transformer blocks (DiT-style when a condition is present)
        self.enc_proj = LinearProjection(gnn_dim, enc_dim)
        if "condition_dim" in self.static_ctx:
            block_ctor = partial(DitBlock, cond_dim=self.static_ctx["condition_dim"])
        else:
            block_ctor = PrenormBlock
        self.blocks = nn.ModuleList([
            block_ctor(dim=enc_dim, num_heads=enc_num_attn_heads, init_weights=init_weights)
            for _ in range(enc_depth)
        ])
        # output shape (token count is data dependent -> None)
        self.output_shape = (None, enc_dim)

    def forward(self, x, mesh_pos, mesh_edges, batch_idx, condition=None, static_tokens=None):
        """x: per-node features; condition: optional vector for DiT blocks;
        static_tokens: currently unused (see commented-out code)."""
        # embed mesh
        x = self.mesh_embed(x, mesh_pos=mesh_pos, mesh_edges=mesh_edges, batch_idx=batch_idx)
        # project static_tokens to encoder dim
        # static_tokens = self.static_token_proj(static_tokens)
        # concat static tokens
        # x = torch.cat([static_tokens, x], dim=1)
        # apply blocks
        block_kwargs = {}
        if condition is not None:
            block_kwargs["cond"] = condition
        x = self.enc_proj(x)
        for blk in self.blocks:
            x = blk(x, **block_kwargs)
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/encoders/rans_grid_convnext.py | src/models/encoders/rans_grid_convnext.py | import numpy as np
import einops
import torch
from kappamodules.convolution import ConvNext
from torch import nn
import torch.nn.functional as F
from models.base.single_model_base import SingleModelBase
from optimizers.param_group_modifiers.exclude_from_wd_by_name_modifier import ExcludeFromWdByNameModifier
class RansGridConvnext(SingleModelBase):
    """Encodes a dense SDF grid with a ConvNeXt backbone and flattens the
    resulting feature map into tokens (plus a type token and optional
    learnable positional tokens)."""

    def __init__(
        self,
        patch_size,
        dims,  # per-stage ConvNeXt dims
        depths,  # per-stage ConvNeXt depths
        kernel_size=7,
        depthwise=True,
        global_response_norm=True,
        drop_path_rate=0.,
        drop_path_decay=False,
        add_pos_tokens=False,
        upsample_size=None,  # if set, interpolate input to this size first
        upsample_mode="nearest",
        resolution=None,  # defaults to the dataset grid resolution
        concat_pos_to_sdf=None,  # defaults to the dataset setting
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.dims = dims
        self.depths = depths
        self.drop_path_rate = drop_path_rate
        self.drop_path_decay = drop_path_decay
        self.add_pos_tokens = add_pos_tokens
        self.upsample_size = upsample_size
        self.upsample_mode = upsample_mode
        self.resolution = resolution or self.data_container.get_dataset().grid_resolution
        self.ndim = len(self.resolution)
        # input is sdf (1 channel), optionally with grid_pos concatenated (+3)
        concat_pos_to_sdf = concat_pos_to_sdf or self.data_container.get_dataset().concat_pos_to_sdf
        if concat_pos_to_sdf:
            input_dim = 4
        else:
            input_dim = 1
        self.model = ConvNext(
            patch_size=patch_size,
            input_dim=input_dim,
            dims=dims,
            depths=depths,
            ndim=self.ndim,
            drop_path_rate=drop_path_rate,
            drop_path_decay=drop_path_decay,
            kernel_size=kernel_size,
            depthwise=depthwise,
            global_response_norm=global_response_norm,
        )
        # output resolution: stem downsamples by patch_size, each stage after
        # the first by another factor of 2 (hence 2 ** (len(depths) - 1))
        out_resolution = [r // 2 ** (len(depths) - 1) // patch_size for r in self.resolution]
        num_output_tokens = int(np.prod(out_resolution))
        if add_pos_tokens:
            self.pos_tokens = nn.Parameter(torch.empty(size=(1, num_output_tokens, dims[-1])))
        else:
            self.pos_tokens = None
        self.type_token = nn.Parameter(torch.empty(size=(1, 1, dims[-1])))
        # publish grid geometry for downstream models
        self.static_ctx["grid_resolution"] = self.resolution
        self.static_ctx["ndim"] = self.ndim
        self.output_shape = (num_output_tokens, dims[-1])

    def model_specific_initialization(self):
        # tokens are created with empty() above -> must be initialized here
        if self.add_pos_tokens:
            nn.init.trunc_normal_(self.pos_tokens)
        nn.init.trunc_normal_(self.type_token)

    def get_model_specific_param_group_modifiers(self):
        """Exclude type/pos tokens from weight decay."""
        modifiers = [ExcludeFromWdByNameModifier(name="type_token")]
        if self.add_pos_tokens:
            modifiers += [ExcludeFromWdByNameModifier(name="pos_tokens")]
        return modifiers

    def forward(self, x):
        """x: SDF grid, dim-last with spatial dims.
        NOTE(review): the rearrange patterns hard-code 3 spatial dims
        (height width depth), so only ndim == 3 is supported here despite
        the configurable resolution -- confirm."""
        # sdf is passed as dim-last with spatial -> convert to dim-first with spatial
        x = einops.rearrange(x, "batch_size height width depth dim -> batch_size dim height width depth")
        # upsample
        if self.upsample_size is not None:
            if self.upsample_mode == "nearest":
                # nearest-neighbor interpolation has no align_corners option
                x = F.interpolate(x, size=self.upsample_size, mode=self.upsample_mode)
            else:
                x = F.interpolate(x, size=self.upsample_size, mode=self.upsample_mode, align_corners=True)
        # embed
        x = self.model(x)
        # flatten to tokens
        x = einops.rearrange(x, "batch_size dim height width depth -> batch_size (height width depth) dim")
        x = x + self.type_token
        if self.add_pos_tokens:
            x = x + self.pos_tokens.expand(len(x), -1, -1)
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/encoders/rans_pool_transformer_perceiver.py | src/models/encoders/rans_pool_transformer_perceiver.py | from functools import partial
import torch
from kappamodules.layers import LinearProjection
from kappamodules.transformer import PerceiverPoolingBlock, PrenormBlock, DitPerceiverPoolingBlock
from kappamodules.vit import DitBlock
from torch import nn
from torch_geometric.utils import to_dense_batch
from models.base.single_model_base import SingleModelBase
from modules.gno.rans_pool import RansPool
from optimizers.param_group_modifiers.exclude_from_wd_by_name_modifier import ExcludeFromWdByNameModifier
class RansPoolTransformerPerceiver(SingleModelBase):
    """RANS encoder: RansPool embedding, a transformer over the pooled
    tokens, then perceiver pooling to num_latent_tokens latent tokens."""

    def __init__(
        self,
        gnn_dim,  # hidden dim of the pooling module
        enc_dim,  # dim of the transformer encoder blocks
        perc_dim,  # dim of the perceiver pooling stage
        enc_depth,  # number of transformer blocks
        enc_num_attn_heads,
        perc_num_attn_heads,
        num_latent_tokens,
        use_enc_norm=False,  # optional LayerNorm on the pooling output
        add_type_token=False,
        init_weights="xavier_uniform",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.gnn_dim = gnn_dim
        self.enc_dim = enc_dim
        self.perc_dim = perc_dim
        self.enc_depth = enc_depth
        self.enc_num_attn_heads = enc_num_attn_heads
        self.perc_num_attn_heads = perc_num_attn_heads
        self.num_latent_tokens = num_latent_tokens
        self.use_enc_norm = use_enc_norm
        self.add_type_token = add_type_token
        self.init_weights = init_weights
        # input_shape is (None, input_dim); its second entry is used as the
        # number of spatial dimensions here -- presumably the per-node input
        # features are the node coordinates (TODO confirm)
        _, input_dim = self.input_shape
        self.static_ctx["ndim"] = input_dim
        self.mesh_embed = RansPool(
            hidden_dim=gnn_dim,
            init_weights=init_weights,
            ndim=input_dim,
        )
        # blocks
        self.enc_norm = nn.LayerNorm(gnn_dim, eps=1e-6) if use_enc_norm else nn.Identity()
        self.enc_proj = LinearProjection(gnn_dim, enc_dim)
        if "condition_dim" in self.static_ctx:
            block_ctor = partial(DitBlock, cond_dim=self.static_ctx["condition_dim"])
        else:
            block_ctor = PrenormBlock
        self.blocks = nn.ModuleList([
            block_ctor(dim=enc_dim, num_heads=enc_num_attn_heads, init_weights=init_weights)
            for _ in range(enc_depth)
        ])
        # perceiver pooling (conditioned variant when a condition is present)
        self.perc_proj = LinearProjection(enc_dim, perc_dim)
        if "condition_dim" in self.static_ctx:
            block_ctor = partial(
                DitPerceiverPoolingBlock,
                perceiver_kwargs=dict(
                    cond_dim=self.static_ctx["condition_dim"],
                    init_weights=init_weights,
                ),
            )
        else:
            block_ctor = partial(
                PerceiverPoolingBlock,
                perceiver_kwargs=dict(init_weights=init_weights),
            )
        self.perceiver = block_ctor(
            dim=perc_dim,
            num_heads=perc_num_attn_heads,
            num_query_tokens=num_latent_tokens,
        )
        # optional learnable token marking the modality of these outputs
        # NOTE(review): type_token has enc_dim entries but is added to the
        # perc_dim perceiver output in forward -- only valid when
        # enc_dim == perc_dim (confirm)
        if add_type_token:
            self.type_token = nn.Parameter(torch.empty(size=(1, 1, enc_dim,)))
        else:
            self.type_token = None
        # output shape
        self.output_shape = (num_latent_tokens, perc_dim)

    def model_specific_initialization(self):
        # type_token is created with empty() above -> must be initialized here
        if self.add_type_token:
            nn.init.trunc_normal_(self.type_token)

    def get_model_specific_param_group_modifiers(self):
        """Exclude perceiver queries (and type token, if any) from wd."""
        modifiers = [ExcludeFromWdByNameModifier(name="perceiver.query")]
        if self.add_type_token:
            modifiers += [ExcludeFromWdByNameModifier(name="type_token")]
        return modifiers

    def forward(self, mesh_pos, mesh_edges, batch_idx):
        """mesh_pos/mesh_edges: mesh geometry; batch_idx: sample per node.
        NOTE(review): forward has no condition argument, so the DiT variants
        constructed above would never receive a cond -- confirm."""
        # embed mesh
        x = self.mesh_embed(mesh_pos=mesh_pos, mesh_edges=mesh_edges, batch_idx=batch_idx)
        # apply blocks
        x = self.enc_norm(x)
        x = self.enc_proj(x)
        for blk in self.blocks:
            x = blk(x)
        # perceiver
        x = self.perc_proj(x)
        x = self.perceiver(kv=x)
        # add type token
        if self.add_type_token:
            x = x + self.type_token
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/encoders/rans_grid_unet.py | src/models/encoders/rans_grid_unet.py | import einops
import torch
from torch import nn
from models.base.single_model_base import SingleModelBase
from optimizers.param_group_modifiers.exclude_from_wd_by_name_modifier import ExcludeFromWdByNameModifier
from kappamodules.vit import VitPatchEmbed, VitPosEmbed
from kappamodules.transformer import PerceiverPoolingBlock, PrenormBlock
from kappamodules.unet import UnetGino
from kappamodules.layers import LinearProjection
class RansGridUnet(SingleModelBase):
    """Encodes a dense SDF grid with a GINO-style U-Net, then perceiver-pools
    the flattened feature map to num_output_tokens tokens."""

    def __init__(
        self,
        dim,
        num_attn_heads,
        num_output_tokens,
        depth=4,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.dim = dim
        self.depth = depth
        self.num_attn_heads = num_attn_heads
        self.num_output_tokens = num_output_tokens
        self.resolution = self.data_container.get_dataset().grid_resolution
        self.ndim = len(self.resolution)
        # input is sdf (1 channel), optionally with grid_pos concatenated (+3)
        if self.data_container.get_dataset().concat_pos_to_sdf:
            input_dim = 4
        else:
            input_dim = 1
        # hidden_dim of the U-Net is dim // depth -> dim must be divisible
        assert dim % depth == 0
        self.unet = UnetGino(
            input_dim=input_dim,
            hidden_dim=dim // depth,
            output_dim=dim,
            depth=depth,
        )
        self.perceiver = PerceiverPoolingBlock(
            dim=dim,
            num_heads=num_attn_heads,
            num_query_tokens=num_output_tokens,
            perceiver_kwargs=dict(init_weights="truncnormal"),
        )
        self.type_token = nn.Parameter(torch.empty(size=(1, 1, dim,)))
        # publish grid geometry for downstream models
        self.static_ctx["grid_resolution"] = self.resolution
        self.static_ctx["ndim"] = self.ndim
        self.output_shape = (num_output_tokens, dim)

    def model_specific_initialization(self):
        # type_token is created with empty() above -> must be initialized here
        nn.init.trunc_normal_(self.type_token)

    def get_model_specific_param_group_modifiers(self):
        """Exclude the type token from weight decay."""
        return [ExcludeFromWdByNameModifier(name="type_token")]

    def forward(self, x):
        """x: SDF grid, dim-last with spatial dims.
        NOTE(review): the rearrange patterns hard-code 3 spatial dims."""
        # sdf is passed as dim-last with spatial -> convert to dim-first with spatial
        x = einops.rearrange(x, "batch_size height width depth dim -> batch_size dim height width depth")
        # embed
        x = self.unet(x)
        # perceiver
        x = einops.rearrange(x, "batch_size dim height width depth -> batch_size (height width depth) dim")
        # NOTE(review): the perceiver is called positionally here while
        # sibling encoders call it as self.perceiver(kv=x) -- confirm the
        # first positional parameter is kv
        x = self.perceiver(x)
        x = x + self.type_token
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/encoders/rans_gino_sdf_og.py | src/models/encoders/rans_gino_sdf_og.py | import numpy as np
from models.base.single_model_base import SingleModelBase
from modules.gno.rans_gino_mesh_to_grid_sdf_og import RansGinoMeshToGridSdfOg
from kappamodules.layers import ContinuousSincosEmbed
from torch import nn
class RansGinoSdfOg(SingleModelBase):
    """Thin wrapper around the 'original' GINO mesh-to-grid SDF module."""

    def __init__(self, hidden_dim, output_dim, **kwargs):
        """hidden_dim/output_dim: dims forwarded to the GINO module."""
        super().__init__(**kwargs)
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.resolution = self.data_container.get_dataset().grid_resolution
        self.mesh_to_grid = RansGinoMeshToGridSdfOg(
            hidden_dim=hidden_dim,
            output_dim=output_dim,
            resolution=self.resolution,
        )
        # publish grid geometry for downstream models
        self.static_ctx["grid_resolution"] = self.resolution
        self.static_ctx["ndim"] = len(self.resolution)
        # feature dim is taken from the submodule itself
        num_grid_points = int(np.prod(self.resolution))
        self.output_shape = (num_grid_points, self.mesh_to_grid.output_dim)

    def forward(self, mesh_pos, sdf, grid_pos, mesh_to_grid_edges):
        """Delegate entirely to the mesh-to-grid module."""
        return self.mesh_to_grid(
            mesh_pos=mesh_pos,
            sdf=sdf,
            grid_pos=grid_pos,
            mesh_to_grid_edges=mesh_to_grid_edges,
        )
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/conditioners/timestep_velocity_conditioner_pdearena.py | src/models/conditioners/timestep_velocity_conditioner_pdearena.py | from kappamodules.functional.pos_embed import get_sincos_1d_from_seqlen
from kappamodules.init import init_xavier_uniform_zero_bias, init_truncnormal_zero_bias
from kappamodules.layers import ContinuousSincosEmbed
from torch import nn
from models.base.single_model_base import SingleModelBase
class TimestepVelocityConditionerPdearena(SingleModelBase):
    """Produces a conditioning vector as the sum of a timestep embedding
    (fixed sincos table + MLP) and a velocity embedding (continuous sincos
    embedding + MLP)."""

    def __init__(self, dim, cond_dim=None, init_weights="xavier_uniform", **kwargs):
        """dim: embedding dim; cond_dim: output dim (defaults to dim * 4);
        init_weights: "xavier_uniform" or "truncnormal"."""
        super().__init__(**kwargs)
        self.num_total_timesteps = self.data_container.get_dataset().getdim_timestep()
        self.dim = dim
        self.cond_dim = cond_dim or dim * 4
        self.init_weights = init_weights
        # publish the condition dim so downstream DiT blocks can consume it
        self.static_ctx["condition_dim"] = self.cond_dim
        # buffer/modules: fixed (non-learnable) sincos table, one row per timestep
        self.register_buffer(
            "timestep_embed",
            get_sincos_1d_from_seqlen(seqlen=self.num_total_timesteps, dim=dim),
        )
        self.velocity_embed = ContinuousSincosEmbed(dim=dim, ndim=1)
        self.timestep_mlp = nn.Sequential(
            nn.Linear(dim, dim * 4),
            nn.GELU(),
            nn.Linear(dim * 4, self.cond_dim),
            nn.GELU(),
        )
        self.velocity_mlp = nn.Sequential(
            nn.Linear(dim, dim * 4),
            nn.GELU(),
            nn.Linear(dim * 4, self.cond_dim),
            nn.GELU(),
        )
        # init
        self.reset_parameters()

    def reset_parameters(self):
        # initialize weights per the chosen scheme, biases to zero
        if self.init_weights == "xavier_uniform":
            self.apply(init_xavier_uniform_zero_bias)
        elif self.init_weights == "truncnormal":
            self.apply(init_truncnormal_zero_bias)
        else:
            raise NotImplementedError

    def forward(self, timestep, velocity):
        """timestep: one scalar per sample (or a single scalar during
        rollout, which is then broadcast); velocity: one scalar per sample."""
        # checks + preprocess (both inputs must be effectively 1d)
        assert timestep.numel() == len(timestep)
        assert velocity.numel() == len(velocity)
        timestep = timestep.flatten()
        velocity = velocity.view(-1, 1).float()
        # for rollout timestep is simply initialized as 0 -> repeat to batch dimension
        if timestep.numel() == 1:
            timestep = timestep.repeat(velocity.numel())
        # embed (table lookup + MLP for timestep, sincos + MLP for velocity)
        timestep_embed = self.timestep_mlp(self.timestep_embed[timestep])
        velocity_embed = self.velocity_mlp(self.velocity_embed(velocity))
        return timestep_embed + velocity_embed
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/conditioners/__init__.py | src/models/conditioners/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/conditioners/timestep_conditioner_pdearena.py | src/models/conditioners/timestep_conditioner_pdearena.py | from kappamodules.functional.pos_embed import get_sincos_1d_from_seqlen
from kappamodules.init import init_xavier_uniform_zero_bias
from kappamodules.layers import ContinuousSincosEmbed
from torch import nn
from models.base.single_model_base import SingleModelBase
class TimestepConditionerPdearena(SingleModelBase):
    """Produces a conditioning vector from the integer timestep via a fixed
    sincos table followed by an MLP. The velocity argument is accepted for
    interface compatibility with the velocity conditioner but only used to
    infer the batch size."""

    def __init__(self, dim, cond_dim=None, **kwargs):
        """dim: embedding dim; cond_dim: output dim (defaults to dim * 4)."""
        super().__init__(**kwargs)
        self.num_total_timesteps = self.data_container.get_dataset().getdim_timestep()
        self.dim = dim
        self.cond_dim = cond_dim or dim * 4
        # publish the condition dim so downstream DiT blocks can consume it
        self.static_ctx["condition_dim"] = self.cond_dim
        # fixed (non-learnable) sincos table with one row per timestep
        self.register_buffer(
            "timestep_embed",
            get_sincos_1d_from_seqlen(seqlen=self.num_total_timesteps, dim=dim),
        )
        self.timestep_mlp = nn.Sequential(
            nn.Linear(dim, dim * 4),
            nn.GELU(),
            nn.Linear(dim * 4, self.cond_dim),
            nn.GELU(),
        )
        self.reset_parameters()

    def reset_parameters(self):
        # xavier-uniform weights, zero biases
        self.apply(init_xavier_uniform_zero_bias)

    def forward(self, timestep, velocity):
        """timestep: one scalar per sample (or a single scalar during
        rollout); velocity: only its element count (batch size) is used."""
        # both inputs must be effectively one-dimensional
        assert timestep.numel() == len(timestep)
        assert velocity.numel() == len(velocity)
        timestep = timestep.flatten()
        velocity = velocity.view(-1, 1).float()
        # during rollout the timestep starts as a single 0 -> broadcast it
        if timestep.numel() == 1:
            timestep = timestep.repeat(velocity.numel())
        # table lookup followed by the MLP
        return self.timestep_mlp(self.timestep_embed[timestep])
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/extractors/generic_extractor.py | src/models/extractors/generic_extractor.py | from torch import nn
from .base.extractor_base import ExtractorBase
from .base.forward_hook import ForwardHook
class GenericExtractor(ExtractorBase):
    """Extractor that hooks a single submodule addressed by ``model_path``."""

    def to_string(self):
        return f"GenericExtractor({self.model_path})"

    def _get_own_outputs(self):
        # expose only the output captured under this extractor's path
        path = self.model_path
        return {path: self.outputs[path]}

    def _register_hooks(self, model):
        # store the module's forward output under the model_path key
        fwd_hook = ForwardHook(outputs=self.outputs, output_name=self.model_path, **self.hook_kwargs)
        model.register_forward_hook(fwd_hook)
        self.hooks.append(fwd_hook)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/extractors/__init__.py | src/models/extractors/__init__.py | from utils.factory import instantiate
def extractor_from_kwargs(kind, **kwargs):
    """Instantiate an extractor class identified by its module/type name ``kind``."""
    module_name = f"models.extractors.{kind}"
    return instantiate(module_names=[module_name], type_names=[kind], **kwargs)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/extractors/base/forward_hook.py | src/models/extractors/base/forward_hook.py | class StopForwardException(Exception):
pass
class ForwardHook:
    """Forward hook that records a module's output into a shared dict.

    When ``raise_exception`` is set, a StopForwardException is raised right
    after capturing so the caller can skip the rest of the forward pass.
    The hook can be toggled at runtime via ``enabled``.
    """

    def __init__(
        self,
        outputs: dict,
        output_name: str,
        raise_exception: bool = False,
    ):
        # shared dict that collects outputs keyed by output_name
        self.outputs = outputs
        self.output_name = output_name
        self.raise_exception = raise_exception
        self.enabled = True

    def __call__(self, _, __, output):
        # module and inputs are ignored; only the output matters
        if self.enabled:
            self.outputs[self.output_name] = output
            if self.raise_exception:
                raise StopForwardException()
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/extractors/base/extractor_base.py | src/models/extractors/base/extractor_base.py | from models.extractors.finalizers import finalizer_from_kwargs
from models.extractors.finalizers.concat_finalizer import ConcatFinalizer
from utils.factory import create
from utils.select_with_path import select_with_path
class ExtractorBase:
    """Base class for extractors that capture intermediate module outputs via forward hooks.

    Subclasses implement ``to_string``, ``_register_hooks`` and ``_get_own_outputs``.
    Captured outputs accumulate in ``self.outputs`` until ``extract`` is called.
    Instances can be used as context managers to enable hooks only inside a
    ``with`` block.
    """

    def __init__(
        self,
        pooling=None,
        raise_exception=False,
        finalizer=ConcatFinalizer,
        model_path=None,
        hook_kwargs=None,
        outputs=None,
        static_ctx=None,
        add_model_path_to_repr=True,
    ):
        """
        :param pooling: optional callable applied to each captured output in ``extract``.
        :param raise_exception: stored flag; presumably forwarded to hooks by
            subclasses via hook_kwargs — TODO confirm against subclasses.
        :param finalizer: callable (or config for one) that merges the pooled features.
        :param model_path: dotted path selecting the submodule to hook.
        :param hook_kwargs: extra kwargs forwarded to hook construction by subclasses.
        :param outputs: optional externally shared dict to store captured outputs in.
        :param static_ctx: static context dict passed to ``pooling``.
        :param add_model_path_to_repr: prefix ``model_path`` in the string representation.
        """
        self.pooling = pooling
        self.raise_exception = raise_exception
        # "self.outputs = outputs or {}" does not work here as an empty dictionary evaluates to false
        if outputs is None:
            self.outputs = {}
        else:
            self.outputs = outputs
        self.hooks = []
        self.finalizer = create(finalizer, finalizer_from_kwargs)
        self.model_path = model_path
        self.static_ctx = static_ctx
        self.registered_hooks = False
        self.hook_kwargs = hook_kwargs or {}
        # model paths cant contain a . if the extractor is registered as part of a module
        self.add_model_path_to_repr = add_model_path_to_repr

    def __enter__(self):
        self.enable_hooks()

    def __exit__(self, *_, **__):
        self.disable_hooks()

    def __repr__(self):
        return str(self)

    def __str__(self):
        # "<model_path>.<subclass string>[.<finalizer>]"; finalizer suffix only for non-default
        if self.add_model_path_to_repr and self.model_path is not None:
            model_path = f"{self.model_path}."
        else:
            model_path = ""
        finalize_str = f".{str(self.finalizer)}" if not isinstance(self.finalizer, ConcatFinalizer) else ""
        return f"{model_path}{self.to_string()}{finalize_str}"

    def to_string(self):
        # subclass-specific identifier used by __str__
        raise NotImplementedError

    def register_hooks(self, model):
        """Select the submodule at ``model_path`` and attach hooks (allowed only once)."""
        assert not self.registered_hooks
        model = select_with_path(obj=model, path=self.model_path)
        assert model is not None, f"model.{self.model_path} is None"
        self._register_hooks(model)
        self.registered_hooks = True
        return self

    def _register_hooks(self, model):
        # subclass-specific hook registration on the selected submodule
        raise NotImplementedError

    def enable_hooks(self, raise_exception=None):
        """Enable all hooks; optionally override their raise_exception flag."""
        for hook in self.hooks:
            hook.enabled = True
            if raise_exception is not None:
                hook.raise_exception = raise_exception

    def disable_hooks(self):
        for hook in self.hooks:
            hook.enabled = False

    def _get_own_outputs(self):
        # subclass-specific selection of this extractor's entries from self.outputs
        raise NotImplementedError

    def extract(self, finalizer=None, clear_outputs=True):
        """Pool, finalize and return the captured features.

        :param finalizer: only "none" (skip the configured finalizer) is supported;
            any other non-None value raises NotImplementedError.
        :param clear_outputs: clear the shared capture dict afterwards (default True).
        """
        assert len(self.outputs) > 0, f"no outputs for {self}"
        features = [
            self.pooling(output, ctx=self.static_ctx)
            if self.pooling is not None else
            output
            for output in self._get_own_outputs().values()
        ]
        if finalizer is not None:
            if finalizer == "none":
                pass
            else:
                raise NotImplementedError
        elif self.finalizer is not None:
            features = self.finalizer(features)
        if clear_outputs:
            self.outputs.clear()
        return features
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/extractors/base/__init__.py | src/models/extractors/base/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/extractors/base/without_extractor_hooks.py | src/models/extractors/base/without_extractor_hooks.py | from contextlib import contextmanager
from models.extractors.base.forward_hook import StopForwardException
@contextmanager
def without_extractor_hooks(extractors):
    """Temporarily disable the forward hooks of all given extractors.

    Hooks are re-enabled on normal exit and when a StopForwardException
    propagates (which is then re-raised). Other exceptions deliberately leave
    the hooks disabled, matching the original behavior.
    """
    assert isinstance(extractors, (tuple, list))
    for extractor in extractors:
        extractor.disable_hooks()
    try:
        yield
    except StopForwardException:
        for extractor in extractors:
            extractor.enable_hooks()
        raise
    for extractor in extractors:
        extractor.enable_hooks()
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/extractors/finalizers/__init__.py | src/models/extractors/finalizers/__init__.py | from utils.factory import instantiate
def finalizer_from_kwargs(kind, **kwargs):
    """Instantiate a finalizer class identified by its module/type name ``kind``."""
    module_name = f"models.extractors.finalizers.{kind}"
    return instantiate(module_names=[module_name], type_names=[kind], **kwargs)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/extractors/finalizers/concat_finalizer.py | src/models/extractors/finalizers/concat_finalizer.py | import torch
class ConcatFinalizer:
    """Finalizes a list of feature tensors by concatenating them along ``dim``.

    With ``dim=None`` (default) exactly one feature is expected and returned as-is.
    """

    def __init__(self, dim=None):
        self.dim = dim

    def __call__(self, features):
        if self.dim is not None:
            return torch.concat(features, dim=self.dim)
        # without a concat dim there has to be a single feature to pass through
        assert len(features) == 1
        return features[0]

    def __repr__(self):
        return str(self)

    def __str__(self):
        return type(self).__name__
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/decoders/cfd_gino.py | src/models/decoders/cfd_gino.py | import numpy as np
import torch
from models.base.single_model_base import SingleModelBase
from modules.gno.cfd_gino_grid_to_mesh import CfdGinoGridToMesh
class CfdGino(SingleModelBase):
    """GINO-style decoder: maps latent grid features back to mesh query points.

    Optionally applies a logarithmic soft-clamp to the decoded values.
    """

    def __init__(self, hidden_dim, clamp=None, clamp_mode="log", **kwargs):
        """
        :param hidden_dim: hidden dimension of the grid-to-mesh GNO.
        :param clamp: if set, threshold of the logarithmic output clamp.
        :param clamp_mode: only "log" is implemented.
        """
        super().__init__(**kwargs)
        self.clamp = clamp
        self.clamp_mode = clamp_mode
        # input_shape is (None, input_dim)
        _, input_dim = self.input_shape
        # output_shape is (None, output_dim)
        _, output_dim = self.output_shape
        self.grid_to_mesh = CfdGinoGridToMesh(
            input_dim=input_dim,
            hidden_dim=hidden_dim,
            output_dim=output_dim,
            ndim=self.static_ctx["ndim"],
        )

    def forward(self, x, grid_pos, query_pos, grid_to_query_edges):
        """Decode latent grid features ``x`` into per-query-point predictions.

        ``grid_pos`` is unused; kept for interface parity with sibling decoders.
        """
        x = self.grid_to_mesh(
            x,
            query_pos=query_pos,
            grid_to_query_edges=grid_to_query_edges,
        )
        if self.clamp is not None:
            assert self.clamp_mode == "log"
            # log1p(v) == log(1 + v) but numerically more accurate for small v
            x = torch.sign(x) * (self.clamp + torch.log1p(x.abs()) - np.log1p(self.clamp))
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/decoders/cfd_interpolate.py | src/models/decoders/cfd_interpolate.py | import einops
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from models.base.single_model_base import SingleModelBase
from modules.gno.cfd_interpolate_grid_to_mesh import CfdInterpolateGridToMesh
class CfdInterpolate(SingleModelBase):
    """Decoder that interpolates latent grid features at mesh query positions
    and maps them to output channels with a small MLP.

    Optionally applies a logarithmic soft-clamp to the predictions.
    """

    def __init__(self, dim=None, clamp=None, clamp_mode="log", **kwargs):
        """
        :param dim: hidden dim of the prediction MLP (default: latent input dim).
        :param clamp: if set, threshold of the logarithmic output clamp.
        :param clamp_mode: only "log" is implemented.
        """
        super().__init__(**kwargs)
        self.dim = dim
        self.clamp = clamp
        self.clamp_mode = clamp_mode
        # input_shape is (None, input_dim)
        _, input_dim = self.input_shape
        # output_shape is (None, output_dim)
        _, output_dim = self.output_shape
        self.grid_to_mesh = CfdInterpolateGridToMesh()
        hidden_dim = dim or input_dim
        self.pred = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, output_dim),
        )
        self.resolution = self.static_ctx["grid_resolution"]

    def forward(self, x, grid_pos, query_pos, grid_to_query_edges):
        """Interpolate grid latents at ``query_pos`` and predict output channels.

        ``grid_pos`` and ``grid_to_query_edges`` are unused; kept for interface
        parity with the GINO decoder.
        """
        # TODO variable query pos not supported
        assert len(query_pos) % len(x) == 0
        query_pos = einops.rearrange(
            query_pos,
            "(batch_size num_query_pos) ndim -> batch_size num_query_pos ndim",
            batch_size=len(x),
        )
        # dim-last without spatial -> dim-first with spatial
        x = x.reshape(len(x), *self.resolution, -1)
        # NOTE(review): spatial axes are transposed to (width, height) here —
        # presumably to match the interpolation module's coordinate convention; confirm
        x = einops.rearrange(x, "batch_size height width dim -> batch_size dim width height")
        x = self.grid_to_mesh(x, query_pos=query_pos)
        # predict
        x = self.pred(x)
        if self.clamp is not None:
            assert self.clamp_mode == "log"
            # log1p(v) == log(1 + v) but numerically more accurate for small v
            x = torch.sign(x) * (self.clamp + torch.log1p(x.abs()) - np.log1p(self.clamp))
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/decoders/rans_interpolated.py | src/models/decoders/rans_interpolated.py | import einops
import torch
import torch.nn.functional as F
from torch import nn
from models.base.single_model_base import SingleModelBase
class RansInterpolated(SingleModelBase):
    """Decoder that trilinearly samples a dense 3d latent grid at query positions
    (via ``F.grid_sample``) and maps the samples to output channels with an MLP."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # input_shape is (None, input_dim)
        _, input_dim = self.input_shape
        # output_shape is (None, output_dim)
        _, output_dim = self.output_shape
        # pred (input_dim is the hidden dimension of the latent model so no seperate hidden dim is needed)
        self.pred = nn.Sequential(
            nn.Linear(input_dim, input_dim),
            nn.GELU(),
            nn.Linear(input_dim, output_dim),
        )

    def forward(self, x, query_pos):
        """Sample grid latents at ``query_pos`` (normalized to [-1, 1]) and predict.

        :param x: dense latent, reshapeable to (batch_size, *grid_resolution, dim).
        :param query_pos: sparse positions of shape (batch_size * num_query_pos, ndim);
            all samples must share the same number of query positions.
        :return: sparse predictions of shape (batch_size * num_query_pos, output_dim).
        """
        assert torch.all(-1 <= query_pos)
        assert torch.all(query_pos <= 1)
        # dim last without spatial -> dim first with spatial
        x = x.reshape(len(x), *self.static_ctx["grid_resolution"], -1)
        x = einops.rearrange(x, "batch_size ... dim -> batch_size dim ...")
        # grid_sample requires 5d dense tensor
        # TODO should be implemented via padding in collator
        # derive the per-sample query count from the batch size instead of
        # hard-coding it -> supports any dataset with a uniform query count
        assert len(query_pos) % len(x) == 0
        query_pos = einops.rearrange(
            query_pos,
            "(batch_size num_query_pos) ndim -> batch_size num_query_pos 1 1 ndim",
            batch_size=len(x),
        )
        # interpolate to mesh
        # x.shape: (batch_size, dim, height, width, depth)
        # mesh_pos.shape: (batch_size, num_mesh_pos, 3)
        x_hat = F.grid_sample(input=x, grid=query_pos, align_corners=False)
        # to sparse tensor
        x_hat = einops.rearrange(x_hat, "batch_size dim num_query_pos 1 1 -> (batch_size num_query_pos) dim ")
        # predict
        x_hat = self.pred(x_hat)
        return x_hat
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/decoders/lagrangian_perceiver.py | src/models/decoders/lagrangian_perceiver.py | from functools import partial
import einops
import torch
from kappamodules.init import init_xavier_uniform_zero_bias
from kappamodules.layers import ContinuousSincosEmbed, LinearProjection
from kappamodules.transformer import PerceiverBlock, DitPerceiverBlock
from torch import nn
from torch_geometric.utils import unbatch
from models.base.single_model_base import SingleModelBase
class LagrangianPerceiver(SingleModelBase):
    """Perceiver decoder for Lagrangian (particle) data.

    Query tokens are created from a sin/cos embedding of particle positions and
    cross-attend to the latent tokens; predictions are re-flattened into a
    sparse (sum_of_points, channels) tensor via ``unbatch``.
    """

    def __init__(self, dim, num_attn_heads, **kwargs):
        """
        :param dim: perceiver latent dimension.
        :param num_attn_heads: number of cross-attention heads.
        """
        super().__init__(**kwargs)
        self.dim = dim
        self.num_attn_heads = num_attn_heads
        # input/output shape
        num_channels, _ = self.output_shape
        _, input_dim = self.input_shape
        ndim = self.data_container.get_dataset().metadata["dim"]
        # input projection
        self.proj = LinearProjection(input_dim, dim)
        # query tokens (create them from a positional embedding)
        self.pos_embed = ContinuousSincosEmbed(dim=dim, ndim=ndim)
        self.query_mlp = nn.Sequential(
            nn.Linear(dim, dim * 4),
            nn.GELU(),
            nn.Linear(dim * 4, dim * 4),
            nn.GELU(),
            nn.Linear(dim * 4, dim),
        )
        # perceiver
        # use a DiT-style conditioned block when a conditioner registered condition_dim
        if "condition_dim" in self.static_ctx:
            block_ctor = partial(DitPerceiverBlock, cond_dim=self.static_ctx["condition_dim"])
        else:
            block_ctor = PerceiverBlock
        self.perceiver = block_ctor(dim=dim, num_heads=num_attn_heads)
        self.pred = LinearProjection(dim, num_channels)

    def model_specific_initialization(self):
        self.query_mlp.apply(init_xavier_uniform_zero_bias)

    def forward(self, x, query_pos, unbatch_idx, unbatch_select, static_tokens=None, condition=None):
        """
        :param x: latent tokens of shape (batch_size, num_tokens, input_dim).
        :param query_pos: positions to decode at, shape (total_query_points, ndim).
        :param unbatch_idx: batch assignment used to split the flattened dense output.
        :param unbatch_select: indices of the unbatched segments to concatenate.
        :param static_tokens: unused; kept for interface compatibility.
        :param condition: optional conditioning vector for DiT-style blocks.
        """
        assert x.ndim == 3
        # input projection
        x = self.proj(x)
        # create query
        pos_embed = self.pos_embed(query_pos)
        query = self.query_mlp(pos_embed)
        # perceiver
        block_kwargs = {}
        if condition is not None:
            block_kwargs["cond"] = condition
        x = self.perceiver(q=query, kv=x, **block_kwargs)
        x = self.pred(x)
        # dense tensor (batch_size, max_num_points, dim) -> sparse tensor (batch_size * num_points, dim)
        x = einops.rearrange(x, "batch_size max_num_points dim -> (batch_size max_num_points) dim")
        unbatched = unbatch(x, batch=unbatch_idx)
        x = torch.concat([unbatched[i] for i in unbatch_select])
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/decoders/rans_gino.py | src/models/decoders/rans_gino.py | from models.base.single_model_base import SingleModelBase
from modules.gno.rans_gino_grid_to_mesh import RansGinoGridToMesh
class RansGino(SingleModelBase):
    """Thin wrapper around RansGinoGridToMesh decoding grid latents at mesh queries."""

    def __init__(self, dim, **kwargs):
        """
        :param dim: hidden dimension of the grid-to-mesh GNO.
        """
        super().__init__(**kwargs)
        # shapes are (None, feature_dim) -> only the feature dims are needed
        input_dim = self.input_shape[1]
        output_dim = self.output_shape[1]
        self.grid_to_mesh = RansGinoGridToMesh(
            input_dim=input_dim,
            hidden_dim=dim,
            output_dim=output_dim,
            ndim=self.static_ctx["ndim"],
        )

    def forward(self, x, query_pos, grid_to_query_edges):
        # delegate decoding entirely to the GNO module
        return self.grid_to_mesh(
            x,
            query_pos=query_pos,
            grid_to_query_edges=grid_to_query_edges,
        )
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/decoders/lagrangian_transformer_perceiver.py | src/models/decoders/lagrangian_transformer_perceiver.py | from functools import partial
import einops
import torch
from kappamodules.layers import ContinuousSincosEmbed, LinearProjection
from kappamodules.transformer import PerceiverBlock, DitPerceiverBlock
from kappamodules.vit import DitBlock, VitBlock
from torch import nn
from torch_geometric.utils import unbatch
from models.base.single_model_base import SingleModelBase
class LagrangianTransformerPerceiver(SingleModelBase):
    """Transformer blocks followed by a perceiver cross-attention decoder for particle data."""

    def __init__(
        self,
        dim,
        depth,
        num_attn_heads,
        init_weights="xavier_uniform",
        **kwargs,
    ):
        """
        :param dim: latent/transformer dimension.
        :param depth: number of transformer blocks before the perceiver.
        :param num_attn_heads: attention heads for both transformer and perceiver.
        :param init_weights: weight init scheme forwarded to the kappamodules blocks.
        """
        super().__init__(**kwargs)
        self.dim = dim
        self.depth = depth
        self.num_attn_heads = num_attn_heads
        self.init_weights = init_weights
        # input/output shape
        num_channels, _ = self.output_shape
        seqlen, input_dim = self.input_shape
        ndim = self.data_container.get_dataset().metadata["dim"]
        # input projection
        self.input_proj = LinearProjection(input_dim, dim)
        # blocks
        # use DiT-style conditioned blocks when a conditioner registered condition_dim
        if "condition_dim" in self.static_ctx:
            block_ctor = partial(DitBlock, cond_dim=self.static_ctx["condition_dim"])
        else:
            block_ctor = VitBlock
        self.blocks = nn.ModuleList([
            block_ctor(dim=dim, num_heads=num_attn_heads, init_weights=init_weights)
            for _ in range(self.depth)
        ])
        # query tokens (create them from a positional embedding)
        self.pos_embed = ContinuousSincosEmbed(dim=dim, ndim=ndim)
        self.query_mlp = nn.Sequential(
            LinearProjection(dim, dim * 4),
            nn.GELU(),
            LinearProjection(dim * 4, dim * 4),
            nn.GELU(),
            LinearProjection(dim * 4, dim),
        )
        # latent to pixels
        if "condition_dim" in self.static_ctx:
            block_ctor = partial(DitPerceiverBlock, cond_dim=self.static_ctx["condition_dim"])
        else:
            block_ctor = PerceiverBlock
        self.perceiver = block_ctor(dim=dim, num_heads=num_attn_heads, init_weights=init_weights)
        self.pred = LinearProjection(dim, num_channels)

    def forward(self, x, query_pos, unbatch_idx, unbatch_select, static_tokens=None, condition=None):
        """
        :param x: latent tokens of shape (batch_size, num_tokens, input_dim).
        :param query_pos: positions to decode at, shape (total_query_points, ndim).
        :param unbatch_idx: batch assignment used to split the flattened dense output.
        :param unbatch_select: indices of the unbatched segments to concatenate.
        :param static_tokens: unused; kept for interface compatibility.
        :param condition: optional conditioning vector for DiT-style blocks.
        """
        assert x.ndim == 3
        # input projection
        x = self.input_proj(x)
        # apply blocks
        block_kwargs = {}
        if condition is not None:
            block_kwargs["cond"] = condition
        for blk in self.blocks:
            x = blk(x, **block_kwargs)
        # create query
        pos_embed = self.pos_embed(query_pos)
        query = self.query_mlp(pos_embed)
        # latent to pixels
        x = self.perceiver(q=query, kv=x, **block_kwargs)
        x = self.pred(x)
        # dense tensor (batch_size, max_num_points, dim) -> sparse tensor (batch_size * num_points, dim)
        x = einops.rearrange(x, "batch_size max_num_points dim -> (batch_size max_num_points) dim")
        unbatched = unbatch(x, batch=unbatch_idx)
        x = torch.concat([unbatched[i] for i in unbatch_select])
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/decoders/cfd_transformer_perceiver.py | src/models/decoders/cfd_transformer_perceiver.py | from functools import partial
import einops
import numpy as np
import torch
from kappamodules.layers import ContinuousSincosEmbed, LinearProjection
from kappamodules.transformer import PerceiverBlock, DitPerceiverBlock, DitBlock
from kappamodules.vit import VitBlock
from torch import nn
from torch_geometric.utils import unbatch
from models.base.single_model_base import SingleModelBase
class CfdTransformerPerceiver(SingleModelBase):
    """Transformer blocks followed by a perceiver cross-attention decoder for CFD data.

    Optionally uses a different dimension/head count for the perceiver part and
    applies a logarithmic soft-clamp to the predictions.
    """

    def __init__(
        self,
        dim,
        depth,
        num_attn_heads,
        use_last_norm=False,
        perc_dim=None,
        perc_num_attn_heads=None,
        drop_path_rate=0.0,
        clamp=None,
        clamp_mode="log",
        init_weights="xavier_uniform",
        **kwargs,
    ):
        """
        :param dim: transformer dimension.
        :param depth: number of transformer blocks before the perceiver.
        :param num_attn_heads: attention heads of the transformer blocks.
        :param use_last_norm: apply a LayerNorm before the prediction head.
        :param perc_dim: perceiver dimension (default: dim).
        :param perc_num_attn_heads: perceiver heads (default: num_attn_heads).
        :param drop_path_rate: stochastic depth rate for the transformer blocks.
        :param clamp: if set, threshold of the logarithmic output clamp.
        :param clamp_mode: only "log" is implemented.
        :param init_weights: weight init scheme forwarded to the kappamodules blocks.
        """
        super().__init__(**kwargs)
        perc_dim = perc_dim or dim
        perc_num_attn_heads = perc_num_attn_heads or num_attn_heads
        self.dim = dim
        self.depth = depth
        self.num_attn_heads = num_attn_heads
        self.perc_dim = perc_dim
        self.perc_num_attn_heads = perc_num_attn_heads
        self.use_last_norm = use_last_norm
        self.drop_path_rate = drop_path_rate
        self.clamp = clamp
        self.clamp_mode = clamp_mode
        self.init_weights = init_weights
        # input/output shape
        _, num_channels = self.output_shape
        seqlen, input_dim = self.input_shape
        # input projection
        self.input_proj = LinearProjection(input_dim, dim, init_weights=init_weights)
        # blocks
        # use DiT-style conditioned blocks when a conditioner registered condition_dim
        if "condition_dim" in self.static_ctx:
            block_ctor = partial(DitBlock, cond_dim=self.static_ctx["condition_dim"])
        else:
            block_ctor = VitBlock
        self.blocks = nn.ModuleList([
            block_ctor(dim=dim, num_heads=num_attn_heads, init_weights=init_weights, drop_path=drop_path_rate)
            for _ in range(self.depth)
        ])
        # query tokens (create them from a positional embedding)
        self.pos_embed = ContinuousSincosEmbed(dim=perc_dim, ndim=2)
        self.query_mlp = nn.Sequential(
            LinearProjection(perc_dim, perc_dim * 4, init_weights=init_weights),
            nn.GELU(),
            LinearProjection(perc_dim * 4, perc_dim * 4, init_weights=init_weights),
            nn.GELU(),
            LinearProjection(perc_dim * 4, perc_dim, init_weights=init_weights),
        )
        # latent to pixels
        self.perc_proj = LinearProjection(dim, perc_dim, init_weights=init_weights)
        if "condition_dim" in self.static_ctx:
            block_ctor = partial(DitPerceiverBlock, cond_dim=self.static_ctx["condition_dim"])
        else:
            block_ctor = PerceiverBlock
        self.perceiver = block_ctor(dim=perc_dim, num_heads=perc_num_attn_heads, init_weights=init_weights)
        self.norm = nn.LayerNorm(perc_dim, eps=1e-6) if use_last_norm else nn.Identity()
        self.pred = LinearProjection(perc_dim, num_channels, init_weights=init_weights)

    def forward(self, x, query_pos, unbatch_idx, unbatch_select, static_tokens=None, condition=None):
        """
        :param x: latent tokens of shape (batch_size, num_tokens, input_dim).
        :param query_pos: positions to decode at, shape (total_query_points, 2).
        :param unbatch_idx: batch assignment used to split the flattened dense output.
        :param unbatch_select: indices of the unbatched segments to concatenate.
        :param static_tokens: unused; kept for interface compatibility.
        :param condition: optional conditioning vector for DiT-style blocks.
        """
        assert x.ndim == 3
        # input projection
        x = self.input_proj(x)
        # apply blocks
        block_kwargs = {}
        if condition is not None:
            block_kwargs["cond"] = condition
        for blk in self.blocks:
            x = blk(x, **block_kwargs)
        # create query
        pos_embed = self.pos_embed(query_pos)
        query = self.query_mlp(pos_embed)
        # latent to pixels
        x = self.perc_proj(x)
        x = self.perceiver(q=query, kv=x, **block_kwargs)
        x = self.norm(x)
        x = self.pred(x)
        if self.clamp is not None:
            assert self.clamp_mode == "log"
            # log1p(v) == log(1 + v) but numerically more accurate for small v
            x = torch.sign(x) * (self.clamp + torch.log1p(x.abs()) - np.log1p(self.clamp))
        # dense tensor (batch_size, max_num_points, dim) -> sparse tensor (batch_size * num_points, dim)
        x = einops.rearrange(x, "batch_size max_num_points dim -> (batch_size max_num_points) dim")
        unbatched = unbatch(x, batch=unbatch_idx)
        x = torch.concat([unbatched[i] for i in unbatch_select])
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/decoders/rans_gino_latent.py | src/models/decoders/rans_gino_latent.py | import einops
from models.base.single_model_base import SingleModelBase
from modules.gno.rans_gino_latent_to_mesh import RansGinoLatentToMesh
class RansGinoLatent(SingleModelBase):
    """Decodes latent tokens directly at mesh query positions via RansGinoLatentToMesh."""

    def __init__(self, hidden_dim, bottleneck_dim=None, pred_hidden_dim=None, **kwargs):
        """
        :param hidden_dim: hidden dimension of the latent-to-mesh GNO.
        :param bottleneck_dim: optional bottleneck dimension forwarded to the GNO.
        :param pred_hidden_dim: optional prediction-head dimension forwarded to the GNO.
        """
        super().__init__(**kwargs)
        # shapes are (None, feature_dim) -> only the feature dims are needed
        input_dim = self.input_shape[1]
        output_dim = self.output_shape[1]
        self.grid_to_mesh = RansGinoLatentToMesh(
            input_dim=input_dim,
            hidden_dim=hidden_dim,
            output_dim=output_dim,
            ndim=self.static_ctx["ndim"],
            bottleneck_dim=bottleneck_dim,
            pred_hidden_dim=pred_hidden_dim,
        )

    def forward(self, x, query_pos, unbatch_idx, unbatch_select):
        # unbatch_idx/unbatch_select are part of the shared decoder interface but unused here
        return self.grid_to_mesh(x, query_pos=query_pos)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/decoders/rans_perceiver.py | src/models/decoders/rans_perceiver.py | import einops
import torch
from kappamodules.layers import ContinuousSincosEmbed, LinearProjection
from kappamodules.transformer import PerceiverBlock, Mlp
from torch_geometric.utils import unbatch
from torch import nn
from models.base.single_model_base import SingleModelBase
class RansPerceiver(SingleModelBase):
    """Single perceiver cross-attention decoder for RANS data.

    Queries are built from a sin/cos embedding of the query positions and
    cross-attend to the latent tokens; results are re-flattened into a sparse
    (sum_of_points, output_dim) tensor via ``unbatch``.
    """

    def __init__(
        self,
        dim,
        num_attn_heads,
        init_weights="xavier_uniform",
        init_last_proj_zero=False,
        use_last_norm=False,
        **kwargs,
    ):
        """
        :param dim: perceiver dimension.
        :param num_attn_heads: number of cross-attention heads.
        :param init_weights: weight init scheme for the kappamodules layers.
        :param init_last_proj_zero: zero-init the perceiver's last projection.
        :param use_last_norm: apply a LayerNorm before the prediction head.
        """
        super().__init__(**kwargs)
        self.dim = dim
        self.num_attn_heads = num_attn_heads
        self.use_last_norm = use_last_norm
        # input projection
        _, input_dim = self.input_shape
        self.proj = LinearProjection(input_dim, dim, init_weights=init_weights)
        # query tokens (create them from a positional embedding)
        self.pos_embed = ContinuousSincosEmbed(dim=dim, ndim=self.static_ctx["ndim"])
        self.query_mlp = Mlp(in_dim=dim, hidden_dim=dim, init_weights=init_weights)
        # latent to pixels
        self.perceiver = PerceiverBlock(
            dim=dim,
            num_heads=num_attn_heads,
            init_last_proj_zero=init_last_proj_zero,
            init_weights=init_weights,
        )
        _, output_dim = self.output_shape
        self.norm = nn.LayerNorm(dim, eps=1e-6) if use_last_norm else nn.Identity()
        self.pred = LinearProjection(dim, output_dim, init_weights=init_weights)

    def forward(self, x, query_pos, unbatch_idx, unbatch_select):
        """
        :param x: latent tokens of shape (batch_size, num_tokens, input_dim).
        :param query_pos: positions to decode at, shape (total_query_points, ndim).
        :param unbatch_idx: batch assignment used to split the flattened dense output.
        :param unbatch_select: indices of the unbatched segments to concatenate.
        """
        # input projection
        x = self.proj(x)
        # create query
        query_pos_embed = self.pos_embed(query_pos)
        query = self.query_mlp(query_pos_embed)
        # decode
        x = self.perceiver(q=query, kv=x)
        x = self.norm(x)
        x = self.pred(x)
        # dense tensor (batch_size, max_num_points, dim) -> sparse tensor (batch_size * num_points, dim)
        x = einops.rearrange(x, "batch_size max_num_points dim -> (batch_size max_num_points) dim")
        unbatched = unbatch(x, batch=unbatch_idx)
        x = torch.concat([unbatched[i] for i in unbatch_select])
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/decoders/__init__.py | src/models/decoders/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/decoders/rans_gino_og.py | src/models/decoders/rans_gino_og.py | from models.base.single_model_base import SingleModelBase
from modules.gno.rans_gino_grid_to_mesh_og import RansGinoGridToMeshOg
class RansGinoOg(SingleModelBase):
    """Wrapper around the original-style GINO grid-to-mesh decoder."""

    def __init__(self, hidden_dim, bottleneck_dim, **kwargs):
        """
        :param hidden_dim: hidden dimension of the grid-to-mesh GNO.
        :param bottleneck_dim: bottleneck dimension of the grid-to-mesh GNO.
        """
        super().__init__(**kwargs)
        # shapes are (None, feature_dim) -> only the feature dims are needed
        input_dim = self.input_shape[1]
        output_dim = self.output_shape[1]
        self.grid_to_mesh = RansGinoGridToMeshOg(
            input_dim=input_dim,
            hidden_dim=hidden_dim,
            bottleneck_dim=bottleneck_dim,
            output_dim=output_dim,
        )

    def forward(self, x, query_pos, grid_to_query_edges):
        # delegate decoding entirely to the GNO module
        return self.grid_to_mesh(
            x,
            query_pos=query_pos,
            grid_to_query_edges=grid_to_query_edges,
        )
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/decoders/cfd_perceiver.py | src/models/decoders/cfd_perceiver.py | from functools import partial
import einops
import torch
from kappamodules.layers import ContinuousSincosEmbed, LinearProjection
from kappamodules.transformer import PerceiverBlock, DitPerceiverBlock
from torch import nn
from torch_geometric.utils import unbatch
from models.base.single_model_base import SingleModelBase
class CfdPerceiver(SingleModelBase):
    """Single perceiver cross-attention decoder for 2d CFD data.

    Queries are built from a 2d sin/cos embedding of the query positions and
    cross-attend to the latent tokens; results are re-flattened into a sparse
    (sum_of_points, channels) tensor via ``unbatch``.
    """

    def __init__(self, dim, num_attn_heads, init_weights="xavier_uniform", **kwargs):
        """
        :param dim: perceiver dimension.
        :param num_attn_heads: number of cross-attention heads.
        :param init_weights: weight init scheme for the perceiver block.
        """
        super().__init__(**kwargs)
        self.dim = dim
        self.num_attn_heads = num_attn_heads
        self.init_weights = init_weights
        # input/output shape
        seqlen, input_dim = self.input_shape
        # input projection
        self.proj = LinearProjection(input_dim, dim)
        # query tokens (create them from a positional embedding)
        self.pos_embed = ContinuousSincosEmbed(dim=dim, ndim=2)
        self.query_mlp = nn.Sequential(
            LinearProjection(dim, dim * 4),
            nn.GELU(),
            LinearProjection(dim * 4, dim * 4),
            nn.GELU(),
            LinearProjection(dim * 4, dim),
        )
        # perceiver
        # use a DiT-style conditioned block when a conditioner registered condition_dim
        if "condition_dim" in self.static_ctx:
            block_ctor = partial(DitPerceiverBlock, cond_dim=self.static_ctx["condition_dim"])
        else:
            block_ctor = PerceiverBlock
        self.perceiver = block_ctor(dim=dim, num_heads=num_attn_heads, init_weights=init_weights)
        _, num_channels = self.output_shape
        self.pred = LinearProjection(dim, num_channels)

    def forward(self, x, query_pos, unbatch_idx, unbatch_select, static_tokens=None, condition=None):
        """
        :param x: latent tokens of shape (batch_size, num_tokens, input_dim).
        :param query_pos: positions to decode at, shape (total_query_points, 2).
        :param unbatch_idx: batch assignment used to split the flattened dense output.
        :param unbatch_select: indices of the unbatched segments to concatenate.
        :param static_tokens: unused; kept for interface compatibility.
        :param condition: optional conditioning vector for DiT-style blocks.
        """
        assert x.ndim == 3
        # input projection
        x = self.proj(x)
        # create query
        pos_embed = self.pos_embed(query_pos)
        query = self.query_mlp(pos_embed)
        # perceiver
        block_kwargs = {}
        if condition is not None:
            block_kwargs["cond"] = condition
        x = self.perceiver(q=query, kv=x, **block_kwargs)
        x = self.pred(x)
        # dense tensor (batch_size, max_num_points, dim) -> sparse tensor (batch_size * num_points, dim)
        x = einops.rearrange(x, "batch_size max_num_points dim -> (batch_size max_num_points) dim")
        unbatched = unbatch(x, batch=unbatch_idx)
        x = torch.concat([unbatched[i] for i in unbatch_select])
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/models/decoders/lagrangian_perceiver_gaussian_sincos_pos.py | src/models/decoders/lagrangian_perceiver_gaussian_sincos_pos.py | from functools import partial
import einops
import torch
from kappamodules.init import init_xavier_uniform_zero_bias
from kappamodules.layers import ContinuousSincosEmbed, LinearProjection
from kappamodules.transformer import PerceiverBlock, DitPerceiverBlock
from torch import nn
from torch_geometric.utils import unbatch
from models.base.single_model_base import SingleModelBase
class LagrangianPerceiverGaussianSincosPos(SingleModelBase):
    """Perceiver decoder for particle data with random-Fourier-feature queries.

    Instead of a deterministic sincos embedding, query positions are embedded
    with Gaussian random Fourier features (Tancik et al., arXiv:2006.10739)
    using a frequency matrix drawn once with a fixed seed.
    """

    def __init__(self, dim, num_attn_heads, positional_std, **kwargs):
        """
        :param dim: perceiver dimension (also the Fourier-feature dimension).
        :param num_attn_heads: number of cross-attention heads.
        :param positional_std: std of the Gaussian frequency matrix ``b``.
        """
        super().__init__(**kwargs)
        self.dim = dim
        self.num_attn_heads = num_attn_heads
        # input/output shape
        num_channels, _ = self.output_shape
        _, input_dim = self.input_shape
        ndim = self.data_container.get_dataset().metadata["dim"]
        # input projection
        self.proj = LinearProjection(input_dim, dim)
        # query tokens (create them from a positional embedding)
        # Try with other method from https://arxiv.org/pdf/2006.10739.pdf
        # fixed seed -> the random frequency matrix is reproducible across runs
        generator = torch.Generator().manual_seed(42)
        self.register_buffer(
            "b",
            torch.normal(mean=torch.zeros(dim // 2, ndim), std=positional_std, generator=generator)
        )
        self.query_mlp = nn.Sequential(
            nn.Linear(dim, dim * 4),
            nn.GELU(),
            nn.Linear(dim * 4, dim * 4),
            nn.GELU(),
            nn.Linear(dim * 4, dim),
        )
        # perceiver
        # use a DiT-style conditioned block when a conditioner registered condition_dim
        if "condition_dim" in self.static_ctx:
            block_ctor = partial(DitPerceiverBlock, cond_dim=self.static_ctx["condition_dim"])
        else:
            block_ctor = PerceiverBlock
        self.perceiver = block_ctor(dim=dim, num_heads=num_attn_heads)
        self.pred = LinearProjection(dim, num_channels)

    def model_specific_initialization(self):
        self.query_mlp.apply(init_xavier_uniform_zero_bias)

    def forward(self, x, query_pos, unbatch_idx, unbatch_select, static_tokens=None, condition=None):
        """
        :param x: latent tokens of shape (batch_size, num_tokens, input_dim).
        :param query_pos: positions to decode at, shape (total_query_points, ndim).
        :param unbatch_idx: batch assignment used to split the flattened dense output.
        :param unbatch_select: indices of the unbatched segments to concatenate.
        :param static_tokens: unused; kept for interface compatibility.
        :param condition: optional conditioning vector for DiT-style blocks.
        """
        assert x.ndim == 3
        # input projection
        x = self.proj(x)
        # create query
        pos_embed = self.pos_embed(query_pos)
        query = self.query_mlp(pos_embed)
        # perceiver
        block_kwargs = {}
        if condition is not None:
            block_kwargs["cond"] = condition
        x = self.perceiver(q=query, kv=x, **block_kwargs)
        x = self.pred(x)
        # dense tensor (batch_size, max_num_points, dim) -> sparse tensor (batch_size * num_points, dim)
        x = einops.rearrange(x, "batch_size max_num_points dim -> (batch_size max_num_points) dim")
        unbatched = unbatch(x, batch=unbatch_idx)
        x = torch.concat([unbatched[i] for i in unbatch_select])
        return x

    def pos_embed(self, pos):
        # random Fourier features: [cos(2*pi*pos@b.T), sin(2*pi*pos@b.T)] -> dim features
        return torch.concat([torch.cos(2.0 * torch.pi * pos @ self.b.T),
                             torch.sin(2.0 * torch.pi * pos @ self.b.T)], dim=-1)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/distributed/run.py | src/distributed/run.py | import logging
import os
import platform
import psutil
import torch
import yaml
from torch.distributed import init_process_group, destroy_process_group, barrier
from torch.multiprocessing import spawn
from .config import (
is_managed,
get_world_size_from_env,
get_rank_from_env,
get_local_rank,
is_custom_managed_run,
is_mpi_managed_run,
get_nodes,
)
def run_managed(accelerator, devices, main_single):
    """Entry point for managed runs (SLURM/MPI/custom launcher).

    Pins one device per process via CUDA_VISIBLE_DEVICES and dispatches to
    single- or multi-process execution depending on the job's world size.

    Args:
        accelerator: "cpu" or "gpu".
        devices: None to use all devices of the job; otherwise a single-device
            spec (e.g. when the same script should run once per GPU).
        main_single: callable invoked as main_single(device=...) in each process.
    """
    assert is_managed()
    if accelerator == "gpu":
        # custom managed run doesn't set CUDA_VISIBLE_DEVICES
        if is_custom_managed_run() or is_mpi_managed_run() or len(os.environ["CUDA_VISIBLE_DEVICES"].split(",")) > 1:
            os.environ["CUDA_VISIBLE_DEVICES"] = str(get_local_rank())
        _check_single_device_visible()
    if devices is None:
        world_size = get_world_size_from_env()
        if world_size == 1:
            _run_managed_singleprocess(accelerator, main_single)
        else:
            # use all GPUs for training
            _run_managed_multiprocess(accelerator, main_single)
    else:
        # use single GPU (e.g. run_folder from every GPU)
        world_size, device_ids = _parse_devices(accelerator, devices)
        assert world_size == 1 and len(device_ids) == 1
        _log_device_info(accelerator, device_ids)
        _run_managed_singleprocess(accelerator, main_single)
def _run_managed_singleprocess(accelerator, main_single):
    """Run a managed (e.g. SLURM) job as a single process on the given accelerator."""
    # f-prefix removed: the message has no placeholders (pylint W1309)
    logging.info("running single process slurm training")
    device = _accelerator_to_device(accelerator)
    main_single(device=device)
def _run_managed_multiprocess(accelerator, main_single):
    """Join the job-wide process group (rank/world_size from env) and run main_single."""
    # setup MASTER_ADDR & MASTER_PORT
    assert "MASTER_ADDR" in os.environ
    assert "MASTER_PORT" in os.environ
    # get config from env variables
    world_size = get_world_size_from_env()
    rank = get_rank_from_env()
    # init process group
    logging.info(
        f"initializing rank={rank} local_rank={get_local_rank()} "
        f"nodes={get_nodes()} hostname={platform.uname().node} "
        f"master_addr={os.environ['MASTER_ADDR']} master_port={os.environ['MASTER_PORT']} "
        f"(waiting for all {world_size} processes to connect)"
    )
    init_process_group(backend=get_backend(accelerator), init_method="env://", world_size=world_size, rank=rank)
    barrier()
    # start main_single
    device = _accelerator_to_device(accelerator)
    main_single(device=device)
    destroy_process_group()
def run_single_or_multiprocess(accelerator, devices, main_single, master_port, mig_devices):
    """Run an unmanaged single-node job; spawns one process per requested device.

    Args:
        accelerator: "cpu" or "gpu".
        devices: device spec string, e.g. "0" or "0,1,2,3" (must not be None).
        main_single: callable invoked as main_single(device=...) in each process.
        master_port: first port to try for the rendezvous of spawned processes.
        mig_devices: optional {hostname: {gpu_index: mig_id}} mapping for MIG hosts.
    """
    logging.info("------------------")
    # single node run
    assert devices is not None
    world_size, device_ids = _parse_devices(accelerator, devices, mig_devices)
    if world_size == 1:
        # single process
        logging.info(f"running single process training")
        if accelerator == "gpu":
            os.environ["CUDA_VISIBLE_DEVICES"] = device_ids[0]
            _check_single_device_visible()
        _log_device_info(accelerator, device_ids)
        device = _accelerator_to_device(accelerator)
        main_single(device=device)
    else:
        # spawn multi process training
        logging.info(
            f"running multi process training on {world_size} processes "
            f"(devices={devices} host={platform.uname().node})"
        )
        master_port = _get_free_port(master_port)
        logging.info(f"master port: {master_port}")
        # dont log device info as this would load torch on device0 and block the VRAM required for this
        # log_device_info(accelerator, device_ids)
        args = (accelerator, device_ids, master_port, world_size, main_single)
        spawn(_run_multiprocess, nprocs=world_size, args=args)
def _run_multiprocess(rank, accelerator, device_ids, master_port, world_size, main_single):
    """Per-process entry point for torch.multiprocessing.spawn (single node only)."""
    # currently only single node is supported
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = str(master_port)
    if accelerator == "gpu":
        # pin exactly one GPU to this process before any torch.cuda call
        os.environ["CUDA_VISIBLE_DEVICES"] = device_ids[rank]
        _check_single_device_visible()
    from torch.distributed import init_process_group, destroy_process_group
    init_process_group(
        backend=get_backend(accelerator, device_ids),
        init_method="env://",
        world_size=world_size,
        rank=rank,
    )
    device = _accelerator_to_device(accelerator)
    main_single(device=device)
    destroy_process_group()
def get_backend(accelerator, device_ids=None):
    """Select the torch.distributed backend for the given accelerator and device ids."""
    # gloo is recommended for cpu multiprocessing
    # https://pytorch.org/docs/stable/distributed.html#which-backend-to-use
    if accelerator == "cpu":
        return "gloo"
    # windows doesn't support nccl (I think)
    if os.name == "nt":
        return "gloo"
    # MIG ids are not plain integers and MIG doesn't support NCCL
    if device_ids is not None:
        for candidate in device_ids:
            try:
                int(candidate)
            except ValueError:
                return "gloo"
    # nccl is recommended for gpu multiprocessing
    # https://pytorch.org/docs/stable/distributed.html#which-backend-to-use
    return "nccl"
def _get_free_port(start_port):
    """Return the first localhost port >= start_port that is not currently in use."""
    occupied = set()
    for connection in psutil.net_connections():
        # collect both local and remote localhost endpoints
        if connection.laddr.ip == "127.0.0.1":
            occupied.add(connection.laddr.port)
        if len(connection.raddr) > 0 and connection.raddr.ip == "127.0.0.1":
            occupied.add(connection.raddr.port)
    free_ports = (port for port in range(start_port, 65535) if port not in occupied)
    port = next(free_ports, None)
    if port is None:
        raise ValueError(f"all ports starting from {start_port} are taken")
    return port
def _parse_devices(accelerator, devices, mig_devices=None):
    """Parse a device spec ("0" or "0,1,2") into (world_size, device_id_strings).

    mig_devices: optional {hostname: {gpu_index: mig_id}} mapping used to translate
    plain GPU indices into MIG device ids on known hosts.
    """
    try:
        # single process
        device_ids = [int(devices)]
    except ValueError:
        # multi process
        device_ids = yaml.safe_load(f"[{devices}]")
        msg = f"invalid devices specification '{devices}' (specify multiple devices like this '0,1,2,3')"
        assert all(isinstance(d, int) for d in device_ids), msg
    # os.environ["CUDA_VISIBLE_DEVICES"] requires string
    device_ids = [str(device_id) for device_id in device_ids]
    if accelerator == "gpu" and mig_devices is not None:
        # map to MIG device ids
        hostname = platform.uname().node
        if hostname in mig_devices:
            for i in range(len(device_ids)):
                device_id = int(device_ids[i])
                if device_id in mig_devices[hostname]:
                    mig_device_id = mig_devices[hostname][device_id]
                    device_ids[i] = mig_device_id
                    logging.info(f"device_id is MIG device with id {mig_device_id}")
    return len(device_ids), device_ids
def _check_single_device_visible():
    """Sanity check that at most one CUDA device is visible to this process."""
    assert "CUDA_VISIBLE_DEVICES" in os.environ
    visible_device_count = torch.cuda.device_count()
    # 0 visible devices also passes (e.g. CPU-only runs with the env var set)
    assert visible_device_count <= 1, os.environ
def _log_device_info(accelerator, device_ids):
    """Log a human-readable name for every device this run will use."""
    if accelerator == "cpu":
        for i in range(len(device_ids)):
            logging.info(f"device {i}: cpu")
    elif accelerator == "gpu":
        # retrieve device names via nvidia-smi because CUDA_VISIBLE_DEVICES needs to be set before calling anything
        # in torch.cuda -> only 1 visible device
        all_devices = os.popen("nvidia-smi --query-gpu=gpu_name --format=csv,noheader").read().strip().split("\n")
        for i, device_id in enumerate(device_ids):
            try:
                device_id = int(device_id)
                logging.info(f"device {i}: {all_devices[device_id]} (id={device_id})")
            except ValueError:
                # MIG device ids are not plain integers
                logging.info(f"using MIG device")
    else:
        raise NotImplementedError
def _accelerator_to_device(accelerator):
    """Translate an accelerator name ("cpu"/"gpu") into a torch device string."""
    device_by_accelerator = {"cpu": "cpu", "gpu": "cuda"}
    if accelerator in device_by_accelerator:
        return device_by_accelerator[accelerator]
    raise NotImplementedError
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/distributed/gather.py | src/distributed/gather.py | import einops
import torch
import torch.distributed as dist
from .config import is_distributed, get_world_size
from .functional.all_gather_grad_autograd import AllGatherGradAutograd
from .functional.all_gather_grad_overwrite import AllGatherGradOverwrite
def get_device_and_bfloat16supported():
    """Return (device for communication tensors, whether bfloat16 can be communicated)."""
    # gloo cpu -> okay
    # gloo cuda -> okay (although https://pytorch.org/docs/stable/distributed.html says it isn't supported)
    # nccl cpu -> fail (but gloo anyway recommended for cpu multiprocessing)
    # nccl cuda -> okay
    # bfloat16 cpu -> fail
    if not is_distributed():
        return torch.device("cpu"), True
    if dist.get_backend() == "nccl":
        return torch.device("cuda"), True
    if dist.get_backend() == "gloo":
        return torch.device("cpu"), False
    raise NotImplementedError
def get_bool_gather_supported():
    """Whether the active backend can all_gather bool tensors (nccl yes, gloo no)."""
    if not is_distributed():
        return True
    backend = dist.get_backend()
    if backend == "nccl":
        return True
    if backend == "gloo":
        return False
    raise NotImplementedError
def _prepare_tensor(x):
    """
    prepare for distributed communication
    - wrap primitive types into tensors
    - push tensor onto supported device
    - convert bool to float if bool gathering is not supported
    - call .contiguous if x is not in a contiguous memory block

    Returns (tensor, original_device, to_bool); to_bool marks tensors that were
    converted from bool and must be converted back after communication.
    """
    device, bfloat16_supported = get_device_and_bfloat16supported()
    # I think this doesn't work in some configuration not sure in which though
    # note in which configuration and convert back to bool after gather
    if isinstance(x, bool):
        # x = torch.tensor(x, dtype=torch.float32, device=device)
        # og_device = torch.device("cpu")
        raise RuntimeError
    if isinstance(x, (float, int, list, tuple)):
        # primitives are returned on cpu after communication
        x = torch.tensor(x, device=device)
        og_device = torch.device("cpu")
    else:
        og_device = x.device
    # gloo can't communicate bfloat16 -> communicate as float32
    if x.dtype == torch.bfloat16 and not bfloat16_supported:
        x = x.type(torch.float32)
    # bool gather is not supported in some settings
    if x.dtype == torch.bool and not get_bool_gather_supported():
        x = x.type(torch.float32)
        to_bool = True
    else:
        to_bool = False
    if not x.is_contiguous():
        x = x.contiguous()
    return x.to(device), og_device, to_bool
def _all_gather_grad(x, all_gather_fn, batch_dim=0):
    """Shared impl: prepare x, gather with all_gather_fn, concat along batch_dim and restore device/dtype."""
    x, og_device, to_bool = _prepare_tensor(x)
    if is_distributed():
        result = all_gather_fn(x)
        if result[0].ndim == 0:
            # scalars can't be concatenated
            result = [r.unsqueeze(0) for r in result]
        result = torch.concat(result, dim=batch_dim).to(og_device)
    else:
        result = _all_gather_nondistributed(x, og_device)
    if to_bool:
        # bool was communicated as float -> convert back
        result = result.bool()
    return result
def all_gather_grad(x, batch_dim=0):
    """Differentiable all_gather (autograd variant); concatenates along batch_dim."""
    return _all_gather_grad(x, AllGatherGradAutograd.apply, batch_dim=batch_dim)
    # return _all_gather_grad(x, AllGatherGradOverwrite.apply)
def all_gather_grad_autograd(x):
    """Differentiable all_gather via a custom torch.autograd.Function."""
    return _all_gather_grad(x, AllGatherGradAutograd.apply)
def all_gather_grad_overwrite(x):
    """Differentiable all_gather via the overwrite trick (local slot keeps its grad_fn)."""
    return _all_gather_grad(x, AllGatherGradOverwrite.apply)
@torch.no_grad()
def all_gather_nograd(x):
    """Gather x from all ranks without gradients.

    Returns the concatenation on x's original device; scalars are gathered into
    a 1D tensor. In the non-distributed case this only adds a dim to scalars.
    """
    x, og_device, to_bool = _prepare_tensor(x)
    if is_distributed():
        result = [torch.zeros_like(x) for _ in range(get_world_size())]
        dist.all_gather(result, x)
        if result[0].ndim == 0:
            # scalars can't be concatenated -> stack them into a 1D tensor
            # (torch.stack instead of torch.tensor(list_of_tensors), which copies
            # through python and warns on newer torch versions)
            result = torch.stack(result).to(og_device)
        else:
            result = torch.concat(result).to(og_device)
    else:
        result = _all_gather_nondistributed(x, og_device).detach()
    if to_bool:
        # bool was communicated as float -> convert back
        result = result.bool()
    return result
def _all_gather_nondistributed(x, og_device):
    """Non-distributed fallback: mimic gather semantics on a single process."""
    if x.ndim == 0:
        # distributed gather adds a dimension to scalars
        x = x.unsqueeze(0)
    return x.to(og_device)
def all_gather_nograd_clipped(x, max_length):
    """Gather x from all ranks, restore the original sample order and clip the padding."""
    result = all_gather_nograd(x)
    if is_distributed():
        # gathering changes the order of the samples -> correct them
        # most of the time this is not needed (e.g. for metrics) as the order is not important
        # for things like predictions it does matter
        # 1 GPU: [0, 1, 2, 3, 4, 5, 6, 7]
        # 2 GPU: [0, 2, 4, 6] + [1, 3, 5, 7]
        # 4 GPU: [0, 4] + [1, 5] + [2, 6] + [3, 7]
        result = einops.rearrange(
            result,
            "(num_gpus len_per_gpu) ... -> (len_per_gpu num_gpus) ...",
            num_gpus=get_world_size(),
        )
        # DistributedSampler pads the dataset to give every GPU the same amount of samples
        return result[:max_length]
    return result
def all_reduce_sum_nograd(x):
    """Sum x across all ranks without tracking gradients."""
    with torch.no_grad():
        return all_reduce_sum_grad(x)
def all_reduce_sum_grad(x):
    """Sum x across all ranks (differentiable); no-op when not distributed."""
    x, og_device, to_bool = _prepare_tensor(x)
    if is_distributed():
        # all_reduce is differentiable https://github.com/pytorch/pytorch/issues/58005
        dist.all_reduce(x, op=dist.ReduceOp.SUM)
    x = x.to(og_device)
    if to_bool:
        x = x.bool()
    return x
def all_reduce_mean_grad(x):
    """Average x across all ranks (differentiable); no-op when not distributed."""
    x, og_device, to_bool = _prepare_tensor(x)
    if is_distributed():
        x = all_reduce_sum_grad(x) / get_world_size()
    x = x.to(og_device)
    if to_bool:
        # NOTE(review): dividing then casting back to bool loses information -- verify callers
        x = x.bool()
    return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/distributed/config.py | src/distributed/config.py | import logging
import os
import torch.distributed as dist
def is_slurm_run():
    """True when running inside a non-interactive SLURM job."""
    # interactive sessions define SLURM vars but shouldn't count as managed runs
    if os.environ.get("SLURM_JOB_NAME", None) == "interactive":
        return False
    required = ("SLURM_PROCID", "SLURM_NTASKS_PER_NODE")
    return all(key in os.environ for key in required)
def is_mpi_managed_run():
    """True when all OpenMPI launcher env variables are present."""
    required = (
        "OMPI_COMM_WORLD_SIZE",
        "OMPI_COMM_WORLD_RANK",
        "OMPI_COMM_WORLD_LOCAL_RANK",
        "OMPI_MCA_orte_num_nodes",
    )
    return all(key in os.environ for key in required)
def is_custom_managed_run():
    """True when all CUSTOM_* env variables of the custom launcher are present."""
    required = ("CUSTOM_NUM_NODES", "CUSTOM_WORLD_SIZE", "CUSTOM_RANK", "CUSTOM_LOCAL_RANK")
    return all(key in os.environ for key in required)
def is_distributed():
    """True when torch.distributed is available and a process group is initialized."""
    return dist.is_available() and dist.is_initialized()
def get_rank():
    """Global rank of this process (0 when not distributed)."""
    if is_distributed():
        return dist.get_rank()
    return 0
def get_world_size():
    """Number of processes in the group (1 when not distributed)."""
    if is_distributed():
        return dist.get_world_size()
    return 1
def get_local_rank():
    """Rank of this process within its node.

    Checks launcher-specific env variables (SLURM, custom, OpenMPI) in order
    and falls back to the global rank.
    """
    # single node with one task per node -> local rank equals global rank
    if get_nodes() == 1 and os.environ.get("SLURM_TASKS_PER_NODE") == "1":
        return get_rank()
    if "SLURM_LOCALID" in os.environ:
        return int(os.environ["SLURM_LOCALID"])
    if "CUSTOM_LOCAL_RANK" in os.environ:
        return int(os.environ["CUSTOM_LOCAL_RANK"])
    if "OMPI_COMM_WORLD_LOCAL_RANK" in os.environ:
        return int(os.environ["OMPI_COMM_WORLD_LOCAL_RANK"])
    return get_rank()
def is_data_rank0():
    """True when this process is responsible for copying/preparing data."""
    # data has to be copied in 2 cases
    # - is_local_rank0: single-gpu, multi-gpu, multi-gpu SLURM
    #   - process with is_local_rank0 copies the data
    #   - other processes have to wait for the copying to finish via barrier
    # - get_world_size == 1: SLURM runs that are not using multi-gpu require every process to copy data
    #   - no guarantee that the processes use the same dataset
    #   - avoid race conditions
    return is_local_rank0() or get_world_size() == 1
def is_managed():
    """True when launched by any supported job manager (SLURM, custom, OpenMPI)."""
    return is_slurm_run() or is_custom_managed_run() or is_mpi_managed_run()
def get_nodes():
    """Number of nodes in this job, read from SLURM/custom/MPI env vars (default 1)."""
    # first matching launcher variable wins
    for key in ("SLURM_JOB_NUM_NODES", "CUSTOM_NUM_NODES", "OMPI_MCA_orte_num_nodes"):
        if key in os.environ:
            return int(os.environ[key])
    return 1
def get_world_size_from_env():
    """Total number of processes of this job, derived from launcher env variables."""
    if "SLURM_NTASKS_PER_NODE" in os.environ:
        # SLURM exposes per-node task count -> multiply by node count
        return get_nodes() * int(os.environ["SLURM_NTASKS_PER_NODE"])
    if "CUSTOM_WORLD_SIZE" in os.environ:
        return int(os.environ["CUSTOM_WORLD_SIZE"])
    if "OMPI_COMM_WORLD_SIZE" in os.environ:
        return int(os.environ["OMPI_COMM_WORLD_SIZE"])
    raise NotImplementedError
def get_rank_from_env():
    """Global rank of this process, read from SLURM/custom/MPI env vars."""
    for key in ("SLURM_PROCID", "CUSTOM_RANK", "OMPI_COMM_WORLD_RANK"):
        if key in os.environ:
            return int(os.environ[key])
    raise NotImplementedError
def is_rank0():
    """True for the single global rank-0 process."""
    return get_rank() == 0
def is_local_rank0():
    """True for the rank-0 process of each node."""
    return get_local_rank() == 0
def barrier():
    """Synchronize all processes; no-op when not distributed."""
    if is_distributed():
        dist.barrier()
def is_own_work(idx):
    """Round-robin work split: True when item idx is assigned to this rank."""
    return idx % get_world_size() == get_rank()
def get_backend():
    """Name of the active torch.distributed backend (None when not distributed)."""
    if is_distributed():
        return dist.get_backend()
    return None
def log_distributed_config():
    """Log rank/world-size/backend and (if present) the scheduler job id."""
    logging.info("------------------")
    logging.info("DIST CONFIG")
    logging.info(f"rank: {get_rank()}")
    logging.info(f"local_rank: {get_local_rank()}")
    logging.info(f"world_size: {get_world_size()}")
    logging.info(f"nodes: {get_nodes()}")
    logging.info(f"backend: {get_backend()}")
    if "SLURM_JOB_ID" in os.environ:
        logging.info(f"slurm job id: {os.environ['SLURM_JOB_ID']}")
    if "PBS_JOBID" in os.environ:
        logging.info(f"pbs job id: {os.environ['PBS_JOBID']}")
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/distributed/__init__.py | src/distributed/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/distributed/functional/all_gather_grad_overwrite.py | src/distributed/functional/all_gather_grad_overwrite.py | import torch
import torch.distributed as dist
# https://discuss.pytorch.org/t/dist-all-gather-and-gradient-preservation-in-multi-gpu-training/120696/2
class AllGatherGradOverwrite:
    """all_gather that keeps gradients by re-inserting the local tensor into the output."""

    @staticmethod
    def apply(x):
        # gather detached copies from all ranks, then overwrite this rank's slot
        # with the original tensor so autograd flows through the local contribution
        output = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
        dist.all_gather(output, x)
        output[dist.get_rank()] = x
        return output
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/distributed/functional/all_gather_grad_autograd.py | src/distributed/functional/all_gather_grad_autograd.py | import torch
import torch.distributed as dist
# noinspection PyAbstractClass
class AllGatherGradAutograd(torch.autograd.Function):
    """
    Gathers tensors from all process and supports backward propagation
    for the gradients across processes.
    """

    # noinspection PyMethodOverriding
    @staticmethod
    def forward(ctx, x):
        # gather one copy of x from every rank
        output = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
        dist.all_gather(output, x)
        # without the tuple call here, the gradient is not propagated for some reason
        # (therefore the backward is then not called)
        return tuple(output)

    @staticmethod
    def backward(ctx, *grads):
        # sum each output slot's gradient across ranks, then return the slice
        # that corresponds to this rank's input
        all_gradients = torch.stack(grads)
        dist.all_reduce(all_gradients, op=dist.ReduceOp.SUM)
        grad_out = all_gradients[dist.get_rank()]
        return grad_out
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/distributed/functional/__init__.py | src/distributed/functional/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/object_from_kwargs.py | src/utils/object_from_kwargs.py | def objects_from_kwargs(kwargs):
if kwargs is None:
return {}
result = {}
for k, v in kwargs.items():
if isinstance(v, dict):
# if no factory type is supplied -> derive from key
# e.g. mask generators are usually unique and defined via the "mask_generator" key
# which can be easily converted to the factory
factory_type = v.pop("factory_type", key_to_factory_type(k))
if factory_type is not None and "kind" in v:
result[k] = object_from_kwargs(factory_type=factory_type, **v)
else:
result[k] = objects_from_kwargs(v)
else:
result[k] = v
return result
def key_to_factory_type(key):
    """Derive a factory type from a kwargs key; only mask generators are recognized."""
    return "mask_generator" if "mask_generator" in key else None
def object_from_kwargs(factory_type, **kwargs):
    # factory dispatch by type is intentionally unsupported here; callers that
    # reach this path are expected to fail loudly
    raise NotImplementedError
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/collator_from_kwargs.py | src/utils/collator_from_kwargs.py | from utils.factory import instantiate
def collator_from_kwargs(kind, **kwargs):
    """Instantiate a collator by kind, searching project collators first, then kappadata."""
    return instantiate(
        module_names=[
            f"datasets.collators.{kind}",
            f"kappadata.collators.{kind}",
        ],
        type_names=[kind],
        **kwargs,
    )
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/num_worker_heuristic.py | src/utils/num_worker_heuristic.py | import logging
import os
from distributed.config import is_slurm_run
def get_fair_cpu_count():
    """Number of dataloader workers this process may use without starving other devices.

    Divides the node's CPUs among its devices; under SLURM the division is
    already per task, so the task's cpu budget (minus one for the main process)
    is used instead.
    """
    total_cpu_count = get_total_cpu_count()
    if total_cpu_count == 0:
        return 0
    device_count = _get_device_count()
    # divide cpus among devices
    if is_slurm_run():
        # slurm already divides cpus among tasks -> assert that
        tasks_per_node = int(os.environ["SLURM_NTASKS_PER_NODE"])
        if "SLURM_CPUS_PER_TASK" in os.environ:
            cpus_per_task = int(os.environ["SLURM_CPUS_PER_TASK"])
        elif "SLURM_CPUS_ON_NODE" in os.environ:
            cpus_on_node = int(os.environ["SLURM_CPUS_ON_NODE"])
            cpus_per_task = cpus_on_node // tasks_per_node
        else:
            raise NotImplementedError
        # currently only 1 GPU per task is supported
        assert device_count == tasks_per_node
        if total_cpu_count != cpus_per_task:
            logging.warning(f"total_cpu_count != cpus_per_task ({total_cpu_count} != {cpus_per_task})")
        # 16worker MAE-B 512bs/A100 -> 0.05 data time
        # 24worker MAE-B 512bs/A100 -> 0.00 data time
        return cpus_per_task - 1
    return int(total_cpu_count / device_count)
def _get_device_count():
    """Count compute devices on this node via `nvidia-smi -L`, counting MIG slices individually (min 1)."""
    # get number of devices per node (srun nvidia-smi shows all devices not only the ones assigned for the srun task)
    # (if no GPU is available this returns "")
    # normal example output:
    # GPU 0: NVIDIA A100-PCIE-40GB (UUID: GPU-...)
    # GPU 1: NVIDIA A100-PCIE-40GB (UUID: GPU-...)
    # MIG example output:
    # GPU 0: NVIDIA A100-PCIE-40GB (UUID: GPU-...)
    #   MIG 3g.20gb     Device  0: (UUID: MIG-...)
    #   MIG 3g.20gb     Device  1: (UUID: MIG-...)
    # GPU 1: NVIDIA A100-PCIE-40GB (UUID: GPU-...)
    #   MIG 3g.20gb     Device  0: (UUID: MIG-...)
    #   MIG 3g.20gb     Device  1: (UUID: MIG-...)
    nvidia_smi_lines = os.popen("nvidia-smi -L").read().strip().split("\n")
    # create dict from GPU to MIG devices:
    # {
    #   GPU0: 1 # normal GPU
    #   GPU1: 2 # split into 2 MIG devices
    # }
    devices_per_gpu = {}
    devices_counter = 0
    for i, line in enumerate(nvidia_smi_lines):
        if "MIG" in line:
            devices_counter += 1
        # skip the first GPU header when it is followed by MIG lines
        if "GPU" in line and i == 0 and len(nvidia_smi_lines) > 1 and "MIG" in nvidia_smi_lines[i + 1]:
            continue
        # flush the counter on each new GPU header (and on the last line)
        if "GPU" in line or i == len(nvidia_smi_lines) - 1:
            if devices_counter == 0:
                devices_counter = 1  # normal GPU -> single device
            devices_per_gpu[len(devices_per_gpu)] = devices_counter
            devices_counter = 0
    # count devices
    devices_on_node = sum(devices_per_gpu.values())
    if devices_on_node == 0:
        devices_on_node = 1
    return devices_on_node
def get_total_cpu_count():
    """CPUs available to this process (0 on small windows dev machines)."""
    if os.name == "nt":
        # windows has no sched_getaffinity -> fall back to total cpu count
        count = os.cpu_count()
        assert count is not None
        # don't bother spawning workers on dev machines
        return 0 if count <= 16 else count
    return len(os.sched_getaffinity(0))
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/env_utils.py | src/utils/env_utils.py | import os
def env_flag_is_true(key):
    """Return True when env variable *key* is set to a truthy flag value.

    Accepts "true", "t" and "1" in any capitalization (backward compatible with
    the previous exact-match values "true"/"True"/"T"); unset keys or any other
    value return False.
    """
    value = os.environ.get(key)
    if value is None:
        return False
    return value.strip().lower() in ("true", "t", "1")
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/amp_utils.py | src/utils/amp_utils.py | import logging
import torch
from torch.cuda.amp import GradScaler
FLOAT32_ALIASES = ["float32", 32]
FLOAT16_ALIASES = ["float16", 16]
BFLOAT16_ALIASES = ["bfloat16", "bf16"]
VALID_PRECISIONS = FLOAT32_ALIASES + FLOAT16_ALIASES + BFLOAT16_ALIASES
def get_supported_precision(desired_precision, device, backup_precision=None):
    """Resolve the desired precision to a torch dtype supported on *device*.

    Falls back from bfloat16 to float16 (only when given as backup_precision)
    or float32, and from float16 to bfloat16 or float32, logging each fallback.
    """
    assert desired_precision in VALID_PRECISIONS
    if backup_precision is not None:
        assert backup_precision in VALID_PRECISIONS
    if desired_precision in FLOAT32_ALIASES:
        return torch.float32
    # normalize aliases to canonical names
    if desired_precision in FLOAT16_ALIASES:
        desired_precision = "float16"
    if desired_precision in BFLOAT16_ALIASES:
        desired_precision = "bfloat16"
    if desired_precision == "bfloat16":
        if is_bfloat16_compatible(device):
            return torch.bfloat16
        else:
            # use float16 if it is defined via backup_precision
            if backup_precision is not None and backup_precision in FLOAT16_ALIASES:
                if is_float16_compatible(device):
                    logging.info("bfloat16 not supported -> using float16")
                    return torch.float16
                else:
                    logging.info("bfloat16/float16 not supported -> using float32")
                    return torch.float32
            # use float32 as default (float16 can lead to under-/overflows)
            logging.info("bfloat16 not supported -> using float32")
            return torch.float32
    if desired_precision == "float16":
        if is_float16_compatible(device):
            return torch.float16
        else:
            # currently cpu only supports bfloat16
            if is_bfloat16_compatible(device):
                logging.info(f"float16 not supported -> using bfloat16")
                return torch.bfloat16
            logging.info(f"float16/bfloat16 not supported -> using float32")
            return torch.float32
def _is_compatible(device, dtype):
    """Probe whether torch.autocast accepts *dtype* on *device*."""
    try:
        # entering (and immediately leaving) the context raises RuntimeError
        # when the device/dtype combination is unsupported
        with torch.autocast(device_type=str(device), dtype=dtype):
            compatible = True
    except RuntimeError:
        compatible = False
    return compatible
def is_bfloat16_compatible(device):
    """Whether autocast supports bfloat16 on *device*."""
    return _is_compatible(device, torch.bfloat16)
def is_float16_compatible(device):
    """Whether autocast supports float16 on *device*."""
    return _is_compatible(device, torch.float16)
class NoopContext:
    """Context manager that does nothing (stand-in when autocast is disabled)."""

    def __enter__(self):
        return None

    def __exit__(self, *args, **kwargs):
        return None
class NoopGradScaler:
    """Drop-in replacement for torch's GradScaler when no loss scaling is needed."""

    @staticmethod
    def scale(loss):
        # no scaling -> the loss passes through unchanged
        return loss

    @staticmethod
    def unscale_(optimizer):
        # nothing was scaled -> nothing to unscale
        pass

    @staticmethod
    def step(optimizer, *args, **kwargs):
        # step the optimizer directly (no inf/nan check required without scaling)
        optimizer.step(*args, **kwargs)

    @staticmethod
    def update():
        # no scale factor to update
        pass
def get_grad_scaler_and_autocast_context(precision, device):
    """Return (grad_scaler, autocast_context) matching the given torch dtype."""
    if precision == torch.float32:
        # full precision -> neither autocast nor loss scaling
        return NoopGradScaler(), NoopContext()
    if precision == torch.bfloat16:
        # GradScaler shouldn't be necessary (https://github.com/pytorch/pytorch/issues/36169)
        return NoopGradScaler(), torch.autocast(str(device), dtype=precision)
    elif precision == torch.float16:
        return GradScaler(), torch.autocast(str(device), dtype=precision)
    raise NotImplementedError
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/png_utils.py | src/utils/png_utils.py | import einops
import numpy as np
import png
import torch
from PIL import Image
from torchvision.transforms.functional import to_tensor, to_pil_image
VIRIDIS_PALETTE = [
68, 1, 84,
68, 2, 86,
69, 4, 87,
69, 5, 89,
70, 7, 90,
70, 8, 92,
70, 10, 93,
70, 11, 94,
71, 13, 96,
71, 14, 97,
71, 16, 99,
71, 17, 100,
71, 19, 101,
72, 20, 103,
72, 22, 104,
72, 23, 105,
72, 24, 106,
72, 26, 108,
72, 27, 109,
72, 28, 110,
72, 29, 111,
72, 31, 112,
72, 32, 113,
72, 33, 115,
72, 35, 116,
72, 36, 117,
72, 37, 118,
72, 38, 119,
72, 40, 120,
72, 41, 121,
71, 42, 122,
71, 44, 122,
71, 45, 123,
71, 46, 124,
71, 47, 125,
70, 48, 126,
70, 50, 126,
70, 51, 127,
70, 52, 128,
69, 53, 129,
69, 55, 129,
69, 56, 130,
68, 57, 131,
68, 58, 131,
68, 59, 132,
67, 61, 132,
67, 62, 133,
66, 63, 133,
66, 64, 134,
66, 65, 134,
65, 66, 135,
65, 68, 135,
64, 69, 136,
64, 70, 136,
63, 71, 136,
63, 72, 137,
62, 73, 137,
62, 74, 137,
62, 76, 138,
61, 77, 138,
61, 78, 138,
60, 79, 138,
60, 80, 139,
59, 81, 139,
59, 82, 139,
58, 83, 139,
58, 84, 140,
57, 85, 140,
57, 86, 140,
56, 88, 140,
56, 89, 140,
55, 90, 140,
55, 91, 141,
54, 92, 141,
54, 93, 141,
53, 94, 141,
53, 95, 141,
52, 96, 141,
52, 97, 141,
51, 98, 141,
51, 99, 141,
50, 100, 142,
50, 101, 142,
49, 102, 142,
49, 103, 142,
49, 104, 142,
48, 105, 142,
48, 106, 142,
47, 107, 142,
47, 108, 142,
46, 109, 142,
46, 110, 142,
46, 111, 142,
45, 112, 142,
45, 113, 142,
44, 113, 142,
44, 114, 142,
44, 115, 142,
43, 116, 142,
43, 117, 142,
42, 118, 142,
42, 119, 142,
42, 120, 142,
41, 121, 142,
41, 122, 142,
41, 123, 142,
40, 124, 142,
40, 125, 142,
39, 126, 142,
39, 127, 142,
39, 128, 142,
38, 129, 142,
38, 130, 142,
38, 131, 142,
37, 131, 142,
37, 132, 142,
37, 133, 142,
36, 134, 142,
36, 135, 142,
35, 136, 142,
35, 137, 142,
35, 138, 141,
34, 139, 141,
34, 140, 141,
34, 141, 141,
33, 142, 141,
33, 143, 141,
33, 144, 141,
33, 145, 140,
32, 146, 140,
31, 147, 140,
32, 147, 140,
31, 148, 140,
31, 149, 139,
31, 150, 139,
31, 151, 139,
31, 152, 139,
31, 153, 138,
31, 154, 138,
30, 155, 138,
30, 156, 137,
30, 157, 137,
31, 158, 137,
31, 159, 136,
31, 160, 136,
31, 161, 136,
31, 161, 135,
31, 162, 135,
32, 163, 134,
32, 164, 134,
33, 165, 133,
33, 166, 133,
34, 167, 133,
34, 168, 132,
35, 169, 131,
36, 170, 131,
37, 171, 130,
37, 172, 130,
38, 173, 129,
39, 173, 129,
40, 174, 128,
41, 175, 127,
42, 176, 127,
44, 177, 126,
45, 178, 125,
46, 179, 124,
47, 180, 124,
49, 181, 123,
50, 182, 122,
52, 182, 121,
53, 183, 121,
55, 184, 120,
56, 185, 119,
58, 186, 118,
59, 187, 117,
61, 188, 116,
63, 188, 115,
64, 189, 114,
66, 190, 113,
68, 191, 112,
70, 192, 111,
72, 193, 110,
74, 193, 109,
76, 194, 108,
78, 195, 107,
80, 196, 106,
82, 197, 105,
84, 197, 104,
86, 198, 103,
88, 199, 101,
90, 200, 100,
92, 200, 99,
94, 201, 98,
96, 202, 96,
99, 203, 95,
101, 203, 94,
103, 204, 92,
105, 205, 91,
108, 205, 90,
110, 206, 88,
112, 207, 87,
115, 208, 86,
117, 208, 84,
119, 209, 83,
122, 209, 81,
124, 210, 80,
127, 211, 78,
129, 211, 77,
132, 212, 75,
134, 213, 73,
137, 213, 72,
139, 214, 70,
142, 214, 69,
144, 215, 67,
147, 215, 65,
149, 216, 64,
152, 216, 62,
155, 217, 60,
157, 217, 59,
160, 218, 57,
162, 218, 55,
165, 219, 54,
168, 219, 52,
170, 220, 50,
173, 220, 48,
176, 221, 47,
178, 221, 45,
181, 222, 43,
184, 222, 41,
186, 222, 40,
189, 223, 38,
192, 223, 37,
194, 223, 35,
197, 224, 33,
200, 224, 32,
202, 225, 31,
205, 225, 29,
208, 225, 28,
210, 226, 27,
213, 226, 26,
216, 226, 25,
218, 227, 25,
221, 227, 24,
223, 227, 24,
226, 228, 24,
229, 228, 25,
231, 228, 25,
234, 229, 26,
236, 229, 27,
239, 229, 28,
241, 229, 29,
244, 230, 30,
246, 230, 32,
248, 230, 33,
251, 231, 35,
253, 231, 37,
]
VIRIDIS_NP = np.array(VIRIDIS_PALETTE, dtype=np.uint8).reshape(256, 3) / 255
def png_loader_full(path):
    """Load a png file as a channels-first float tensor in [0, 1]."""
    with open(path, "rb") as f:
        return to_tensor(Image.open(f))
def png_loader_viridis(path):
    """Load a greyscale png and colorize it with the viridis palette -> (3, h, w) tensor."""
    # intensities in [0, 255] index into the 256-entry viridis lookup table
    img = (png_loader_full(path) * 255).squeeze(0).long().numpy()
    rgb = np.take(VIRIDIS_NP, img, axis=0)
    return einops.rearrange(torch.from_numpy(rgb), "h w c -> c h w")
def png_loader_full_with_info(path):
    """Load a png as a (1, h, w) tensor in [0, 1] together with the png metadata dict."""
    # might also be possible with Image.open (is possibly faster)
    r = png.Reader(filename=str(path))
    w, h, reader, info = r.read()
    data = []
    for row in reader:
        data.append(torch.frombuffer(row, dtype=torch.int8))
    data = torch.stack(data)
    return data.unsqueeze(0) / 255, info
def png_writer_greyscale(img, path):
    """Write a (1, h, w) tensor as a greyscale png."""
    assert img.ndim == 3 and img.shape[0] == 1
    to_pil_image(img, mode="L").save(path)
def png_writer_viridis(img, path):
    """Write a (1, h, w) tensor as a viridis-colorized palette png."""
    assert img.ndim == 3 and img.shape[0] == 1
    img = to_pil_image(img, mode="L")
    # greyscale values become indices into the viridis palette
    img.putpalette(VIRIDIS_PALETTE)
    img.save(path)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/loss_utils.py | src/utils/loss_utils.py | import torch
def apply_reduction(tensor, reduction="mean"):
    """Reduce *tensor* according to *reduction* ("mean", "mean_per_sample", "none"/None)."""
    # bool tensors can't be averaged -> cast to float first
    if tensor.dtype == torch.bool:
        tensor = tensor.float()
    if reduction == "mean":
        return tensor.mean()
    if reduction == "mean_per_sample":
        # average over everything except the batch dimension
        return tensor.flatten(start_dim=1).mean(dim=1) if tensor.ndim > 1 else tensor
    if reduction in (None, "none"):
        return tensor
    raise NotImplementedError
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/system_info.py | src/utils/system_info.py | import logging
import os
import platform
import shlex
import sys
from pathlib import Path
from distributed.config import get_rank, get_local_rank
from utils.logging_util import log_from_all_ranks
from utils.num_worker_heuristic import get_total_cpu_count
def get_cli_command():
    """Reconstruct the shell command used to launch this script."""
    # https://stackoverflow.com/questions/37658154/get-command-line-arguments-as-string
    script_name = Path(sys.argv[0]).name
    quoted_args = " ".join(shlex.quote(arg) for arg in sys.argv[1:])
    return f"python {script_name} {quoted_args}"
def get_installed_cuda_version():
    """Parse the CUDA version from `nvidia-smi` output (None when unavailable)."""
    marker = "CUDA Version: "
    for line in os.popen("nvidia-smi").read().strip().split("\n"):
        if "CUDA Version:" in line:
            start = line.index(marker) + len(marker)
            # drop the trailing border character of the nvidia-smi table
            return line[start:-1].strip()
    return None
def log_system_info():
    """Log host/OS/CUDA info, the current git commit/tag and per-rank process info."""
    logging.info("------------------")
    logging.info("SYSTEM INFO")
    logging.info(f"host name: {platform.uname().node}")
    logging.info(f"OS: {platform.platform()}")
    logging.info(f"OS version: {platform.version()}")
    cuda_version = get_installed_cuda_version()
    if cuda_version is not None:
        logging.info(f"CUDA version: {cuda_version}")
    # print hash of latest git commit (git describe or similar stuff is a bit ugly because it would require the
    # git.exe path to be added in path as conda/python do something with the path and don't use the system
    # PATH variable by default)
    git_hash_file = Path(".git") / "FETCH_HEAD"
    if git_hash_file.exists():
        with open(git_hash_file) as f:
            lines = f.readlines()
            if len(lines) == 0:
                # this happened when I didn't have internet
                logging.warning(f".git/FETCH_HEAD has no content")
            else:
                git_hash = lines[0][:40]
                logging.info(f"current commit hash: {git_hash}")
            git_tag = os.popen("git describe --abbrev=0").read().strip()
            logging.info(f"latest git tag: {git_tag}")
    else:
        logging.warning("could not retrieve current git commit hash from ./.git/FETCH_HEAD")
    with log_from_all_ranks():
        logging.info(
            f"initialized process rank={get_rank()} local_rank={get_local_rank()} pid={os.getpid()} "
            f"hostname={platform.uname().node}"
        )
    logging.info(f"total_cpu_count: {get_total_cpu_count()}")
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/data_container.py | src/utils/data_container.py | import logging
import kappadata as kd
import torch
from kappadata.collators import KDComposeCollator, KDSingleCollatorWrapper
from kappadata.samplers import (
RandomSampler,
DistributedSampler,
SemiSampler,
SequentialSampler,
WeightedSampler,
ClassBalancedSampler,
)
from kappadata.wrappers import ModeWrapper, SubsetWrapper, ShuffleWrapper
from distributed.config import is_distributed
from providers.config_providers.noop_config_provider import NoopConfigProvider
from utils.num_worker_heuristic import get_total_cpu_count, get_fair_cpu_count
from utils.seed import get_random_int
class DataContainer:
    """Owns all datasets of a run and builds samplers/dataloaders for them.

    Datasets are passed as keyword arguments; if none of them is named "train",
    the first registered dataset additionally doubles as the "train" dataset.
    """

    def __init__(
        self,
        num_workers=None,
        max_num_workers=None,
        pin_memory=None,
        prefetch_factor=2,
        config_provider=None,
        seed=None,
        **datasets,
    ):
        """
        :param num_workers: dataloader worker count; None -> derived via get_fair_cpu_count()
        :param max_num_workers: optional upper bound applied to the worker count
        :param pin_memory: dataloader pin_memory flag; None -> True
        :param prefetch_factor: dataloader prefetch_factor
        :param config_provider: receives dataloader properties (e.g. for wandb); None -> no-op provider
        :param seed: optional seed for the torch.Generator used for sampling
        :param datasets: name -> dataset mapping
        """
        self.logger = logging.getLogger(type(self).__name__)
        self.num_workers = num_workers
        self.max_num_workers = max_num_workers
        self.pin_memory = pin_memory
        self.prefetch_factor = prefetch_factor
        self.config_provider = config_provider or NoopConfigProvider()
        # dedicated generator so sampling is reproducible independent of the global RNG state
        self.generator = torch.Generator()
        if seed is not None:
            self.generator.manual_seed(seed)
        self.datasets = datasets
        self.persistent_loaders = {}
        # ensures dataloader properties are reported to the config provider only once
        self.added_to_config_provider = False
        # run_type can be adjusted by trainers
        self.run_type = "train"
        # set first dataset as "train" dataset in place of an actual dataset
        if "train" not in self.datasets:
            self.datasets["train"] = list(self.datasets.values())[0]

    def get_dataset(self, key=None, mode=None, max_size=None, shuffle_seed=None):
        """Fetch a dataset by name, optionally wrapped for shuffling/truncation/mode selection.

        :param key: dataset name; None -> first registered dataset
        :param mode: ModeWrapper mode string; when given, a (dataset, collator) tuple is
            returned instead of just the dataset
        :param max_size: optionally truncate to the first max_size samples
        :param shuffle_seed: optionally shuffle deterministically before truncating
        :return: dataset, or (dataset, collator) when mode is not None
        """
        key = key or list(self.datasets.keys())[0]
        dataset = self.datasets[key]
        if shuffle_seed is not None:
            dataset = ShuffleWrapper(dataset=dataset, seed=shuffle_seed)
        if max_size is not None:
            dataset = SubsetWrapper(dataset, end_index=max_size)
        if mode is not None:
            dataset = ModeWrapper(dataset=dataset, mode=mode, return_ctx=True)
            # wrap/compose the dataset's collators so they can be passed to the dataloader
            if len(dataset.collators) == 1:
                collator = KDSingleCollatorWrapper(
                    collator=dataset.collators[0],
                    dataset_mode=dataset.mode,
                    return_ctx=dataset.return_ctx,
                )
            elif len(dataset.collators) > 1:
                collator = KDComposeCollator(
                    collators=dataset.collators,
                    dataset_mode=dataset.mode,
                    return_ctx=dataset.return_ctx,
                )
            else:
                collator = None
            return dataset, collator
        return dataset

    def get_main_sampler(
        self,
        train_dataset,
        num_repeats=1,
        shuffle=True,
        num_unlabeled_per_labeled=None,
        weighted_size=None,
        samples_per_class=None,
        getall_item="class",
    ):
        """Build the sampler for the train dataset.

        The (mutually exclusive) options select the sampler type:
        - samples_per_class -> ClassBalancedSampler
        - weighted_size -> WeightedSampler
        - num_unlabeled_per_labeled -> SemiSampler
        - otherwise Distributed-/Random-/SequentialSampler depending on setup
        """
        # TODO port to kind + kwargs instead of this if branching solution
        if samples_per_class is not None:
            assert num_repeats == 1
            assert num_unlabeled_per_labeled is None
            assert weighted_size is None
            return ClassBalancedSampler(
                dataset=train_dataset,
                samples_per_class=samples_per_class,
                shuffle=shuffle,
                getall_item=getall_item,
            )
        if weighted_size is not None:
            # weighted sampler
            assert num_repeats == 1 and shuffle and num_unlabeled_per_labeled is None
            self.logger.info(f"main_sampler: WeightedSampler(size={weighted_size})")
            return WeightedSampler(
                dataset=train_dataset,
                weights=train_dataset.get_sampler_weights(),
                size=weighted_size,
            )
        if num_unlabeled_per_labeled is not None:
            # semi-supervised sampler
            assert num_repeats == 1 and shuffle and weighted_size is None
            self.logger.info(f"main_sampler: SemiSampler(num_unlabeled_per_labeled={num_unlabeled_per_labeled})")
            seed = get_random_int(generator=self.generator)
            return SemiSampler(
                dataset=train_dataset,
                num_labeled=1,
                num_unlabeled=num_unlabeled_per_labeled,
                seed=seed,
            )
        if is_distributed():
            seed = get_random_int(generator=self.generator)
            assert num_unlabeled_per_labeled is None and weighted_size is None
            self.logger.info(f"main_sampler: DistributedSampler(num_repeats={num_repeats}, shuffle={shuffle})")
            # NOTE: drop_last is required as otherwise len(sampler) can be larger than len(dataset)
            # which results in unconsumed batches from InterleavedSampler
            return DistributedSampler(
                train_dataset,
                num_repeats=num_repeats,
                shuffle=shuffle,
                seed=seed,
                drop_last=True,
            )
        if shuffle:
            self.logger.info(f"main_sampler: RandomSampler(num_repeats={num_repeats})")
            return RandomSampler(train_dataset, num_repeats=num_repeats, generator=self.generator)
        else:
            self.logger.info(f"main_sampler: SequentialSampler")
            return SequentialSampler(train_dataset)

    def get_data_loader(
        self,
        main_sampler,
        main_collator,
        batch_size,
        epochs,
        updates,
        samples,
        configs,
        start_epoch=None,
    ):
        """Create a dataloader that interleaves the main sampler with additional configs.

        :param main_sampler: sampler over the train dataset
        :param main_collator: collator for the train dataset (may be None)
        :param batch_size: batch size passed to the InterleavedSampler
        :param epochs: training duration in epochs (forwarded to InterleavedSampler)
        :param updates: training duration in updates (forwarded to InterleavedSampler)
        :param samples: training duration in samples (forwarded to InterleavedSampler)
        :param configs: additional sampler configurations to interleave (e.g. for evaluation)
        :param start_epoch: epoch to resume from
        :return: the dataloader built by the InterleavedSampler
        """
        sampler = kd.InterleavedSampler(
            main_sampler=main_sampler,
            batch_size=batch_size,
            configs=configs,
            main_collator=main_collator,
            epochs=epochs,
            updates=updates,
            samples=samples,
            start_epoch=start_epoch,
        )
        # derive a fair worker count if not explicitly configured, then apply the optional cap
        if self.num_workers is None:
            num_workers = get_fair_cpu_count()
        else:
            num_workers = self.num_workers
        if self.max_num_workers is not None:
            num_workers = min(self.max_num_workers, num_workers)
        pin_memory = True if self.pin_memory is None else self.pin_memory
        loader = sampler.get_data_loader(
            num_workers=num_workers,
            pin_memory=pin_memory,
            prefetch_factor=self.prefetch_factor,
        )
        # log properties
        self.logger.info(
            f"created dataloader (batch_size={batch_size} num_workers={loader.num_workers} "
            f"pin_memory={loader.pin_memory} total_cpu_count={get_total_cpu_count()} "
            f"prefetch_factor={loader.prefetch_factor})"
        )
        self.logger.info(f"concatenated dataset properties:")
        for dataset in sampler.dataset.datasets:
            self.logger.info(f"- mode='{dataset.mode}' len={len(dataset)} root_dataset={dataset.root_dataset}")
        # add to wandb config
        if not self.added_to_config_provider:
            self.config_provider.update({
                f"dataloader/num_workers": loader.num_workers,
                f"dataloader/pin_memory": loader.pin_memory,
            })
            self.added_to_config_provider = True
        return loader

    def dispose(self):
        """Release resources held by all registered datasets."""
        for dataset in self.datasets.values():
            dataset.dispose()
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/logging_util.py | src/utils/logging_util.py | import logging
import sys
from collections import defaultdict
from contextlib import contextmanager
from distributed.config import is_rank0
def _add_handler(handler, prefix=""):
logger = logging.getLogger()
if prefix != "":
prefix = f"{prefix} "
handler.setFormatter(logging.Formatter(
fmt=f"%(asctime)s %(levelname).1s {prefix}%(message)s",
datefmt="%m-%d %H:%M:%S",
))
logger.handlers.append(handler)
return handler
def add_stdout_handler(prefix=""):
    """Attach a stdout stream handler to the root logger and enable INFO logging."""
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    _add_handler(logging.StreamHandler(stream=sys.stdout), prefix=prefix)
def add_global_handlers(log_file_uri):
    """Reset the root logger's handlers and attach stdout, optional file and counter handlers.

    Every rank gets a stdout handler (so non-rank0 processes can also log), but only
    rank0 logs at INFO level and writes to the log file.

    :param log_file_uri: path of the log file (rank0 only); None -> no file handler
    :return: the attached MessageCounter handler
    """
    root = logging.getLogger()
    root.handlers = []
    # add a stdout logger to all ranks to also allow non-rank0 processes to log to stdout
    add_stdout_handler()
    # add_stdout_handler sets level to logging.INFO
    if not is_rank0():
        # subprocesses log warnings to stderr --> logging.CRITICAL prevents this
        root.setLevel(logging.CRITICAL)
    elif log_file_uri is not None:
        _add_handler(logging.FileHandler(log_file_uri, mode="a"))
        logging.info(f"log file: {log_file_uri.as_posix()}")
    return _add_handler(MessageCounter())
@contextmanager
def log_from_all_ranks():
    """Context manager that temporarily enables INFO logging on every rank.

    On exit, the level is restored to INFO for rank0 and CRITICAL for all other
    ranks (matching add_global_handlers). The restore now runs in a finally block,
    so an exception inside the context can no longer leave non-rank0 processes
    logging at INFO level.
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    try:
        yield
    finally:
        level = logging.INFO if is_rank0() else logging.CRITICAL
        logger.setLevel(level)
class MessageCounter(logging.Handler):
    """Logging handler that counts emitted records per level (WARNING and above)."""

    def __init__(self):
        super().__init__()
        # records below this level are ignored
        self.min_level = logging.WARNING
        self.counts = defaultdict(int)

    def emit(self, record):
        """Count the record if its level is at least self.min_level."""
        if record.levelno < self.min_level:
            return
        self.counts[record.levelno] += 1

    def log(self):
        """Log a summary of how many warnings/errors were encountered."""
        logging.info("------------------")
        for severity in (logging.WARNING, logging.ERROR):
            logging.info(f"encountered {self.counts[severity]} {logging.getLevelName(severity).lower()}s")
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
class NoopTqdm:
    """Drop-in stand-in for tqdm that displays nothing.

    Iterates the wrapped iterable unchanged; any other attribute access
    (set_description, update, ...) resolves to a no-op method.
    """

    def __init__(self, iterable):
        self.iterable = iterable

    def __enter__(self):
        return self

    def __exit__(self, *_, **__):
        pass

    def noop(self, *_, **__):
        pass

    def __getattr__(self, item):
        # only called for attributes not found normally -> swallow any tqdm API call
        return self.noop

    def __iter__(self):
        return iter(self.iterable)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/wandb_utils.py | src/utils/wandb_utils.py | import logging
import os
import platform
from copy import deepcopy
import torch
import wandb
from configs.wandb_config import WandbConfig
from distributed.config import is_rank0, get_world_size, get_nodes
from providers.config_providers.noop_config_provider import NoopConfigProvider
from providers.config_providers.primitive_config_provider import PrimitiveConfigProvider
from providers.config_providers.wandb_config_provider import WandbConfigProvider
from providers.path_provider import PathProvider
from providers.summary_providers.noop_summary_provider import NoopSummaryProvider
from providers.summary_providers.primitive_summary_provider import PrimitiveSummaryProvider
from providers.summary_providers.wandb_summary_provider import WandbSummaryProvider
from utils.kappaconfig.util import remove_large_collections
def init_wandb(
    device: str,
    run_name: str,
    stage_hp: dict,
    wandb_config: WandbConfig,
    path_provider: PathProvider,
    account_name: str,
    tags: list,
    notes: str,
    group: str,
    group_tags: dict,
):
    """Initialize Weights & Biases logging and environment metadata for this stage.

    Only rank0 ever writes configs/summaries; with wandb disabled they go to
    local files instead of the wandb run.

    :return: (config_provider, summary_provider) matching the rank/wandb mode
    """
    logging.info("------------------")
    logging.info(f"initializing wandb (mode={wandb_config.mode})")
    # os.environ["WANDB_SILENT"] = "true"
    # create config_provider & summary_provider
    if not is_rank0():
        # non-rank0 processes never write configs/summaries
        config_provider = NoopConfigProvider()
        summary_provider = NoopSummaryProvider()
        return config_provider, summary_provider
    elif wandb_config.is_disabled:
        # rank0 without wandb: write configs/summaries to local files only
        config_provider = PrimitiveConfigProvider(path_provider=path_provider)
        summary_provider = PrimitiveSummaryProvider(path_provider=path_provider)
    else:
        config_provider = WandbConfigProvider(path_provider=path_provider)
        summary_provider = WandbSummaryProvider(path_provider=path_provider)
    # base config: run/stage identity + hyperparameters (lists converted for wandb)
    config = {
        "run_name": run_name,
        "stage_name": path_provider.stage_name,
        **_lists_to_dict(remove_large_collections(stage_hp)),
    }
    if not wandb_config.is_disabled:
        if wandb_config.mode == "offline":
            os.environ["WANDB_MODE"] = "offline"
        wandb.login(host=wandb_config.host)
        logging.info(f"logged into wandb (host={wandb_config.host})")
        name = run_name or "None"
        if path_provider.stage_name != "default_stage":
            name += f"/{path_provider.stage_name}"
        wandb_id = path_provider.stage_id
        # can't group by tags -> with group tags you can (by adding it as a field to the config)
        # group_tags:
        #   augmentation: minimal
        #   ablation: warmup
        tags = tags or []
        if group_tags is not None and len(group_tags) > 0:
            logging.info(f"group tags:")
            for group_name, tag in group_tags.items():
                logging.info(f"	{group_name}: {tag}")
                assert tag not in tags, \
                    f"tag '{tag}' from group_tags is also in tags (group_tags={group_tags} tags={tags})"
                tags.append(tag)
                config[group_name] = tag
        if len(tags) > 0:
            logging.info(f"tags:")
            for tag in tags:
                logging.info(f"- {tag}")
        wandb.init(
            entity=wandb_config.entity,
            project=wandb_config.project,
            name=name,
            dir=str(path_provider.stage_output_path),
            save_code=False,
            config=config,
            mode=wandb_config.mode,
            id=wandb_id,
            # add default tag to mark runs which have not been looked at in W&B
            # ints need to be cast to string
            tags=["new"] + [str(tag) for tag in tags],
            notes=notes,
            group=group or wandb_id,
        )
    config_provider.update(config)
    # log additional environment properties
    additional_config = {}
    if str(device) == "cpu":
        additional_config["device"] = "cpu"
    else:
        additional_config["device"] = torch.cuda.get_device_name(0)
    additional_config["dist/world_size"] = get_world_size()
    additional_config["dist/nodes"] = get_nodes()
    # hostname from static config which can be more descriptive than the platform.uname().node (e.g. account name)
    additional_config["dist/account_name"] = account_name
    additional_config["dist/hostname"] = platform.uname().node
    # job id from the cluster scheduler (SLURM or PBS), if any
    if "SLURM_JOB_ID" in os.environ:
        additional_config["dist/jobid"] = os.environ["SLURM_JOB_ID"]
    if "PBS_JOBID" in os.environ:
        additional_config["dist/jobid"] = os.environ["PBS_JOBID"]
    config_provider.update(additional_config)
    return config_provider, summary_provider
def _lists_to_dict(root):
""" wandb cant handle lists in configs -> transform lists into dicts with str(i) as key """
# (it will be displayed as [{"kind": "..."}, ...])
root = deepcopy(root)
return _lists_to_dicts_impl(dict(root=root))["root"]
def _lists_to_dicts_impl(root):
if not isinstance(root, dict):
return
for k, v in root.items():
if isinstance(v, list):
root[k] = {str(i): vitem for i, vitem in enumerate(v)}
elif isinstance(v, dict):
root[k] = _lists_to_dicts_impl(root[k])
return root
def finish_wandb(wandb_config: WandbConfig):
    """Finalize the wandb run (rank0 only; no-op when wandb is disabled)."""
    if is_rank0() and not wandb_config.is_disabled:
        wandb.finish()
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/naming_util.py | src/utils/naming_util.py | from kappadata.datasets import KDSubset
def join_names(name1, name2):
    """Join two name parts with a '.'; when name1 is None, name2 is returned as-is."""
    if name1 is None:
        return name2
    # name2 is only required once name1 is present
    assert name2 is not None
    return ".".join((name1, name2))
def pascal_to_snake(pascal_case: str) -> str:
    """
    convert pascal/camel to snake case https://learn.microsoft.com/en-us/visualstudio/code-quality/ca1709?view=vs-2022
    "By convention, two-letter acronyms use all uppercase letters,
    and acronyms of three or more characters use Pascal casing."
    """
    if not pascal_case:
        return ""
    # consecutive uppercase letters are grouped in pairs before a new "_" is inserted
    parts = [pascal_case[0].lower()]
    consecutive_upper = 0
    for ch in pascal_case[1:]:
        if ch.islower():
            parts.append(ch)
            consecutive_upper = 0
        else:
            if consecutive_upper == 2:
                consecutive_upper = 0
            if consecutive_upper == 0:
                parts.append("_")
            parts.append(ch.lower())
            consecutive_upper += 1
    return "".join(parts)
def _type_name(obj, to_name_fn):
    """Resolve the type name of obj (unwrapping any chain of KDSubset wrappers first)."""
    while isinstance(obj, KDSubset):
        obj = obj.dataset
    return to_name_fn(type(obj).__name__)


def lower_type_name(obj):
    """Type name of obj in all-lowercase (KDSubset wrappers are unwrapped)."""
    return _type_name(obj, str.lower)


def snake_type_name(obj):
    """Type name of obj in snake_case (KDSubset wrappers are unwrapped)."""
    return _type_name(obj, pascal_to_snake)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
def cumsum_of_sequence(sequence):
    """Return the running total of the lengths of the elements of `sequence`.

    Example: [[1], [2, 3], []] -> [1, 3, 3]

    :param sequence: iterable of sized elements (anything supporting len)
    :return: list with one cumulative length sum per element
    """
    # stdlib accumulate replaces the hand-rolled running-sum loop
    from itertools import accumulate
    return list(accumulate(len(element) for element in sequence))
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/functional.py | src/utils/functional.py | import einops
import numpy as np
import torch
import torch.nn.functional as F
def get_powers_of_two(min_value, max_value):
    """Return all powers of two p with min_value <= p <= max_value, ascending."""
    if max_value <= 0:
        # log2 is undefined for non-positive values -> no powers exist
        return []
    largest_exponent = int(np.log2(max_value))
    return [2 ** exponent for exponent in range(largest_exponent + 1) if 2 ** exponent >= min_value]
def is_power_of_two(value):
    """True iff log2(value) is a whole number (also True for e.g. 0.5 = 2**-1)."""
    exponent = np.log2(value)
    return exponent.is_integer()
def image_to_pyramid(x, num_scales):
    """Build a list of num_scales images, halving height/width at each level.

    :param x: image batch of shape (batch, channels, height, width)
    :param num_scales: number of pyramid levels (level 0 is x itself)
    :return: [x, x at half resolution, x at quarter resolution, ...]
    """
    pyramid = [x]
    _, _, cur_height, cur_width = x.shape
    device_type = str(x.device).split(":")[0]
    for _ in range(num_scales - 1):
        cur_height //= 2
        cur_width //= 2
        # interpolate is not supported in bfloat16 -> disable autocast around it
        with torch.autocast(device_type=device_type, enabled=False):
            downscaled = F.interpolate(x, size=[cur_height, cur_width], mode="bilinear", align_corners=True)
        pyramid.append(downscaled)
    return pyramid
def gram_matrix(x):
    """Compute the normalized Gram matrix of a feature-map batch.

    :param x: tensor of shape (batch, channels, height, width)
    :return: tensor of shape (batch, channels, channels), divided by c * h * w
    """
    _, channels, height, width = x.shape
    # flatten the spatial dims: (b, c, h, w) -> (b, c, h*w)
    flat = x.flatten(start_dim=2)
    return torch.bmm(flat, flat.transpose(1, 2)) / (channels * height * width)
def to_ndim(x, ndim):
    """Append singleton dimensions to x until it has ndim dimensions."""
    padding = (1,) * (ndim - x.ndim)
    return x.reshape(*x.shape, *padding)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
def get_value_or_default(default, schedule=None, update_counter=None):
    """Evaluate the schedule at the current update step, or fall back to `default`.

    :param default: value returned when no schedule is given
    :param schedule: optional object with a get_value(step, total_steps) method
    :param update_counter: required with a schedule; supplies current/total update counts
    """
    if schedule is None:
        return default
    assert update_counter is not None
    return schedule.get_value(
        step=update_counter.cur_checkpoint.update,
        total_steps=update_counter.end_checkpoint.update,
    )
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
class Bidict:
    """Bidirectional dictionary that keeps a forward and a backward mapping in sync."""

    def __init__(self, forward=None, backward=None):
        # at most one direction may be used for initialization
        assert forward is None or backward is None
        self._forward = {}
        self._backward = {}
        initial = forward if forward is not None else backward
        setter = self.set_forward if forward is not None else self.set_backward
        if initial is not None:
            for key, value in initial.items():
                setter(key, value)

    def to_forward(self):
        """Return a shallow copy of the forward mapping."""
        return dict(self._forward)

    def to_backward(self):
        """Return a shallow copy of the backward mapping."""
        return dict(self._backward)

    def get_forward(self, key):
        """Look up key in the forward direction."""
        return self._forward[key]

    def get_backward(self, key):
        """Look up key in the backward direction."""
        return self._backward[key]

    def set_forward(self, key, value):
        """Insert key -> value forward and keep the backward mapping consistent."""
        self._forward[key] = value
        self._backward[value] = key

    def set_backward(self, key, value):
        """Insert key -> value backward and keep the forward mapping consistent."""
        self._backward[key] = value
        self._forward[value] = key
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/param_checking.py | src/utils/param_checking.py | import collections.abc
from itertools import repeat
from pathlib import Path
# adapted from timm (timm/models/layers/helpers.py)
def _ntuple(n):
    """Create a parser that broadcasts a scalar to an n-tuple.

    :param n: target tuple length
    :return: function that returns x unchanged if it is an iterable of length n,
        otherwise the scalar x repeated n times as a tuple
    """
    def parse(x):
        if isinstance(x, collections.abc.Iterable):
            assert len(x) == n
            return x
        return tuple(repeat(x, n))
    return parse


def _is_ntuple(n):
    """Create a checker for "is x a tuple of length n"."""
    def check(x):
        # fixed NameError: the original referenced the undefined name `param` instead of `x`
        return isinstance(x, tuple) and len(x) == n
    return check


def to_ntuple(x, n):
    """Broadcast x to an n-tuple (see _ntuple)."""
    return _ntuple(n=n)(x)


def is_ntuple(x, n):
    """Return True iff x is a tuple of length n."""
    return _is_ntuple(n=n)(x)


# common case: 2-tuples (e.g. image height/width)
to_2tuple = _ntuple(2)
is_2tuple = _is_ntuple(2)
def float_to_integer_exact(f):
    """Convert a float representing a whole number to int; fail for fractional values."""
    assert f.is_integer()
    return int(f)
def check_exclusive(*args):
    """True iff exactly one argument is not None."""
    num_set = len([arg for arg in args if arg is not None])
    return num_set == 1


def check_inclusive(*args):
    """True iff either no argument or every argument is not None."""
    num_set = len([arg for arg in args if arg is not None])
    return num_set == 0 or num_set == len(args)


def check_at_least_one(*args):
    """True iff at least one argument is not None."""
    return any(arg is not None for arg in args)


def check_at_most_one(*args):
    """True iff at most one argument is not None."""
    return len([arg for arg in args if arg is not None]) <= 1
def to_path(path):
    """Convert a path-like value to an expanded pathlib.Path; None/Path inputs pass through."""
    if path is None or isinstance(path, Path):
        return path
    return Path(path).expanduser()
def to_list_of_values(list_or_item, default_value=None):
    """Normalize a scalar-or-collection argument.

    - None -> [] (or [default_value] when a default is given)
    - single item -> [item]
    - list/tuple -> returned unchanged (tuples stay tuples)
    """
    if list_or_item is None:
        return [] if default_value is None else [default_value]
    if isinstance(list_or_item, (tuple, list)):
        return list_or_item
    return [list_or_item]
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/seed.py | src/utils/seed.py | import logging
import random
from contextlib import ContextDecorator
import numpy as np
import torch
def set_seed(seed):
    """Seed the python, numpy and torch RNGs (including all CUDA devices)."""
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    logging.info(f"set seed to {seed}")
def get_random_int(generator=None):
    """Draw a random int in [0, 999_999_999) from the torch RNG (optionally a dedicated generator)."""
    sample = torch.randint(999_999_999, size=(1,), generator=generator)
    return sample.item()
def get_random_states():
    """Snapshot torch/numpy/python RNG states (keys match set_random_states kwargs)."""
    return {
        "torch_rng_state": torch.get_rng_state(),
        "np_rng_state": np.random.get_state(),
        "py_rng_state": random.getstate(),
    }
def set_random_states(torch_rng_state, np_rng_state, py_rng_state):
    """Restore the RNG states previously captured via get_random_states."""
    random.setstate(py_rng_state)
    np.random.set_state(np_rng_state)
    torch.set_rng_state(torch_rng_state)
def unset_seed():
    """Re-seed all RNGs from the wall clock (milliseconds modulo 2**32)."""
    import time
    milliseconds = int(1000 * time.time())
    set_seed(milliseconds % 2 ** 32)
def with_seed(seed):
    """Return a context manager / decorator that runs the wrapped code with a fixed seed."""
    return WithSeedDecorator(seed)


class WithSeedDecorator(ContextDecorator):
    """Sets the given seed on enter and re-randomizes all RNGs on exit."""

    def __init__(self, seed):
        self.seed = seed

    def __enter__(self):
        set_seed(self.seed)

    def __exit__(self, exc_type, exc_val, exc_tb):
        # restore "randomness" by seeding from the clock; exceptions propagate
        unset_seed()
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.