repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/model_utils.py | src/utils/model_utils.py | import torch
# noinspection PyProtectedMember
from torch.nn.modules.batchnorm import _BatchNorm
def get_paramnames_with_no_gradient(model):
    """Return names of trainable parameters whose ``.grad`` is still None (i.e. unused in backward)."""
    names = []
    for name, param in model.named_parameters():
        if param.requires_grad and param.grad is None:
            names.append(name)
    return names
def get_output_shape_of_model(model, forward_fn, **forward_kwargs):
    """Infer the per-sample output shape by running one dummy forward pass.

    The model is temporarily switched to eval mode so batchnorm/dropout state
    is not affected by the dummy pass; the previous train/eval mode is restored
    afterwards. Requires ``model.input_shape`` and ``model.device`` attributes.
    """
    restore_train_mode = model.training
    model.eval()
    dummy = torch.ones(1, *model.input_shape, device=model.device)
    output = forward_fn(dummy, **forward_kwargs)
    if restore_train_mode:
        model.train()
    return tuple(output.shape[1:])
@torch.no_grad()
def copy_params(source_model, target_model):
    """Copy all parameters and buffers of ``source_model`` into ``target_model`` in-place."""
    for src, tgt in zip(source_model.parameters(), target_model.parameters()):
        tgt.copy_(src)
    for src, tgt in zip(source_model.buffers(), target_model.buffers()):
        tgt.copy_(src)
@torch.no_grad()
def update_ema(source_model, target_model, target_factor, copy_buffers):
    """Exponential moving average update: target = target_factor * target + (1 - target_factor) * source.

    Buffers are copied verbatim (not averaged) when ``copy_buffers`` is True.
    """
    source_weight = 1. - target_factor
    for tgt, src in zip(target_model.parameters(), source_model.parameters()):
        tgt.mul_(target_factor).add_(src, alpha=source_weight)
    if copy_buffers:
        for tgt, src in zip(target_model.buffers(), source_model.buffers()):
            tgt.copy_(src)
def get_named_models(model):
    """Recursively flatten ``model.submodels`` into a dict of dot-joined names to models.

    A model with exactly one submodel is treated as a leaf and its submodels dict
    is returned unchanged; composite models prefix every child's keys with the
    child's own name.
    """
    submodels = model.submodels
    if len(submodels) == 1:
        # single model -> keys are already unique
        return submodels
    # composite model -> recurse and prefix
    flattened = {}
    for prefix, child in submodels.items():
        for suffix, leaf in get_named_models(child).items():
            flattened[f"{prefix}.{suffix}"] = leaf
    return flattened
def get_trainable_param_count(model):
    """Total number of elements across all parameters with ``requires_grad=True``."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def get_frozen_param_count(model):
    """Total number of elements across all parameters with ``requires_grad=False``."""
    total = 0
    for param in model.parameters():
        if not param.requires_grad:
            total += param.numel()
    return total
def freeze(model):
    """Disable gradient computation for every parameter of ``model`` (in-place)."""
    for param in model.parameters():
        param.requires_grad_(False)
def unfreeze(model):
    """Enable gradient computation for every parameter of ``model`` (in-place)."""
    for param in model.parameters():
        param.requires_grad_(True)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/checkpoint.py | src/utils/checkpoint.py | import functools
import math
import os
import re
@functools.total_ordering
class Checkpoint:
    """Training progress expressed as epoch / update (optimizer step) / sample counts.

    Any subset of the three properties may be specified. Helpers convert between
    "minimally specified" (exactly one property set) and "fully specified"
    (all three set) given `updates_per_epoch` and `effective_batch_size`.
    The string form is "E{epoch}_U{update}_S{sample}" and is also parsed back
    from checkpoint file names.
    """

    def __init__(self, epoch=None, update=None, sample=None):
        self.epoch = epoch
        self.update = update
        self.sample = sample

    def copy(self):
        """Return a new Checkpoint with the same (possibly None) properties."""
        return Checkpoint(epoch=self.epoch, update=self.update, sample=self.sample)

    @property
    def is_zero(self):
        """True if any specified property is 0 (i.e. before any training happened)."""
        if self.epoch is not None and self.epoch == 0:
            return True
        if self.update is not None and self.update == 0:
            return True
        if self.sample is not None and self.sample == 0:
            return True
        return False

    @property
    def specified_properties_count(self):
        """How many of epoch/update/sample are not None."""
        return sum([self.epoch is not None, self.update is not None, self.sample is not None])

    @property
    def is_fully_specified(self):
        """True if epoch, update and sample are all set."""
        return self.specified_properties_count == 3

    @property
    def is_minimally_specified(self):
        """True if exactly one of epoch/update/sample is set."""
        return self.specified_properties_count == 1

    def get_n_equal_properties(self, other):
        """Count how many of the three properties compare equal between self and other."""
        return sum([self.epoch == other.epoch, self.update == other.update, self.sample == other.sample])

    def to_fully_specified(self, updates_per_epoch, effective_batch_size):
        """Derive all three properties from a minimally specified checkpoint.

        Updates are used as the common unit: epochs multiply with updates_per_epoch
        and samples divide by effective_batch_size.
        """
        if self.is_fully_specified:
            return Checkpoint(self.epoch, self.update, self.sample)
        assert self.is_minimally_specified
        if self.update is not None:
            total_updates = self.update
        elif self.epoch is not None:
            total_updates = updates_per_epoch * self.epoch
        else:
            total_updates = int(self.sample / effective_batch_size)
        return Checkpoint(
            epoch=int(total_updates / updates_per_epoch),
            update=total_updates,
            sample=total_updates * effective_batch_size,
        )

    def scale(self, factor, updates_per_epoch, effective_batch_size, floor):
        """Scale the checkpoint by `factor` (converting through updates).

        :param floor: truncate the scaled update count instead of rounding up
        :return: a new Checkpoint specifying the same subset of properties as self
        """
        # convert to updates
        if self.update is not None:
            updates = self.update
        else:
            if self.epoch is not None:
                updates = self.epoch * updates_per_epoch
            elif self.sample is not None:
                updates = self.sample / effective_batch_size
            else:
                raise NotImplementedError
        # scale
        updates *= factor
        # floor or ceil
        if floor:
            updates = int(updates)
        else:
            updates = math.ceil(updates)
        # convert back, populating only the originally specified properties
        ckpt = Checkpoint()
        if self.epoch is not None:
            ckpt.epoch = int(updates / updates_per_epoch)
        if self.update is not None:
            ckpt.update = updates
        if self.sample is not None:
            ckpt.sample = updates * effective_batch_size
        return ckpt

    def __eq__(self, other):
        return self.epoch == other.epoch and self.update == other.update and self.sample == other.sample

    def __hash__(self):
        return hash((self.epoch, self.update, self.sample))

    def __ge__(self, other):
        # functools.total_ordering derives the remaining comparisons from __ge__ and __eq__;
        # only properties specified on both sides participate in the comparison
        assert self.has_same_specified_properties(other)
        if self.epoch is not None and other.epoch is not None:
            if self.epoch < other.epoch:
                return False
        if self.update is not None and other.update is not None:
            if self.update < other.update:
                return False
        if self.sample is not None and other.sample is not None:
            if self.sample < other.sample:
                return False
        return True

    def has_same_specified_properties(self, other):
        """True if self and other specify exactly the same subset of properties."""
        if not ((self.epoch is None) == (other.epoch is None)):
            return False
        if not ((self.update is None) == (other.update is None)):
            return False
        if not ((self.sample is None) == (other.sample is None)):
            return False
        return True

    def __repr__(self):
        return str(self)

    def __str__(self):
        if self.is_minimally_specified:
            if self.epoch is not None:
                return f"Epoch {self.epoch}"
            if self.update is not None:
                return f"Update {self.update}"
            if self.sample is not None:
                return f"Sample {self.sample}"
        # float epochs (e.g. produced by scale) are truncated for the compact form
        if isinstance(self.epoch, float):
            epoch_str = str(int(self.epoch))
        else:
            epoch_str = str(self.epoch)
        return f"E{epoch_str}_U{self.update}_S{self.sample}"

    @staticmethod
    def from_checkpoint_string(checkpoint_string):
        """Parse 'E{epoch}_U{update}_S{sample}' into a fully specified Checkpoint."""
        matches = re.findall(r"E(\d*)_U(\d*)_S(\d*)", checkpoint_string)
        assert len(matches) == 1
        epoch_str, update_str, sample_str = matches[0]
        return Checkpoint(epoch=int(epoch_str), update=int(update_str), sample=int(sample_str))

    @staticmethod
    def contains_checkpoint_string(source):
        """True if `source` contains a checkpoint pattern such as 'E5_U100_S6400'."""
        matches = re.findall(r"E\d*_U\d*_S\d*", source)
        return len(matches) > 0

    @staticmethod
    def find_checkpoint_string(source):
        """Extract the single checkpoint pattern from `source` (asserts uniqueness)."""
        matches = re.findall(r"E\d*_U\d*_S\d*", source)
        assert len(matches) == 1
        return matches[0]

    @staticmethod
    def from_filename(fname):
        """Parse the checkpoint encoded in a file name."""
        assert Checkpoint.contains_checkpoint_string(fname)
        ckpt_str = Checkpoint.find_checkpoint_string(fname)
        return Checkpoint.from_checkpoint_string(ckpt_str)

    @staticmethod
    def to_fully_specified_from_fnames(ckpt_folder, ckpt, prefix=None, suffix=None):
        """Find the checkpoint file in `ckpt_folder` that matches `ckpt` and return it fully specified.

        :raises FileNotFoundError: when no matching file exists
        """
        assert ckpt.is_fully_specified or ckpt.is_minimally_specified
        for f in os.listdir(ckpt_folder):
            # filter irrelevant files
            if prefix is not None and not f.startswith(prefix):
                continue
            if suffix is not None and not f.endswith(suffix):
                continue
            if not Checkpoint.contains_checkpoint_string(f):
                continue
            # extract Checkpoint object from filename
            ckpt_from_fname = Checkpoint.from_checkpoint_string(Checkpoint.find_checkpoint_string(f))
            # drop over-specified properties for the comparison, e.g.
            # Checkpoint(epoch=5, update=12, sample=123) -> Checkpoint(epoch=5) when ckpt == Checkpoint(epoch=5)
            if ckpt_from_fname.to_target_specification(ckpt) == ckpt:
                return ckpt_from_fname
        raise FileNotFoundError(
            f"no checkpoint file found (folder='{ckpt_folder}' checkpoint='{ckpt}' "
            f"prefix='{prefix}' suffix='{suffix}')"
        )

    def to_target_specification(self, target):
        """
        removes all overly specified properties of self (depending on the specified properties of target)
        e.g.
        self=Checkpoint(epoch=6, update=12, sample=123)
        target=Checkpoint(epoch=5)
        returns a new Checkpoint(epoch=6)
        """
        assert target.specified_properties_count <= self.specified_properties_count
        kwargs = {}
        if target.epoch is not None:
            kwargs["epoch"] = self.epoch
        if target.update is not None:
            kwargs["update"] = self.update
        if target.sample is not None:
            kwargs["sample"] = self.sample
        return Checkpoint(**kwargs)

    def __add__(self, other):
        # bugfix: mirror __sub__ and keep unspecified (None) properties as None;
        # previously e.g. Checkpoint(epoch=1) + Checkpoint(epoch=2) raised a
        # TypeError because None + None was evaluated for update/sample
        assert self.has_same_specified_properties(other)
        epoch = self.epoch + other.epoch if self.epoch is not None else None
        update = self.update + other.update if self.update is not None else None
        sample = self.sample + other.sample if self.sample is not None else None
        return Checkpoint(epoch, update, sample)

    def __sub__(self, other):
        assert self.has_same_specified_properties(other)
        epoch = self.epoch - other.epoch if self.epoch is not None else None
        update = self.update - other.update if self.update is not None else None
        sample = self.sample - other.sample if self.sample is not None else None
        return Checkpoint(epoch, update, sample)

    def __iter__(self):
        # proxy for casting to dict
        # https://stackoverflow.com/questions/35282222/in-python-how-do-i-cast-a-class-object-to-a-dict
        if self.epoch is not None:
            yield "epoch", self.epoch
        if self.update is not None:
            yield "update", self.update
        if self.sample is not None:
            yield "sample", self.sample
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/pytorch_cuda_timing.py | src/utils/pytorch_cuda_timing.py | import torch
import torch.distributed as dist
def cuda_start_event():
    """Create and immediately record a CUDA timing event marking the start of a measured section."""
    event = torch.cuda.Event(enable_timing=True)
    event.record()
    return event
def cuda_end_event(start_event):
    """Record an end event and return the time elapsed since ``start_event`` in seconds.

    In a distributed run, the local device is synchronized and a barrier is hit
    before recording the end event so the measured durations are comparable
    across ranks.
    """
    if dist.is_available() and dist.is_initialized():
        torch.cuda.synchronize()
        dist.barrier()
    end_event = torch.cuda.Event(enable_timing=True)
    end_event.record()
    # wait until the recorded events have actually been reached before reading the timing
    torch.cuda.synchronize()
    # torch.cuda.Event.elapsed_time returns milliseconds but kappaprofiler expects seconds
    return start_event.elapsed_time(end_event) / 1000
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/log_once.py | src/utils/log_once.py | import logging
# keys for which a message was already emitted (process lifetime)
_MESSAGE_KEYS = set()


def log_once(log_fn_or_message, key, level=logging.INFO):
    """Log a message string (or invoke a logging callable) at most once per ``key``.

    Subsequent calls with the same key are silently ignored. The key is only
    registered after the message was emitted successfully.
    """
    if key in _MESSAGE_KEYS:
        return
    if isinstance(log_fn_or_message, str):
        logging.log(level=level, msg=log_fn_or_message)
    else:
        log_fn_or_message()
    _MESSAGE_KEYS.add(key)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/save_image_utils.py | src/utils/save_image_utils.py | import einops
import numpy as np
import torch
from PIL import Image
from kappadata import get_denorm_transform, get_norm_transform
from kappadata.wrappers import XTransformWrapper
from matplotlib.pyplot import get_cmap
from torchvision.transforms.functional import to_pil_image
# region concat images
def concat_images_square(images, scale, padding):
    """Arrange PIL images on a near-square grid with ``padding`` pixels between and around cells.

    ``scale`` multiplies the width/height of every image before placement
    (assumed to be an integer — PIL requires integer sizes; TODO confirm).
    All images are assumed to share the size and mode of the first one.
    """
    # smallest square-ish grid that fits all images (columns >= rows)
    columns = int(np.ceil(np.sqrt(len(images))))
    rows = int(np.ceil(len(images) / columns))
    w, h = images[0].size
    if scale != 1:
        images = [i.resize((w * scale, h * scale)) for i in images]
        w, h = images[0].size
    # canvas large enough for all cells plus padding on every side
    concated = Image.new(images[0].mode, (w * columns + padding * (columns + 1), h * rows + padding * (rows + 1)))
    for i in range(len(images)):
        # fill row by row, left to right
        col = (i % columns)
        row = i // columns
        concated.paste(images[i], (w * col + padding * (col + 1), h * row + padding * (row + 1)))
    return concated
def concat_images_vertical(images, scale=1):
    """Stack PIL images vertically (top to bottom) into a single image.

    All images are assumed to share the size and mode of the first one.
    Bugfix: ``scale_images`` returns a new list (PIL's ``Image.resize`` is not
    in-place), so its result must be assigned — previously the ``scale``
    argument had no effect here.
    """
    images = scale_images(images, scale)
    w, h = images[0].size
    concated = Image.new(images[0].mode, (w, h * len(images)))
    for i in range(len(images)):
        concated.paste(images[i], (0, h * i))
    return concated
def concat_images_horizontal(images, scale=1):
    """Concatenate PIL images horizontally (left to right) into a single image.

    All images are assumed to share the size and mode of the first one.
    Bugfix: ``scale_images`` returns a new list (PIL's ``Image.resize`` is not
    in-place), so its result must be assigned — previously the ``scale``
    argument had no effect here.
    """
    images = scale_images(images, scale)
    w, h = images[0].size
    concated = Image.new(images[0].mode, (w * len(images), h))
    for i in range(len(images)):
        concated.paste(images[i], (w * i, 0))
    return concated
# endregion
def scale_images(images, scale):
    """Resize every image by an integer factor; returns the input list unchanged for ``scale == 1``."""
    if scale == 1:
        return images
    return [img.resize((img.width * scale, img.height * scale)) for img in images]
def greyscale_to_viridis(tensor) -> Image:
    """Map a single-channel (1, H, W) tensor to an RGBA PIL image via the viridis colormap.

    NOTE(review): values are presumably expected in [0, 1] (the matplotlib
    colormap maps that range) — confirm against callers (tensor_to_image
    rescales before calling this).
    """
    assert tensor.ndim == 3 and len(tensor) == 1
    # use only first channel
    tensor = tensor[0]
    # apply viridis colormap: (H, W) floats -> (H, W, 4) RGBA floats
    cm = get_cmap("viridis")
    tensor = tensor.cpu().numpy()
    tensor = cm(tensor)
    # to 8-bit per channel for PIL
    tensor = np.uint8(tensor * 255)
    return Image.fromarray(tensor)  # mode == "RGBA"
def rgba_to_rgb(image: Image) -> Image:
    """Flatten an RGBA image onto a white background, returning an RGB image."""
    # https://stackoverflow.com/questions/9166400/convert-rgba-png-to-rgb-with-pil
    background = Image.new("RGB", image.size, (255, 255, 255))
    background.paste(image, mask=image.split()[3])  # 3 is the alpha channel
    return background
def tensor_to_image(tensor, denormalize=None, scale_range_per_image=False):
    """Convert a (C, H, W) tensor to a PIL image.

    Single-channel tensors are rendered with the viridis colormap (after being
    shifted/scaled into [0, 1] when necessary); multi-channel tensors go through
    torchvision's ``to_pil_image``.

    :param denormalize: optional callable applied first (e.g. inverse of the dataset normalization)
    :param scale_range_per_image: min/max-normalize the whole tensor before conversion
    """
    assert torch.is_tensor(tensor) and tensor.ndim == 3
    if denormalize is not None:
        tensor = denormalize(tensor)
    if scale_range_per_image:
        # shift minimum to 0, then scale maximum to 1
        tensor = tensor - tensor.min()
        tensor = tensor / tensor.max()
    if len(tensor) == 1:
        # greyscale: ensure values are inside [0, 1] before applying the colormap
        if tensor.min() < 0.:
            tensor = tensor - tensor.min()
        if tensor.max() > 1.:
            tensor = tensor / tensor.max()
        return greyscale_to_viridis(tensor)
    return to_pil_image(tensor)
def save_image_tensors(
    tensors,
    out_uri,
    denormalize=None,
    scale_range_per_image=False,
    scale=1.,
    padding=2,
    transpose_xy=False,
):
    """Convert a batch of (B, C, H, W) tensors to PIL images and save them as one concatenated image.

    :param denormalize: optional callable applied per tensor (see tensor_to_image)
    :param scale_range_per_image: min/max-normalize each tensor before conversion
    :param scale: upscaling factor forwarded to save_images
    :param padding: grid padding in pixels forwarded to save_images
    :param transpose_xy: swap the two spatial axes before conversion
    """
    assert torch.is_tensor(tensors) and tensors.ndim == 4
    if transpose_xy:
        tensors = einops.rearrange(tensors, "b c h w -> b c w h")
    images = [
        tensor_to_image(tensor, denormalize=denormalize, scale_range_per_image=scale_range_per_image)
        for tensor in tensors
    ]
    save_images(images, out_uri, scale, padding)
def save_images(images, out_uri, scale=1., padding=2):
    """Concatenate a list of PIL images (layout chosen from the first image's aspect ratio) and save.

    Square images are laid out on a padded grid.
    NOTE(review): taller-than-wide images are concatenated horizontally (and
    wider-than-tall vertically), and ``padding`` is only honored in the square
    layout — confirm this is intended.
    """
    assert isinstance(images, list)
    w, h = images[0].size
    if w == h:
        concated = concat_images_square(images, scale, padding)
    elif h > w:
        concated = concat_images_horizontal(images, scale)
    else:
        concated = concat_images_vertical(images, scale)
    concated.save(out_uri)
def images_to_gif(image_uris, out_uri, duration=200):
    """Combine image files into a looping GIF with ``duration`` ms per frame; no-op for an empty list."""
    if len(image_uris) == 0:
        return
    # lazy generator: `next` consumes the first frame, PIL appends the remaining ones
    imgs = (Image.open(f) for f in image_uris)
    img = next(imgs)
    img.save(fp=out_uri, format="GIF", append_images=imgs, save_all=True, duration=duration, loop=0)
def get_norm_transform_from_datacontainer(data_container, dataset_key=None):
    """Fetch the normalization transform of a dataset inside a data container.

    :raises NotImplementedError: when the dataset uses a collator (not supported here)
    """
    ds, collator = data_container.get_dataset(key=dataset_key, mode="x")
    if collator is not None:
        raise NotImplementedError
    return get_norm_transform_from_dataset(ds)
def get_norm_transform_from_dataset(dataset):
    """Extract the normalization transform from a dataset's XTransformWrapper (None if absent)."""
    xtransform_wrapper = dataset.get_wrapper_of_type(XTransformWrapper)
    if xtransform_wrapper is None:
        return None
    return get_norm_transform(xtransform_wrapper.transform)
def get_denorm_from_datacontainer(data_container, dataset_key=None):
    """Fetch the denormalization transform of a dataset inside a data container.

    :raises NotImplementedError: when the dataset uses a collator (not supported here)
    """
    ds, collator = data_container.get_dataset(key=dataset_key, mode="x")
    if collator is not None:
        raise NotImplementedError
    return get_denorm_from_dataset(ds)
def get_denorm_from_dataset(dataset):
    """Extract the denormalization transform from a dataset's XTransformWrapper (None if absent)."""
    xtransform_wrapper = dataset.get_wrapper_of_type(XTransformWrapper)
    if xtransform_wrapper is None:
        return None
    return get_denorm_transform(xtransform_wrapper.transform)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/update_counter.py | src/utils/update_counter.py | from utils.checkpoint import Checkpoint
class UpdateCounter:
    """Tracks current training progress (epoch/update/sample) between a start and an end Checkpoint."""

    def __init__(
        self,
        start_checkpoint: Checkpoint,
        end_checkpoint: Checkpoint,
        updates_per_epoch: int,
        effective_batch_size: int,
    ):
        # number of optimizer updates that make up one epoch
        self.updates_per_epoch = updates_per_epoch
        # start_checkpoint should always be fully specified (either E0_U0_S0 or derived from ResumeInitializer)
        self.start_checkpoint = start_checkpoint
        assert self.start_checkpoint.is_fully_specified
        # fully specify end_checkpoint (based on difference between start_checkpoint)
        # this allows e.g. starting training with batch_size=512 and resuming with different batch_size
        # some things don't work with this
        # - schedules are not adjusted to it
        # - how are schedules such as inverse sqrt schedule handled?
        # sanity check: start_checkpoint must be consistent with updates_per_epoch/effective_batch_size
        assert self.start_checkpoint == Checkpoint(epoch=self.start_checkpoint.epoch).to_fully_specified(
            updates_per_epoch=updates_per_epoch,
            effective_batch_size=effective_batch_size,
        )
        assert end_checkpoint.is_minimally_specified
        # remaining duration, expressed in the single property end_checkpoint specifies
        delta_ckpt = end_checkpoint - self.start_checkpoint.to_target_specification(end_checkpoint)
        fully_specified_delta = delta_ckpt.to_fully_specified(
            updates_per_epoch=updates_per_epoch,
            effective_batch_size=effective_batch_size,
        )
        self.end_checkpoint = self.start_checkpoint + fully_specified_delta
        assert self.end_checkpoint.is_fully_specified
        # cur_checkpoint is advanced in-place via next_epoch/next_update/add_samples
        self.cur_checkpoint = self.start_checkpoint.copy()
        self.effective_batch_size = effective_batch_size

    @property
    def is_full_epoch(self):
        # True exactly at epoch boundaries (current update count divides evenly)
        assert self.cur_checkpoint.is_fully_specified
        return self.update % self.updates_per_epoch == 0

    @property
    def epoch_as_float(self):
        # fractional epoch progress, e.g. 1.5 == halfway through the second epoch
        return float(self.cur_checkpoint.update) / self.updates_per_epoch

    @property
    def epoch(self):
        return self.cur_checkpoint.epoch

    @property
    def update(self):
        return self.cur_checkpoint.update

    @property
    def sample(self):
        return self.cur_checkpoint.sample

    @property
    def is_finished(self):
        # compare against the fully specified end checkpoint
        return self.cur_checkpoint.to_target_specification(self.end_checkpoint) >= self.end_checkpoint

    def next_epoch(self):
        self.cur_checkpoint.epoch += 1

    def next_update(self):
        self.cur_checkpoint.update += 1

    def add_samples(self, n_samples):
        self.cur_checkpoint.sample += n_samples
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/factory.py | src/utils/factory.py | import importlib
import inspect
import logging
from functools import partial
from itertools import product
def create(obj_or_kwargs, from_kwargs_fn, instantiate_if_ctor=True, **kwargs):
    """
    avoid boilerplate code when allowing ctor arguments to be either an object or a dict with the object parameters
    e.g. a model can be instantiated with either act=torch.nn.ReLU or act=dict(kind='relu') and the ctor has to only
    call self.act_ctor = create(act, act_ctor_from_kwargs) instead

    :param obj_or_kwargs: an already-created object, a dict of ctor kwargs, a partial or a type
    :param from_kwargs_fn: factory invoked with the dict entries (plus **kwargs) when a dict is passed
    :param instantiate_if_ctor: when True, partials/types are called (with **kwargs) instead of returned as-is
    :return: the created (or passed-through) object, or None when an empty dict is passed
    """
    if isinstance(obj_or_kwargs, dict):
        # empty dict means "not configured"
        if len(obj_or_kwargs) == 0:
            return None
        return from_kwargs_fn(**obj_or_kwargs, **kwargs)
    if instantiate_if_ctor and isinstance(obj_or_kwargs, (partial, type)):
        # allow passing partials to objects which are then instantiated automatically
        # useful for e.g. passing model_ctors to autoencoder and autoencoder ctor passes latent_dim to decoder
        if isinstance(obj_or_kwargs, partial):
            # partial overwrites already defined kwargs but dict would throw an error
            for key in kwargs.keys():
                assert key not in obj_or_kwargs.keywords, f"got multiple values for keyword argument {key}"
        return obj_or_kwargs(**kwargs)
    # don't allow this as this would make the configs no longer 1:1 comparable (e.g. Linear(pooling="cls") would have
    # model.pooling == "cls" in the config whereas Linear(pooling=dict(kind="cls")) would have
    # model.pooling == dict(kind="cls")
    # if isinstance(obj_or_kwargs, str):
    # allow setting kind with only a string (e.g. Linear(pooling="cls") will create a pooling object
    # return from_kwargs_fn(kind=obj_or_kwargs, **kwargs)
    return obj_or_kwargs
def create_collection(collection, from_kwargs_fn, collate_fn=None, **kwargs):
    """Create a list/dict of objects from a collection of configs via ``create``.

    :param collection: list or dict whose entries are kwargs-dicts, partials/types or ready objects;
        None yields an empty list
    :param collate_fn: optional callable applied to the created list/dict before returning
    :raises NotImplementedError: for unsupported collection types
    """
    if isinstance(collection, list):
        objs = []
        for ckwargs in collection:
            if isinstance(ckwargs, dict):
                # entry-level kwargs take precedence over shared kwargs
                objs.append(create({**kwargs, **ckwargs}, from_kwargs_fn))
            elif isinstance(ckwargs, (partial, type)):
                objs.append(create(ckwargs, from_kwargs_fn, **kwargs))
            else:
                # already instantiated -> keep as-is
                objs.append(ckwargs)
    elif isinstance(collection, dict):
        objs = {key: create(ckwargs, from_kwargs_fn, **kwargs) for key, ckwargs in collection.items()}
    elif collection is None:
        objs = []
    else:
        raise NotImplementedError(f"invalid collection type {type(collection).__name__} (expected dict or list)")
    if collate_fn is not None:
        return collate_fn(objs)
    return objs
def get_ctor(module_names, type_names, **kwargs):
    """Resolve a type via ``type_from_name`` and return a partial with ``kwargs`` pre-bound."""
    resolved_type = type_from_name(module_names=module_names, type_names=type_names)
    return partial(resolved_type, **kwargs)
def instantiate(module_names, type_names, error_on_not_found=True, ctor_kwargs=None, optional_kwargs=None, **kwargs):
    """Resolve a type by name (via ``type_from_name``) and construct an instance of it.

    :param optional_kwargs: kwargs passed only when the ctor (or a base ctor) accepts them
    :param ctor_kwargs: NOTE(review): the wrapping below means the target ctor receives the
        whole dict as a single keyword argument named "ctor_kwargs" (not expanded) — confirm intended
    :raises TypeError: re-raised (after logging) when construction fails
    """
    obj_type = type_from_name(module_names=module_names, type_names=type_names, error_on_not_found=error_on_not_found)
    ctor_kwargs = {} if ctor_kwargs is None else dict(ctor_kwargs=ctor_kwargs)
    try:
        # e.g. pass update_counter to SchedulableLoss but not to e.g. torch.nn.MSELoss
        if optional_kwargs is not None:
            ctor_kwarg_names = get_all_ctor_kwarg_names(obj_type)
            for key in list(optional_kwargs.keys()):
                if key not in ctor_kwarg_names:
                    optional_kwargs.pop(key)
        else:
            optional_kwargs = {}
        return obj_type(**kwargs, **ctor_kwargs, **optional_kwargs)
    except TypeError as e:
        logging.error(f"error creating object of type {obj_type.__name__}: {e}")
        raise
def type_from_name(module_names, type_names, error_on_not_found=True):
    """
    tries to import type_name from any of the modules identified by module_names
    e.g. module_names=[loss_functions, torch.nn] type_name=bce_loss will import torch.nn.BCELoss

    :param error_on_not_found: raise RuntimeError instead of returning None when nothing matches
    """
    for module_name, type_name in product(module_names, type_names):
        module_name = module_name.lower()
        try:
            module = importlib.import_module(module_name)
        except ModuleNotFoundError as e:
            # this also fails if some module could not be imported from within the module to import
            # (e.g. failed to import torchmetrics when importing accuracy_logger)
            if not module_name.startswith(e.name):
                raise e
            continue
        type_ = _get_type_from_module(module, type_name)
        if type_ is not None:
            return type_
    # check if module was set in code (used for unittesting)
    # e.g. models.mock_model = test_unit.mock.mock_model
    for module_name, type_name in product(module_names, type_names):
        module_name = module_name.lower()
        parent_module_name = ".".join(module_name.split(".")[:-1])
        try:
            parent_module = importlib.import_module(parent_module_name)
        except ModuleNotFoundError:
            continue
        if hasattr(parent_module, type_name):
            module = getattr(parent_module, type_name)
            type_ = _get_type_from_module(module, type_name)
            if type_ is not None:
                return type_
    if error_on_not_found:
        # ModuleNotFoundErrors above are swallowed for packages like torchmetrics when package
        # management is not correct, but here the error then occurs because the class can't be found
        raise RuntimeError(f"can't find class {' or '.join(type_names)} in {' or '.join(module_names)}")
    else:
        return None
def _get_type_from_module(module, type_name):
    """Case- and underscore-insensitively look up ``type_name`` in ``module``.

    Returns None when nothing matches. Among multiple matches, all-uppercase
    names (e.g. CIFAR10, SPEECHCOMMANDS) are dropped first; more than one
    remaining candidate is treated as an error.
    """
    normalized = type_name.lower().replace("_", "")
    candidates = [key for key in module.__dict__.keys() if key.lower() == normalized]
    if len(candidates) > 1:
        # filter out all caps names (e.g. CIFAR10, SPEECHCOMMANDS)
        candidates = [name for name in candidates if not name.isupper()]
    assert len(candidates) <= 1, f"error found more than one possible type for {normalized}"
    if candidates:
        return getattr(module, candidates[0])
    return None
def get_all_ctor_kwarg_names(cls):
    """Collect the constructor parameter names of ``cls`` and all of its base classes."""
    names = set()
    _get_all_ctor_kwarg_names(cls, names)
    return names


def _get_all_ctor_kwarg_names(cls, result):
    # inspect.signature on a class reflects its __init__ (without self)
    result.update(inspect.signature(cls).parameters.keys())
    # walk up the __base__ chain (object has __base__ = None)
    if cls.__base__ is not None:
        _get_all_ctor_kwarg_names(cls.__base__, result)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/memory_leak_util.py | src/utils/memory_leak_util.py | import gc
import torch
def get_tensors_in_memory():
    """Scan the garbage collector for live tensors; returns (all_tensors, cuda_tensors).

    Intended for hunting memory leaks. ``cuda_tensors`` contains every tensor
    whose device is not the CPU (so other accelerators are collected too).
    """
    # some warning was thrown when calling torch.is_tensor(_reduce_op) with a _reduce_op object
    all_objs = gc.get_objects()
    all_tensors = []
    cuda_tensors = []
    for obj in all_objs:
        try:
            if type(obj).__name__ != "_reduce_op" and torch.is_tensor(obj):
                all_tensors.append(obj)
                if obj.device != torch.device("cpu"):
                    cuda_tensors.append(obj)
        except ReferenceError:
            # with wandb there is some issue where 'ReferenceError: weakly-referenced object no longer exists' is raised
            pass
    return all_tensors, cuda_tensors
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/__init__.py | src/utils/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/vit_util.py | src/utils/vit_util.py | import einops
from .param_checking import to_2tuple
def get_sequence_lengths(input_shape, patch_size):
    """Number of patches per spatial dimension; each dimension must divide evenly by its patch size."""
    assert len(input_shape) == len(patch_size)
    seqlens = []
    for dim_size, patch_dim in zip(input_shape, patch_size):
        assert dim_size % patch_dim == 0
        seqlens.append(dim_size // patch_dim)
    return seqlens
def sequence_to_2d_with_seqlens(tokens, h_seqlen, w_seqlen, num_aux_tokens):
    """Split a (B, num_aux + h*w, C) token sequence into a 2d feature map plus aux tokens.

    :return: (img, aux_tokens) where img has shape (B, C, h_seqlen, w_seqlen)
        and aux_tokens are the leading ``num_aux_tokens`` tokens (e.g. CLS)
    """
    # transform into image with c=feature_dim h=h_seqlen w=w_seqlen
    aux_tokens = tokens[:, :num_aux_tokens]
    patch_tokens = tokens[:, num_aux_tokens:]
    img = einops.rearrange(
        patch_tokens,
        "b (h_seqlen w_seqlen) c -> b c h_seqlen w_seqlen",
        h_seqlen=h_seqlen,
        w_seqlen=w_seqlen,
    )
    return img, aux_tokens
def patchify_as_1d(x, patch_size):
    """Patchify an n-dimensional input into a flat token sequence.

    :param x: tensor of shape (bs, c, *spatial) where every spatial dim is divisible
        by the corresponding patch_size entry
    :param patch_size: one patch extent per spatial dimension
    :return: tensor of shape (bs, prod(seqlens), prod(patch_size) * c)
    """
    assert x.ndim - 2 == len(patch_size)
    ndim = len(patch_size)
    resolution = x.shape[2:]
    assert all(resolution[i] % patch_size[i] == 0 for i in range(ndim))
    seqlens = [resolution[i] // patch_size[i] for i in range(ndim)]
    # generate generic pattern for ndim
    # pattern for 2d is: "bs c (h ph) (w pw) -> bs (h w) (ph pw c)"
    # pattern for 3d is: "bs c (x px) (y py) (z pz) -> bs (x y z) (px py pz c)"
    from_pattern = "c " + " ".join([f"(seqlen{i} patchsize{i})" for i in range(ndim)])
    to_pattern1 = " ".join([f"seqlen{i}" for i in range(ndim)])
    to_pattern2 = " ".join([f"patchsize{i}" for i in range(ndim)]) + " c"
    kwargs = {f"seqlen{i}": seqlens[i] for i in range(ndim)}
    x = einops.rearrange(x, f"bs {from_pattern} -> bs ({to_pattern1}) ({to_pattern2})", **kwargs)
    return x
def patchify_as_2d(imgs, patch_size):
    """Patchify 2d images but keep the spatial grid layout.

    :param imgs: tensor of shape (bs, c, img_h, img_w)
    :return: tensor of shape (bs, ph*pw*c, h, w) where h/w are the number of
        patches along the height/width dimension
    """
    patch_height, patch_width = to_2tuple(patch_size)
    bs, c, img_h, img_w = imgs.shape
    assert img_h % patch_height == 0 and img_w % patch_width == 0
    # how many patches are along height/width dimension
    h = img_h // patch_height
    w = img_w // patch_width
    # return as "image"
    x = einops.rearrange(imgs, "bs c (h ph) (w pw) -> bs (ph pw c) h w", h=h, ph=patch_height, w=w, pw=patch_width)
    return x
def unpatchify(patches, patch_size, img_shape=None):
    """Dispatch to the 1d- or 2d-unpatchify variant based on ``patches.ndim``.

    A 4d input whose trailing shape already equals ``img_shape`` is returned
    unchanged (it is assumed to already be an image).
    :raises NotImplementedError: for inputs that are neither 3d nor 4d
    """
    if patches.ndim == 3:
        return unpatchify_from_1d(patches=patches, patch_size=patch_size, img_shape=img_shape)
    elif patches.ndim == 4:
        if patches.shape[1:] == img_shape:
            return patches
        return unpatchify_from_2d(patches=patches, patch_size=patch_size)
    raise NotImplementedError
def unpatchify_from_1d(patches, patch_size, img_shape=None):
    """Inverse of patchify_as_1d for 2d images: (bs, seqlen, ph*pw*c) -> (bs, c, H, W).

    :param img_shape: (H, W) or (c, H, W); when (H, W) is given the (single)
        channel dim is squeezed from the result. When omitted, an equal number
        of patches along height and width is assumed (requires square patches).
    """
    remove_channel_dim = False
    assert patches.ndim == 3
    patch_height, patch_width = to_2tuple(patch_size)
    # without an img_shape the patch grid can only be inferred for square layouts
    assert patch_height == patch_width or img_shape is not None
    if img_shape is not None:
        # derive number of patches along height/width from original image shape
        if len(img_shape) == 2:
            img_h, img_w = img_shape
            remove_channel_dim = True
        else:
            _, img_h, img_w = img_shape
        assert img_h % patch_height == 0 and img_w % patch_width == 0
        seqlen_h = img_h // patch_height
        seqlen_w = img_w // patch_width
    else:
        # equal number of patches along height/width
        seqlen_h = seqlen_w = int(patches.shape[1] ** .5)
    img = einops.rearrange(
        patches,
        "bs (seqlen_h seqlen_w) (ph pw c) -> bs c (seqlen_h ph) (seqlen_w pw)",
        ph=patch_height,
        pw=patch_width,
        seqlen_h=seqlen_h,
        seqlen_w=seqlen_w,
    )
    if remove_channel_dim:
        img = einops.rearrange(img, "bs 1 img_h img_w -> bs img_h img_w")
    return img
def unpatchify_from_2d(patches, patch_size):
    """Inverse of patchify_as_2d: (bs, ph*pw*c, h, w) -> (bs, c, h*ph, w*pw)."""
    patch_height, patch_width = to_2tuple(patch_size)
    return einops.rearrange(patches, "bs (ph pw c) h w -> bs c (h ph) (w pw)", ph=patch_height, pw=patch_width)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/select_with_path.py | src/utils/select_with_path.py | def select_with_path(obj, path):
if path is not None:
for p in path.split("."):
if isinstance(obj, dict):
obj = obj[p]
elif isinstance(obj, list):
obj = obj[int(p)]
else:
obj = getattr(obj, p)
return obj
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/infer_higher_is_better.py | src/utils/infer_higher_is_better.py | import logging
# metric-key prefixes for which a lower value is better
LOWER_IS_BETTER_KEYS = [
    "loss",
    "delta",
]
# metric-key prefixes for which a higher value is better
# NOTE(review): "corerlation_time" looks like a typo of "correlation_time", and
# "profiling/train_update_time" being higher-is-better is surprising — verify
# against the metric keys the loggers actually emit before changing the values
HIGHER_IS_BETTER_KEYS = [
    "profiling/train_update_time",
    "correlation",
    "corerlation_time",
]
# metric-key prefixes that carry no better/worse direction at all
NEUTRAL_KEYS = [
    "optim",
    "profiling",
    "mask_ratio",
    "freezers",
    "transform_scale",
    "ctx",
    "loss_weight",
    "gradient",
    "detach",
    "confidence",
    "train_len",
    "test_len",
    "degree",
]


def is_neutral_key(metric_key):
    """True if ``metric_key`` is direction-less: it matches a neutral prefix and no directional one."""
    directional_prefixes = HIGHER_IS_BETTER_KEYS + LOWER_IS_BETTER_KEYS
    if any(metric_key.startswith(prefix) for prefix in directional_prefixes):
        return False
    return any(metric_key.startswith(prefix) for prefix in NEUTRAL_KEYS)
def higher_is_better_from_metric_key(metric_key):
    """Infer metric direction from its key prefix; unknown keys default to True with a warning."""
    if any(metric_key.startswith(prefix) for prefix in HIGHER_IS_BETTER_KEYS):
        return True
    if any(metric_key.startswith(prefix) for prefix in LOWER_IS_BETTER_KEYS):
        return False
    logging.warning(f"{metric_key} has no defined behavior for higher_is_better -> using True")
    return True
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/apply_transform_dataset.py | src/utils/apply_transform_dataset.py | from torch.utils.data import Dataset
class ApplyTransformDataset(Dataset):
    """Dataset that lazily applies ``transform`` to each item of ``data``.

    Applying transforms via the pytorch DataLoader is much faster than applying
    them via joblib (on ImageNet10-M3AE logging embeddings of a ViT-B takes
    10:40 with joblib vs 2:40 with pytorch).
    """

    def __init__(self, data, transform):
        super().__init__()
        # both are stored as-is; the transform runs on access, not up-front
        self.data = data
        self.transform = transform

    def __getitem__(self, idx):
        item = self.data[idx]
        return self.transform(item)

    def __len__(self):
        return len(self.data)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/formatting_util.py | src/utils/formatting_util.py | import numpy as np
import torch
# SI magnitude suffixes: index i corresponds to 1000**i
_SI_PREFIXES = ["", "K", "M", "G", "T", "P", "E"]


def short_number_str(number, precision=1):
    """Format a number with an SI suffix, e.g. 1500 -> '1.5K' (precision=1).

    Fractional digits are truncated (not rounded), matching the original
    behavior. Bugfix: the magnitude is now clamped to the available prefixes —
    previously values with |number| < 0.001 produced a negative magnitude that
    indexed the prefix list from the end (bogus 'E' suffix), and values beyond
    1000**7 raised an IndexError.
    """
    if number == 0:
        return "{short_number:.{precision}f}".format(short_number=0., precision=precision)
    if number < 0:
        number = -number
        sign = "-"
    else:
        sign = ""
    magnitude = int(np.log10(number) / 3)
    # clamp into the valid prefix range (fixes small-number and overflow cases)
    magnitude = max(0, min(magnitude, len(_SI_PREFIXES) - 1))
    short_number = int(number / (1000 ** magnitude / 10 ** precision)) / 10 ** precision
    return "{sign}{short_number:.{precision}f}{si_unit}".format(
        sign=sign,
        short_number=short_number,
        precision=precision,
        si_unit=_SI_PREFIXES[magnitude],
    )
def summarize_indices_list(indices):
    """Compress a list of ints into range strings: [0, 1, 2, 3, 6, 7, 8] -> ["0-3", "6-8"].

    None yields ["all"]; singleton runs render without a dash.
    """
    if indices is None:
        return ["all"]
    if len(indices) == 0:
        return []
    if len(indices) == 1:
        return [str(indices[0])]

    def _fmt(lo, hi):
        # a run of length one renders as a single number
        return str(lo) if lo == hi else f"{lo}-{hi}"

    ordered = sorted(indices)
    summarized = []
    run_start = run_end = ordered[0]
    for idx in ordered[1:]:
        if idx == run_end + 1:
            run_end = idx
        else:
            summarized.append(_fmt(run_start, run_end))
            run_start = run_end = idx
    summarized.append(_fmt(run_start, run_end))
    return summarized
def list_to_string(tensor):
    """Render a tensor/array/list as a fixed-precision string, e.g. '[1.00, 2.50]'."""
    if torch.is_tensor(tensor):
        tensor = tensor.numpy()
    elif isinstance(tensor, list):
        # a torch tensor is never a list, so elif is equivalent to two ifs here
        tensor = np.array(tensor)
    return np.array2string(tensor, precision=2, separator=", ", floatmode="fixed")
def list_to_str_without_space_and_bracket(value):
    """[1, 2, 3] -> '1,2,3' (no spaces, no brackets)."""
    return ",".join(map(str, value))
def dict_to_string(obj, item_seperator="-"):
    """ {epoch: 5, batch_size: 64} --> epoch=5-batchsize=64 """
    # note: the parameter keeps its historic spelling ("seperator") for caller compatibility
    assert isinstance(obj, dict)
    parts = [f"{key}={value}" for key, value in obj.items()]
    return item_seperator.join(parts)
def float_to_scientific_notation(value, max_precision, remove_plus=True):
    """Compact scientific notation, e.g. 3.2e-06 -> '3.2e-6' (at most ``max_precision`` mantissa decimals)."""
    # start from default scientific notation (e.g. '3.20e-06')
    mantissa, exponent = ("%.*e" % (max_precision, value)).split("e")
    # cut the mantissa to at most max_precision digits after the comma
    mantissa = mantissa[:len("0.") + max_precision]
    # drop trailing zeros (and a dangling '.')
    mantissa = mantissa.rstrip("0").rstrip(".")
    # drop leading zeros of the exponent but keep its sign
    exponent = exponent[0] + exponent[1:].lstrip("0")
    if len(exponent) == 1:
        # exponent digits were all zeros -> restore a single '0'
        exponent += "0"
    if remove_plus and exponent.startswith("+"):
        exponent = exponent[1:]
    return f"{mantissa}e{exponent}"
def seconds_to_duration_str(total_seconds):
    """Format seconds as '[D-]HH:MM:SS.cc' where cc are hundredths of a second."""
    hundredths = int((total_seconds - int(total_seconds)) * 100)
    remaining = int(total_seconds)
    days, remaining = divmod(remaining, 86400)
    hours, remaining = divmod(remaining, 3600)
    minutes, seconds = divmod(remaining, 60)
    result = f"{hours:02}:{minutes:02}:{seconds:02}.{hundredths:02}"
    if days > 0:
        # only prefix the day count when at least one full day elapsed
        result = f"{days}-{result}"
    return result
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/version_check.py | src/utils/version_check.py | import logging
import sys
import packaging.version
# minimum required versions of the core training stack; checked in check_versions below
expected_torch = "2.0.0"
expected_torchvision = "0.15.0"
expected_kappabenchmark = "0.0.10"
expected_kappaconfig = "1.0.29"
expected_kappadata = "1.3.78"
expected_kappamodules = "0.1.24"
expected_kappaprofiler = "1.0.11"
expected_kappaschedules = "0.0.18"
expected_timm = "0.9.2"
expected_torchmetrics_version = "0.11.0"
# minimum python interpreter version
expected_python_major = 3
expected_python_minor = 9
def check_versions(verbose):
    """
    Assert that python and every required dependency meet the minimum versions
    declared above; optionally log each detected version.

    Note: the imports happen lazily inside this function so that the version
    check runs before the heavyweight libraries are needed elsewhere.
    """
    # when not verbose, swallow all log messages except the executable path
    log_fn = logging.info if verbose else lambda _: None
    log_fn("------------------")
    log_fn("VERSION CHECK")
    # print python environment path
    executable_log_fn = logging.info if verbose else print
    executable_log_fn(f"executable: {sys.executable}")
    # python version >= 3.7 for order preserving dict (https://docs.python.org/3/whatsnew/3.7.html)
    py_version = sys.version_info
    msg = f"upgrade python ({py_version.major}.{py_version.minor} < {expected_python_major}.{expected_python_minor})"
    # NOTE(review): this comparison rejects e.g. python 4.0 (minor 0 < 9) -- confirm intended
    assert py_version.major >= expected_python_major and py_version.minor >= expected_python_minor, msg
    log_fn(f"python version: {py_version.major}.{py_version.minor}.{py_version.micro}")
    #
    import torch
    log_fn(f"torch version: {torch.__version__}")
    assert packaging.version.parse(torch.__version__) >= packaging.version.parse(expected_torch)
    if verbose and torch.cuda.is_available():
        log_fn(f"torch.cuda version: {torch.version.cuda}")
    import torchvision
    assert packaging.version.parse(torchvision.__version__) >= packaging.version.parse(expected_torchvision)
    log_fn(f"torchvision.version: {torchvision.__version__}")

    def _check_pip_dependency(actual_version, expected_version, pip_dependency_name):
        # shared assert + log for all pip-installable dependencies below
        assert packaging.version.parse(actual_version) >= packaging.version.parse(expected_version), (
            f"upgrade {pip_dependency_name} with 'pip install {pip_dependency_name} --upgrade' "
            f"({actual_version} < {expected_version})"
        )
        log_fn(f"{pip_dependency_name} version: {actual_version}")

    import kappabenchmark
    _check_pip_dependency(kappabenchmark.__version__, expected_kappabenchmark, "kappabenchmark")
    import kappaconfig
    _check_pip_dependency(kappaconfig.__version__, expected_kappaconfig, "kappaconfig")
    import kappadata
    _check_pip_dependency(kappadata.__version__, expected_kappadata, "kappadata")
    import kappamodules
    _check_pip_dependency(kappamodules.__version__, expected_kappamodules, "kappamodules")
    import kappaprofiler
    _check_pip_dependency(kappaprofiler.__version__, expected_kappaprofiler, "kappaprofiler")
    import kappaschedules
    _check_pip_dependency(kappaschedules.__version__, expected_kappaschedules, "kappaschedules")
    import torchmetrics
    _check_pip_dependency(torchmetrics.__version__, expected_torchmetrics_version, "torchmetrics")
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/invariance_utils.py | src/utils/invariance_utils.py | import torch
from torch.nn.functional import softmax
# Method to calculate the invariance of the latent space representations
# Eq.6 in https://openreview.net/pdf?id=SCD0hn3kMHw
# features_no_aug.shape = (N,d)
# feature_dict_augs: each key represents an original sample -> number of keys = N
# each key contains an array with features with shape (L,d),
# where L is the number of augmentations
def calc_geed_cosine(features_no_aug, feature_dict_augs):
    """
    Invariance of latent representations with cosine similarity
    (Eq.6 in https://openreview.net/pdf?id=SCD0hn3kMHw).

    features_no_aug: (N, d) features of the un-augmented samples.
    feature_dict_augs: one key per original sample, each holding an (L, d)
        tensor of features of L augmentations.
    """
    num_samples = features_no_aug.shape[0]
    # normalization constant M: mean pairwise cosine similarity of the clean features
    normed = features_no_aug / features_no_aug.norm(dim=1, keepdim=True)
    pairwise_sim = normed @ normed.T
    m_norm = pairwise_sim.triu(diagonal=1).sum() * 2 / (num_samples ** 2 - num_samples)
    # per original sample: mean cosine similarity of its augmentations to their mean direction
    per_sample_sims = []
    for features in feature_dict_augs.values():
        features = features / features.norm(dim=1, keepdim=True)
        mean_feature = features.mean(dim=0)
        mean_feature = mean_feature / mean_feature.norm()
        per_sample_sims.append((features @ mean_feature).mean().item())
    geed_unnorm = sum(per_sample_sims) / len(per_sample_sims)
    return {
        'geed_normalized': geed_unnorm / m_norm.item(),
        'geed_unnormalized': geed_unnorm,
        'norm': m_norm.item(),
    }
# Method to calculate the effective invariance of a classifier without labels
# Eq.1 in https://arxiv.org/abs/2207.07065
# logits_no_augs.shape = (N,n_classes)
# logits_with_augs.shape = (N,n_classes)
def calc_ei(logits_no_augs, logits_with_augs):
    """
    Effective invariance of a classifier without labels
    (Eq.1 in https://arxiv.org/abs/2207.07065).

    Both inputs have shape (N, n_classes); returns a scalar float.
    """
    probs_clean = softmax(logits_no_augs, dim=1)
    probs_aug = softmax(logits_with_augs, dim=1)
    pred_clean = probs_clean.argmax(dim=1)
    pred_aug = probs_aug.argmax(dim=1)
    # only samples where clean and augmented predictions agree contribute
    agree = pred_clean == pred_aug
    joint = probs_clean * probs_aug
    row_idx = torch.arange(len(probs_clean))
    per_sample_ei = torch.sqrt(joint[row_idx, pred_clean]) * agree
    return per_sample_ei.mean().item()
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/knn_predict.py | src/utils/knn_predict.py | import einops
import torch
import torch.nn.functional as F
@torch.no_grad()
def knn_predict(train_x, train_y, test_x, k=10, tau=0.07, batch_normalize=True, eps=1e-6):
    """
    Soft-voting kNN classifier on cosine similarity (temperature ``tau``).
    Returns predicted class indices per test sample, or None if <2 classes exist.
    """
    n_classes = train_y.max().item() + 1
    if n_classes <= 1:
        return None
    # one-hot row per class, used to accumulate weighted votes
    class_onehot = torch.diag(torch.ones(n_classes, device=train_x.device))
    if batch_normalize:
        # standardize both splits with train statistics
        mean = train_x.mean(dim=0)
        std = train_x.std(dim=0) + eps
        train_x = (train_x - mean) / std
        test_x = (test_x - mean) / std
    # unit-length features -> dot product equals cosine similarity
    train_x = F.normalize(train_x, dim=1)
    test_x = F.normalize(test_x, dim=1)
    # cannot have more neighbors than train samples
    k = min(k, len(train_x))
    similarities = test_x @ train_x.T
    topk_similarities, topk_indices = similarities.topk(k=k, dim=1)
    # (n_test, k) neighbor labels -> one-hot votes of shape (k, n_test, n_classes)
    nn_onehot = class_onehot[train_y[topk_indices.reshape(-1)]]
    nn_onehot = nn_onehot.reshape(-1, k, n_classes).transpose(0, 1)
    # exponentially weight votes by similarity
    weights = (topk_similarities / tau).exp_()
    logits = (nn_onehot * weights.T.unsqueeze(-1)).sum(dim=0)
    return logits.argmax(dim=1)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/commands/copy_command.py | src/utils/commands/copy_command.py | from pathlib import Path
from .base.command_base import CommandBase
class CopyCommand(CommandBase):
    """Copies a text file from ``src`` to ``dst``, resolving {{variable}} placeholders in both paths and content."""

    def __init__(self, src: str, dst: str, **kwargs):
        super().__init__(**kwargs)
        self.src = Path(self._resolve_string(src)).expanduser()
        self.dst = Path(self._resolve_string(dst)).expanduser()

    def __str__(self):
        return f"{type(self).__name__}(src={self.src}, dst={self.dst})"

    def execute(self):
        self.logger.info(f"copying '{self.src}' to '{self.dst}'")
        # make sure the destination directory exists
        self.dst.parent.mkdir(exist_ok=True, parents=True)
        with open(self.src) as f:
            content = f.read()
        # substitute template variables inside the copied file as well
        content = self._resolve_string(content)
        with open(self.dst, "w") as f:
            f.write(content)
        self.logger.info(f"copied '{self.src}' to '{self.dst}'")
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/commands/__init__.py | src/utils/commands/__init__.py | from utils.factory import instantiate
def command_from_kwargs(kind, **kwargs):
    """Instantiate a command object from its ``kind`` identifier (module utils.commands.<kind>)."""
    module_names = [f"utils.commands.{kind}"]
    # the type name is the last dotted component of kind
    type_names = [kind.split(".")[-1]]
    return instantiate(module_names=module_names, type_names=type_names, **kwargs)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/commands/copy_yaml_command.py | src/utils/commands/copy_yaml_command.py | from pathlib import Path
import yaml
from .base.command_base import CommandBase
class CopyYamlCommand(CommandBase):
    """Copies a yaml file from ``src`` to ``dst``; optionally prepends key/value pairs to the root mapping."""

    def __init__(self, src: str, dst: str, prepend: dict = None, **kwargs):
        super().__init__(**kwargs)
        self.src = Path(self._resolve_string(src)).expanduser()
        self.dst = Path(self._resolve_string(dst)).expanduser()
        # NOTE: attribute keeps its historic spelling ("preprend") for compatibility
        self.preprend = prepend

    def __str__(self):
        return f"{type(self).__name__}(src={self.src}, dst={self.dst}, prepend={self.preprend})"

    def execute(self):
        self.logger.info(f"copying '{self.src}' to '{self.dst}' while prepending ({self.preprend})")
        self.dst.parent.mkdir(exist_ok=True, parents=True)
        with open(self.src) as f:
            raw = f.read()
        # resolve {{variable}} placeholders before parsing the yaml
        parsed = yaml.safe_load(self._resolve_string(raw))
        if self.preprend is not None:
            assert isinstance(parsed, dict)
            # prepended keys come first; keys already in the file win on conflict
            parsed = {**self.preprend, **parsed}
        with open(self.dst, "w") as f:
            yaml.safe_dump(parsed, f, sort_keys=False)
        self.logger.info(f"copied '{self.src}' to '{self.dst}'")
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/commands/base/command_base.py | src/utils/commands/base/command_base.py | import logging
class CommandBase:
    """
    Base class for setup commands. Provides a logger and resolves
    ``{{variable}}`` placeholders via ``_resolve_string``.
    """

    def __init__(self, stage_id, variables=None):
        self.logger = logging.getLogger(type(self).__name__)
        self.stage_id = stage_id
        # stage_id is always available as a template variable
        self.variables = variables or {}
        self.variables["stage_id"] = self.stage_id

    def __repr__(self):
        return str(self)

    def __str__(self):
        raise NotImplementedError

    def _resolve_string(self, string):
        """Replace every ``{{key}}`` occurrence with its registered value."""
        resolved = string
        for key, value in self.variables.items():
            placeholder = "{{" + key + "}}"
            resolved = resolved.replace(placeholder, value)
        return resolved

    def execute(self):
        raise NotImplementedError
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/commands/base/__init__.py | src/utils/commands/base/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/kappaconfig/schedule_template_postprocessor.py | src/utils/kappaconfig/schedule_template_postprocessor.py | import kappaconfig as kc
from utils.param_checking import to_2tuple
from .testrun_constants import TEST_RUN_EFFECTIVE_BATCH_SIZE, TEST_RUN_UPDATES_PER_EPOCH
# TODO workaround for missing feature of KappaConfig to enable list objects as template
class ScheduleTemplatePostProcessor(kc.Processor):
    """
    resolves redundantly nested schedule templates like this:
    schedule:
      schedule:
        - kind: ...
        - kind: ...
    into this:
    schedule:
      - kind: ...
      - kind: ...
    """

    def preorder_process(self, node, trace):
        if not isinstance(node, dict):
            return
        for key in list(node.keys()):
            child = node[key]
            # collapse {key: {key: value}} into {key: value}
            is_redundant_nesting = isinstance(child, dict) and len(child) == 1 and key in child
            if is_redundant_nesting:
                node[key] = child[key]
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/kappaconfig/minmodel_postprocessor.py | src/utils/kappaconfig/minmodel_postprocessor.py | import kappaconfig as kc
class MinModelPostProcessor(kc.Processor):
    """Strips hyperparameters that are incompatible with the minimal debug model."""

    def preorder_process(self, node, trace):
        if len(trace) == 0:
            return
        if not isinstance(node, dict):
            return
        if "initializers" in node:
            initializers = node["initializers"]
            # drop pretrained initializers in place (debug models load no pretrained weights)
            initializers[:] = [init for init in initializers if init["kind"] != "pretrained_initializer"]
            if not initializers:
                node.pop("initializers")
        elif node.get("kind", None) == "offline_fid_callback":
            # FID needs a real generative model -> swap in a dummy one
            node["model"] = "dummy"
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/kappaconfig/mindata_preprocessor.py | src/utils/kappaconfig/mindata_preprocessor.py | import kappaconfig as kc
from kappaconfig.entities.wrappers import KCScalar
class MinDataPreProcessor(kc.Processor):
    """Pre-resolve hook for minimal-data runs (currently a placeholder, no changes applied)."""

    def preorder_process(self, node, trace):
        if len(trace) == 0:
            return
        parent, parent_accessor = trace[-1]
        if isinstance(parent_accessor, str):
            # datasets (reduce initial loading to a minimum for fast startup)
            if parent_accessor == "datasets":
                # intentionally empty -- the shrinking happens in MinDataPostProcessor
                pass
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/kappaconfig/util.py | src/utils/kappaconfig/util.py | import logging
import shutil
from pathlib import Path
import kappaconfig as kc
import yaml
from utils.factory import create_collection
from utils.processors import processor_from_kwargs
from .mindata_postprocessor import MinDataPostProcessor
from .mindata_preprocessor import MinDataPreProcessor
from .minduration_postprocessor import MinDurationPostProcessor
from .minmodel_postprocessor import MinModelPostProcessor
from .minmodel_preprocessor import MinModelPreProcessor
from .none_postprocessor import NonePostProcessor
from .precision_preprocessor import PrecisionPreProcessor
from .remove_large_collections_postprocessor import RemoveLargeCollectionsProcessor
from .schedule_template_postprocessor import ScheduleTemplatePostProcessor
def _get_hp_file_uri(hp_file):
file_uri = Path(hp_file).expanduser().with_suffix(".yaml")
assert file_uri.exists(), f"hp_file '{file_uri}' doesn't exist"
return file_uri
def save_unresolved_hp(hp_file, out_file_uri):
    """Copy the raw (unresolved) hyperparameter yaml to ``out_file_uri``."""
    src_uri = _get_hp_file_uri(hp_file)
    shutil.copy(src_uri, out_file_uri)
    logging.info(f"copied unresolved hp to {out_file_uri}")
def save_resolved_hp(stage_hp, out_file_uri):
    """Dump the resolved hyperparameters to ``out_file_uri`` (large collections are stripped first)."""
    slim_hp = remove_large_collections(stage_hp)
    with open(out_file_uri, "w") as f:
        yaml.safe_dump(slim_hp, f, sort_keys=False)
    logging.info(f"dumped resolved hp to {out_file_uri}")
def get_stage_hp(
        hp_file,
        template_path=None,
        testrun=False,
        minmodelrun=False,
        mindatarun=False,
        mindurationrun=False,
):
    """
    Load and resolve a stage hyperparameter yaml.

    ``testrun`` implies all three minimization flags (model/data/duration),
    which register additional pre/post processors that shrink the config.
    """
    file_uri = _get_hp_file_uri(hp_file)
    run_hp = kc.from_file_uri(file_uri)
    resolver = kc.DefaultResolver(template_path=template_path)
    # always-on processors
    resolver.pre_processors.append(PrecisionPreProcessor())
    resolver.post_processors.append(NonePostProcessor())
    resolver.post_processors.append(ScheduleTemplatePostProcessor())
    # minimization processors (testrun enables all of them)
    if minmodelrun or testrun:
        resolver.pre_processors.append(MinModelPreProcessor())
        resolver.post_processors.append(MinModelPostProcessor())
    if mindatarun or testrun:
        resolver.pre_processors.append(MinDataPreProcessor())
        resolver.post_processors.append(MinDataPostProcessor())
    if mindurationrun or testrun:
        resolver.post_processors.append(MinDurationPostProcessor())
    resolved = resolver.resolve(run_hp)
    # apply custom processors defined inside the hp file itself
    if "processors" in resolved:
        for processor in create_collection(resolved.pop("processors"), processor_from_kwargs):
            processor(resolved)
    return resolved
def remove_large_collections(stage_hp):
    """Return ``stage_hp`` with oversized lists replaced by short placeholder strings."""
    wrapped = kc.from_primitive(stage_hp)
    resolver = kc.Resolver(post_processors=[RemoveLargeCollectionsProcessor()])
    return resolver.resolve(wrapped)
def log_stage_hp(stage_hp):
    """Log the resolved hyperparameters as yaml (without large collections)."""
    yaml_str = yaml.safe_dump(remove_large_collections(stage_hp), sort_keys=False)
    # safe_dump appends a trailing newline -> strip it before logging
    logging.info(f"------------------\n{yaml_str[:-1]}")
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/kappaconfig/remove_large_collections_postprocessor.py | src/utils/kappaconfig/remove_large_collections_postprocessor.py | import kappaconfig as kc
class RemoveLargeCollectionsProcessor(kc.Processor):
    """
    remove large list/dicts for prettier storing of the resolved yaml
    """
    def preorder_process(self, node, trace):
        """Replace any list/dict with more than 100 entries by a short placeholder string."""
        if len(trace) == 0:
            return
        parent, parent_accessor = trace[-1]
        if isinstance(node, list) and len(node) > 100:
            parent[parent_accessor] = f"list with length {len(node)}"
        # BUGFIX: this branch previously re-tested `isinstance(node, list)`,
        # so large dicts were never replaced by their placeholder
        elif isinstance(node, dict) and len(node) > 100:
            parent[parent_accessor] = f"dict with length {len(node)}"
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/kappaconfig/minduration_postprocessor.py | src/utils/kappaconfig/minduration_postprocessor.py | import kappaconfig as kc
from .testrun_constants import TEST_RUN_EPOCHS, TEST_RUN_UPDATES, TEST_RUN_SAMPLES, TEST_RUN_EFFECTIVE_BATCH_SIZE
class MinDurationPostProcessor(kc.Processor):
    """ limit training duration to a minimum by manipulating the configuration yaml """

    def preorder_process(self, node, trace):
        if len(trace) == 0:
            return
        parent, parent_accessor = trace[-1]
        if isinstance(parent_accessor, str):
            # trainer logging intervals
            if parent_accessor in ("log_every_n_epochs", "log_every_n_updates"):
                parent[parent_accessor] = 1
            elif parent_accessor == "log_every_n_samples":
                parent[parent_accessor] = min(parent[parent_accessor], TEST_RUN_EFFECTIVE_BATCH_SIZE)
            # training duration caps
            elif parent_accessor == "max_epochs":
                parent[parent_accessor] = min(parent[parent_accessor], TEST_RUN_EPOCHS)
            elif parent_accessor == "max_updates":
                parent[parent_accessor] = min(parent[parent_accessor], TEST_RUN_UPDATES)
            elif parent_accessor == "max_samples":
                parent[parent_accessor] = min(parent[parent_accessor], TEST_RUN_SAMPLES)
            # callback/logger intervals
            elif parent_accessor in ("every_n_epochs", "every_n_updates"):
                parent[parent_accessor] = 1
            elif parent_accessor == "every_n_samples":
                parent[parent_accessor] = TEST_RUN_EFFECTIVE_BATCH_SIZE
            # initializers pointing to a previous stage must point to the shortened checkpoint
            if parent_accessor == "initializer":
                if parent[parent_accessor]["kind"] == "previous_stage_initializer":
                    self._process_checkpoint(parent[parent_accessor], "checkpoint")
            # schedules with absolute start/end checkpoints must be clamped as well
            if "schedule" in parent_accessor:
                for schedule in parent[parent_accessor]:
                    for checkpoint_key in ("start_checkpoint", "end_checkpoint"):
                        if checkpoint_key in schedule:
                            self._process_checkpoint(schedule, checkpoint_key)

    @staticmethod
    def _process_checkpoint(parent, parent_accessor):
        """Replace an epoch/update/sample checkpoint dict with the test-run equivalent."""
        # string checkpoints (e.g. a named checkpoint) are kept as-is
        if not isinstance(parent[parent_accessor], dict):
            return
        # each check re-reads parent[parent_accessor] on purpose: once a
        # replacement happened, later keys are looked up in the NEW dict
        if "epoch" in parent[parent_accessor]:
            parent[parent_accessor] = dict(epoch=TEST_RUN_EPOCHS)
        if "update" in parent[parent_accessor]:
            parent[parent_accessor] = dict(update=TEST_RUN_UPDATES)
        if "sample" in parent[parent_accessor]:
            parent[parent_accessor] = dict(sample=TEST_RUN_SAMPLES)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/kappaconfig/none_postprocessor.py | src/utils/kappaconfig/none_postprocessor.py | import kappaconfig as kc
class NonePostProcessor(kc.Processor):
    """Converts the string "none" (any casing) into an actual python ``None``."""

    def preorder_process(self, node, trace):
        is_none_literal = isinstance(node, str) and node.lower() == "none"
        if len(trace) == 0 or not is_none_literal:
            return
        parent, parent_accessor = trace[-1]
        parent[parent_accessor] = None
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/kappaconfig/precision_preprocessor.py | src/utils/kappaconfig/precision_preprocessor.py | import kappaconfig as kc
from utils.amp_utils import FLOAT32_ALIASES, FLOAT16_ALIASES, BFLOAT16_ALIASES
class PrecisionPreProcessor(kc.Processor):
    """Normalizes all precision aliases (e.g. fp16/half) to the canonical names float32/float16/bfloat16."""

    def preorder_process(self, node, trace):
        if len(trace) == 0:
            return
        parent, parent_accessor = trace[-1]
        # non-str accessors (list indices) never equal "precision"
        if parent_accessor != "precision":
            return
        actual = parent[parent_accessor].value
        # map any alias onto its canonical precision name
        if actual in FLOAT32_ALIASES:
            canonical = "float32"
        elif actual in FLOAT16_ALIASES:
            canonical = "float16"
        elif actual in BFLOAT16_ALIASES:
            canonical = "bfloat16"
        else:
            raise NotImplementedError
        parent[parent_accessor] = kc.from_primitive(canonical)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/kappaconfig/testrun_constants.py | src/utils/kappaconfig/testrun_constants.py | TEST_RUN_EFFECTIVE_BATCH_SIZE = 8
TEST_RUN_EPOCHS = 2
TEST_RUN_UPDATES_PER_EPOCH = 3
# derived totals keep the three duration units (epochs/updates/samples) consistent
TEST_RUN_UPDATES = TEST_RUN_EPOCHS * TEST_RUN_UPDATES_PER_EPOCH
TEST_RUN_SAMPLES = TEST_RUN_UPDATES * TEST_RUN_EFFECTIVE_BATCH_SIZE
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/kappaconfig/__init__.py | src/utils/kappaconfig/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/kappaconfig/mindata_postprocessor.py | src/utils/kappaconfig/mindata_postprocessor.py | import kappaconfig as kc
from utils.param_checking import to_2tuple
from .testrun_constants import TEST_RUN_EFFECTIVE_BATCH_SIZE, TEST_RUN_UPDATES_PER_EPOCH
class MinDataPostProcessor(kc.Processor):
    """
    hyperparams for specific properties in the dictionary and replace it such that the training duration is
    limited to a minimal configuration
    """
    def preorder_process(self, node, trace):
        if len(trace) == 0:
            return
        parent, parent_accessor = trace[-1]
        if isinstance(parent_accessor, str):
            # samplers: shrink the weighted sampler to one tiny epoch
            if parent_accessor == "main_sampler_kwargs":
                if "weighted_size" in node:
                    node["weighted_size"] = TEST_RUN_EFFECTIVE_BATCH_SIZE * TEST_RUN_UPDATES_PER_EPOCH
            # datasets: switch to the tiny dataset version and subsample it
            if parent_accessor == "datasets":
                for key in node.keys():
                    if node[key]["kind"] in ["mesh_dataset", "cfd_dataset"]:
                        # tiny 2-simulation version of the dataset
                        node[key]["version"] = "v1-2sims"
                        node[key].pop("max_num_sequences", None)
                    # shuffle then subsample so a single epoch covers all updates
                    wrappers = [
                        dict(
                            kind="shuffle_wrapper",
                            seed=0,
                        ),
                        dict(
                            kind="subset_wrapper",
                            end_index=TEST_RUN_EFFECTIVE_BATCH_SIZE * TEST_RUN_UPDATES_PER_EPOCH + 1,
                        ),
                    ]
                    if "dataset_wrappers" in node[key]:
                        node[key]["dataset_wrappers"] += wrappers
                    else:
                        assert isinstance(node[key], dict), (
                            "found non-dict value inside 'datasets' node -> probably wrong template "
                            "parameter (e.g. template.version instead of template.vars.version)"
                        )
                        node[key]["dataset_wrappers"] = wrappers
            elif parent_accessor in ["effective_batch_size", "effective_labeled_batch_size"]:
                parent[parent_accessor] = min(parent[parent_accessor], TEST_RUN_EFFECTIVE_BATCH_SIZE)
            elif parent_accessor == "optim":
                # decrease lr scaling (e.g. to avoid errors when max_lr < min_lr when using a min_lr with cosine decay)
                parent[parent_accessor]["lr_scaler"] = dict(
                    kind="linear_lr_scaler",
                    divisor=TEST_RUN_EFFECTIVE_BATCH_SIZE,
                )
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/kappaconfig/minmodel_preprocessor.py | src/utils/kappaconfig/minmodel_preprocessor.py | import kappaconfig as kc
from kappaconfig.entities.wrappers import KCScalar
class MinModelPreProcessor(kc.Processor):
    """Rewrites model-size selectors in the raw config so that the tiny 'debug' model is used."""

    def preorder_process(self, node, trace):
        if len(trace) == 0:
            return
        parent, parent_accessor = trace[-1]
        if isinstance(parent_accessor, str):
            if "model_key" in parent_accessor:
                # replace the value before the first _ with "debug"
                # e.g. model_key: small --> model_key: debug
                # e.g. model_key: small_uneven --> model_key: debug_uneven
                actual = parent[parent_accessor].value
                postfixes = actual.split("_")[1:]
                new_key = "_".join(["debug"] + postfixes)
                parent[parent_accessor] = kc.from_primitive(new_key)
        # "${select:<KEY>:${yaml:models/vit}} -> "${select:debug:${yaml:models/vit}}"
        if isinstance(node, KCScalar) and isinstance(node.value, str):
            if "${select:" in node.value and ":${yaml:models/" in node.value:
                # position of the selector key depends on how deeply the expression is nested
                split = node.value.split(":")
                if len(split) == 4:
                    node.value = f"{split[0]}:debug:{split[2]}:{split[3]}"
                elif len(split) == 6:
                    node.value = f"{split[0]}:{split[1]}:{split[2]}:debug:{split[4]}:{split[5]}"
                else:
                    # unexpected nesting depth -- fail loudly instead of silently skipping
                    raise NotImplementedError
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/utils/processors/__init__.py | src/utils/processors/__init__.py | from utils.factory import instantiate
def processor_from_kwargs(kind, **kwargs):
    """Instantiate a processor object from its ``kind`` identifier (module utils.processors.<kind>)."""
    return instantiate(
        module_names=[f"utils.processors.{kind}"],
        type_names=[kind],
        **kwargs,
    )
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/__init__.py | src/callbacks/__init__.py | from utils.factory import instantiate
def callback_from_kwargs(kind, **kwargs):
    """Instantiate a callback by searching all callback subpackages for ``kind``."""
    # callbacks are grouped into subpackages -> search each of them in order
    candidate_modules = [
        f"callbacks.{kind}",
        f"callbacks.checkpoint_callbacks.{kind}",
        f"callbacks.default_callbacks.{kind}",
        f"callbacks.monitor_callbacks.{kind}",
        f"callbacks.offline_callbacks.{kind}",
        f"callbacks.online_callbacks.{kind}",
        f"callbacks.retroactive_callbacks.{kind}",
        f"callbacks.visualization.{kind}",
    ]
    return instantiate(module_names=candidate_modules, type_names=[kind], **kwargs)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/offline_callbacks/offline_lagrangian_large_t_rollout_mesh_loss_callback.py | src/callbacks/offline_callbacks/offline_lagrangian_large_t_rollout_mesh_loss_callback.py | from torch_geometric.utils import scatter
from kappadata.wrappers import ModeWrapper
from functools import partial
import einops
import torch
from callbacks.base.periodic_callback import PeriodicCallback
from utils.formatting_util import dict_to_string
class OfflineLagrangianLargeTRolloutMeshLossCallback(PeriodicCallback):
    """
    Periodically rolls out a lagrangian model with large timesteps on a mesh/cfd
    dataset and logs kinetic-energy and velocity errors against ground truth.
    Optionally dumps the raw rollout tensors to disk (rollout_kwargs['save_rollout']).
    """

    def __init__(
            self,
            dataset_key,
            num_rollout_timesteps=None,
            rollout_kwargs=None,
            **kwargs,
    ):
        # dataset_key: key of the dataset in the data_container to roll out on
        # num_rollout_timesteps: None -> resolved to the full timestep dim in _before_training
        super().__init__(**kwargs)
        self.dataset_key = dataset_key
        self.num_rollout_timesteps = num_rollout_timesteps
        self.rollout_kwargs = rollout_kwargs or {}
        self.out = self.path_provider.stage_output_path / "rollout"
        # properties that are initialized in before_training
        self.__config_id = None
        # simulation box extent per dimension (upper - lower bound)
        bounds = torch.tensor(self.data_container.get_dataset().metadata['bounds'])
        self.box = bounds[:, 1] - bounds[:, 0]
        # Get index for all timesteps which are predicted:
        # every large_t-th step together with its successor is evaluated
        n_pushforward_timesteps = self.data_container.get_dataset().n_pushforward_timesteps
        n_vels_traj = self.data_container.get_dataset().n_seq - 1
        large_t = n_pushforward_timesteps + 1
        time_indicies = [(i, i+1) for i in range(large_t, n_vels_traj-2, large_t)]
        self.time_indicies = torch.tensor([item for sublist in time_indicies for item in sublist])

    def _before_training(self, trainer, **kwargs):
        """Create the output directory and resolve/validate num_rollout_timesteps."""
        self.out.mkdir(exist_ok=True)
        dataset, _ = self.data_container.get_dataset(key=self.dataset_key, mode=trainer.dataset_mode)
        # how many timesteps to roll out?
        if self.num_rollout_timesteps is None:
            self.num_rollout_timesteps = dataset.getdim_timestep()
        else:
            assert 0 < self.num_rollout_timesteps <= dataset.getdim_timestep()

    def _register_sampler_configs(self, trainer):
        self.__config_id = self._register_sampler_config_from_key(key=self.dataset_key, mode=trainer.dataset_mode)

    def _forward(self, batch, model, trainer, trainer_model):
        """Roll out one batch and compute kinetic-energy / velocity error metrics."""
        # prepare data
        batch, ctx = batch
        # x is needed to encode the first latent
        x = ModeWrapper.get_item(mode=trainer.dataset_mode, item="x", batch=batch)
        x = x.to(model.device, non_blocking=True)
        # all positions of the sequence are needed for decoding
        all_pos = ModeWrapper.get_item(mode=trainer.dataset_mode, item="all_pos", batch=batch)
        all_pos = all_pos.to(model.device, non_blocking=True)
        # all velocities are needed to compare the predictions
        all_vel = ModeWrapper.get_item(mode=trainer.dataset_mode, item="all_vel", batch=batch)
        all_vel = all_vel.to(model.device, non_blocking=True)
        # get the timestep (models trained with a constant timestep receive None)
        if 'const_timestep' in trainer.forward_kwargs and trainer.forward_kwargs['const_timestep']:
            timestep = None
        else:
            timestep = ModeWrapper.get_item(mode=trainer.dataset_mode, item="timestep", batch=batch)
            timestep = timestep.to(model.device, non_blocking=True)
        edge_index = ModeWrapper.get_item(mode=trainer.dataset_mode, item="edge_index", batch=batch)
        edge_index = edge_index.to(model.device, non_blocking=True)
        batch_idx = ctx["batch_idx"].to(model.device, non_blocking=True)
        # Flatten input: stack the input timesteps into the feature dimension
        x = einops.rearrange(
            x,
            "a num_input_timesteps dim -> a (num_input_timesteps dim)",
        )
        unbatch_idx = ctx["unbatch_idx"].to(model.device, non_blocking=True)
        unbatch_select = ctx["unbatch_select"].to(model.device, non_blocking=True)
        # rollout
        with trainer.autocast_context:
            vel_predictions = model.rollout_large_t(
                x=x,
                all_pos=all_pos,
                timestep=timestep,
                edge_index=edge_index,
                batch_idx=batch_idx,
                unbatch_idx=unbatch_idx,
                unbatch_select=unbatch_select
            )
        # Prepare target: flatten batch and particle dims, then select evaluated timesteps
        all_vel = einops.rearrange(
            all_vel,
            "bs time n_particles dim -> (bs n_particles) time dim"
        )
        all_vel_target = all_vel[:,self.time_indicies,:]
        # physical constants from the dataset metadata
        dt = self.data_container.get_dataset().metadata["dt"] * self.data_container.get_dataset().metadata["write_every"]
        dx = self.data_container.get_dataset().metadata["dx"]
        dim = self.data_container.get_dataset().metadata["dim"]
        # Unnormalize velocity
        all_vel_target = self.data_container.get_dataset().unnormalize_vel(all_vel_target)
        # NOTE(review): the following self-assignments are no-ops (kept byte-identical)
        all_vel_target = all_vel_target
        vel_predictions = self.data_container.get_dataset().unnormalize_vel(vel_predictions)
        vel_predictions = vel_predictions
        # Unbatch: split the flattened (bs n_particles) dim back into per-sample tensors
        all_vel_target = einops.rearrange(
            all_vel_target,
            "(bs n_particles) time dim -> bs n_particles time dim",
            bs=len(unbatch_select)
        )
        vel_predictions = einops.rearrange(
            vel_predictions,
            "(bs n_particles) time dim -> bs n_particles time dim",
            bs=len(unbatch_select)
        )
        # Calculate ekin like in lagrangebench: sum of squared velocities scaled by cell volume
        ekin_predictions = ((vel_predictions / dt) ** 2).sum(dim=(1,3))
        ekin_predictions = ekin_predictions * dx**dim
        ekin_target = ((all_vel_target / dt) ** 2).sum(dim=(1,3))
        ekin_target = ekin_target * dx**dim
        # mean absolute / relative velocity error per sample
        diff_norm = (vel_predictions - all_vel_target).norm(dim=3).mean(dim=(1,2))
        relative_norm = ((vel_predictions - all_vel_target).norm(dim=3) / all_vel_target.norm(dim=3)).mean(dim=(1,2))
        results_dict = {
            "predicted": ekin_predictions.mean(dim=1),
            "target": ekin_target.mean(dim=1),
            "mse": ((ekin_predictions - ekin_target) ** 2).mean(),
            "vel_error": diff_norm,
            "vel_error_relative": relative_norm
        }
        # optionally dump raw rollout tensors for later analysis
        if self.rollout_kwargs['save_rollout']:
            rollout_dict = {'ekin_target': ekin_target,
                            'ekin_predictions': ekin_predictions,
                            'vel_target': all_vel_target,
                            'vel_predictions': vel_predictions,
                            'traj_idx': ctx['traj_idx'],
                            'time_idx': self.time_indicies}
            outpath = self.out / f"rollout_results_{str(self.update_counter.cur_checkpoint).lower()}.pt"
            torch.save(rollout_dict, outpath)
        return results_dict

    # noinspection PyMethodOverriding
    def _periodic_callback(self, model, trainer, trainer_model, batch_size, data_iter, **_):
        """Iterate over the whole dataset, aggregate per-batch results and log scalar metrics."""
        results = self.iterate_over_dataset(
            forward_fn=partial(self._forward, model=model, trainer=trainer, trainer_model=trainer_model),
            config_id=self.__config_id,
            batch_size=batch_size,
            data_iter=data_iter,
        )
        # log deltas
        for key in results.keys():
            metric_identifier = f"{self.dataset_key}/{key}"
            if len(self.rollout_kwargs) > 0:
                # NOTE(review): this reassignment is a no-op -- presumably rollout_kwargs
                # were meant to be appended to the identifier; confirm intended behavior
                metric_identifier = f"{metric_identifier}"
            self.writer.add_scalar(
                key=f"ekin/{metric_identifier}",
                value=results[key].mean(),
                logger=self.logger,
                format_str=".10f",
            )
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/offline_callbacks/offline_rollout_mesh_callback.py | src/callbacks/offline_callbacks/offline_rollout_mesh_callback.py | import matplotlib.pyplot as plt
import os
from torchvision.transforms.functional import to_tensor, to_pil_image
from PIL import Image
import io
from datasets.collators.cfd_simformer_collator import CfdSimformerCollator
import scipy
from functools import partial
import einops
import torch
from kappadata.wrappers import ModeWrapper
from kappautils.images.png import png_writer_viridis
from torchvision.datasets.folder import default_loader
from callbacks.base.periodic_callback import PeriodicCallback
from utils.formatting_util import dict_to_string
from kappautils.images.points_to_image import coords_to_image
from utils.param_checking import to_2tuple
class OfflineRolloutMeshCallback(PeriodicCallback):
    """Rolls a model out over mesh-based CFD trajectories and visualizes the results.

    Per sample, ground truth, prediction and their absolute delta are stacked
    vertically, rendered frame-by-frame (pressure, velocities and/or velocity
    magnitude) and optionally written as gif and/or individual pngs. Also logs
    the mean denormalized prediction error.
    """

    def __init__(
        self,
        dataset_key,
        num_rollout_timesteps=None,
        use_teacher_forcing=False,
        rollout_kwargs=None,
        resolution=None,
        save_gif=False,
        save_pngs=False,
        save_plots=False,
        visualize_pressure=False,
        visualize_velocities=False,
        visualize_velocity_magnitude=True,
        duration_per_frame=100,
        visualization_backend="torch",
        **kwargs,
    ):
        """
        Args:
            dataset_key: key of the dataset (in the data_container) to roll out on.
            num_rollout_timesteps: number of steps to roll out; None defaults to
                the full trajectory length (resolved in ``_before_training``).
            use_teacher_forcing: use ``model.rollout_teacher_forced`` (ground-truth
                inputs each step) instead of autoregressive ``model.rollout``.
            rollout_kwargs: extra kwargs forwarded to the rollout call.
            resolution: image resolution used by the "torch" visualization backend.
            save_gif: write one gif per visualized quantity.
            save_pngs: write one png per frame.
            save_plots: currently unimplemented (raises NotImplementedError).
            visualize_pressure: render channel 0.
            visualize_velocities: render channels 1 and 2 separately.
            visualize_velocity_magnitude: render the 2-norm of channels 1:.
            duration_per_frame: gif frame duration in milliseconds.
            visualization_backend: "torch" or "matplotlib".
        """
        super().__init__(**kwargs)
        self.dataset_key = dataset_key
        # properties that are initialized in before_training
        self.__config_id = None
        self.out = None
        self.dataset = None
        self.num_rollout_timesteps = num_rollout_timesteps
        self.use_teacher_forcing = use_teacher_forcing
        self.rollout_kwargs = rollout_kwargs or {}
        self.resolution = resolution
        # what to save (gif and/or png)
        self.save_gif = save_gif
        self.save_pngs = save_pngs
        self.save_plots = save_plots
        # what to visualize (pressure and/or separate velocities and/or velocity magnitude)
        self.visualize_pressure = visualize_pressure
        self.visualize_velocities = visualize_velocities
        self.visualize_velocity_magnitude = visualize_velocity_magnitude
        # visualization params
        self.duration_per_frame = duration_per_frame
        self.visualization_backend = visualization_backend

    def _before_training(self, **kwargs):
        """Creates output folders, resolves the dataset and the rollout length."""
        # workaround for "duplicate OpenMP runtime" crashes on Windows
        if os.name == "nt" and "KMP_DUPLICATE_LIB_OK" not in os.environ:
            os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
        self.out = self.path_provider.stage_output_path / "rollout"
        self.out.mkdir(exist_ok=True)
        # NOTE(review): the gifs folder is created even when save_gif is False
        (self.out / "gifs").mkdir(exist_ok=True)
        if self.save_pngs:
            (self.out / "pngs").mkdir(exist_ok=True)
        if self.save_plots:
            (self.out / "plots").mkdir(exist_ok=True)
        self.dataset, collator = self.data_container.get_dataset(key=self.dataset_key, mode=self.dataset_mode)
        assert isinstance(collator.collator, CfdSimformerCollator)
        # how many timesteps to roll out? (default: full trajectory length)
        if self.num_rollout_timesteps is None:
            self.num_rollout_timesteps = self.dataset.getdim_timestep()
        else:
            assert 0 < self.num_rollout_timesteps <= self.dataset.getdim_timestep()

    @property
    def dataset_mode(self):
        # items that have to be loaded per sample for the rollout
        return "index pos edge_index x geometry2d velocity"

    def _register_sampler_configs(self, trainer):
        """Registers the dataset/mode combination so a loader is prepared for it."""
        self.__config_id = self._register_sampler_config_from_key(key=self.dataset_key, mode=self.dataset_mode)

    @staticmethod
    def _tensor_to_pil_matplotlib(data, progress, pos):
        """Renders a (3, num_points) tensor as a stacked scatter-plot PIL image.

        The three rows (ground truth, prediction, delta) are plotted at the point
        positions ``pos`` and stacked vertically; a progress bar row is prepended.
        """
        y, x = pos.cpu().unbind(1)
        data = data.cpu()
        # convert points (3, num_points) to image (3, height, width)
        with io.BytesIO() as buffer:
            images = []
            for i in range(3):
                # NOTE(review): buffer is rewound but never truncated between
                # iterations, and no plt.clf() is called -- verify that frames
                # do not accumulate stale bytes/points
                buffer.seek(0)
                plt.scatter(x, y, s=0.01, c=data[i].cpu(), cmap="viridis")
                plt.axis("off")
                # hard-coded domain extent -- presumably matches the simulation grid; TODO confirm
                plt.xlim(0, 300)
                plt.ylim(0, 200)
                plt.savefig(buffer, bbox_inches="tight", format="jpg")
                images.append(to_tensor(Image.open(buffer)))
            data = torch.concat(images, dim=1).to(pos.device)
        # add a progress line on top
        progress_tensor = torch.zeros(size=(data.size(0), 1, data.size(2)), dtype=data.dtype, device=data.device)
        progress_tensor[:, :, :round(progress * data.size(2))] = 1
        data = torch.concat([progress_tensor, data], dim=1)
        return to_pil_image(data)

    def _tensor_to_pil_torch(self, data, progress, pos):
        """Rasterizes a (3, num_points) tensor into a viridis PIL image via a temp file."""
        # convert points (3, num_points) to image (3, height, width)
        data = torch.stack([
            coords_to_image(
                coords=pos,
                resolution=self.resolution,
                weights=data[i],
            )
            for i in range(3)
        ])
        # normalize each of the 3 sub-images independently to [0, 1]
        # data has shape (3, height, width)
        data_min = data.flatten(start_dim=1).min(dim=1).values
        data -= data_min.view(-1, 1, 1)
        data_max = data.flatten(start_dim=1).max(dim=1).values
        data /= data_max.view(-1, 1, 1)
        # stack images ontop of each other (ground_truth, prediction, delta)
        data = einops.rearrange(data, "three height width -> (three height) width")
        # add a progress line on top
        progress_tensor = torch.zeros(size=(1, data.size(1),), dtype=data.dtype, device=data.device)
        progress_tensor[:, :round(progress * data.size(1))] = 1
        data = torch.concat([progress_tensor, data])
        # to image (round-trip through a temp png to apply the viridis colormap)
        temp_out = self.path_provider.get_temp_path() / f"{self.path_provider.stage_id}.png"
        png_writer_viridis(data.unsqueeze(0), temp_out)
        pil = default_loader(temp_out)
        return pil

    def tensor_to_pil(self, data, progress, pos):
        """Dispatches to the configured visualization backend."""
        if self.visualization_backend == "matplotlib":
            return self._tensor_to_pil_matplotlib(data=data, progress=progress, pos=pos)
        if self.visualization_backend == "torch":
            return self._tensor_to_pil_torch(data=data, progress=progress, pos=pos)
        raise NotImplementedError

    def visualize(self, idx, trajectories, deltas, pos):
        """Writes gif/png visualizations for one rolled-out sample.

        Args:
            idx: dataset index of the sample (must contain exactly one element).
            trajectories: stacked (ground truth, prediction) point trajectories.
            deltas: absolute prediction error, same layout as one trajectory half.
            pos: point positions used for rasterization.
        """
        if sum([self.visualize_pressure, self.visualize_velocities, self.visualize_velocity_magnitude]) == 0:
            return
        if sum([self.save_gif, self.save_pngs]) == 0:
            return
        assert idx.numel() == 1, "only batchsize=1 is supported for now"
        # concatenate prediction with delta
        # (2 * batch_size * num_points, num_rollout_timesteps, num_channels) ->
        # (3 * batch_size * num_points, num_rollout_timesteps, num_channels)
        trajectories = torch.concat([trajectories, deltas])
        # generate gifs + images (single-element loop: batchsize is asserted to 1 above)
        for i, trajectory in enumerate([trajectories]):
            prefix = f"{self.dataset_key}_{self.update_counter.cur_checkpoint}_{self.visualization_backend}"
            if len(self.rollout_kwargs) > 0:
                prefix = f"{prefix}_{dict_to_string(self.rollout_kwargs, item_seperator='-')}"
            if self.use_teacher_forcing:
                prefix = f"{prefix}_tforced"
            prefix = f"{prefix}_idx{idx[i]:04d}"
            data = {}
            # what to visualize (pressure, velocity magnitude, separate velocities)
            # channel layout: 0 = pressure, 1: = velocity components -- TODO confirm against dataset
            if self.visualize_pressure:
                data["pressure"] = trajectory[:, :, 0]
            if self.visualize_velocities:
                data["v0"] = trajectory[:, :, 1]
                data["v1"] = trajectory[:, :, 2]
            if self.visualize_velocity_magnitude:
                velocity = trajectory[:, :, 1:]
                velocity_magnitude = torch.sqrt(torch.sum(velocity ** 2, dim=2))
                data["vmag"] = velocity_magnitude
            for name, item in data.items():
                # generate images
                self.logger.info(f"generating {name} images")
                # split the stacked (gt, pred, delta) axis so each frame gets all three rows
                item = einops.rearrange(
                    item,
                    "(three num_points) num_rollout_timesteps -> num_rollout_timesteps three num_points",
                    three=3,
                )
                imgs = [
                    self.tensor_to_pil(
                        item[j],
                        # max(1, ...) avoids division by zero for a single frame
                        progress=j / max(1, (len(item) - 1)),
                        pos=pos,
                    )
                    for j in range(len(item))
                ]
                if self.save_gif:
                    uri = self.out / "gifs" / f"{name}_{prefix}.gif"
                    self.logger.info(f"generating {name} gif '{uri.as_posix()}'")
                    imgs[0].save(
                        fp=uri,
                        format="GIF",
                        append_images=imgs[1:],
                        save_all=True,
                        duration=self.duration_per_frame,
                        loop=0,
                    )
                if self.save_pngs:
                    self.logger.info(f"storing individual {name} pngs")
                    for j, img in enumerate(imgs):
                        img.save(self.out / "pngs" / f"{name}_{prefix}_ts{j:04d}.png")

    def _forward(self, batch, model, trainer):
        """Rolls out one batch, visualizes it and returns the mean denormalized error."""
        # prepare data
        batch, ctx = batch
        idx = ModeWrapper.get_item(mode=self.dataset_mode, item="index", batch=batch)
        x = ModeWrapper.get_item(mode=self.dataset_mode, item="x", batch=batch)
        geometry2d = ModeWrapper.get_item(mode=self.dataset_mode, item="geometry2d", batch=batch)
        geometry2d = geometry2d.to(model.device, non_blocking=True)
        velocity = ModeWrapper.get_item(mode=self.dataset_mode, item="velocity", batch=batch)
        velocity = velocity.to(model.device, non_blocking=True)
        pos = ModeWrapper.get_item(mode=self.dataset_mode, item="pos", batch=batch)
        pos = pos.to(model.device, non_blocking=True)
        padded_pos = ctx["padded_pos"].to(model.device, non_blocking=True)
        batch_idx = ctx["batch_idx"].to(model.device, non_blocking=True)
        unbatch_idx = ctx["unbatch_idx"].to(model.device, non_blocking=True)
        unbatch_select = ctx["unbatch_select"].to(model.device, non_blocking=True)
        edge_index = ModeWrapper.get_item(mode=self.dataset_mode, item="edge_index", batch=batch)
        edge_index = edge_index.to(model.device, non_blocking=True)
        assert x.ndim == 3, "expected data to be of shape (bs * num_points, num_total_timesteps + 1, num_channels)"
        # cut away excess timesteps (keep t0 + num_rollout_timesteps targets)
        if x.size(1) != self.num_rollout_timesteps + 1:
            x = x[:, :self.num_rollout_timesteps + 1]
        x = x.to(model.device, non_blocking=True)
        # rollout
        with trainer.autocast_context:
            if self.use_teacher_forcing:
                assert self.num_rollout_timesteps + 1 == x.size(1)
                predictions = model.rollout_teacher_forced(
                    x=x,
                    geometry2d=geometry2d,
                    velocity=velocity,
                    pos=pos,
                    padded_pos=padded_pos,
                    batch_idx=batch_idx,
                    unbatch_idx=unbatch_idx,
                    unbatch_select=unbatch_select,
                    edge_index=edge_index,
                    num_rollout_timesteps=self.num_rollout_timesteps,
                    **self.rollout_kwargs,
                )
            else:
                predictions = model.rollout(
                    x0=x[:, 0],
                    geometry2d=geometry2d,
                    velocity=velocity,
                    pos=pos,
                    padded_pos=padded_pos,
                    batch_idx=batch_idx,
                    unbatch_idx=unbatch_idx,
                    unbatch_select=unbatch_select,
                    edge_index=edge_index,
                    num_rollout_timesteps=self.num_rollout_timesteps,
                    **self.rollout_kwargs,
                )
        # ground truth excludes t0
        ground_truth = x[:, 1:1 + self.num_rollout_timesteps]
        # concatenate prediction with ground truth (along height dimension)
        # (batch_size * num_points, num_rollout_timesteps, num_channels) ->
        # (2 * batch_size * num_points, num_rollout_timesteps, num_channels)
        trajectories = torch.concat([ground_truth, predictions])
        # calculate normalized normalized_deltas
        # dont calculate to save memory
        # normalized_deltas = (ground_truth - predictions).abs()
        # free memory
        del ground_truth
        del predictions
        # denormalize (from mean=0 std=1 to original value range)
        # dataset.denormalize expects channels before points -> rearrange around it
        trajectories = einops.rearrange(
            trajectories,
            "num_points num_timesteps num_channels -> num_timesteps num_channels num_points",
        )
        trajectories = self.dataset.denormalize(trajectories, inplace=True)
        trajectories = einops.rearrange(
            trajectories,
            "num_timesteps num_channels num_points -> num_points num_timesteps num_channels",
        )
        # calculate denormalized delta
        denormed_ground_truth, denormed_predictions = trajectories.chunk(2)
        denormalized_deltas = (denormed_ground_truth - denormed_predictions).abs()
        # calculate movement: i.e. how much changes between timesteps (\hat{x}_t - \hat{x}_{t-1})
        # denormed_movement = (denormed_predictions - denormed_predictions.roll(shifts=(-1,), dims=(1,)))[:, :-1]
        # denormed_movement = denormed_movement.abs()
        # generate visualizations
        self.visualize(idx=idx, trajectories=trajectories, deltas=denormalized_deltas, pos=pos)
        # calculate deltas ("losses")
        results = dict(
            # overall_normalized_delta=normalized_deltas.flatten(start_dim=1).mean(dim=-1),
            overall_denormalized_delta=denormalized_deltas.flatten(start_dim=1).mean(dim=-1),
            # overall_denormalized_movement=denormed_movement.flatten(start_dim=1).mean(dim=1),
        )
        if self.save_plots:
            # results.update(
            #     delta_per_channel=einops.rearrange(
            #         normalized_deltas,
            #         "bs num_rollout_steps num_channels ... -> bs num_channels (num_rollout_steps ...)"
            #     ).mean(dim=-1),
            #     delta_per_timestep=einops.rearrange(
            #         normalized_deltas,
            #         "bs num_rollout_steps num_channels ... -> bs num_rollout_steps (num_channels ...)"
            #     ).mean(dim=-1),
            #     delta_per_channel_per_timestep=einops.rearrange(
            #         normalized_deltas,
            #         "bs num_rollout_steps num_channels ... -> bs num_rollout_steps num_channels (...)"
            #     ).mean(dim=-1),
            # )
            raise NotImplementedError
        return results

    # noinspection PyMethodOverriding
    def _periodic_callback(self, model, trainer, batch_size, data_iter, **_):
        """Iterates the dataset, logs the mean denormalized rollout error."""
        results = self.iterate_over_dataset(
            forward_fn=partial(self._forward, model=model, trainer=trainer),
            config_id=self.__config_id,
            batch_size=batch_size,
            data_iter=data_iter,
        )
        # log deltas
        metric_identifier = f"{self.dataset_key}/0to{self.num_rollout_timesteps}"
        # file_identifier = f"{metric_identifier.replace('/', '_')}_{self.update_counter.cur_checkpoint}"
        if len(self.rollout_kwargs) > 0:
            metric_identifier = f"{metric_identifier}/{dict_to_string(self.rollout_kwargs)}"
            # file_identifier = f"{file_identifier}_{dict_to_string(self.rollout_kwargs, item_seperator='_')}"
        if self.use_teacher_forcing:
            metric_identifier = f"{metric_identifier}/tforced"
            # file_identifier = f"{file_identifier}_tforced"
        # overall
        # self.writer.add_scalar(
        #     key=f"delta/{metric_identifier}/overall/normalized",
        #     value=results["overall_normalized_delta"].mean(),
        #     logger=self.logger,
        #     format_str=".10f",
        # )
        self.writer.add_scalar(
            key=f"delta/{metric_identifier}/overall/denormalized",
            value=results["overall_denormalized_delta"].mean(),
            logger=self.logger,
            format_str=".10f",
        )
        # self.writer.add_scalar(
        #     key=f"movement/{metric_identifier}/overall/denormalized",
        #     value=results["overall_denormalized_movement"].mean(),
        #     logger=self.logger,
        #     format_str=".10f",
        # )
        # plots
        if self.save_plots:
            # torch.save(
            #     results["delta_per_channel"].mean(dim=0),
            #     self.out / "plots" / f"PerChannel_{file_identifier}.th",
            # )
            # torch.save(
            #     results["delta_per_timestep"].mean(dim=0),
            #     self.out / "plots" / f"PerTimestep_{file_identifier}.th",
            # )
            # torch.save(
            #     results["delta_per_channel_per_timestep"].mean(dim=0),
            #     self.out / "plots" / f"PerChannelPerTimestep_{file_identifier}.th",
            # )
            raise NotImplementedError
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/offline_callbacks/offline_rollout_speed_callback.py | src/callbacks/offline_callbacks/offline_rollout_speed_callback.py | import kappaprofiler as kp
import einops
from functools import partial
import torch
from kappadata.wrappers import ModeWrapper
from callbacks.base.periodic_callback import PeriodicCallback
from datasets.collators.cfd_simformer_collator import CfdSimformerCollator
class OfflineRolloutSpeedCallback(PeriodicCallback):
    """Measures the wall-clock time of rolling the model out over an offline dataset.

    The rollout result is reduced to a single scalar purely to force a device
    synchronization point, so that the measured time includes all queued work.
    """

    def __init__(
        self,
        dataset_key,
        num_rollout_timesteps=None,
        rollout_kwargs=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.dataset_key = dataset_key
        self.num_rollout_timesteps = num_rollout_timesteps
        self.rollout_kwargs = rollout_kwargs or {}
        # resolved later in _register_sampler_configs / _before_training
        self.__config_id = None
        self.dataset = None

    def _register_sampler_configs(self, trainer):
        # register the dataset/mode combination so a loader is prepared for it
        self.__config_id = self._register_sampler_config_from_key(key=self.dataset_key, mode=trainer.dataset_mode)

    def _before_training(self, trainer, **kwargs):
        self.dataset, _ = self.data_container.get_dataset(key=self.dataset_key, mode=trainer.dataset_mode)
        # default to the full trajectory length, otherwise validate the requested length
        max_timesteps = self.dataset.getdim_timestep()
        if self.num_rollout_timesteps is None:
            self.num_rollout_timesteps = max_timesteps
        else:
            assert 0 < self.num_rollout_timesteps <= max_timesteps

    def _forward(self, batch, model, trainer, trainer_model):
        """Rolls out a single batch and returns a scalar to synchronize the device."""
        prepared = trainer_model.prepare(batch)
        batch, ctx = batch
        target = prepared.pop("target")
        x = prepared.pop("x")
        assert x.ndim == 2, "expected data to be of shape (bs * num_points, input_dim)"
        assert target.ndim == 3, "expected data to be of shape (bs * num_points, input_dim, max_timesteps)"
        # the model consumes multiple input timesteps at once -> tile the single
        # input frame along the channel dimension until it matches the model input dim
        _, model_input_dim = model.input_shape
        _, x_dim = x.shape
        assert model_input_dim % x_dim == 0
        x = einops.repeat(
            x,
            "batch_num_points num_channels -> batch_num_points (num_input_timesteps num_channels)",
            num_input_timesteps=model_input_dim // x_dim,
        )
        # the rollout counts timesteps itself
        prepared.pop("timestep", None)
        # rollout
        with trainer.autocast_context:
            x_hat = model.rollout(
                x=x,
                num_rollout_timesteps=self.num_rollout_timesteps,
                **prepared,
                **self.rollout_kwargs,
            )
        # calculate something to have synchronization point
        return x_hat.mean().item()

    # noinspection PyMethodOverriding
    def _periodic_callback(self, model, trainer, trainer_model, batch_size, data_iter, **_):
        """Times a full pass of rollouts over the dataset and logs the duration."""
        stopwatch = kp.Stopwatch()
        with stopwatch:
            self.iterate_over_dataset(
                forward_fn=partial(self._forward, model=model, trainer=trainer, trainer_model=trainer_model),
                config_id=self.__config_id,
                batch_size=batch_size,
                data_iter=data_iter,
            )
        self.logger.info(f"rollout took: {stopwatch.elapsed_seconds:.3f} seconds")
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/offline_callbacks/offline_loss_callback.py | src/callbacks/offline_callbacks/offline_loss_callback.py | import torch
from functools import partial
from callbacks.base.periodic_callback import PeriodicCallback
from utils.object_from_kwargs import objects_from_kwargs
class OfflineLossCallback(PeriodicCallback):
    """Computes per-sample losses of the trainer_model on an offline dataset and logs them.

    Optionally stores the raw per-sample loss tensors to disk and additionally logs
    model outputs whose key matches one of ``output_patterns_to_log``. When the
    corresponding online (train) loss is cached, the generalization gap is logged too.
    """

    def __init__(
        self,
        dataset_key,
        output_patterns_to_log=None,
        forward_kwargs=None,
        save_losses=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.dataset_key = dataset_key
        self.forward_kwargs = objects_from_kwargs(forward_kwargs)
        self.output_patterns_to_log = output_patterns_to_log or []
        self.save_losses = save_losses
        # resolved in _register_sampler_configs
        self.__config_id = None
        self.out = None

    def _register_sampler_configs(self, trainer):
        self.__config_id = self._register_sampler_config_from_key(key=self.dataset_key, mode=trainer.dataset_mode)
        self.out = self.path_provider.stage_output_path / "losses"
        self.out.mkdir(exist_ok=True)

    def _forward(self, batch, trainer_model, trainer):
        """Evaluates one batch; returns (per-sample losses, filtered outputs) on CPU."""
        with trainer.autocast_context:
            losses, outputs = trainer_model(batch=batch, reduction="mean_per_sample", **self.forward_kwargs)
        losses = {name: loss.cpu() for name, loss in losses.items()}
        # keep only outputs whose key matches one of the configured patterns
        outputs_to_log = {
            key: value.cpu()
            for key, value in outputs.items()
            if any(pattern in key for pattern in self.output_patterns_to_log)
        }
        return losses, outputs_to_log

    # noinspection PyMethodOverriding
    def _periodic_callback(self, model, trainer_model, trainer, batch_size, data_iter, **_):
        """Aggregates losses/outputs over the dataset and writes scalars (and files)."""
        losses, outputs = self.iterate_over_dataset(
            forward_fn=partial(self._forward, trainer_model=trainer_model, trainer=trainer),
            config_id=self.__config_id,
            batch_size=batch_size,
            data_iter=data_iter,
        )
        # optionally persist the raw per-sample loss tensors
        if self.save_losses:
            for loss_name, loss in losses.items():
                fname = f"{self.dataset_key}_{loss_name}_{self.update_counter.cur_checkpoint}.th"
                torch.save(loss, self.out / fname)
        # log mean loss (and, when available, the gap to the cached train loss)
        for loss_name, loss in losses.items():
            assert loss.ndim == 1, "loss has to be calculated sample-wise to avoid errors through batch"
            mean_loss = loss.mean()
            self.writer.add_scalar(
                key=f"loss/{self.dataset_key}/{loss_name}",
                value=mean_loss,
                logger=self.logger,
                format_str=".7f",
            )
            train_loss = self.writer.log_cache.get(f"loss/online/{loss_name}/{self.to_short_interval_string()}", None)
            if train_loss is None:
                continue
            self.writer.add_scalar(
                key=f"lossdiff/{self.dataset_key}/{loss_name}",
                value=mean_loss - train_loss,
                logger=self.logger,
                format_str=".7f",
            )
        # log filtered model outputs
        for name, output in outputs.items():
            assert output.ndim == 1, f"output has to be calculated sample-wise (name={name} shape={output.shape})"
            self.writer.add_scalar(
                key=f"{name}/{self.dataset_key}",
                value=output.float().mean(),
                logger=self.logger,
                format_str=".7f",
            )
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/offline_callbacks/offline_rollout_mesh_gif_callback.py | src/callbacks/offline_callbacks/offline_rollout_mesh_gif_callback.py | import matplotlib.pyplot as plt
import os
from torchvision.transforms.functional import to_tensor, to_pil_image
from PIL import Image
import io
from datasets.collators.cfd_simformer_collator import CfdSimformerCollator
import scipy
from functools import partial
import einops
import torch
from kappadata.wrappers import ModeWrapper
from kappautils.images.png import png_writer_viridis
from torchvision.datasets.folder import default_loader
from callbacks.base.periodic_callback import PeriodicCallback
from utils.formatting_util import dict_to_string
from kappautils.images.points_to_image import coords_to_image
from utils.param_checking import to_2tuple
from functools import partial
import einops
import torch
from callbacks.base.periodic_callback import PeriodicCallback
from utils.formatting_util import dict_to_string
from kappadata.wrappers import ModeWrapper
class OfflineRolloutMeshGifCallback(PeriodicCallback):
    """Rolls a model out over mesh trajectories and writes velocity-magnitude gifs.

    Per sample (batch_size is forced to 1), ground truth, prediction and their
    absolute delta are stacked vertically and rendered frame-by-frame into a gif.
    """

    def __init__(
        self,
        dataset_key,
        resolution,
        num_rollout_timesteps=None,
        rollout_kwargs=None,
        **kwargs,
    ):
        """
        Args:
            dataset_key: key of the dataset (in the data_container) to roll out on.
            resolution: target image resolution for rasterizing the point cloud.
            num_rollout_timesteps: number of steps to roll out; None defaults to
                the full trajectory length (resolved in ``_before_training``).
            rollout_kwargs: extra kwargs forwarded to ``model.rollout``.
        """
        # batch_size=1: visualization below assumes a single sample per batch
        super().__init__(batch_size=1, **kwargs)
        self.dataset_key = dataset_key
        self.resolution = resolution
        self.num_rollout_timesteps = num_rollout_timesteps
        self.rollout_kwargs = rollout_kwargs or {}
        # properties that are initialized in before_training
        self.__config_id = None
        self.dataset_mode = None
        self.dataset = None
        self.out = None

    def _register_sampler_configs(self, trainer):
        """Extends the trainer's dataset mode with "index" and registers the sampler."""
        self.dataset_mode = ModeWrapper.add_item(mode=trainer.dataset_mode, item="index")
        self.dataset, _ = self.data_container.get_dataset(key=self.dataset_key, mode=self.dataset_mode)
        self.__config_id = self._register_sampler_config_from_key(key=self.dataset_key, mode=self.dataset_mode)

    def _before_training(self, trainer, **kwargs):
        """Creates the output folder and resolves the rollout length."""
        self.out = self.path_provider.stage_output_path / "rollout"
        self.out.mkdir(exist_ok=True)
        # how many timesteps to roll out? (default: full trajectory length)
        if self.num_rollout_timesteps is None:
            self.num_rollout_timesteps = self.dataset.getdim_timestep()
        else:
            assert 0 < self.num_rollout_timesteps <= self.dataset.getdim_timestep()

    def _tensor_to_pil_torch(self, data, progress, pos):
        """Rasterizes a (3, num_points) tensor into a viridis PIL image (in memory)."""
        # convert points (3, num_points) to image (3, height, width)
        data = torch.stack([
            coords_to_image(
                coords=pos,
                resolution=self.resolution,
                weights=data[i],
            )
            for i in range(3)
        ])
        # normalize each of the 3 sub-images independently to [0, 1]
        # data has shape (3, height, width)
        data_min = data.flatten(start_dim=1).min(dim=1).values
        data -= data_min.view(-1, 1, 1)
        data_max = data.flatten(start_dim=1).max(dim=1).values
        data /= data_max.view(-1, 1, 1)
        # stack images ontop of each other (ground_truth, prediction, delta)
        data = einops.rearrange(data, "three height width -> (three height) width")
        # add a progress line on top
        progress_tensor = torch.zeros(size=(1, data.size(1),), dtype=data.dtype, device=data.device)
        progress_tensor[:, :round(progress * data.size(1))] = 1
        data = torch.concat([progress_tensor, data])
        # to image (round-trip through an in-memory png to apply the viridis colormap)
        with io.BytesIO() as buffer:
            png_writer_viridis(data.unsqueeze(0), buffer, save_format="png")
            buffer.seek(0)
            img = Image.open(buffer)
            # convert materializes the pixel data before the buffer is closed
            pil = img.convert("RGB")
        return pil

    def _forward(self, batch, model, trainer, trainer_model):
        """Rolls out one sample and writes the velocity-magnitude gif."""
        data = trainer_model.prepare(batch, dataset_mode=self.dataset_mode, mode="rollout")
        batch, ctx = batch
        idx = ModeWrapper.get_item(mode=self.dataset_mode, item="index", batch=batch)
        assert "target" not in data
        x = data.pop("x")
        assert x.ndim == 3, "expected data to be of shape (bs * num_points, num_total_timesteps + 1, input_dim)"
        # cut away excess timesteps
        if x.size(1) != self.num_rollout_timesteps + 1:
            x = x[:, :self.num_rollout_timesteps + 1]
        # concat input timesteps: the model consumes num_input_timesteps frames at once,
        # so the initial frame is tiled along the channel dimension
        model_input_dim, _ = model.input_shape
        _, _, x_input_dim = x.shape
        assert model_input_dim % x_input_dim == 0
        num_input_timesteps = model_input_dim // x_input_dim
        x0 = einops.repeat(
            x[:, 0],
            "batch_num_points num_channels ... -> batch_num_points (num_input_timesteps num_channels) ...",
            num_input_timesteps=num_input_timesteps,
        )
        # timestep is manually counted
        data.pop("timestep", None)
        # rollout
        with trainer.autocast_context:
            predictions = model.rollout(
                x0=x0,
                num_rollout_timesteps=self.num_rollout_timesteps,
                **data,
                **self.rollout_kwargs,
            )
        # ground truth excludes t0
        ground_truth = x[:, 1:1 + self.num_rollout_timesteps]
        # concatenate prediction with ground truth (along height dimension)
        # (batch_size * num_points, num_rollout_timesteps, num_channels) ->
        # (2 * batch_size * num_points, num_rollout_timesteps, num_channels)
        trajectories = torch.concat([ground_truth, predictions])
        # free memory
        del ground_truth
        del predictions
        # denormalize (from mean=0 std=1 to original value range)
        trajectories = self.dataset.denormalize(trajectories, inplace=True, dim=2)
        # calculate denormalized delta
        denormed_ground_truth, denormed_predictions = trajectories.chunk(2)
        denormalized_deltas = (denormed_ground_truth - denormed_predictions).abs()
        # concatenate prediction with delta
        # (2 * num_points, num_rollout_timesteps, num_channels) ->
        # (3 * num_points, num_rollout_timesteps, num_channels)
        trajectories = torch.concat([trajectories, denormalized_deltas])
        # get positions of points
        if "mesh_pos" in data:
            pos = data["mesh_pos"]
        elif "pos" in data:
            pos = data["pos"]
        else:
            raise NotImplementedError
        # generate gifs (single-element loop: batch_size is fixed to 1 in __init__)
        for i, trajectory in enumerate([trajectories]):
            prefix = f"{self.dataset_key}_{self.update_counter.cur_checkpoint}"
            if len(self.rollout_kwargs) > 0:
                prefix = f"{prefix}_{dict_to_string(self.rollout_kwargs, item_seperator='-')}"
            prefix = f"{prefix}_idx{idx[i]:04d}"
            # calculate velocity magnitude
            # channels 1: are presumably velocity components -- TODO confirm against dataset
            velocity = trajectory[:, :, 1:]
            velocity_magnitude = torch.sqrt(torch.sum(velocity ** 2, dim=2))
            # generate images
            self.logger.info(f"generating vmag images")
            # split the stacked (gt, pred, delta) axis so each frame gets all three rows
            velocity_magnitude = einops.rearrange(
                velocity_magnitude,
                "(three num_points) num_rollout_timesteps -> num_rollout_timesteps three num_points",
                three=3,
            )
            imgs = [
                self._tensor_to_pil_torch(
                    velocity_magnitude[j],
                    # max(1, ...) avoids division by zero for a single frame
                    progress=j / max(1, (len(velocity_magnitude) - 1)),
                    pos=pos,
                )
                for j in range(len(velocity_magnitude))
            ]
            uri = self.out / f"vmag_{prefix}.gif"
            self.logger.info(f"generating vmag gif '{uri.as_posix()}'")
            imgs[0].save(
                fp=uri,
                format="GIF",
                append_images=imgs[1:],
                save_all=True,
                duration=100,
                loop=0,
            )

    # noinspection PyMethodOverriding
    def _periodic_callback(self, model, trainer, trainer_model, batch_size, data_iter, **_):
        """Iterates the dataset purely for the gif-writing side effect of _forward."""
        self.iterate_over_dataset(
            forward_fn=partial(self._forward, model=model, trainer=trainer, trainer_model=trainer_model),
            config_id=self.__config_id,
            batch_size=batch_size,
            data_iter=data_iter,
        )
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/offline_callbacks/offline_correlation_time_interpolated_callback.py | src/callbacks/offline_callbacks/offline_correlation_time_interpolated_callback.py | import einops
from functools import partial
import torch
from kappadata.wrappers import ModeWrapper
from callbacks.base.periodic_callback import PeriodicCallback
from datasets.collators.cfd_simformer_collator import CfdSimformerCollator
class OfflineCorrelationTimeInterpolatedCallback(PeriodicCallback):
    """Logs prediction/target correlation and "correlation time" for rolled-out trajectories.

    Correlation time (per threshold) is the first index at which the mean
    correlation drops below the threshold, following the pdearena metric.
    """

    def __init__(
        self,
        dataset_key,
        num_rollout_timesteps=None,
        rollout_kwargs=None,
        **kwargs,
    ):
        """
        Args:
            dataset_key: key of the dataset (in the data_container) to roll out on.
            num_rollout_timesteps: number of steps to roll out; None defaults to
                the full trajectory length (resolved in ``_before_training``).
            rollout_kwargs: extra kwargs forwarded to ``model.rollout``.
        """
        super().__init__(**kwargs)
        self.dataset_key = dataset_key
        # properties that are initialized in before_training
        self.__config_id = None
        self.dataset = None
        self.num_rollout_timesteps = num_rollout_timesteps
        self.rollout_kwargs = rollout_kwargs or {}

    def _register_sampler_configs(self, trainer):
        """Registers the dataset/mode combination so a loader is prepared for it."""
        self.__config_id = self._register_sampler_config_from_key(key=self.dataset_key, mode=trainer.dataset_mode)

    def _before_training(self, trainer, **kwargs):
        """Resolves the dataset and the rollout length."""
        self.dataset, _ = self.data_container.get_dataset(key=self.dataset_key, mode=trainer.dataset_mode)
        # how many timesteps to roll out? (default: full trajectory length)
        if self.num_rollout_timesteps is None:
            self.num_rollout_timesteps = self.dataset.getdim_timestep()
        else:
            assert 0 < self.num_rollout_timesteps <= self.dataset.getdim_timestep()

    def _forward(self, batch, model, trainer, trainer_model):
        """Rolls out one batch and returns mean correlations, stacked per sample."""
        data = trainer_model.prepare(batch)
        batch, ctx = batch
        target = data.pop("target")
        x = data.pop("x")
        # NOTE(review): the assert message says (bs, dim, height, width) but the
        # einops pattern below treats the LAST axis as dim -- confirm the layout
        assert x.ndim == 4, "expected data to be of shape (bs, dim, height, width)"
        assert target.ndim == 3, "expected data to be of shape (bs * num_points, input_dim, max_timesteps)"
        # cut away excess timesteps
        if target.size(2) != self.num_rollout_timesteps:
            target = target[:, :, :self.num_rollout_timesteps]
        # concat input timesteps: tile the single input frame along the channel
        # dimension until it matches the model input dim
        _, model_input_dim = model.input_shape
        _, _, _, x_dim = x.shape
        assert model_input_dim % x_dim == 0
        num_input_timesteps = model_input_dim // x_dim
        x = einops.repeat(
            x,
            "batch_size height width dim -> batch_size height width (num_input_timesteps dim)",
            num_input_timesteps=num_input_timesteps,
        )
        # timestep is manually counted
        data.pop("timestep", None)
        # rollout
        with trainer.autocast_context:
            x_hat = model.rollout(
                x=x,
                num_rollout_timesteps=self.num_rollout_timesteps,
                **data,
                **self.rollout_kwargs,
            )
        # mesh data is in sparse format -> iterate over samples in batch
        start = 0
        mean_corrs_per_timestep = []
        num_query_pos = data["query_pos"].size(1)
        for _ in range(len(x)):
            # select all points of current sample
            end = start + num_query_pos
            cur_preds = x_hat[start:end]
            cur_target = target[start:end]
            # calculate correlation time
            # https://github.com/microsoft/pdearena/blob/main/pdearena/modules/loss.py#L39
            # NOTE(review): all reductions below are over dim=2 (the timestep axis
            # per the target-shape assert), so this computes correlation OVER time,
            # yet the result is consumed as "per timestep" -- verify the axes
            cur_preds_mean = torch.mean(cur_preds, dim=2, keepdim=True)
            cur_target_mean = torch.mean(cur_target, dim=2, keepdim=True)
            cur_preds_std = torch.std(cur_preds, dim=2, unbiased=False)
            cur_target_std = torch.std(cur_target, dim=2, unbiased=False)
            # calculate mean correlation per timestep
            # clamp guards against division by zero for constant signals
            mean_corr_per_timestep = (
                torch.mean((cur_preds - cur_preds_mean) * (cur_target - cur_target_mean), dim=2)
                / (cur_preds_std * cur_target_std).clamp(min=1e-12)
            ).mean(dim=0)
            mean_corrs_per_timestep.append(mean_corr_per_timestep)
            start = end
        mean_corrs_per_timestep = torch.stack(mean_corrs_per_timestep)
        return mean_corrs_per_timestep

    # noinspection PyMethodOverriding
    def _periodic_callback(self, model, trainer, trainer_model, batch_size, data_iter, **_):
        """Aggregates correlations over the dataset and logs correlation(-time) scalars."""
        mean_corrs_per_timestep = self.iterate_over_dataset(
            forward_fn=partial(self._forward, model=model, trainer=trainer, trainer_model=trainer_model),
            config_id=self.__config_id,
            batch_size=batch_size,
            data_iter=data_iter,
        )
        # average correlation over all timesteps
        self.writer.add_scalar(
            key=f"correlation/{self.dataset_key}",
            value=mean_corrs_per_timestep.mean(),
            logger=self.logger,
            format_str=".4f",
        )
        # timestep until correlation is above a threshold
        for thresh in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
            # get timestep where correlation is < thresh
            # (min on a bool tensor returns the first False and its index)
            min_values, min_indices = (mean_corrs_per_timestep >= thresh).min(dim=1)
            # if correlation is >= thresh all the time min_indices is 0 -> set to num_rollout_timesteps
            min_indices[min_values] = self.num_rollout_timesteps
            mean_corr_time = min_indices.float().mean()
            self.writer.add_scalar(
                key=f"correlation_time/thresh{str(thresh).replace('.', '')}/{self.dataset_key}",
                value=mean_corr_time,
                logger=self.logger,
                format_str=".4f",
            )
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/offline_callbacks/offline_lagrangian_large_t_rollout_speed_callback.py | src/callbacks/offline_callbacks/offline_lagrangian_large_t_rollout_speed_callback.py | from torch_geometric.utils import scatter
import kappaprofiler as kp
from kappadata.wrappers import ModeWrapper
from functools import partial
import einops
import torch
from callbacks.base.periodic_callback import PeriodicCallback
from utils.formatting_util import dict_to_string
class OfflineLagrangianLargeTRolloutSpeedCallback(PeriodicCallback):
    """Times a large-timestep lagrangian rollout over a dataset.

    Only wall-clock time is logged; predictions are reduced to a scalar in
    ``_forward`` so nothing large is accumulated or stored.
    """

    def __init__(
            self,
            dataset_key,
            num_rollout_timesteps=None,
            rollout_kwargs=None,
            **kwargs,
    ):
        """
        Args:
            dataset_key: key of the dataset (in the data container) to roll out on.
            num_rollout_timesteps: how many timesteps to roll out; defaults to the
                full trajectory length of the dataset (resolved in _before_training).
            rollout_kwargs: currently stored but not forwarded by this callback.
        """
        super().__init__(**kwargs)
        self.dataset_key = dataset_key
        self.num_rollout_timesteps = num_rollout_timesteps
        self.rollout_kwargs = rollout_kwargs or {}
        self.out = self.path_provider.stage_output_path / "rollout"
        # properties that are initialized in before_training
        self.__config_id = None
        # simulation domain extent per axis (upper bound - lower bound)
        bounds = torch.tensor(self.data_container.get_dataset().metadata['bounds'])
        self.box = bounds[:, 1] - bounds[:, 0]

    def _before_training(self, trainer, **kwargs):
        """Create the output directory and resolve the rollout length."""
        self.out.mkdir(exist_ok=True)
        dataset, _ = self.data_container.get_dataset(key=self.dataset_key, mode=trainer.dataset_mode)
        # how many timesteps to roll out?
        if self.num_rollout_timesteps is None:
            self.num_rollout_timesteps = dataset.getdim_timestep()
        else:
            assert 0 < self.num_rollout_timesteps <= dataset.getdim_timestep()

    def _register_sampler_configs(self, trainer):
        """Register the sampler configuration for iterating the evaluation dataset."""
        self.__config_id = self._register_sampler_config_from_key(key=self.dataset_key, mode=trainer.dataset_mode)

    def _forward(self, batch, model, trainer, trainer_model):
        """Run one timed rollout on a batch; returns a throwaway scalar."""
        # prepare data
        batch, ctx = batch
        # x is needed to encode the first latent
        x = ModeWrapper.get_item(mode=trainer.dataset_mode, item="x", batch=batch)
        x = x.to(model.device, non_blocking=True)
        # all positions of the sequence are needed for decoding
        all_pos = ModeWrapper.get_item(mode=trainer.dataset_mode, item="all_pos", batch=batch)
        all_pos = all_pos.to(model.device, non_blocking=True)
        # all velocities are needed to compare the predictions
        # NOTE(review): all_vel is loaded and moved to the device but never used below —
        # presumably kept so data loading cost matches the non-timing rollout callback; confirm.
        all_vel = ModeWrapper.get_item(mode=trainer.dataset_mode, item="all_vel", batch=batch)
        all_vel = all_vel.to(model.device, non_blocking=True)
        # get the timestep (skipped when the trainer runs with a constant timestep)
        if 'const_timestep' in trainer.forward_kwargs and trainer.forward_kwargs['const_timestep']:
            timestep = None
        else:
            timestep = ModeWrapper.get_item(mode=trainer.dataset_mode, item="timestep", batch=batch)
            timestep = timestep.to(model.device, non_blocking=True)
        edge_index = ModeWrapper.get_item(mode=trainer.dataset_mode, item="edge_index", batch=batch)
        edge_index = edge_index.to(model.device, non_blocking=True)
        batch_idx = ctx["batch_idx"].to(model.device, non_blocking=True)
        # inputs are the velocities of all timesteps, flattened into the channel dim
        x = einops.rearrange(
            x,
            "a num_input_timesteps dim -> a (num_input_timesteps dim)",
        )
        unbatch_idx = ctx["unbatch_idx"].to(model.device, non_blocking=True)
        unbatch_select = ctx["unbatch_select"].to(model.device, non_blocking=True)
        # rollout
        with trainer.autocast_context:
            vel_pred = model.rollout_large_t_timing(
                x=x,
                all_pos=all_pos,
                timestep=timestep,
                edge_index=edge_index,
                batch_idx=batch_idx,
                unbatch_idx=unbatch_idx,
                unbatch_select=unbatch_select
            )
        # reduce to a python scalar so iterate_over_dataset does not accumulate tensors
        return vel_pred.mean().item()

    # noinspection PyMethodOverriding
    def _periodic_callback(self, model, trainer, trainer_model, batch_size, data_iter, **_):
        """Time a full pass of rollouts over the dataset and log the duration."""
        with kp.Stopwatch() as sw:
            self.iterate_over_dataset(
                forward_fn=partial(self._forward, model=model, trainer=trainer, trainer_model=trainer_model),
                config_id=self.__config_id,
                batch_size=batch_size,
                data_iter=data_iter,
            )
        self.logger.info(f"rollout took: {sw.elapsed_seconds:.3f} seconds")
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/offline_callbacks/offline_cfd_rollout_callback.py | src/callbacks/offline_callbacks/offline_cfd_rollout_callback.py | import einops
from functools import partial
import torch
from kappadata.wrappers import ModeWrapper
from callbacks.base.periodic_callback import PeriodicCallback
from datasets.collators.cfd_simformer_collator import CfdSimformerCollator
class OfflineCfdRolloutCallback(PeriodicCallback):
    """Rolls a CFD model out over a dataset and stores per-sample predictions to disk.

    For every sample, saves rollout, target, mesh positions (as float16) and the
    raw ``target_t0`` tensor under a running counter in ``<stage_output>/rollout``.
    """

    def __init__(
            self,
            dataset_key,
            num_rollout_timesteps=None,
            rollout_kwargs=None,
            **kwargs,
    ):
        """
        Args:
            dataset_key: key of the dataset (in the data container) to roll out on.
            num_rollout_timesteps: how many timesteps to roll out; defaults to the
                full trajectory length of the dataset (resolved in _before_training).
            rollout_kwargs: extra keyword arguments forwarded to ``model.rollout``.
        """
        super().__init__(**kwargs)
        self.dataset_key = dataset_key
        # properties that are initialized in before_training
        self.__config_id = None
        self.dataset = None
        self.num_rollout_timesteps = num_rollout_timesteps
        self.rollout_kwargs = rollout_kwargs or {}
        self.out = self.path_provider.stage_output_path / "rollout"
        # running file index, reset at the start of every periodic invocation
        self.counter = 0
        self.dataset_mode = None

    def _register_sampler_configs(self, trainer):
        """Register the sampler config; additionally requests the target_t0 item."""
        self.dataset_mode = f"{trainer.dataset_mode} target_t0"
        self.__config_id = self._register_sampler_config_from_key(key=self.dataset_key, mode=self.dataset_mode)

    def _before_training(self, trainer, **kwargs):
        """Create the output directory and resolve the rollout length."""
        self.out.mkdir(exist_ok=True)
        self.dataset, _ = self.data_container.get_dataset(key=self.dataset_key, mode=trainer.dataset_mode)
        # how many timesteps to roll out?
        if self.num_rollout_timesteps is None:
            self.num_rollout_timesteps = self.dataset.getdim_timestep()
        else:
            assert 0 < self.num_rollout_timesteps <= self.dataset.getdim_timestep()

    def _forward(self, batch, model, trainer, trainer_model):
        """Roll out one batch and save per-sample tensors; returns nothing."""
        data = trainer_model.prepare(batch)
        batch, ctx = batch
        target = data.pop("target")
        x = data.pop("x")
        assert x.ndim == 2, "expected data to be of shape (bs * num_points, input_dim)"
        assert target.ndim == 3, "expected data to be of shape (bs * num_points, input_dim, max_timesteps)"
        # load t0
        target_t0 = ModeWrapper.get_item(mode=self.dataset_mode, item="target_t0", batch=batch)
        # cut away excess timesteps
        if target.size(2) != self.num_rollout_timesteps:
            target = target[:, :, :self.num_rollout_timesteps]
        # concat input timesteps: the model expects num_input_timesteps copies of the
        # initial state stacked along the channel dimension
        _, model_input_dim = model.input_shape
        _, x_dim = x.shape
        assert model_input_dim % x_dim == 0
        num_input_timesteps = model_input_dim // x_dim
        x = einops.repeat(
            x,
            "batch_num_points num_channels -> batch_num_points (num_input_timesteps num_channels)",
            num_input_timesteps=num_input_timesteps,
        )
        # timestep is manually counted
        data.pop("timestep", None)
        # rollout
        with trainer.autocast_context:
            x_hat = model.rollout(
                x=x,
                num_rollout_timesteps=self.num_rollout_timesteps,
                **data,
                **self.rollout_kwargs,
            )
        # mesh data is in sparse format -> iterate over samples in batch
        start = 0
        if "batch_idx" in data:
            batch_idx = data["batch_idx"]
        else:
            batch_idx = ctx["batch_idx"].to(model.device)
        batch_size = batch_idx.unique().numel()
        for i in range(batch_size):
            # select all points of current sample
            end = start + (batch_idx == i).sum()
            cur_preds = x_hat[start:end]
            cur_target = target[start:end]
            cur_mesh_pos = data["mesh_pos"][start:end]
            # NOTE(review): target_t0 is indexed per-sample along dim 0 but with the
            # sparse offsets start:end along dim 1 — presumably target_t0 is padded
            # per sample to the max point count; confirm against the collator.
            cur_target_t0 = target_t0[i, start:end]
            # store in half precision to save disk space (t0 is kept as-is)
            torch.save(cur_preds.half().clone(), self.out / f"{self.counter:04d}_rollout.th")
            torch.save(cur_target.half().clone(), self.out / f"{self.counter:04d}_target.th")
            torch.save(cur_mesh_pos.half().clone(), self.out / f"{self.counter:04d}_meshpos.th")
            torch.save(cur_target_t0, self.out / f"{self.counter:04d}_t0.th")
            self.counter += 1
            start = end

    # noinspection PyMethodOverriding
    def _periodic_callback(self, model, trainer, trainer_model, batch_size, data_iter, **_):
        """Iterate the whole dataset, saving per-sample rollouts as a side effect."""
        self.counter = 0
        self.iterate_over_dataset(
            forward_fn=partial(self._forward, model=model, trainer=trainer, trainer_model=trainer_model),
            config_id=self.__config_id,
            batch_size=batch_size,
            data_iter=data_iter,
        )
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/offline_callbacks/offline_cfd_rollout_mesh_gif_callback.py | src/callbacks/offline_callbacks/offline_cfd_rollout_mesh_gif_callback.py | import io
from functools import partial
import einops
import torch
from PIL import Image
from kappadata.wrappers import ModeWrapper
from kappautils.images.png import png_writer_viridis
from kappautils.images.points_to_image import coords_to_image
from callbacks.base.periodic_callback import PeriodicCallback
from utils.formatting_util import dict_to_string
class OfflineCfdRolloutMeshGifCallback(PeriodicCallback):
    """Rolls a CFD model out over a mesh dataset and renders velocity-magnitude GIFs.

    Each frame stacks ground truth, prediction and their absolute delta vertically
    with a progress bar on top; one GIF is written per sample index.
    """

    def __init__(
            self,
            dataset_key,
            resolution,
            num_rollout_timesteps=None,
            rollout_kwargs=None,
            **kwargs,
    ):
        """
        Args:
            dataset_key: key of the dataset (in the data container) to roll out on.
            resolution: target image resolution for rasterizing mesh points.
            num_rollout_timesteps: how many timesteps to roll out; defaults to the
                full trajectory length of the dataset (resolved in _before_training).
            rollout_kwargs: extra keyword arguments forwarded to ``model.rollout``.
        """
        super().__init__(**kwargs)
        self.dataset_key = dataset_key
        self.resolution = resolution
        self.num_rollout_timesteps = num_rollout_timesteps
        self.rollout_kwargs = rollout_kwargs or {}
        self.out = self.path_provider.stage_output_path / "rollout"
        # properties that are initialized in before_training
        self.dataset = None
        self.dataset_mode = None
        self.__config_id = None

    def _register_sampler_configs(self, trainer):
        """Register the sampler config; additionally requests the sample index item."""
        self.dataset_mode = ModeWrapper.add_item(mode=trainer.dataset_mode, item="index")
        self.__config_id = self._register_sampler_config_from_key(key=self.dataset_key, mode=self.dataset_mode)

    def _before_training(self, trainer, **kwargs):
        """Create the output directory and resolve the rollout length."""
        self.out.mkdir(exist_ok=True)
        self.dataset, _ = self.data_container.get_dataset(key=self.dataset_key, mode=self.dataset_mode)
        # how many timesteps to roll out?
        if self.num_rollout_timesteps is None:
            self.num_rollout_timesteps = self.dataset.getdim_timestep()
        else:
            assert 0 < self.num_rollout_timesteps <= self.dataset.getdim_timestep()

    def _tensor_to_pil_torch(self, data, progress, pos):
        """Rasterize three point clouds into one stacked RGB frame.

        Args:
            data: tensor of shape (3, num_points) — ground truth, prediction, delta.
            progress: rollout progress in [0, 1]; drawn as a line at the frame top.
            pos: mesh point coordinates used for rasterization.
        """
        # convert points (3, num_points) to image (3, height, width)
        data = torch.stack([
            coords_to_image(
                coords=pos,
                resolution=self.resolution,
                weights=data[i],
            )
            for i in range(3)
        ])
        # normalize to [0, 1]
        # data has shape (3, height, width); per-image min is subtracted first,
        # so dividing by the max of the shifted data maps each image to [0, 1]
        data_min = data.flatten(start_dim=1).min(dim=1).values
        data -= data_min.view(-1, 1, 1)
        data_max = data.flatten(start_dim=1).max(dim=1).values
        data /= data_max.view(-1, 1, 1)
        # stack images ontop of each other (ground_truth, prediction, delta)
        data = einops.rearrange(data, "three height width -> (three height) width")
        # add a progress line on top
        progress_tensor = torch.zeros(size=(1, data.size(1),), dtype=data.dtype, device=data.device)
        progress_tensor[:, :round(progress * data.size(1))] = 1
        data = torch.concat([progress_tensor, data])
        # to image (in-memory PNG roundtrip through the viridis colormap writer)
        with io.BytesIO() as buffer:
            png_writer_viridis(data.unsqueeze(0), buffer, save_format="png")
            buffer.seek(0)
            img = Image.open(buffer)
            pil = img.convert("RGB")
        return pil

    def _forward(self, batch, model, trainer, trainer_model):
        """Roll out one batch and write a velocity-magnitude GIF per sample index."""
        data = trainer_model.prepare(batch, dataset_mode=self.dataset_mode)
        batch, _ = batch
        index = ModeWrapper.get_item(mode=self.dataset_mode, item="index", batch=batch)
        target = data.pop("target")
        x = data.pop("x")
        assert x.ndim == 2, "expected data to be of shape (bs * num_points, input_dim)"
        assert target.ndim == 3, "expected data to be of shape (bs * num_points, input_dim, max_timesteps)"
        # cut away excess timesteps
        if target.size(2) != self.num_rollout_timesteps:
            target = target[:, :, :self.num_rollout_timesteps]
        # concat input timesteps: the model expects num_input_timesteps copies of the
        # initial state stacked along the channel dimension
        _, model_input_dim = model.input_shape
        _, x_dim = x.shape
        assert model_input_dim % x_dim == 0
        num_input_timesteps = model_input_dim // x_dim
        x = einops.repeat(
            x,
            "batch_num_points num_channels -> batch_num_points (num_input_timesteps num_channels)",
            num_input_timesteps=num_input_timesteps,
        )
        # timestep is manually counted
        data.pop("timestep", None)
        # rollout
        with trainer.autocast_context:
            x_hat = model.rollout(
                x=x,
                num_rollout_timesteps=self.num_rollout_timesteps,
                **data,
                **self.rollout_kwargs,
            )
        # denormalize shape=(total_num_points, dim, num_rollout_timesteps)
        x_hat = self.dataset.denormalize(x_hat, inplace=True)
        target = self.dataset.denormalize(target, inplace=True)
        # calculate velocity magnitude (channel 0 is pressure, channels 1: are velocity)
        # (total_num_points, dim, num_rollout_timesteps) -> (total_num_points, num_rollout_timesteps)
        x_hat = x_hat[:, 1:].norm(dim=1)
        target = target[:, 1:].norm(dim=1)
        # delta shape=(total_num_points, num_rollout_timesteps)
        delta = (x_hat - target).abs()
        # generate gifs
        # NOTE(review): the point tensors are not sliced per sample inside this loop —
        # presumably this callback is used with batch_size 1; confirm.
        for i in range(len(index)):
            # setup prefix
            prefix = f"{self.dataset_key}_{self.update_counter.cur_checkpoint}"
            if len(self.rollout_kwargs) > 0:
                prefix = f"{prefix}_{dict_to_string(self.rollout_kwargs, item_seperator='-')}"
            prefix = f"{prefix}_idx{index[i]:04d}"
            # generate images
            imgs = []
            for t in range(self.num_rollout_timesteps):
                img = self._tensor_to_pil_torch(
                    data=torch.stack([target[:, t], x_hat[:, t], delta[:, t]]),
                    progress=t / max(1, (self.num_rollout_timesteps - 1)),
                    pos=data["mesh_pos"],
                )
                imgs.append(img)
            uri = self.out / f"vmag_{prefix}.gif"
            imgs[0].save(
                fp=uri,
                format="GIF",
                append_images=imgs[1:],
                save_all=True,
                duration=100,
                loop=0,
            )

    # noinspection PyMethodOverriding
    def _periodic_callback(self, model, trainer, trainer_model, batch_size, data_iter, **_):
        """Iterate the whole dataset, writing GIFs as a side effect."""
        self.logger.info(f"out: {self.out.as_posix()}")
        self.iterate_over_dataset(
            forward_fn=partial(self._forward, model=model, trainer=trainer, trainer_model=trainer_model),
            config_id=self.__config_id,
            batch_size=batch_size,
            data_iter=data_iter,
        )
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/offline_callbacks/offline_pred_callback.py | src/callbacks/offline_callbacks/offline_pred_callback.py | import torch
from functools import partial
from callbacks.base.periodic_callback import PeriodicCallback
from utils.object_from_kwargs import objects_from_kwargs
class OfflinePredCallback(PeriodicCallback):
    """Collects model predictions and targets over a dataset and stores both to disk."""

    def __init__(self, dataset_key, forward_kwargs=None, **kwargs):
        super().__init__(**kwargs)
        self.dataset_key = dataset_key
        self.forward_kwargs = objects_from_kwargs(forward_kwargs)
        # initialized in _register_sampler_configs
        self.__config_id = None
        self.out = None

    def _register_sampler_configs(self, trainer):
        """Register the sampler config and create the prediction output directory."""
        self.__config_id = self._register_sampler_config_from_key(key=self.dataset_key, mode=trainer.dataset_mode)
        self.out = self.path_provider.stage_output_path / "pred"
        self.out.mkdir(exist_ok=True)

    @staticmethod
    def _forward(batch, trainer_model, trainer, model):
        """Run one forward pass; returns (prediction, target) for accumulation."""
        prepared = trainer_model.prepare(batch)
        # pop ground truth so it is not fed to the model
        target = prepared.pop("target")
        with trainer.autocast_context:
            model_outputs = model(**prepared)
        return model_outputs["x_hat"], target.clone()

    # noinspection PyMethodOverriding
    def _periodic_callback(self, model, trainer_model, trainer, batch_size, data_iter, **_):
        """Gather predictions/targets over the dataset and save them as .th files."""
        preds, targets = self.iterate_over_dataset(
            forward_fn=partial(self._forward, trainer_model=trainer_model, trainer=trainer, model=model),
            config_id=self.__config_id,
            batch_size=batch_size,
            data_iter=data_iter,
        )
        pred_uri = self.out / f"pred_{self.dataset_key}_{self.update_counter.cur_checkpoint}.th"
        target_uri = self.out / f"target_{self.dataset_key}_{self.update_counter.cur_checkpoint}.th"
        self.logger.info(f"saving predictions to: {pred_uri.as_posix()}")
        torch.save(preds, pred_uri)
        torch.save(targets, target_uri)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/offline_callbacks/offline_correlation_time_callback.py | src/callbacks/offline_callbacks/offline_correlation_time_callback.py | import einops
from functools import partial
import torch
from kappadata.wrappers import ModeWrapper
from callbacks.base.periodic_callback import PeriodicCallback
from datasets.collators.cfd_simformer_collator import CfdSimformerCollator
class OfflineCorrelationTimeCallback(PeriodicCallback):
    """Evaluates rollouts via the Pearson correlation between prediction and ground truth.

    Logs the mean correlation over all timesteps and, per threshold, the mean
    number of timesteps until the correlation first drops below the threshold.
    """

    def __init__(
            self,
            dataset_key,
            num_rollout_timesteps=None,
            rollout_kwargs=None,
            **kwargs,
    ):
        """
        Args:
            dataset_key: key of the dataset (in the data container) to roll out on.
            num_rollout_timesteps: how many timesteps to roll out; defaults to the
                full trajectory length of the dataset (resolved in _before_training).
            rollout_kwargs: extra keyword arguments forwarded to ``model.rollout``.
        """
        super().__init__(**kwargs)
        self.dataset_key = dataset_key
        # properties that are initialized in before_training
        self.__config_id = None
        self.dataset = None
        self.num_rollout_timesteps = num_rollout_timesteps
        self.rollout_kwargs = rollout_kwargs or {}

    def _register_sampler_configs(self, trainer):
        """Register the sampler configuration for iterating the evaluation dataset."""
        self.__config_id = self._register_sampler_config_from_key(key=self.dataset_key, mode=trainer.dataset_mode)

    def _before_training(self, trainer, **kwargs):
        """Resolve the rollout length from the dataset if it was not given explicitly."""
        self.dataset, _ = self.data_container.get_dataset(key=self.dataset_key, mode=trainer.dataset_mode)
        # how many timesteps to roll out?
        if self.num_rollout_timesteps is None:
            self.num_rollout_timesteps = self.dataset.getdim_timestep()
        else:
            assert 0 < self.num_rollout_timesteps <= self.dataset.getdim_timestep()

    def _forward(self, batch, model, trainer, trainer_model):
        """Roll out one batch and return per-sample correlation curves.

        Returns:
            Tensor of shape (batch_size, num_rollout_timesteps) with the mean
            Pearson correlation per timestep of each sample.
        """
        data = trainer_model.prepare(batch)
        batch, ctx = batch
        target = data.pop("target")
        x = data.pop("x")
        assert x.ndim == 2, "expected data to be of shape (bs * num_points, input_dim)"
        assert target.ndim == 3, "expected data to be of shape (bs * num_points, input_dim, max_timesteps)"
        # cut away excess timesteps
        if target.size(2) != self.num_rollout_timesteps:
            target = target[:, :, :self.num_rollout_timesteps]
        # concat input timesteps: the model expects num_input_timesteps copies of the
        # initial state stacked along the channel dimension
        _, model_input_dim = model.input_shape
        _, x_dim = x.shape
        assert model_input_dim % x_dim == 0
        num_input_timesteps = model_input_dim // x_dim
        x = einops.repeat(
            x,
            "batch_num_points num_channels -> batch_num_points (num_input_timesteps num_channels)",
            num_input_timesteps=num_input_timesteps,
        )
        # timestep is manually counted
        data.pop("timestep", None)
        # rollout
        with trainer.autocast_context:
            x_hat = model.rollout(
                x=x,
                num_rollout_timesteps=self.num_rollout_timesteps,
                **data,
                **self.rollout_kwargs,
            )
        # mesh data is in sparse format -> iterate over samples in batch
        start = 0
        mean_corrs_per_timestep = []
        if "batch_idx" in data:
            batch_idx = data["batch_idx"]
        else:
            batch_idx = ctx["batch_idx"].to(model.device)
        batch_size = batch_idx.unique().numel()
        for i in range(batch_size):
            # select all points of current sample
            end = start + (batch_idx == i).sum()
            cur_preds = x_hat[start:end]
            cur_target = target[start:end]
            # calculate correlation time
            # https://github.com/microsoft/pdearena/blob/main/pdearena/modules/loss.py#L39
            cur_preds_mean = torch.mean(cur_preds, dim=1, keepdim=True)
            cur_target_mean = torch.mean(cur_target, dim=1, keepdim=True)
            cur_preds_std = torch.std(cur_preds, dim=1, unbiased=False)
            cur_target_std = torch.std(cur_target, dim=1, unbiased=False)
            # calculate mean correlation per timestep (mean over points of the
            # per-point, per-timestep Pearson correlation across channels)
            mean_corr_per_timestep = (
                torch.mean((cur_preds - cur_preds_mean) * (cur_target - cur_target_mean), dim=1)
                / (cur_preds_std * cur_target_std).clamp(min=1e-12)
            ).mean(dim=0)
            mean_corrs_per_timestep.append(mean_corr_per_timestep)
            start = end
        mean_corrs_per_timestep = torch.stack(mean_corrs_per_timestep)
        assert mean_corrs_per_timestep.shape == (batch_size, self.num_rollout_timesteps)
        return mean_corrs_per_timestep

    # noinspection PyMethodOverriding
    def _periodic_callback(self, model, trainer, trainer_model, batch_size, data_iter, **_):
        """Compute correlation curves over the dataset and log derived metrics."""
        mean_corrs_per_timestep = self.iterate_over_dataset(
            forward_fn=partial(self._forward, model=model, trainer=trainer, trainer_model=trainer_model),
            config_id=self.__config_id,
            batch_size=batch_size,
            data_iter=data_iter,
        )
        # fix: plain string instead of a placeholder-less f-string (flake8 F541); the
        # previous len(rollout_kwargs) > 0 guard was redundant since dict.get on an
        # empty dict returns None anyway
        rollout_kwargs_str = "/latent" if self.rollout_kwargs.get("mode", None) == "latent" else ""
        # average correlation over all timesteps
        self.writer.add_scalar(
            key=f"correlation/{self.dataset_key}{rollout_kwargs_str}",
            value=mean_corrs_per_timestep.mean(),
            logger=self.logger,
            format_str=".4f",
        )
        # timestep until correlation is above a threshold
        for thresh in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
            # get timestep where correlation is < thresh
            # min over the bool tensor returns the first False per row
            min_values, min_indices = (mean_corrs_per_timestep >= thresh).min(dim=1)
            # if correlation is >= thresh all the time min_indices is 0 -> set to num_rollout_timesteps
            min_indices[min_values] = self.num_rollout_timesteps
            mean_corr_time = min_indices.float().mean()
            self.writer.add_scalar(
                key=f"correlation_time/thresh{str(thresh).replace('.', '')}/{self.dataset_key}{rollout_kwargs_str}",
                value=mean_corr_time,
                logger=self.logger,
                format_str=".4f",
            )
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/offline_callbacks/__init__.py | src/callbacks/offline_callbacks/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/offline_callbacks/offline_rollout2d_callback.py | src/callbacks/offline_callbacks/offline_rollout2d_callback.py | from functools import partial
import einops
import torch
from kappadata.wrappers import ModeWrapper
from kappautils.images.png import png_writer_viridis
from torchvision.datasets.folder import default_loader
from callbacks.base.periodic_callback import PeriodicCallback
from utils.formatting_util import dict_to_string
class OfflineRollout2dCallback(PeriodicCallback):
    """Rolls a 2D grid model out over a dataset, logs delta/movement metrics and
    optionally renders GIFs/PNGs and saves per-channel/per-timestep delta plots."""

    def __init__(
            self,
            dataset_key,
            num_rollout_timesteps=None,
            use_teacher_forcing=False,
            rollout_kwargs=None,
            save_gif=True,
            save_pngs=False,
            save_plots=False,
            visualize_pressure=False,
            visualize_velocities=False,
            visualize_velocity_magnitude=True,
            duration_per_frame=100,
            **kwargs,
    ):
        """
        Args:
            dataset_key: key of the dataset (in the data container) to roll out on.
            num_rollout_timesteps: how many timesteps to roll out; defaults to the
                full trajectory length of the dataset (resolved in _before_training).
            use_teacher_forcing: feed ground-truth frames at every step instead of
                the model's own predictions.
            rollout_kwargs: extra keyword arguments forwarded to the rollout call.
            save_gif: write one GIF per sample and visualized quantity.
            save_pngs: additionally write every frame as an individual PNG.
            save_plots: additionally save aggregated delta tensors for plotting.
            visualize_pressure: visualize channel 0 (pressure).
            visualize_velocities: visualize channels 1 and 2 separately.
            visualize_velocity_magnitude: visualize the velocity magnitude.
            duration_per_frame: GIF frame duration in milliseconds.
        """
        super().__init__(**kwargs)
        self.dataset_key = dataset_key
        # properties that are initialized in before_training
        self.__config_id = None
        self.out = None
        self.dataset = None
        self.num_rollout_timesteps = num_rollout_timesteps
        self.use_teacher_forcing = use_teacher_forcing
        self.rollout_kwargs = rollout_kwargs or {}
        # what to save (gif and/or png)
        self.save_gif = save_gif
        self.save_pngs = save_pngs
        self.save_plots = save_plots
        # what to visualize (pressure and/or separate velocities and/or velocity magnitude)
        self.visualize_pressure = visualize_pressure
        self.visualize_velocities = visualize_velocities
        self.visualize_velocity_magnitude = visualize_velocity_magnitude
        # visualization params
        self.duration_per_frame = duration_per_frame

    def _before_training(self, **kwargs):
        """Create output directories and resolve the rollout length."""
        self.out = self.path_provider.stage_output_path / "rollout"
        self.out.mkdir(exist_ok=True)
        (self.out / "gifs").mkdir(exist_ok=True)
        if self.save_pngs:
            (self.out / "pngs").mkdir(exist_ok=True)
        if self.save_plots:
            (self.out / "plots").mkdir(exist_ok=True)
        self.dataset, collator = self.data_container.get_dataset(key=self.dataset_key, mode=self.dataset_mode)
        assert collator is None
        # how many timesteps to roll out?
        if self.num_rollout_timesteps is None:
            self.num_rollout_timesteps = self.dataset.getdim_timestep()
        else:
            assert 0 < self.num_rollout_timesteps <= self.dataset.getdim_timestep()

    @property
    def dataset_mode(self):
        # items this callback needs from the dataset
        return "index x geometry2d velocity"

    def _register_sampler_configs(self, trainer):
        """Register the sampler configuration for iterating the evaluation dataset."""
        self.__config_id = self._register_sampler_config_from_key(key=self.dataset_key, mode=self.dataset_mode)

    def tensor_to_pil(self, data, progress, lower_bound, upper_bound):
        """Convert one stacked (ground truth / prediction / error) frame to a PIL image.

        Args:
            data: tensor of shape (3, height, width).
            progress: rollout progress in [0, 1]; drawn as a line at the frame top.
            lower_bound/upper_bound: normalization bounds taken from the ground truth.
        """
        # normalize to [0, 1]
        # data has shape (3 * height, width)
        # - data[0:2]: ground truth + prediction -> normalize with lower_bound from ground_truth
        # - data[2]: error -> normalize with min/max of error
        # NOTE(review): division uses upper_bound directly rather than
        # (upper_bound - lower_bound), so the result is only exactly in [0, 1]
        # when lower_bound == 0 — presumably an accepted approximation; confirm.
        error_min = data[2].min()
        error_max = data[2].max()
        data -= torch.tensor([lower_bound, lower_bound, error_min], device=data.device).view(-1, 1, 1)
        data /= torch.tensor([upper_bound, upper_bound, error_max], device=data.device).view(-1, 1, 1)
        data = einops.rearrange(data, "three height width -> (three height) width")
        # add a progress line on top
        progress_tensor = torch.zeros(size=(1, data.size(1),), dtype=data.dtype, device=data.device)
        progress_tensor[:, :round(progress * data.size(1))] = 1
        data = torch.concat([progress_tensor, data])
        # to image (roundtrip via a temporary PNG through the viridis colormap writer)
        temp_out = self.path_provider.get_temp_path() / f"{self.path_provider.stage_id}.png"
        png_writer_viridis(data.unsqueeze(0), temp_out)
        pil = default_loader(temp_out)
        return pil

    def visualize(self, idx, trajectories, deltas):
        """Render GIFs/PNGs for the selected quantities of each sample.

        Args:
            idx: per-sample dataset indices (used for file names).
            trajectories: (bs, num_rollout_timesteps, num_channels, 2 * height, width)
                with ground truth stacked above prediction along the height dim.
            deltas: matching absolute errors, same layout with height instead of 2*height.
        """
        # nothing selected to visualize or to save -> early exit
        if sum([self.visualize_pressure, self.visualize_velocities, self.visualize_velocity_magnitude]) == 0:
            return
        if sum([self.save_gif, self.save_pngs]) == 0:
            return
        # concatenate prediction with delta (along height dimension)
        # (bs, num_rollout_timesteps, num_channels, 2 * height, width) ->
        # (bs, num_rollout_timesteps, num_channels, 3 * height, width)
        trajectories = torch.concat([trajectories, deltas], dim=3)
        # generate gifs + images
        for i, trajectory in enumerate(trajectories):
            prefix = f"{self.dataset_key}_{self.update_counter.cur_checkpoint}"
            if len(self.rollout_kwargs) > 0:
                prefix = f"{prefix}_{dict_to_string(self.rollout_kwargs, item_seperator='-')}"
            if self.use_teacher_forcing:
                prefix = f"{prefix}_tforced"
            prefix = f"{prefix}_idx{idx[i]:04d}"
            data = {}
            # what to visualize (pressure, velocity magnitude, separate velocities)
            if self.visualize_pressure:
                data["pressure"] = trajectory[:, 0]
            if self.visualize_velocities:
                data["v0"] = trajectory[:, 1]
                data["v1"] = trajectory[:, 2]
            if self.visualize_velocity_magnitude:
                velocity = trajectory[:, 1:]
                velocity_magnitude = torch.sqrt(torch.sum(velocity ** 2, dim=1))
                data["vmag"] = velocity_magnitude
            for name, item in data.items():
                # generate images
                self.logger.info(f"generating {name} images")
                # data has shape (3 * height, width) -> normalize each sub-image separately
                item = einops.rearrange(
                    item,
                    "num_rollout_timesteps (three height) width -> num_rollout_timesteps three height width",
                    three=3,
                )
                # calculate upper/lower bounds for visualization from ground truth
                # NOTE(review): both branches are identical; the quantile-based bounds
                # below were presumably intended for the non-vmag case but disabled
                # due to the noted RuntimeError — consider collapsing the if/else.
                if name == "vmag":
                    lb = item[:, 0].min()
                    ub = item[:, 0].max()
                else:
                    lb = item[:, 0].min()
                    ub = item[:, 0].max()
                    # cant do this (RuntimeError: quantile() input tensor is too large)
                    # lb = item[:, 0].quantile(q=0.05)
                    # ub = item[:, 0].quantile(q=0.95)
                imgs = [
                    self.tensor_to_pil(
                        item[i],
                        progress=i / max(1, (len(item) - 1)),
                        lower_bound=lb,
                        upper_bound=ub,
                    )
                    for i in range(len(item))
                ]
                if self.save_gif:
                    uri = self.out / "gifs" / f"{name}_{prefix}.gif"
                    self.logger.info(f"generating {name} gif '{uri.as_posix()}'")
                    imgs[0].save(
                        fp=uri,
                        format="GIF",
                        append_images=imgs[1:],
                        save_all=True,
                        duration=self.duration_per_frame,
                        loop=0,
                    )
                if self.save_pngs:
                    self.logger.info(f"storing individual {name} pngs")
                    for j, img in enumerate(imgs):
                        img.save(self.out / "pngs" / f"{name}_{prefix}_ts{j:04d}.png")

    def _forward(self, batch, model, trainer):
        """Roll out one batch, render visualizations and return per-sample deltas."""
        # prepare data
        batch, ctx = batch
        idx = ModeWrapper.get_item(mode=self.dataset_mode, item="index", batch=batch)
        x = ModeWrapper.get_item(mode=self.dataset_mode, item="x", batch=batch)
        geometry2d = ModeWrapper.get_item(mode=self.dataset_mode, item="geometry2d", batch=batch)
        geometry2d = geometry2d.to(model.device, non_blocking=True)
        velocity = ModeWrapper.get_item(mode=self.dataset_mode, item="velocity", batch=batch)
        velocity = velocity.to(model.device, non_blocking=True)
        assert x.ndim == 5 and x.size(1) == self.num_rollout_timesteps + 1, \
            "expected data to be of shape (bs, num_total_timesteps + 1, num_channels, height, width)"
        x = x.to(model.device, non_blocking=True)
        # rollout
        with trainer.autocast_context:
            if self.use_teacher_forcing:
                assert self.num_rollout_timesteps + 1 == x.size(1)
                predictions = model.rollout_teacher_forced(
                    x=x,
                    geometry2d=geometry2d,
                    velocity=velocity,
                    **self.rollout_kwargs,
                )
            else:
                predictions = model.rollout(
                    x0=x[:, 0],
                    geometry2d=geometry2d,
                    velocity=velocity,
                    num_rollout_timesteps=self.num_rollout_timesteps,
                    **self.rollout_kwargs,
                )
        # ground truth excludes t0
        ground_truth = x[:, 1:1 + self.num_rollout_timesteps]
        # concatenate prediction with ground truth (along height dimension)
        # (bs, num_rollout_timesteps, num_channels, height, width) ->
        # (bs, num_rollout_timesteps, num_channels, 2 * height, width)
        trajectories = torch.concat([ground_truth, predictions], dim=3)
        # calculate normalized deltas
        normalized_deltas = (ground_truth - predictions).abs()
        # denormalize (from mean=0 std=1 to original value range)
        trajectories = self.dataset.denormalize(trajectories)
        # calculate denormalized delta
        denormed_ground_truth, denormed_predictions = trajectories.chunk(2, dim=3)
        denormalized_deltas = (denormed_ground_truth - denormed_predictions).abs()
        # calculate movement: i.e. how much changes between timesteps (\hat{x}_t - \hat{x}_{t-1})
        denormed_movement = (denormed_predictions - denormed_predictions.roll(shifts=(-1,), dims=(1,)))[:, :-1]
        denormed_movement = denormed_movement.abs()
        # generate visualizations
        self.visualize(idx=idx, trajectories=trajectories, deltas=denormalized_deltas)
        # calculate deltas ("losses"); mask excludes geometry (obstacle) cells
        mask = einops.rearrange(1 - geometry2d, "bs ... -> bs 1 1 ...")
        results = dict(
            overall_normalized_delta=normalized_deltas.flatten(start_dim=1).mean(dim=-1),
            overall_normalized_delta_masked=(normalized_deltas * mask).flatten(start_dim=1).mean(dim=-1),
            overall_denormalized_delta=denormalized_deltas.flatten(start_dim=1).mean(dim=-1),
            overall_denormalized_delta_masked=(denormalized_deltas * mask).flatten(start_dim=1).mean(dim=-1),
            overall_denormalized_movement=denormed_movement.flatten(start_dim=1).mean(dim=1),
            overall_denormalized_movement_masked=(denormed_movement * mask).flatten(start_dim=1).mean(dim=1),
        )
        if self.save_plots:
            # per-channel / per-timestep / per-location breakdowns for offline plotting
            results.update(
                delta_per_channel=einops.rearrange(
                    normalized_deltas,
                    "bs num_rollout_steps num_channels ... -> bs num_channels (num_rollout_steps ...)"
                ).mean(dim=-1),
                delta_per_timestep=einops.rearrange(
                    normalized_deltas,
                    "bs num_rollout_steps num_channels ... -> bs num_rollout_steps (num_channels ...)"
                ).mean(dim=-1),
                delta_per_channel_per_timestep=einops.rearrange(
                    normalized_deltas,
                    "bs num_rollout_steps num_channels ... -> bs num_rollout_steps num_channels (...)"
                ).mean(dim=-1),
                delta_per_location=einops.rearrange(
                    normalized_deltas,
                    "bs num_rollout_steps num_channels ... -> bs ... (num_rollout_steps num_channels)"
                ).mean(dim=-1),
            )
        return results

    # noinspection PyMethodOverriding
    def _periodic_callback(self, model, trainer, batch_size, data_iter, **_):
        """Roll out over the dataset, log aggregate metrics and save plot tensors."""
        results = self.iterate_over_dataset(
            forward_fn=partial(self._forward, model=model, trainer=trainer),
            config_id=self.__config_id,
            batch_size=batch_size,
            data_iter=data_iter,
        )
        # log deltas; identifiers encode dataset, rollout length and rollout variant
        metric_identifier = f"{self.dataset_key}/0to{self.num_rollout_timesteps}"
        file_identifier = f"{metric_identifier.replace('/', '_')}_{self.update_counter.cur_checkpoint}"
        if len(self.rollout_kwargs) > 0:
            metric_identifier = f"{metric_identifier}/{dict_to_string(self.rollout_kwargs)}"
            file_identifier = f"{file_identifier}_{dict_to_string(self.rollout_kwargs, item_seperator='_')}"
        if self.use_teacher_forcing:
            metric_identifier = f"{metric_identifier}/tforced"
            file_identifier = f"{file_identifier}_tforced"
        # overall
        self.writer.add_scalar(
            key=f"delta/{metric_identifier}/overall/normalized",
            value=results["overall_normalized_delta"].mean(),
            logger=self.logger,
            format_str=".10f",
        )
        self.writer.add_scalar(
            key=f"delta_masked/{metric_identifier}/overall/normalized",
            value=results["overall_normalized_delta_masked"].mean(),
            logger=self.logger,
            format_str=".10f",
        )
        self.writer.add_scalar(
            key=f"delta/{metric_identifier}/overall/denormalized",
            value=results["overall_denormalized_delta"].mean(),
            logger=self.logger,
            format_str=".10f",
        )
        self.writer.add_scalar(
            key=f"delta_masked/{metric_identifier}/overall/denormalized",
            value=results["overall_denormalized_delta_masked"].mean(),
            logger=self.logger,
            format_str=".10f",
        )
        self.writer.add_scalar(
            key=f"movement/{metric_identifier}/overall/denormalized",
            value=results["overall_denormalized_movement"].mean(),
            logger=self.logger,
            format_str=".10f",
        )
        self.writer.add_scalar(
            key=f"movement_masked/{metric_identifier}/overall/denormalized",
            value=results["overall_denormalized_movement_masked"].mean(),
            logger=self.logger,
            format_str=".10f",
        )
        # plots
        if self.save_plots:
            torch.save(
                results["delta_per_channel"].mean(dim=0),
                self.out / "plots" / f"PerChannel_{file_identifier}.th",
            )
            torch.save(
                results["delta_per_timestep"].mean(dim=0),
                self.out / "plots" / f"PerTimestep_{file_identifier}.th",
            )
            torch.save(
                results["delta_per_channel_per_timestep"].mean(dim=0),
                self.out / "plots" / f"PerChannelPerTimestep_{file_identifier}.th",
            )
            torch.save(
                results["delta_per_location"].mean(dim=0),
                self.out / "plots" / f"PerLocation_{file_identifier}.th"
            )
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/offline_callbacks/offline_rollout_mesh_loss_callback.py | src/callbacks/offline_callbacks/offline_rollout_mesh_loss_callback.py | from torch_geometric.utils import scatter
from kappadata.wrappers import ModeWrapper
from functools import partial
import einops
import torch
from callbacks.base.periodic_callback import PeriodicCallback
from utils.formatting_util import dict_to_string
class OfflineRolloutMeshLossCallback(PeriodicCallback):
def __init__(
self,
dataset_key,
num_rollout_timesteps=None,
rollout_kwargs=None,
**kwargs,
):
super().__init__(**kwargs)
self.dataset_key = dataset_key
self.num_rollout_timesteps = num_rollout_timesteps
self.rollout_kwargs = rollout_kwargs or {}
# properties that are initialized in before_training
self.__config_id = None
def _before_training(self, trainer, **kwargs):
dataset, _ = self.data_container.get_dataset(key=self.dataset_key, mode=trainer.dataset_mode)
# how many timesteps to roll out?
if self.num_rollout_timesteps is None:
self.num_rollout_timesteps = dataset.getdim_timestep()
else:
assert 0 < self.num_rollout_timesteps <= dataset.getdim_timestep()
def _register_sampler_configs(self, trainer):
self.__config_id = self._register_sampler_config_from_key(key=self.dataset_key, mode=trainer.dataset_mode)
def _forward(self, batch, model, trainer, trainer_model):
data = trainer_model.prepare(batch, mode="rollout")
batch, ctx = batch
batch_idx = ctx["batch_idx"].to(model.device, non_blocking=True)
assert "target" not in data
x = data.pop("x")
assert x.ndim == 3, "expected data to be of shape (bs * num_points, num_total_timesteps + 1, input_dim)"
# cut away excess timesteps
if x.size(1) != self.num_rollout_timesteps + 1:
x = x[:, :self.num_rollout_timesteps + 1]
# concat input timesteps
model_input_dim, _ = model.input_shape
_, _, x_input_dim = x.shape
assert model_input_dim % x_input_dim == 0
num_input_timesteps = model_input_dim // x_input_dim
x0 = einops.repeat(
x[:, 0],
"batch_num_points num_channels ... -> batch_num_points (num_input_timesteps num_channels) ...",
num_input_timesteps=num_input_timesteps,
)
# timestep is manually counted
data.pop("timestep", None)
# rollout
with trainer.autocast_context:
predictions = model.rollout(
x0=x0,
num_rollout_timesteps=self.num_rollout_timesteps,
**data,
**self.rollout_kwargs,
)
# ground truth excludes t0
ground_truth = x[:, 1:1 + self.num_rollout_timesteps]
# normalized delta shape=(total_num_points, num_rollout_timesteps, dim)
normalized_delta = (predictions - ground_truth).abs()
# average over dims shape=(total_num_points, num_rollout_timesteps)
normalized_delta = normalized_delta.mean(dim=2)
# average over timesteps shape=(total_num_points,)
normalized_delta = normalized_delta.mean(dim=1)
# average over points
normalized_delta = scatter(src=normalized_delta, index=batch_idx, reduce="mean")
return dict(
overall_normalized_delta=normalized_delta,
)
# noinspection PyMethodOverriding
def _periodic_callback(self, model, trainer, trainer_model, batch_size, data_iter, **_):
results = self.iterate_over_dataset(
forward_fn=partial(self._forward, model=model, trainer=trainer, trainer_model=trainer_model),
config_id=self.__config_id,
batch_size=batch_size,
data_iter=data_iter,
)
# log deltas
metric_identifier = f"{self.dataset_key}/0to{self.num_rollout_timesteps}"
if len(self.rollout_kwargs) > 0:
metric_identifier = f"{metric_identifier}/{dict_to_string(self.rollout_kwargs)}"
# overall
self.writer.add_scalar(
key=f"delta/{metric_identifier}/overall/normalized",
value=results["overall_normalized_delta"].mean(),
logger=self.logger,
format_str=".10f",
)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/offline_callbacks/offline_lagrangian_rollout_mesh_loss_callback.py | src/callbacks/offline_callbacks/offline_lagrangian_rollout_mesh_loss_callback.py | from torch_geometric.utils import scatter
from kappadata.wrappers import ModeWrapper
from functools import partial
import einops
import torch
from callbacks.base.periodic_callback import PeriodicCallback
from utils.formatting_util import dict_to_string
class OfflineLagrangianRolloutMeshLossCallback(PeriodicCallback):
def __init__(
self,
dataset_key,
num_rollout_timesteps=None,
rollout_kwargs=None,
**kwargs,
):
super().__init__(**kwargs)
self.dataset_key = dataset_key
self.num_rollout_timesteps = num_rollout_timesteps
self.rollout_kwargs = rollout_kwargs or {}
self.out = self.path_provider.stage_output_path / "rollout"
# properties that are initialized in before_training
self.__config_id = None
bounds = torch.tensor(self.data_container.get_dataset().metadata['bounds'])
self.box = bounds[:, 1] - bounds[:, 0]
if 'full_rollout' in self.rollout_kwargs:
self.full_rollout = self.rollout_kwargs['full_rollout']
if self.rollout_kwargs['full_rollout']:
self.out = self.out / "full_rollout"
else:
self.out = self.out / "latent_rollout"
else:
self.full_rollout = False
def _before_training(self, trainer, **kwargs):
self.out.mkdir(parents=True, exist_ok=True)
dataset, _ = self.data_container.get_dataset(key=self.dataset_key, mode=trainer.dataset_mode)
# how many timesteps to roll out?
if self.num_rollout_timesteps is None:
self.num_rollout_timesteps = dataset.getdim_timestep()
else:
assert 0 < self.num_rollout_timesteps <= dataset.getdim_timestep()
def _register_sampler_configs(self, trainer):
self.__config_id = self._register_sampler_config_from_key(key=self.dataset_key, mode=trainer.dataset_mode)
def _forward(self, batch, model, trainer, trainer_model):
# prepare data
batch, ctx = batch
x = ModeWrapper.get_item(mode=trainer.dataset_mode, item="x", batch=batch)
x = x.to(model.device, non_blocking=True)
timestep = ModeWrapper.get_item(mode=trainer.dataset_mode, item="timestep", batch=batch)
timestep = timestep.to(model.device, non_blocking=True)
curr_pos = ModeWrapper.get_item(mode=trainer.dataset_mode, item="curr_pos", batch=batch)
curr_pos = curr_pos.to(model.device, non_blocking=True)
target_pos = ModeWrapper.get_item(mode=trainer.dataset_mode, item="target_pos", batch=batch)
target_pos = target_pos.to(model.device, non_blocking=True)
edge_index = ModeWrapper.get_item(mode=trainer.dataset_mode, item="edge_index", batch=batch)
edge_index = edge_index.to(model.device, non_blocking=True)
batch_idx = ctx["batch_idx"].to(model.device, non_blocking=True)
# inputs are the velocities of all timesteps
x = einops.rearrange(
x,
"bs num_input_timesteps num_points -> bs (num_input_timesteps num_points)",
)
# decoder predicts all points
unbatch_idx = ctx["unbatch_idx"].to(model.device, non_blocking=True)
unbatch_select = ctx["unbatch_select"].to(model.device, non_blocking=True)
# rollout
with trainer.autocast_context:
x_hat, all_vels = model.rollout(x=x,
timestep=timestep,
curr_pos=curr_pos,
edge_index=edge_index,
batch_idx=batch_idx,
unbatch_idx=unbatch_idx,
unbatch_select=unbatch_select,
full_rollout=self.full_rollout,
rollout_length=target_pos.shape[1],
predict_velocity=trainer.forward_kwargs['predict_velocity']
)
vel = (x_hat - target_pos)
self.box = self.box.to(x_hat.device)
vel = (vel + self.box * 0.5) % self.box - 0.5 * self.box
mse = vel ** 2
mse = mse.mean(dim=[2,3])
mse2 = mse[:,:2].mean(dim=1)
mse5 = mse[:,:5].mean(dim=1)
mse20 = mse[:,:20].mean(dim=1)
if self.rollout_kwargs['save_rollout']:
rollout_dict = {'x_target': target_pos,
'x_predictions': x_hat,
'v_predictions': all_vels,
'traj_idx': ctx['traj_idx']}
outpath = self.out / f"rollout_results_{str(self.update_counter.cur_checkpoint).lower()}.pt"
torch.save(rollout_dict, outpath)
return dict(
mse2=mse2,
mse5=mse5,
mse20=mse20,
)
# noinspection PyMethodOverriding
def _periodic_callback(self, model, trainer, trainer_model, batch_size, data_iter, **_):
results = self.iterate_over_dataset(
forward_fn=partial(self._forward, model=model, trainer=trainer, trainer_model=trainer_model),
config_id=self.__config_id,
batch_size=batch_size,
data_iter=data_iter,
)
if self.full_rollout:
identifier = 'full_rollout'
else:
identifier = 'latent_rollout'
# log deltas
for num_rollout_timesteps in [2,5,20]:
metric_identifier = f"{self.dataset_key}/{identifier}/0to{num_rollout_timesteps}"
if len(self.rollout_kwargs) > 0:
metric_identifier = f"{metric_identifier}"
self.writer.add_scalar(
key=f"delta/{metric_identifier}/overall/mse",
value=results[f"mse{num_rollout_timesteps}"].mean(),
logger=self.logger,
format_str=".10f",
) | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/offline_callbacks/offline_cfd_rollout_mesh_loss_callback.py | src/callbacks/offline_callbacks/offline_cfd_rollout_mesh_loss_callback.py | from torch_geometric.utils import scatter
from kappadata.wrappers import ModeWrapper
from functools import partial
import einops
import torch
from callbacks.base.periodic_callback import PeriodicCallback
from utils.formatting_util import dict_to_string
class OfflineCfdRolloutMeshLossCallback(PeriodicCallback):
def __init__(
self,
dataset_key,
num_rollout_timesteps=None,
rollout_kwargs=None,
**kwargs,
):
super().__init__(**kwargs)
self.dataset_key = dataset_key
self.num_rollout_timesteps = num_rollout_timesteps
self.rollout_kwargs = rollout_kwargs or {}
# properties that are initialized in before_training
self.__config_id = None
def _before_training(self, trainer, **kwargs):
dataset, _ = self.data_container.get_dataset(key=self.dataset_key, mode=trainer.dataset_mode)
# how many timesteps to roll out?
if self.num_rollout_timesteps is None:
self.num_rollout_timesteps = dataset.getdim_timestep()
else:
assert 0 < self.num_rollout_timesteps <= dataset.getdim_timestep()
def _register_sampler_configs(self, trainer):
self.__config_id = self._register_sampler_config_from_key(key=self.dataset_key, mode=trainer.dataset_mode)
def _forward(self, batch, model, trainer, trainer_model):
data = trainer_model.prepare(batch)
batch, ctx = batch
batch_idx = ctx["batch_idx"].to(model.device, non_blocking=True)
target = data.pop("target")
x = data.pop("x")
assert x.ndim == 2, "expected data to be of shape (bs * num_points, input_dim)"
assert target.ndim == 3, "expected data to be of shape (bs * num_points, input_dim, max_timesteps)"
# cut away excess timesteps
if target.size(2) != self.num_rollout_timesteps:
target = target[:, :, :self.num_rollout_timesteps]
# concat input timesteps
_, model_input_dim = model.input_shape
_, x_dim = x.shape
assert model_input_dim % x_dim == 0
num_input_timesteps = model_input_dim // x_dim
x = einops.repeat(
x,
"batch_num_points num_channels -> batch_num_points (num_input_timesteps num_channels)",
num_input_timesteps=num_input_timesteps,
)
# timestep is manually counted
data.pop("timestep", None)
# rollout
with trainer.autocast_context:
x_hat = model.rollout(
x=x,
num_rollout_timesteps=self.num_rollout_timesteps,
**data,
**self.rollout_kwargs,
)
# normalized delta shape=(total_num_points, dim, num_rollout_timesteps)
normalized_delta = (x_hat - target).abs()
# average over dims shape=(total_num_points,)
normalized_delta = normalized_delta.mean(dim=[1])
# average over tiemsteps and points points
assert self.num_rollout_timesteps > 50
normalized_delta10 = scatter(src=normalized_delta[:, :10].mean(dim=1), index=batch_idx, reduce="mean")
normalized_delta20 = scatter(src=normalized_delta[:, :20].mean(dim=1), index=batch_idx, reduce="mean")
normalized_delta50 = scatter(src=normalized_delta[:, :50].mean(dim=1), index=batch_idx, reduce="mean")
normalized_delta = scatter(src=normalized_delta.mean(dim=1), index=batch_idx, reduce="mean")
return dict(
normalized_delta10=normalized_delta10,
normalized_delta20=normalized_delta20,
normalized_delta50=normalized_delta50,
normalized_delta99=normalized_delta,
)
# noinspection PyMethodOverriding
def _periodic_callback(self, model, trainer, trainer_model, batch_size, data_iter, **_):
results = self.iterate_over_dataset(
forward_fn=partial(self._forward, model=model, trainer=trainer, trainer_model=trainer_model),
config_id=self.__config_id,
batch_size=batch_size,
data_iter=data_iter,
)
# log deltas
for num_rollout_timesteps in [10, 20, 50, 99]:
metric_identifier = f"{self.dataset_key}/0to{num_rollout_timesteps}"
if len(self.rollout_kwargs) > 0:
metric_identifier = f"{metric_identifier}/{dict_to_string(self.rollout_kwargs)}"
self.writer.add_scalar(
key=f"delta/{metric_identifier}/overall/normalized",
value=results[f"normalized_delta{num_rollout_timesteps}"].mean(),
logger=self.logger,
format_str=".10f",
) | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/checkpoint_callbacks/recover_from_latest_checkpoint_callback.py | src/callbacks/checkpoint_callbacks/recover_from_latest_checkpoint_callback.py | from collections import defaultdict
import torch
from callbacks.base.periodic_callback import PeriodicCallback
from distributed.config import is_rank0
from utils.select_with_path import select_with_path
from initializers.resume_initializer import ResumeInitializer
from callbacks.checkpoint_callbacks.checkpoint_callback import CheckpointCallback
class RecoverFromLatestCheckpointCallback(PeriodicCallback):
def __init__(self, save_discarded_checkpoints, max_num_recoveries=5, **kwargs):
super().__init__(**kwargs)
self.save_discarded_checkpoints = save_discarded_checkpoints
self.max_num_recoveries = max_num_recoveries
self.num_recoveries = 0
self.best_loss = float("inf")
self.latest_checkpoint_callback = None
def _before_training(self, trainer, **kwargs):
# TODO should handle saving latest checkpoint on its own instead of relying on other callback
# TODO this would allow e.g. looping 2 epochs back
assert self.every_n_epochs == trainer.log_every_n_epochs
assert self.every_n_updates == trainer.log_every_n_updates
assert self.every_n_samples == trainer.log_every_n_samples
# get indices of RecoverFromLatestCheckpointCallback and CheckpointCallbacks that log the latest checkpoint
recover_idxs = []
ckpt_idxs = []
for i, callback in enumerate(trainer.callbacks):
if isinstance(callback, RecoverFromLatestCheckpointCallback):
recover_idxs.append(i)
if (
isinstance(callback, CheckpointCallback)
and callback.save_latest_weights
and callback.save_latest_optim
):
ckpt_idxs.append(i)
# make sure there is only 1 of each callback
assert len(recover_idxs) == 1
assert len(ckpt_idxs) == 1
# checkpoint callback has to be after restart callback
assert recover_idxs[0] < ckpt_idxs[0]
# remember CheckpointCallback to disable writing after a resume
self.latest_checkpoint_callback = trainer.callbacks[ckpt_idxs[0]]
def state_dict(self):
return dict(
num_recoveries=self.num_recoveries,
best_loss=self.best_loss,
)
def load_state_dict(self, state_dict):
self.num_recoveries = state_dict["num_recoveries"]
self.best_loss = state_dict["best_loss"]
def _periodic_callback(self, model, **kwargs):
# extract loss from log_cache (produced by OnlineLossCallback)
loss = self.writer.log_cache[f"loss/online/total/{self.to_short_interval_string()}"]
if loss > 2 * self.best_loss:
if self.max_num_recoveries is not None and self.num_recoveries >= self.max_num_recoveries:
raise RuntimeError("maximum number of recoveries reached ({self.max_num_recoveries})")
self.num_recoveries += 1
self.logger.warning(
f"loss is higher than 2 * best loss ({loss:.6f} > 2 * {self.best_loss}) "
f"-> recover from latest checkpoint (num_recoveries: {self.num_recoveries})"
)
# save current state
if self.save_discarded_checkpoints:
self.checkpoint_writer.save(
model=model,
checkpoint=f"faulty{self.resume_count}",
save_weights=True,
save_optim=True,
save_frozen_weights=True,
)
# recover from latest checkpoint
initializer = ResumeInitializer(
stage_id=self.path_provider.stage_id,
checkpoint="latest",
load_optim=True,
load_random_states=False,
path_provider=self.path_provider,
)
initializer.init_weights(model)
initializer.init_optim(model)
# prevent checkpoint callback from overwriting the last checkpoint with the current one
if self.every_n_epochs is not None:
self.latest_checkpoint_callback.every_n_epochs = None
else:
raise NotImplementedError
else:
# update best_loss
if loss < self.best_loss:
self.best_loss = loss
# allow checkpoint callback to write new best checkpoint
if self.every_n_epochs is not None:
self.latest_checkpoint_callback.every_n_epochs = self.every_n_epochs
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/checkpoint_callbacks/ema_callback.py | src/callbacks/checkpoint_callbacks/ema_callback.py | from collections import defaultdict
import torch
from callbacks.base.periodic_callback import PeriodicCallback
from distributed.config import is_rank0
from utils.select_with_path import select_with_path
class EmaCallback(PeriodicCallback):
def __init__(self, target_factors, model_paths=None, **kwargs):
super().__init__(**kwargs)
self.model_paths = model_paths or [None]
self.target_factors = target_factors
self.parameters = defaultdict(dict)
self.buffers = defaultdict(dict)
def _before_training(self, model, **kwargs):
if not is_rank0():
return
for model_path in self.model_paths:
cur_model = select_with_path(obj=model, path=model_path)
for target_factor in self.target_factors:
for name, param in cur_model.named_parameters():
self.parameters[(model_path, target_factor)][name] = param.clone()
for name, buffer in cur_model.named_buffers():
self.buffers[model_path][name] = buffer.clone()
def _track_after_update_step(self, model, **kwargs):
if not is_rank0():
return
for model_path in self.model_paths:
cur_model = select_with_path(obj=model, path=model_path)
for target_factor in self.target_factors:
for name, param in cur_model.named_parameters():
key = (model_path, target_factor)
self.parameters[key][name].mul_(target_factor).add_(param, alpha=1. - target_factor)
for name, buffer in cur_model.named_buffers():
self.buffers[model_path][name].copy_(buffer)
def _save(self, ckpt, model):
if not is_rank0():
return
for model_path in self.model_paths:
cur_model = select_with_path(obj=model, path=model_path)
for target_factor in self.target_factors:
state_dict = {**self.parameters[(model_path, target_factor)], **self.buffers[model_path]}
ckpt_dict = dict(
state_dict=state_dict,
ctor_kwargs=cur_model.ctor_kwargs,
ckpt=ckpt,
abs_ckpt=dict(self.update_counter.cur_checkpoint),
stage_id=self.path_provider.stage_id,
ema=target_factor,
)
if model_path is None:
cur_model_path = model.name
else:
cur_model_path = f"{model.name}.{model_path}"
fname = f"{cur_model_path} cp={ckpt} ema={target_factor} model.th"
torch.save(ckpt_dict, self.path_provider.checkpoint_path / fname)
def _periodic_callback(self, model, **kwargs):
self._save(ckpt=self.update_counter.cur_checkpoint, model=model)
def _after_training(self, model, **kwargs):
self._save(ckpt="last", model=model)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/checkpoint_callbacks/checkpoint_callback.py | src/callbacks/checkpoint_callbacks/checkpoint_callback.py | from callbacks.base.periodic_callback import PeriodicCallback
from utils.formatting_util import short_number_str
from utils.model_utils import get_trainable_param_count, get_frozen_param_count
class CheckpointCallback(PeriodicCallback):
def __init__(
self,
save_weights=True,
save_optim=False,
save_latest_weights=False,
save_latest_optim=False,
model_name=None,
**kwargs,
):
super().__init__(**kwargs)
assert save_weights or save_latest_weights or save_optim or save_latest_optim
self.save_weights = save_weights
self.save_optim = save_optim
self.save_latest_weights = save_latest_weights
self.save_latest_optim = save_latest_optim
self.model_name = model_name
def _before_training(self, model, **kwargs):
frozen_count = get_frozen_param_count(model)
trainable_count = get_trainable_param_count(model)
weight_bytes = (frozen_count + trainable_count) * 4
self.logger.info(f"estimated checkpoint size: {short_number_str(weight_bytes * 3)}B")
self.logger.info(f"estimated weight checkpoint size: {short_number_str(weight_bytes)}B")
# hardcoded for adam/adamw (SGD would have lower size)
self.logger.info(f"estimated optim checkpoint size: {short_number_str(weight_bytes * 2)}B")
# (not 100% accurate...multiple intervals are not considered)
n_checkpoints = 1
if self.every_n_epochs is not None:
n_checkpoints += self.update_counter.end_checkpoint.epoch // self.every_n_epochs
if self.every_n_updates is not None:
n_checkpoints += int(self.update_counter.end_checkpoint.update / self.every_n_updates)
if self.every_n_samples is not None:
n_checkpoints += int(self.update_counter.end_checkpoint.sample / self.every_n_samples)
multiplier = 0
if self.save_weights:
multiplier += 1
if self.save_optim:
multiplier += 2
self.logger.info(
f"estimated size for {n_checkpoints} checkpoints: "
f"{short_number_str(n_checkpoints * weight_bytes * multiplier)}B"
)
# noinspection PyMethodOverriding
def _periodic_callback(self, model, trainer, **kwargs):
self.checkpoint_writer.save(
model=model,
trainer=trainer,
checkpoint=self.update_counter.cur_checkpoint,
save_weights=self.save_weights,
save_optim=self.save_optim,
save_latest_weights=self.save_latest_weights,
save_latest_optim=self.save_latest_optim,
model_name_to_save=self.model_name,
save_frozen_weights=True,
)
def _after_training(self, model, trainer, **kwargs):
self.checkpoint_writer.save(
model=model,
trainer=trainer,
checkpoint="last",
save_weights=self.save_weights,
save_optim=self.save_optim,
save_latest_weights=self.save_latest_weights,
save_latest_optim=self.save_latest_optim,
model_name_to_save=self.model_name,
save_frozen_weights=True,
)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/checkpoint_callbacks/__init__.py | src/callbacks/checkpoint_callbacks/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/checkpoint_callbacks/best_checkpoint_callback.py | src/callbacks/checkpoint_callbacks/best_checkpoint_callback.py | from callbacks.base.periodic_callback import PeriodicCallback
from utils.infer_higher_is_better import higher_is_better_from_metric_key
class BestCheckpointCallback(PeriodicCallback):
def __init__(
self,
metric_key,
save_frozen_weights=True,
save_optim=False,
tolerances=None,
model_name=None,
**kwargs,
):
super().__init__(**kwargs)
self.metric_key = metric_key
self.model_name = model_name
self.higher_is_better = higher_is_better_from_metric_key(self.metric_key)
self.best_metric_value = -float("inf") if self.higher_is_better else float("inf")
self.save_frozen_weights = save_frozen_weights
self.save_optim = save_optim
# save multiple best models based on tolerance
self.tolerances_is_exceeded = {tolerance: False for tolerance in tolerances or []}
self.tolerance_counter = 0
self.metric_at_exceeded_tolerance = {}
def state_dict(self):
return dict(
best_metric_value=self.best_metric_value,
tolerances_is_exceeded=self.tolerances_is_exceeded,
tolerance_counter=self.tolerance_counter,
metric_at_exceeded_tolerance=self.metric_at_exceeded_tolerance,
)
def load_state_dict(self, state_dict):
if "best_metric_value" in state_dict:
self.best_metric_value = state_dict["best_metric_value"]
if "tolerances_is_exceeded" in state_dict:
self.tolerances_is_exceeded = state_dict["tolerances_is_exceeded"]
if "tolerance_counter" in state_dict:
self.tolerance_counter = state_dict["tolerance_counter"]
if "metric_at_exceeded_tolerance" in state_dict:
self.metric_at_exceeded_tolerance = state_dict["metric_at_exceeded_tolerance"]
def _before_training(self, **kwargs):
if len(self.tolerances_is_exceeded) > 0 and self.update_counter.cur_checkpoint.sample > 0:
raise NotImplementedError(f"{type(self).__name__} with tolerances resuming not implemented")
def _is_new_best_model(self, metric_value):
if self.higher_is_better:
return metric_value > self.best_metric_value
return metric_value < self.best_metric_value
# noinspection PyMethodOverriding
def _periodic_callback(self, trainer, model, **kwargs):
assert self.metric_key in self.writer.log_cache, (
f"couldn't find metric_key {self.metric_key} (valid metric keys={list(self.writer.log_cache.keys())}) -> "
f"make sure the callback that produces the metric_key is called at the same (or higher) frequency and "
f"is ordered before the {type(self).__name__}"
)
metric_value = self.writer.log_cache[self.metric_key]
if self._is_new_best_model(metric_value):
# one could also track the model and save it after training
# this is better in case runs crash or are terminated
# the runtime overhead is neglegible
self.logger.info(f"new best model ({self.metric_key}): {self.best_metric_value} --> {metric_value}")
self.checkpoint_writer.save(
model=model,
checkpoint=f"best_model.{self.metric_key.replace('/', '.')}",
save_optim=self.save_optim,
model_name_to_save=self.model_name,
save_frozen_weights=self.save_frozen_weights,
)
self.best_metric_value = metric_value
self.tolerance_counter = 0
# log tolerance checkpoints
for tolerance, is_exceeded in self.tolerances_is_exceeded.items():
if is_exceeded:
continue
self.checkpoint_writer.save(
model=model,
checkpoint=f"best_model.{self.metric_key.replace('/', '.')}.tolerance{tolerance}",
save_optim=self.save_optim,
model_name_to_save=self.model_name,
)
else:
self.tolerance_counter += 1
for tolerance, is_exceeded in self.tolerances_is_exceeded.items():
if is_exceeded:
continue
if tolerance >= self.tolerance_counter:
self.tolerances_is_exceeded[tolerance] = True
self.metric_at_exceeded_tolerance[tolerance] = metric_value
def _after_training(self, **kwargs):
# best metric doesn't need to be logged as it is summarized anyways
for tolerance, value in self.metric_at_exceeded_tolerance.items():
self.logger.info(f"best {self.metric_key} with tolerance={tolerance}: {value}")
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/online_callbacks/num_supernodes_callback.py | src/callbacks/online_callbacks/num_supernodes_callback.py | from collections import defaultdict
import numpy as np
import torch
from callbacks.base.periodic_callback import PeriodicCallback
from distributed.gather import all_reduce_mean_grad
class NumSupernodesCallback(PeriodicCallback):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.is_first_log = True
self.trainer_batch_size = None
self.hook = None
self.num_nodes_history = []
self.num_supernodes_history = []
class NumSupernodesHook:
def __init__(self):
self.num_nodes = None
self.num_supernodes = None
self.enabled = False
def __call__(self, module, module_input, module_output):
if self.enabled:
if self.num_nodes is None:
self.num_nodes = len(module_input[0])
if self.num_supernodes is None:
self.num_supernodes = len(module_output[0])
def _before_training(self, trainer_batch_size, model, **kwargs):
self.trainer_batch_size = trainer_batch_size
if hasattr(model.encoder, "mesh_embed"):
if hasattr(model.encoder.mesh_embed, "pool"):
self.hook = self.NumSupernodesHook()
model.encoder.mesh_embed.pool.register_forward_hook(self.hook)
def before_every_accumulation_step(self, **kwargs):
if self.hook is None:
return
self.hook.enabled = True
def _track_after_accumulation_step(self, **kwargs):
if self.hook is None:
return
self.hook.enabled = False
self.num_nodes_history.append(self.hook.num_nodes)
self.num_supernodes_history.append(self.hook.num_supernodes)
self.hook.num_nodes = None
self.hook.num_supernodes = None
if self.is_first_log:
self.logger.info(
f"num_nodes: per_device={self.num_nodes_history[-1]} "
f"per_sample={self.num_nodes_history[-1] // self.trainer_batch_size}"
)
self.logger.info(
f"num_supernodes: per_device={self.num_supernodes_history[-1]} "
f"per_sample={self.num_supernodes_history[-1] // self.trainer_batch_size}"
)
self.is_first_log = False
def _periodic_callback(self, **_):
if self.hook is None:
return
# log averages
self.writer.add_scalar(
key=f"num_nodes/{self.to_short_interval_string()}",
value=float(np.mean(self.num_nodes_history) / self.trainer_batch_size),
)
self.writer.add_scalar(
key=f"num_supernodes/{self.to_short_interval_string()}",
value=float(np.mean(self.num_supernodes_history) / self.trainer_batch_size),
)
self.num_nodes_history.clear()
self.num_supernodes_history.clear()
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/online_callbacks/update_output_callback.py | src/callbacks/online_callbacks/update_output_callback.py | from collections import defaultdict
import numpy as np
import torch
from callbacks.base.periodic_callback import PeriodicCallback
from distributed.gather import all_reduce_mean_grad, all_gather_nograd
class UpdateOutputCallback(PeriodicCallback):
def __init__(
self,
keys=None,
patterns=None,
verbose=False,
reduce="mean",
log_output=True,
save_output=False,
**kwargs,
):
super().__init__(**kwargs)
assert keys is None or (isinstance(keys, list) and all(isinstance(k, str) for k in keys))
assert patterns is None or (isinstance(patterns, list) and all(isinstance(p, str) for p in patterns))
self.patterns = patterns or []
self.keys = keys or []
assert len(self.keys) > 0 or len(self.patterns) > 0
self.verbose = verbose
self.tracked_values = defaultdict(list)
assert reduce in ["mean", "last"]
self.reduce = reduce
self.log_output = log_output
self.save_output = save_output
if save_output:
self.out = self.path_provider.stage_output_path / "update_outputs"
self.out.mkdir(exist_ok=True)
else:
self.out = None
def _to_string(self):
return f", keys={self.keys}, patterns={self.patterns}"
def _track_after_accumulation_step(self, update_outputs, **kwargs):
if self.reduce == "last" and self.updates_till_next_log > 1:
return
if len(self.keys) > 0:
for key in self.keys:
value = update_outputs[key]
if torch.is_tensor(value):
value = value.detach()
self.tracked_values[key].append(value)
if len(self.patterns) > 0:
for key, value in update_outputs.items():
for pattern in self.patterns:
if pattern in key:
value = update_outputs[key]
if torch.is_tensor(value):
value = value.detach()
self.tracked_values[key].append(value)
def _periodic_callback(self, **_):
for key, tracked_values in self.tracked_values.items():
if self.reduce == "mean":
if torch.is_tensor(tracked_values[0]):
reduced_value = torch.stack(tracked_values).float().mean()
else:
reduced_value = float(np.mean(tracked_values))
reduced_value = all_reduce_mean_grad(reduced_value)
elif self.reduce == "last":
# len(tracked_values) is equal to accumulation_steps
reduced_value = all_gather_nograd(torch.concat(tracked_values))
else:
raise NotImplementedError
if self.log_output:
assert reduced_value.numel() == 1
self.writer.add_scalar(
key=f"{key}/{self.to_short_interval_string()}",
value=reduced_value,
logger=self.logger if self.verbose else None,
format_str=".5f",
)
if self.save_output:
uri = self.out / f"{key}_{self.to_short_interval_string()}_{self.update_counter.cur_checkpoint}.th"
torch.save(reduced_value, uri)
self.tracked_values.clear()
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/online_callbacks/__init__.py | src/callbacks/online_callbacks/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/base/callback_base.py | src/callbacks/base/callback_base.py | import logging
from collections import defaultdict
import kappaprofiler as kp
import torch
from distributed.gather import all_gather_nograd
from providers.config_providers.base.config_provider_base import ConfigProviderBase
from providers.config_providers.noop_config_provider import NoopConfigProvider
from providers.path_provider import PathProvider
from providers.summary_providers.base.summary_provider_base import SummaryProviderBase
from providers.summary_providers.noop_summary_provider import NoopSummaryProvider
from utils.data_container import DataContainer
from utils.formatting_util import list_to_string
from utils.naming_util import snake_type_name
from utils.update_counter import UpdateCounter
from .writers.checkpoint_writer import CheckpointWriter
from .writers.log_writer import LogWriter
class CallbackBase:
    """Base class for all callbacks.

    Provides a process-wide log writer singleton, a per-callback logger, a
    checkpoint writer, and the `before_training`/`after_training` template
    methods (children override the `_before_training`/`_after_training`
    implementation hooks instead).
    """

    # shared across all callbacks so every callback logs through the same writer
    log_writer_singleton = None

    @property
    def writer(self):
        """Lazily create and return the process-wide LogWriter singleton."""
        if CallbackBase.log_writer_singleton is None:
            CallbackBase.log_writer_singleton = LogWriter(
                path_provider=self.path_provider,
                update_counter=self.update_counter,
            )
        return CallbackBase.log_writer_singleton

    @staticmethod
    def flush():
        # flush pending log entries (no-op if the writer was never created)
        if CallbackBase.log_writer_singleton is not None:
            CallbackBase.log_writer_singleton.flush()

    @staticmethod
    def finish():
        # finalize the writer (e.g. persist primitive entries) if it exists
        if CallbackBase.log_writer_singleton is not None:
            CallbackBase.log_writer_singleton.finish()

    def __init__(
            self,
            data_container: DataContainer = None,
            config_provider: ConfigProviderBase = None,
            summary_provider: SummaryProviderBase = None,
            path_provider: PathProvider = None,
            update_counter: UpdateCounter = None,
    ):
        self.data_container = data_container
        # fall back to no-op providers so callbacks never have to None-check
        self.config_provider = config_provider or NoopConfigProvider()
        self.summary_provider = summary_provider or NoopSummaryProvider()
        self.path_provider = path_provider
        self.update_counter = update_counter
        # accumulated per-dataset profiling times in seconds
        self.total_data_time = defaultdict(float)
        self.total_forward_time = defaultdict(float)
        # these things are initialized on property access because they require the name/full_name
        # (which can be set from child classes)
        self._callback = None
        # trainer checkpoint requires gathering random states -> all ranks have a checkpoint writer
        self.checkpoint_writer = CheckpointWriter(path_provider=self.path_provider, update_counter=update_counter)
        # check that children only override their implementation methods
        assert type(self).before_training == CallbackBase.before_training
        assert type(self).after_training == CallbackBase.after_training

    def __repr__(self):
        return str(self)

    def __str__(self):
        return type(self).__name__

    def state_dict(self):
        """Return state to persist in checkpoints (None if the callback is stateless)."""
        return None

    def load_state_dict(self, state_dict):
        """Restore state produced by `state_dict` (no-op by default)."""
        pass

    @property
    def logger(self):
        # lazily created because the logger name depends on str(self),
        # which child classes may customize after construction
        if self._callback is None:
            self._callback = logging.getLogger(str(self))
        return self._callback

    @torch.no_grad()
    def before_training(self, **kwargs):
        """Template method: profile and dispatch to `_before_training` if overridden."""
        if type(self)._before_training == CallbackBase._before_training:
            return
        with kp.named_profile(f"{self}.before_training"):
            self._before_training(**kwargs)

    @torch.no_grad()
    def after_training(self, **kwargs):
        """Template method: log accumulated profiling times, then dispatch to `_after_training`."""
        for dataset_key in self.total_data_time.keys():
            # gather per-rank times so the log shows the timing of every rank
            total_data_time = all_gather_nograd(self.total_data_time[dataset_key])
            total_forward_time = all_gather_nograd(self.total_forward_time[dataset_key])
            self.logger.info("------------------")
            self.logger.info(f"{snake_type_name(self)} dataset_key={dataset_key}")
            self.logger.info(f"total_data_time: {list_to_string(total_data_time)}")
            self.logger.info(f"total_forward_time: {list_to_string(total_forward_time)}")
        if type(self)._after_training == CallbackBase._after_training:
            return
        with kp.named_profile(f"{self}.after_training"):
            self._after_training(**kwargs)

    def _before_training(self, **kwargs):
        # implementation hook for child classes
        pass

    def _after_training(self, **kwargs):
        # implementation hook for child classes
        pass

    def register_root_datasets(self, dataset_config_provider=None, is_mindatarun=False):
        # hook for callbacks that need additional datasets
        pass
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/base/periodic_callback.py | src/callbacks/base/periodic_callback.py | import math
import kappaprofiler as kp
import numpy as np
import torch
from kappadata.samplers import InterleavedSamplerConfig
from kappadata.wrappers import ModeWrapper
from torch.utils.data import SequentialSampler, DistributedSampler
from tqdm import tqdm
from distributed.config import is_distributed, is_managed, is_rank0
from distributed.gather import all_gather_nograd_clipped
from utils.naming_util import snake_type_name
from utils.noop_tqdm import NoopTqdm
from .callback_base import CallbackBase
class PeriodicCallback(CallbackBase):
    """Callback that fires periodically: every n epochs, updates or samples.

    Children implement `_periodic_callback` (and optionally the tracking hooks
    `_track_after_accumulation_step`/`_track_after_update_step`) and register
    datasets to iterate via `_register_sampler_configs`.
    """

    def __init__(
            self,
            every_n_epochs: int = None,
            every_n_updates: int = None,
            every_n_samples: int = None,
            batch_size: int = None,
            **kwargs,
    ):
        super().__init__(**kwargs)
        self.every_n_epochs = every_n_epochs
        self.every_n_updates = every_n_updates
        self.every_n_samples = every_n_samples
        self.batch_size = batch_size
        # config_id -> InterleavedSamplerConfig / display name
        self._sampler_configs = {}
        self._sampler_config_names = {}
        # whenever a callback wants to iterate over a dataset -> check if it does so in the correct order
        # (order of iteration needs to be the same order as registration)
        self.__sampler_configs_counter = 0
        # if stuff is tracked -> multiple interval types lead to inconsistent results
        if (
                type(self)._track_after_accumulation_step != PeriodicCallback._track_after_accumulation_step or
                type(self)._track_after_update_step != PeriodicCallback._track_after_update_step
        ):
            assert sum([
                self.every_n_epochs is not None,
                self.every_n_updates is not None,
                self.every_n_samples is not None,
            ]) <= 1, "tracking callbacks can't have multiple interval types"
        # periodic callback requires update_counter
        assert self.update_counter is not None
        # check that children only override their implementation methods
        assert type(self).track_after_accumulation_step == PeriodicCallback.track_after_accumulation_step
        assert type(self).track_after_update_step == PeriodicCallback.track_after_update_step
        assert type(self).after_update == PeriodicCallback.after_update
        assert type(self).after_epoch == PeriodicCallback.after_epoch
        assert type(self).register_sampler_configs == PeriodicCallback.register_sampler_configs
        # this might be confused with register_sampler_configs method and accidentally overwritten
        assert type(self)._register_sampler_config_from_key == PeriodicCallback._register_sampler_config_from_key

    def __str__(self):
        detail_str = self._to_string() or ""
        return f"{type(self).__name__}({self.get_interval_string_verbose()}{detail_str})"

    def _to_string(self):
        # hook for children to append details to str(self)
        return None

    def _register_sampler_config_from_key(self, key, mode):
        """Register a dataset (looked up by key in the data container) for periodic iteration."""
        dataset, collator = self.data_container.get_dataset(key, mode=mode)
        return self.__register_sampler_config(dataset=dataset, mode=mode, name=key, collator=collator)

    def _register_sampler_config_from_dataset(self, dataset, mode, name):
        """Register an explicitly provided (not yet mode-wrapped) dataset for periodic iteration."""
        assert not isinstance(dataset, ModeWrapper)
        dataset = ModeWrapper(dataset=dataset, mode=mode, return_ctx=True)
        return self.__register_sampler_config(dataset=dataset, mode=mode, name=name)

    def __register_sampler_config(self, dataset, mode, name, collator=None):
        # returns a config_id that must later be passed to iterate_over_dataset
        assert len(dataset) > 0
        config = InterleavedSamplerConfig(
            sampler=DistributedSampler(dataset, shuffle=False) if is_distributed() else SequentialSampler(dataset),
            every_n_epochs=self.every_n_epochs,
            every_n_updates=self.every_n_updates,
            every_n_samples=self.every_n_samples,
            collator=collator,
            batch_size=self.batch_size,
        )
        config_id = len(self._sampler_configs)
        self._sampler_configs[config_id] = config
        self._sampler_config_names[config_id] = f"{name}.{mode.replace(' ', '.')}"
        return config_id

    def register_sampler_configs(self, trainer):
        """Collect all sampler configs of this callback (called once by the trainer)."""
        assert len(self._sampler_configs) == 0
        self._register_sampler_configs(trainer)
        return self._sampler_configs.values(), self._sampler_config_names.values()

    def _register_sampler_configs(self, trainer):
        # implementation hook for child classes
        pass

    def should_log_after_epoch(self, checkpoint):
        if self.every_n_epochs is not None:
            return checkpoint.epoch % self.every_n_epochs == 0
        return False

    def should_log_after_update(self, checkpoint):
        if self.every_n_updates is not None:
            return checkpoint.update % self.every_n_updates == 0
        return False

    def should_log_after_sample(self, checkpoint, effective_batch_size):
        # fires when the sample counter crossed a multiple of every_n_samples
        # within the last update (sample counts rarely hit the boundary exactly)
        if self.every_n_samples is not None:
            last_update_samples = checkpoint.sample - effective_batch_size
            prev_log_step = int(last_update_samples / self.every_n_samples)
            cur_log_step = int(checkpoint.sample / self.every_n_samples)
            if cur_log_step > prev_log_step:
                return True
        return False

    def before_every_update(self, **kwargs):
        pass

    def before_every_accumulation_step(self, **kwargs):
        pass

    def after_every_backward(self, **kwargs):
        pass

    def before_every_optim_step(self, **kwargs):
        pass

    def _track_after_accumulation_step(self, **kwargs):
        # implementation hook for tracking callbacks
        pass

    def _track_after_update_step(self, **kwargs):
        # implementation hook for tracking callbacks
        pass

    def _periodic_callback(self, interval_type, **kwargs):
        # implementation hook: the actual periodic work of the callback
        pass

    @torch.no_grad()
    def track_after_accumulation_step(self, **kwargs):
        """Template method: profile and dispatch to `_track_after_accumulation_step` if overridden."""
        if type(self)._track_after_accumulation_step == PeriodicCallback._track_after_accumulation_step:
            return
        with kp.named_profile(f"{self}.track_after_accumulation_step"):
            self._track_after_accumulation_step(**kwargs)

    @torch.no_grad()
    def track_after_update_step(self, **kwargs):
        """Template method: profile and dispatch to `_track_after_update_step` if overridden."""
        if type(self)._track_after_update_step == PeriodicCallback._track_after_update_step:
            return
        with kp.named_profile(f"{self}.track_after_update_step"):
            self._track_after_update_step(**kwargs)

    @torch.no_grad()
    def after_epoch(self, **kwargs):
        """Invoke `_periodic_callback` if the epoch interval is due."""
        if type(self)._periodic_callback == PeriodicCallback._periodic_callback:
            return
        if self.should_log_after_epoch(self.update_counter.cur_checkpoint):
            with kp.named_profile(f"{self}.after_epoch"):
                self._periodic_callback(interval_type="epoch", **kwargs)

    @torch.no_grad()
    def after_update(self, effective_batch_size, **kwargs):
        """Invoke `_periodic_callback` if the update and/or sample interval is due."""
        if type(self)._periodic_callback == PeriodicCallback._periodic_callback:
            return
        if self.should_log_after_update(self.update_counter.cur_checkpoint):
            with kp.named_profile(f"{self}.after_update"):
                self._periodic_callback(interval_type="update", **kwargs)
        if self.should_log_after_sample(self.update_counter.cur_checkpoint, effective_batch_size):
            with kp.named_profile(f"{self}.after_sample"):
                self._periodic_callback(interval_type="sample", **kwargs)

    @property
    def updates_till_next_log(self):
        """Number of updates remaining until the next log point (in [1, updates_per_log_interval])."""
        updates_per_log_interval = self.updates_per_log_interval
        return updates_per_log_interval - self.update_counter.cur_checkpoint.update % updates_per_log_interval

    @property
    def updates_per_log_interval(self):
        """Length of one log interval expressed in updates (requires exactly one interval type)."""
        if self.every_n_epochs is not None:
            assert self.every_n_updates is None and self.every_n_samples is None
            return self.update_counter.updates_per_epoch * self.every_n_epochs
        if self.every_n_updates is not None:
            assert self.every_n_epochs is None and self.every_n_samples is None
            return self.every_n_updates
        if self.every_n_samples is not None:
            assert self.every_n_epochs is None and self.every_n_updates is None
            # NOTE: uneven every_n_samples not supported
            assert self.every_n_samples % self.update_counter.effective_batch_size == 0
            return int(self.every_n_samples / self.update_counter.effective_batch_size)
        raise RuntimeError

    def get_interval_string_verbose(self):
        """Human readable interval description, e.g. 'every_n_updates=100'."""
        results = []
        if self.every_n_epochs is not None:
            results.append(f"every_n_epochs={self.every_n_epochs}")
        if self.every_n_updates is not None:
            results.append(f"every_n_updates={self.every_n_updates}")
        if self.every_n_samples is not None:
            results.append(f"every_n_samples={self.every_n_samples}")
        return ",".join(results)

    def to_short_interval_string(self):
        """Compact interval description used in log keys/filenames, e.g. 'U100'."""
        results = []
        if self.every_n_epochs is not None:
            results.append(f"E{self.every_n_epochs}")
        if self.every_n_updates is not None:
            results.append(f"U{self.every_n_updates}")
        if self.every_n_samples is not None:
            results.append(f"S{self.every_n_samples}")
        return "_".join(results)

    def iterate_over_dataset(
            self,
            forward_fn,
            config_id,
            batch_size,
            data_iter=None,
            use_collate_fn=True,
    ):
        """Iterate over a registered dataset, apply `forward_fn` per batch and collate+gather the results.

        Must be called in the same order as the configs were registered
        (enforced via `config_id`).
        """
        assert config_id == self.__sampler_configs_counter
        config = self._sampler_configs[self.__sampler_configs_counter]
        dataset_name = self._sampler_config_names[self.__sampler_configs_counter]
        if isinstance(config.sampler, DistributedSampler):
            # DistributedSampler only yields the local shard -> take global length from the dataset
            global_dataset_len = len(config.sampler.dataset)
        else:
            global_dataset_len = len(config.sampler)
        local_dataset_len = len(config.sampler)
        n_batches = math.ceil(local_dataset_len / (config.batch_size or batch_size))
        # advance (and wrap) the counter so the next call expects the next config
        self.__sampler_configs_counter = (self.__sampler_configs_counter + 1) % len(self._sampler_configs)
        # iterate
        data_times = []
        forward_times = []
        forward_results = []
        pbar_ctor = NoopTqdm if is_managed() or not is_rank0() else tqdm
        for _ in pbar_ctor(iterable=range(n_batches)):
            # load data
            with kp.Stopwatch() as data_sw:
                batch = next(data_iter)
            data_times.append(data_sw.elapsed_seconds)
            # forward
            with kp.Stopwatch() as forward_sw:
                forward_result = forward_fn(batch)
            forward_times.append(forward_sw.elapsed_seconds)
            forward_results.append(forward_result)
        # profiling book keeping
        mean_data_time = float(np.mean(data_times))
        mean_forward_time = float(np.mean(forward_times))
        prefix = f"profiling/{snake_type_name(self)}/{dataset_name}"
        self.logger.info(f"{prefix}: data={mean_data_time:.2f} forward={mean_forward_time:.2f}")
        if self.update_counter is not None:
            self.writer.add_scalar(f"{prefix}/data_time", mean_data_time)
            self.writer.add_scalar(f"{prefix}/forward_times", mean_forward_time)
        self.total_data_time[dataset_name] += mean_data_time
        self.total_forward_time[dataset_name] += mean_forward_time
        # collate
        if use_collate_fn:
            single_output = False
            if not isinstance(forward_results[0], tuple):
                # normalize to tuple form so the zip below works uniformly
                forward_results = [(fwr,) for fwr in forward_results]
                single_output = True
            collated = [
                self._collate_result(result, global_dataset_len=global_dataset_len)
                for result in zip(*forward_results)
            ]
            if single_output:
                return collated[0]
        else:
            collated = forward_results
        return collated

    @staticmethod
    def _collate_tensors(tensors):
        # scalars are stacked (new dim), batched tensors are concatenated along dim 0
        if tensors[0].ndim == 0:
            return torch.stack(tensors)
        return torch.concat(tensors)

    @staticmethod
    def _collate_result(result, global_dataset_len):
        """Collate per-batch results into one structure and all-gather across ranks.

        `global_dataset_len` is used to clip away duplicated samples that the
        DistributedSampler padded onto the last batch.
        """
        if isinstance(result[0], dict):
            # tuple[dict] -> dict[tensor]
            result = {k: PeriodicCallback._collate_tensors([r[k] for r in result]) for k in result[0].keys()}
            # gather
            result = {k: all_gather_nograd_clipped(v, global_dataset_len) for k, v in result.items()}
        else:
            if isinstance(result[0], list):
                # List[List[Tensor]] -> List[Tensor]
                result = [torch.concat(item) for item in zip(*result)]
                result = [all_gather_nograd_clipped(item, global_dataset_len) for item in result]
            elif result[0] is None:
                return None
            else:
                if torch.is_tensor(result[0]):
                    # List[Tensor] -> Tensor
                    result = torch.concat(result)
                else:
                    result = torch.tensor(result)
                result = all_gather_nograd_clipped(result, global_dataset_len)
        return result
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/base/__init__.py | src/callbacks/base/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/base/writers/log_writer.py | src/callbacks/base/writers/log_writer.py | import logging
from collections import defaultdict
from contextlib import contextmanager
import torch
import wandb
import yaml
from distributed.config import is_rank0
from providers.path_provider import PathProvider
from utils.update_counter import UpdateCounter
class LogWriter:
    """Collects log values per update and writes them to wandb and a local yaml file.

    Values are cached per update (`log_cache`) and flushed as a single entry so
    wandb receives one log call per update.
    """

    def __init__(self, path_provider: PathProvider, update_counter: UpdateCounter):
        self.logger = logging.getLogger(type(self).__name__)
        self.path_provider = path_provider
        self.update_counter = update_counter
        # fully flushed entries (one dict per logged update)
        self.log_entries = []
        # values collected for the current update; None when nothing is pending
        self.log_cache = None
        self.is_wandb = wandb.run is not None
        # optional key suffix managed via with_postfix
        self._postfix = None

    def finish(self):
        """Write all collected entries to a yaml file (rank0 only)."""
        if len(self.log_entries) == 0 or not is_rank0():
            return
        entries_uri = self.path_provider.primitive_entries_uri
        self.logger.info(f"writing {len(self.log_entries)} log entries to {entries_uri}")
        # convert into {<key>: {<update0>: <value0>, <update1>: <value1>}}
        result = defaultdict(dict)
        for entry in self.log_entries:
            # update is used instead of wandb's _step
            update = entry["update"]
            for key, value in entry.items():
                if key == "update":
                    continue
                result[key][update] = value
        with open(entries_uri, "w") as f:
            yaml.safe_dump(dict(result), f)

    def _log(self, key, value, logger=None, format_str=None):
        # lazily start a new cache entry stamped with the current progress counters
        if self.log_cache is None:
            self.log_cache = dict(
                epoch=self.update_counter.epoch,
                update=self.update_counter.update,
                sample=self.update_counter.sample,
            )
        if self._postfix is not None:
            key = f"{key}/{self._postfix}"
        self.log_cache[key] = value
        if logger is not None:
            if format_str is not None:
                value = f"{value:{format_str}}"
            logger.info(f"{key}: {value}")

    def flush(self):
        """Flush the current cache entry to wandb and the primitive entry list."""
        if self.log_cache is None:
            return
        if self.is_wandb:
            wandb.log(self.log_cache)
        # wandb doesn't support querying offline logfiles so offline mode would have no way to summarize stages
        # also fetching the summaries from the online version potentially takes a long time, occupying GPU servers
        # for primitive tasks
        # -------------------
        # wandb also has weird behavior when lots of logs are done seperately -> collect all log values and log once
        # -------------------
        # check that every log is fully cached (i.e. no update is logged twice)
        if len(self.log_entries) > 0:
            assert self.log_cache["update"] > self.log_entries[-1]["update"]
        # don't keep histograms for primitive logging
        self.log_entries.append({k: v for k, v in self.log_cache.items() if not isinstance(v, wandb.Histogram)})
        self.log_cache = None

    def add_scalar(self, key, value, logger=None, format_str=None):
        """Log a scalar value (tensors are converted to python scalars)."""
        if torch.is_tensor(value):
            value = value.item()
        self._log(key, value, logger=logger, format_str=format_str)

    def add_histogram(self, key, data):
        # histograms are only supported by wandb (not by primitive logging)
        if self.is_wandb:
            self._log(key, wandb.Histogram(data))

    def add_previous_entry(self, entry):
        # only add to wandb as primitive entries are currently based on updates
        # add_previous_entry is only used to copy graphs from other runs into
        # the current run so primitive logging is not needed anyways
        if self.is_wandb:
            wandb.log(entry)

    @contextmanager
    def with_postfix(self, postfix):
        """Temporarily append `postfix` to every logged key (nestable)."""
        prev_postfix = self._postfix
        if self._postfix is not None:
            self._postfix = f"{self._postfix}/{postfix}"
        else:
            self._postfix = postfix
        yield
        self._postfix = prev_postfix
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/base/writers/__init__.py | src/callbacks/base/writers/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/base/writers/checkpoint_writer.py | src/callbacks/base/writers/checkpoint_writer.py | import logging
import torch
import yaml
from torch.nn.parallel import DistributedDataParallel
from distributed.config import is_rank0
from models.base.composite_model_base import CompositeModelBase
from models.base.single_model_base import SingleModelBase
from providers.path_provider import PathProvider
from utils.checkpoint import Checkpoint
from utils.update_counter import UpdateCounter
class CheckpointWriter:
    """Saves model weights, optimizer states and the trainer state to the checkpoint folder.

    Composite models are saved recursively as separate files per submodel.
    """

    def __init__(self, path_provider: PathProvider, update_counter: UpdateCounter):
        self.logger = logging.getLogger(type(self).__name__)
        self.path_provider = path_provider
        self.update_counter = update_counter

    def _to_ckpt_dict(self, model, ckpt):
        """Bundle a model's state_dict with its ctor kwargs and checkpoint metadata."""
        if isinstance(ckpt, Checkpoint):
            ckpt = dict(ckpt)
        return dict(
            state_dict=model.state_dict(),
            ctor_kwargs=model.ctor_kwargs,
            ckpt=ckpt,
            # absolute progress of the current stage (in addition to the requested ckpt label)
            abs_ckpt=dict(self.update_counter.cur_checkpoint),
            stage_id=self.path_provider.stage_id,
        )

    def save(
            self,
            model,
            checkpoint,
            trainer=None,
            save_weights=True,
            save_optim=True,
            save_latest_weights=False,
            save_latest_optim=False,
            model_name_to_save=None,
            save_frozen_weights=False,
    ):
        """Save model (and optionally trainer) checkpoints.

        The 'latest' variants overwrite a fixed file instead of creating a new
        one per checkpoint. `model_name_to_save` restricts saving to a single
        (sub)model name.
        """
        # NOTE: this has to be called from all ranks because random states are gathered to rank0
        trainer_sd = trainer.state_dict() if trainer is not None else None
        if is_rank0():
            self._save_seperate_models(
                name=model.name,
                model=model,
                ckpt=checkpoint,
                save_weights=save_weights,
                save_optim=save_optim,
                save_latest_weights=save_latest_weights,
                save_latest_optim=save_latest_optim,
                model_name_to_save=model_name_to_save,
                save_frozen_weights=save_frozen_weights,
            )
            if trainer_sd is not None:
                if save_weights or save_optim:
                    trainer_out_path = self.path_provider.checkpoint_path / f"trainer cp={checkpoint}.th"
                    torch.save(trainer_sd, trainer_out_path)
                    self.logger.info(f"saved trainer state_dict to {trainer_out_path}")
                if save_latest_weights or save_latest_optim:
                    latest_trainer_out_path = self.path_provider.checkpoint_path / f"trainer cp=latest.th"
                    torch.save(trainer_sd, latest_trainer_out_path)
                    self.logger.info(f"saved trainer state_dict to {latest_trainer_out_path}")

    def _save_seperate_models(
            self,
            name,
            model,
            ckpt,
            save_weights,
            save_optim,
            save_latest_weights,
            save_latest_optim,
            model_name_to_save,
            save_frozen_weights,
    ):
        """Recursively save single models; composite models delegate to their submodels."""
        assert not isinstance(model, DistributedDataParallel)
        if isinstance(model, SingleModelBase):
            # frozen models are typically not worth saving (their weights never change)
            if model.is_frozen and not save_frozen_weights:
                return
            if model_name_to_save is not None and name != model_name_to_save:
                return
            # save weights with ctor_kwargs
            if save_weights:
                model_uri = self.path_provider.checkpoint_path / f"{name} cp={ckpt} model.th"
                torch.save(self._to_ckpt_dict(model=model, ckpt=ckpt), model_uri)
                self.logger.info(f"saved {name} to {model_uri}")
            if save_latest_weights:
                # save only latest weights (and overwrite old latest weights)
                model_uri = self.path_provider.checkpoint_path / f"{name} cp=latest model.th"
                torch.save(self._to_ckpt_dict(model=model, ckpt=ckpt), model_uri)
                self.logger.info(f"saved {name} to {model_uri}")
            # save optim
            if model.optim is not None:
                if save_optim:
                    optim_uri = self.path_provider.checkpoint_path / f"{name} cp={ckpt} optim.th"
                    torch.save(model.optim.state_dict(), optim_uri)
                    self.logger.info(f"saved {name} optim to {optim_uri}")
                if save_latest_optim:
                    # save only latest optim (and overwrite old latest optim)
                    optim_uri = self.path_provider.checkpoint_path / f"{name} cp=latest optim.th"
                    torch.save(model.optim.state_dict(), optim_uri)
                    self.logger.info(f"saved {name} optim to {optim_uri}")
            # save ctor kwargs
            # if save_weights:
            #     kwargs_uri = self.path_provider.checkpoint_path / f"{name} kwargs.yaml"
            #     if not kwargs_uri.exists():
            #         with open(kwargs_uri, "w") as f:
            #             yaml.safe_dump(model.ctor_kwargs, f)
        elif isinstance(model, CompositeModelBase):
            for k, v in model.submodels.items():
                self._save_seperate_models(
                    name=f"{name}.{k}",
                    model=v,
                    ckpt=ckpt,
                    save_weights=save_weights,
                    save_optim=save_optim,
                    save_latest_weights=save_latest_weights,
                    save_latest_optim=save_latest_optim,
                    model_name_to_save=model_name_to_save,
                    save_frozen_weights=save_frozen_weights,
                )
            # save ctor kwargs
            if model_name_to_save is not None:
                kwargs_uri = self.path_provider.checkpoint_path / f"{name} kwargs.yaml"
                if not kwargs_uri.exists():
                    with open(kwargs_uri, "w") as f:
                        yaml.safe_dump(model.ctor_kwargs, f)
        else:
            raise NotImplementedError
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/retroactive_callbacks/__init__.py | src/callbacks/retroactive_callbacks/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/visualization/__init__.py | src/callbacks/visualization/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/monitor_callbacks/gradient_monitor_callback.py | src/callbacks/monitor_callbacks/gradient_monitor_callback.py | from callbacks.base.periodic_callback import PeriodicCallback
class GradientMonitorCallback(PeriodicCallback):
    """Logs per-parameter gradient statistics (norm/absmax/mean/std) before each optimizer step."""

    def before_every_optim_step(self, model, **kwargs):
        for name, param in model.named_parameters():
            if not param.requires_grad:
                continue
            grad = param.grad
            self.writer.add_scalar(f"gradient/{name}/norm", grad.norm())
            self.writer.add_scalar(f"gradient/{name}/absmax", grad.abs().max())
            # "mean" is the mean of the absolute values (mean gradient magnitude)
            self.writer.add_scalar(f"gradient/{name}/mean", grad.abs().mean())
            # std is undefined (NaN + warning) for a single element -> guard on the number
            # of gradient entries; the previous guard `grad_std.numel() > 1` was always
            # False because Tensor.std() returns a 0-dim scalar, so std was never logged
            # (this now matches WeightMonitorCallback)
            if grad.numel() > 1:
                self.writer.add_scalar(f"gradient/{name}/std", grad.std())
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/monitor_callbacks/kill_on_loss_spike_callback.py | src/callbacks/monitor_callbacks/kill_on_loss_spike_callback.py | from collections import defaultdict
import torch
from callbacks.base.periodic_callback import PeriodicCallback
from distributed.config import is_rank0, get_rank
from utils.select_with_path import select_with_path
from initializers.resume_initializer import ResumeInitializer
from callbacks.checkpoint_callbacks.checkpoint_callback import CheckpointCallback
class KillOnLossSpikeCallback(PeriodicCallback):
    """Aborts training when the online loss spikes and fails to recover.

    A spike is a loss larger than `tolerance_factor * best_loss`. After a spike
    the loss has `recovery_tolerance` log intervals to drop below the best loss
    again, otherwise a RuntimeError is raised.
    """

    def __init__(self, tolerance_factor=10, recovery_tolerance=5, **kwargs):
        super().__init__(**kwargs)
        self.tolerance_factor = tolerance_factor
        self.recovery_tolerance = recovery_tolerance
        # best (lowest) loss observed so far; reference point for spike detection
        self.best_loss = float("inf")
        # number of log intervals spent inside an unrecovered spike (0 = no spike active)
        self.recovery_tolerance_counter = 0

    def state_dict(self):
        return {
            "best_loss": self.best_loss,
            "recovery_tolerance_counter": self.recovery_tolerance_counter,
        }

    def load_state_dict(self, state_dict):
        if state_dict is None:
            self.logger.error(f"state_dict of KillOnLossSpikeCallback is None on rank {get_rank()} -> skip loading")
            return
        self.best_loss = state_dict["best_loss"]
        self.recovery_tolerance_counter = state_dict["recovery_tolerance_counter"]

    def _periodic_callback(self, model, **kwargs):
        # extract loss from log_cache (produced by OnlineLossCallback)
        loss = self.writer.log_cache[f"loss/online/total/{self.to_short_interval_string()}"]
        if self.recovery_tolerance_counter > 0:
            # a spike is active -> loss has <recovery_tolerance> log intervals to recover
            if loss < self.best_loss:
                self.recovery_tolerance_counter = 0
                self.best_loss = loss
                self.logger.info(f"loss recovered from spike")
                return
            # still above the best loss -> count this interval against the tolerance
            self.recovery_tolerance_counter += 1
            self.logger.info(f"loss hasnt recovered from spike")
            self.logger.info(f"tolerance: {self.recovery_tolerance_counter}/{self.recovery_tolerance}")
            if self.recovery_tolerance_counter >= self.recovery_tolerance:
                raise RuntimeError(f"couldnt recover from loss spike within tolerance")
            return
        if loss > self.tolerance_factor * self.best_loss:
            # spike detected
            if self.recovery_tolerance == 0:
                # no tolerance -> instantly kill
                raise RuntimeError(
                    f"loss is higher than {self.tolerance_factor} * best loss "
                    f"({loss:.6f} > {self.tolerance_factor * self.best_loss:.6f})"
                )
            # start counting tolerance log intervals
            self.recovery_tolerance_counter += 1
            self.logger.warning(f"detected loss spike -> loss has {self.recovery_tolerance} intervals to recover")
            return
        # normal case: track the best loss
        self.best_loss = min(self.best_loss, loss)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/monitor_callbacks/activation_monitor_callback.py | src/callbacks/monitor_callbacks/activation_monitor_callback.py | from callbacks.base.periodic_callback import PeriodicCallback
from utils.factory import create_collection
from models.extractors import extractor_from_kwargs
from models.extractors.generic_extractor import GenericExtractor
class ActivationMonitorCallback(PeriodicCallback):
    """Logs activation statistics of selected submodules via forward-hook extractors."""

    def __init__(self, model_paths, **kwargs):
        super().__init__(**kwargs)
        # one extractor per monitored submodule path
        self.extractors = [GenericExtractor(model_path=path) for path in model_paths]

    def _before_training(self, model, **kwargs):
        # attach the forward hooks once before training starts
        for ex in self.extractors:
            ex.register_hooks(model)

    def before_every_accumulation_step(self, model, **kwargs):
        # enable capturing for the upcoming forward pass
        for ex in self.extractors:
            ex.enable_hooks()

    def before_every_optim_step(self, **kwargs):
        for ex in self.extractors:
            act = ex.extract()
            if isinstance(act, tuple):
                # modules with multiple outputs -> monitor the first one
                act = act[0]
            name = ex.model_path
            self.writer.add_scalar(f"act/{name}/norm", act.norm(p=2))
            self.writer.add_scalar(f"act/{name}/avgnorm", act.norm(p=2, dim=-1).mean())
            self.writer.add_scalar(f"act/{name}/absmax", act.abs().max())
            self.writer.add_scalar(f"act/{name}/absmin", act.abs().min())
            self.writer.add_scalar(f"act/{name}/mean", act.mean())
            # std is undefined for a single element
            if act.numel() > 1:
                self.writer.add_scalar(f"act/{name}/std", act.std())
            ex.disable_hooks()
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/monitor_callbacks/weight_monitor_callback.py | src/callbacks/monitor_callbacks/weight_monitor_callback.py | from callbacks.base.periodic_callback import PeriodicCallback
class WeightMonitorCallback(PeriodicCallback):
    """Logs per-parameter weight statistics (norm/absmax/mean/std) after each update step."""

    def _track_after_update_step(self, model, **kwargs):
        for name, param in model.named_parameters():
            # frozen parameters never change -> nothing to monitor
            if not param.requires_grad:
                continue
            self.writer.add_scalar(f"weight/{name}/norm", param.norm())
            self.writer.add_scalar(f"weight/{name}/absmax", param.abs().max())
            # "mean" is the mean of the absolute values (mean weight magnitude)
            self.writer.add_scalar(f"weight/{name}/mean", param.abs().mean())
            # std is undefined for a single element
            if param.numel() > 1:
                self.writer.add_scalar(f"weight/{name}/std", param.std())
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/monitor_callbacks/gradient_spike_monitor_callback.py | src/callbacks/monitor_callbacks/gradient_spike_monitor_callback.py | from collections import defaultdict, deque
import torch
from callbacks.base.callback_base import CallbackBase
class GradientSpikeMonitorCallback(CallbackBase):
    """Registers full backward hooks on all modules and aborts the run when a
    gradient magnitude is more than 2x the mean of its recent history.

    Refactored: the outlier check (previously duplicated for grad_input and
    grad_output) and the per-tensor statistic dump (previously written out
    three times) are factored into helpers; behavior is unchanged.
    """

    def __init__(self, verbose=False, **kwargs):
        super().__init__(**kwargs)
        self.verbose = verbose

    def _before_training(self, model, **kwargs):
        # one hook instance per module so each module keeps its own history
        for name, module in model.named_modules():
            module.register_full_backward_hook(self.NanMonitorHook(name, logger=self.logger))

    class NanMonitorHook:
        def __init__(self, name: str, logger):
            self.name = name
            self.logger = logger
            # rolling window of past magnitudes used as the spike reference
            self.num_history_steps = 3
            self.history = defaultdict(lambda: deque([], self.num_history_steps))

        def _log_and_print(self, msg):
            # log AND print so the message is visible even if logging is swallowed
            self.logger.info(msg)
            print(msg)

        def _log_tensor_stats(self, name, tensor):
            # dump basic statistics of one tensor
            self._log_and_print(f"{name}.abs().max(): {tensor.abs().max().item()}")
            self._log_and_print(f"{name}.abs().min(): {tensor.abs().min().item()}")
            self._log_and_print(f"{name}.mean(): {tensor.mean().item()}")
            self._log_and_print(f"{name}.std(): {tensor.std().item()}")

        def _log(self, module, module_input, module_output, tensor_name):
            """Dump parameter/input/output statistics of the offending module, then terminate."""
            self.logger.error(f"encountered high gradient magnitude in module {self.name} ({tensor_name})")
            self.logger.info(f"parameters:")
            for name, param in module.named_parameters():
                self._log_tensor_stats(name, param)
            for i in range(len(module_input)):
                # drop NaNs so the statistics of the remaining values are meaningful
                tensor = module_input[i].flatten()
                tensor = tensor[~torch.isnan(tensor)]
                self._log_tensor_stats(f"module_input[{i}]", tensor)
            for i in range(len(module_output)):
                tensor = module_output[i].flatten()
                tensor = tensor[~torch.isnan(tensor)]
                self._log_tensor_stats(f"module_output[{i}]", tensor)
            self.logger.error(f"encountered high gradient magnitude in module {self.name} ({tensor_name})")
            # NOTE(review): terminates with status 0 although this is an error condition
            # -- kept for backward compatibility, confirm whether a non-zero code is wanted
            exit(0)

        def _check(self, grads, key_prefix, module, grad_input, grad_output, skip_long):
            """Compare each gradient's magnitude against its rolling history and log spikes."""
            for i in range(len(grads)):
                grad = grads[i]
                if grad is None:
                    continue
                if skip_long and grad.dtype == torch.long:
                    continue
                key = f"{key_prefix}[{i}]"
                history = self.history[key]
                mag = grad.abs().mean()
                # burn-in: collect a full window before flagging outliers
                if len(history) < self.num_history_steps:
                    history.append(mag)
                    continue
                # flag magnitudes larger than 2x the recent mean
                history_mean = torch.stack(list(history)).mean()
                if mag > history_mean * 2:
                    self._log(
                        module=module,
                        module_input=grad_input,
                        module_output=grad_output,
                        tensor_name=key,
                    )
                history.append(mag)

        def __call__(self, module, grad_input, grad_output):
            assert isinstance(grad_input, tuple)
            assert isinstance(grad_output, tuple)
            # integer gradients (e.g. from index inputs) carry no useful magnitude -> skip
            self._check(grad_input, "grad_input", module, grad_input, grad_output, skip_long=True)
            self._check(grad_output, "grad_output", module, grad_input, grad_output, skip_long=False)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/monitor_callbacks/skip_loss_spikes_callback.py | src/callbacks/monitor_callbacks/skip_loss_spikes_callback.py | import torch
from callbacks.base.periodic_callback import PeriodicCallback
from collections import deque
class SkipLossSpikesCallback(PeriodicCallback):
    """Skips optimizer updates whose (accumulation-averaged) loss is well above
    the running average of recent losses (a "loss spike").

    An update is skipped by setting all gradients to None, which makes the
    optimizer step a no-op and avoids polluting momentum buffers (e.g. AdamW).
    """

    def __init__(self, max_skipped_updates_in_a_row=100, queue_size=50, tolerance_factor=0.2, **kwargs):
        super().__init__(**kwargs)
        # abort training after this many consecutively skipped updates
        self.max_skipped_updates_in_a_row = max_skipped_updates_in_a_row
        # number of recent losses that form the reference average
        self.queue_size = queue_size
        # a loss counts as a spike if it exceeds queue_avg * (1 + tolerance_factor)
        self.tolerance_factor = tolerance_factor
        self.queue = deque([], maxlen=queue_size)
        self.skipped_updates_in_a_row = 0
        # losses of the backward passes within the current accumulation window
        self.accumulation_queue = []

    def state_dict(self):
        return dict(
            queue=list(self.queue),
            skipped_updates_in_a_row=self.skipped_updates_in_a_row,
        )

    def load_state_dict(self, state_dict):
        # refill the bounded deque instead of replacing it (keeps maxlen)
        for item in state_dict["queue"]:
            self.queue.append(item)
        self.skipped_updates_in_a_row = state_dict["skipped_updates_in_a_row"]

    def after_every_backward(self, total_loss, **kwargs):
        # detach so the autograd graph is not kept alive by the queue
        self.accumulation_queue.append(total_loss.detach())

    def before_every_optim_step(self, model, **kwargs):
        if self.skipped_updates_in_a_row > self.max_skipped_updates_in_a_row:
            raise RuntimeError(f"skipped {self.max_skipped_updates_in_a_row} in a row -> kill")
        # average loss over accumulation steps
        if len(self.accumulation_queue) > 1:
            total_loss = torch.stack(self.accumulation_queue).mean()
        else:
            total_loss = self.accumulation_queue[0]
        self.accumulation_queue.clear()
        # check if loss is a spike
        if len(self.queue) == self.queue.maxlen:
            queue_avg = torch.stack(list(self.queue)).mean()
            max_tolerable_loss = queue_avg * (1 + self.tolerance_factor)
            if total_loss > max_tolerable_loss:
                self.skipped_updates_in_a_row += 1
                self.logger.info(
                    f"{self.update_counter.cur_checkpoint} skipping batch due to high loss "
                    f"(total_loss={total_loss.item()} max_tolerable_loss={max_tolerable_loss.item()} "
                    f"tolerance_factor={self.tolerance_factor} skipped_in_a_row={self.skipped_updates_in_a_row})"
                )
                # set all gradients to None (will skip optim.step and also avoid adamw momentum updates)
                for p in model.parameters():
                    p.grad = None
            else:
                self.skipped_updates_in_a_row = 0
        else:
            # burnin phase -> disable skipping
            pass
        # add to queue
        self.queue.append(total_loss)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/monitor_callbacks/skip_nan_loss_callback.py | src/callbacks/monitor_callbacks/skip_nan_loss_callback.py | import torch
from callbacks.base.periodic_callback import PeriodicCallback
from collections import deque
class SkipNanLossCallback(PeriodicCallback):
    """Skips optimizer updates whose loss is NaN.

    After a NaN loss is seen in the backward pass, the next optimizer step is
    suppressed by setting all gradients to None (a no-op step that also keeps
    NaNs out of optimizer statistics such as AdamW momenta). Training is
    aborted when too many consecutive updates were skipped.
    """

    def __init__(self, max_skipped_updates_in_a_row=50, **kwargs):
        super().__init__(**kwargs)
        # abort training after this many consecutively skipped updates
        self.max_skipped_updates_in_a_row = max_skipped_updates_in_a_row
        self.skipped_updates_in_a_row = 0
        self.skip_next_update = False

    def state_dict(self):
        return dict(skipped_updates_in_a_row=self.skipped_updates_in_a_row)

    def load_state_dict(self, state_dict):
        self.skipped_updates_in_a_row = state_dict["skipped_updates_in_a_row"]

    def after_every_backward(self, total_loss, **kwargs):
        if torch.isnan(total_loss):
            self.logger.info(f"encountered NaN loss -> skip update")
            self.skip_next_update = True

    def before_every_optim_step(self, model, **kwargs):
        if self.skip_next_update:
            if self.skipped_updates_in_a_row > self.max_skipped_updates_in_a_row:
                raise RuntimeError(f"skipped {self.max_skipped_updates_in_a_row} in a row due to NaN loss -> exit")
            self.skipped_updates_in_a_row += 1
            # actually skip the update: gradients set to None make the optimizer
            # step a no-op and keep NaN gradients out of momentum buffers
            # (consistent with SkipLossSpikesCallback)
            for p in model.parameters():
                p.grad = None
            # BUGFIX: reset the flag, otherwise every subsequent (clean) update
            # would keep counting as skipped until the RuntimeError above fires
            self.skip_next_update = False
        else:
            self.skipped_updates_in_a_row = 0
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/monitor_callbacks/nan_monitor_callback.py | src/callbacks/monitor_callbacks/nan_monitor_callback.py | import torch
from callbacks.base.callback_base import CallbackBase
class NanMonitorCallback(CallbackBase):
    """Registers a forward hook on every submodule that dumps diagnostics and
    terminates training as soon as a NaN shows up in a module input or output."""

    def __init__(self, verbose=False, **kwargs):
        super().__init__(**kwargs)
        self.verbose = verbose

    def _before_training(self, model, **kwargs):
        # attach one hook per submodule
        for name, module in model.named_modules():
            module.register_forward_hook(self.NanMonitorHook(name, logger=self.logger))

    class NanMonitorHook:
        # forward hook that checks every input/output tensor of a module for NaNs
        def __init__(self, name: str, logger):
            self.name = name
            self.logger = logger

        def _log_and_print(self, msg):
            # log to file and also echo to stdout (log output might be buffered/remote)
            self.logger.info(msg)
            print(msg)

        def _log(self, module, module_input, module_output, nan_tensor_name):
            self.logger.error(f"encountered nan in module {self.name} ({nan_tensor_name})")
            self.logger.info(f"parameters:")
            for name, param in module.named_parameters():
                self._log_and_print(f"{name}.abs().max(): {param.abs().max().item()}")
                self._log_and_print(f"{name}.abs().min(): {param.abs().min().item()}")
                self._log_and_print(f"{name}.mean(): {param.mean().item()}")
                self._log_and_print(f"{name}.std(): {param.std().item()}")
            for i in range(len(module_input)):
                # drop NaNs so the statistics below stay finite
                tensor = module_input[i].flatten()
                tensor = tensor[~torch.isnan(tensor)]
                name = f"module_input[{i}]"
                self._log_and_print(f"{name}.abs().max(): {tensor.abs().max().item()}")
                self._log_and_print(f"{name}.abs().min(): {tensor.abs().min().item()}")
                self._log_and_print(f"{name}.mean(): {tensor.mean().item()}")
                self._log_and_print(f"{name}.std(): {tensor.std().item()}")
            for i in range(len(module_output)):
                tensor = module_output[i].flatten()
                tensor = tensor[~torch.isnan(tensor)]
                name = f"module_output[{i}]"
                self._log_and_print(f"{name}.abs().max(): {tensor.abs().max().item()}")
                self._log_and_print(f"{name}.abs().min(): {tensor.abs().min().item()}")
                self._log_and_print(f"{name}.mean(): {tensor.mean().item()}")
                self._log_and_print(f"{name}.std(): {tensor.std().item()}")
            # repeated so the reason for termination is the last line in the log
            self.logger.error(f"encountered nan in module {self.name} ({nan_tensor_name})")
            exit(0)

        def __call__(self, module, module_input, module_output):
            assert isinstance(module_input, tuple)
            for i in range(len(module_input)):
                if torch.is_tensor(module_input[i]) and torch.any(torch.isnan(module_input[i])):
                    self._log(
                        module=module,
                        module_input=module_input,
                        module_output=module_output,
                        nan_tensor_name=f"module_input[{i}]"
                    )
            if isinstance(module_output, tuple):
                for i in range(len(module_output)):
                    if module_output[i] is None:
                        continue
                    assert torch.is_tensor(module_output[i])
                    if torch.any(torch.isnan(module_output[i])):
                        self._log(
                            module=module,
                            module_input=module_input,
                            module_output=module_output,
                            nan_tensor_name=f"module_output[{i}]"
                        )
            elif torch.is_tensor(module_output):
                # BUGFIX: forward hooks typically receive a single tensor as the
                # module output (not a tuple) -> previously such outputs were
                # never checked for NaNs
                if torch.any(torch.isnan(module_output)):
                    self._log(
                        module=module,
                        module_input=module_input,
                        # wrap in a tuple so _log can iterate it
                        module_output=(module_output,),
                        nan_tensor_name="module_output"
                    )
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/monitor_callbacks/__init__.py | src/callbacks/monitor_callbacks/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/monitor_callbacks/debug_monitor_callback.py | src/callbacks/monitor_callbacks/debug_monitor_callback.py | from callbacks.base.periodic_callback import PeriodicCallback
class DebugMonitorCallback(PeriodicCallback):
    """Logs norm, absolute maximum and mean of every parameter before each update."""

    def __init__(self, verbose=False, **kwargs):
        super().__init__(**kwargs)
        self.verbose = verbose

    def before_every_update(self, model, **kwargs):
        # only forward the logger when verbose output is requested
        maybe_logger = self.logger if self.verbose else None
        for name, param in model.named_parameters():
            stats = dict(
                norm=param.norm().item(),
                absmax=param.abs().max().item(),
                mean=param.mean().item(),
            )
            for stat_name, stat_value in stats.items():
                self.writer.add_scalar(f"param/{name}/{stat_name}", stat_value, logger=maybe_logger)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/default_callbacks/copy_previous_config_callback.py | src/callbacks/default_callbacks/copy_previous_config_callback.py | from callbacks.base.callback_base import CallbackBase
from initializers.previous_run_initializer import PreviousRunInitializer
from models.base.composite_model_base import CompositeModelBase
from utils.model_utils import get_named_models
class CopyPreviousConfigCallback(CallbackBase):
    """Copies the configs of previous stages (referenced via
    PreviousRunInitializer) into the config provider of the current stage."""

    @staticmethod
    def _should_include_key(key):
        # exclude irrelevant stuff (e.g. device or dataloader params are irrelevant)
        if key in ["stage_name"]:
            return False
        # dependent on the hardware which produced the checkpoint
        if key in ["device", "trainer/accumulation_steps", "trainer/batch_size"]:
            return False
        if key.startswith("dataloader/") or key.startswith("dist/") or key.startswith("code/"):
            return False
        return True

    def _before_training(self, model, **_):
        # collect configs
        configs = {}
        for model_name, model in get_named_models(model).items():
            # composite models have no initializers of their own
            if isinstance(model, CompositeModelBase):
                continue
            for initializer in model.initializers:
                if not isinstance(initializer, PreviousRunInitializer):
                    continue
                config = self.config_provider.get_config_of_previous_stage(
                    stage_name=initializer.stage_name,
                    stage_id=initializer.stage_id,
                )
                if config is None:
                    self.logger.info(
                        f"no config found for initializer of {model_name} (stage_name='{initializer.stage_name}' "
                        f"stage_id={initializer.stage_id}) -> don't copy anything"
                    )
                    continue
                # multiple models can be initialized from the same stage -> keep the first config
                if initializer.stage_name in configs:
                    self.logger.info(
                        f"duplicate stage_name when copying configs from {PreviousRunInitializer.__name__} "
                        "-> using first config"
                    )
                    if config != configs[initializer.stage_name]:
                        self.logger.warning(f"configs are not the same -> only first configs is copied")
                    continue
                configs[initializer.stage_name] = config
        # add to config
        for previous_stage_name, config in configs.items():
            # check validity of previous_stage_name
            if previous_stage_name in self.config_provider:
                self.logger.warning(f"'{previous_stage_name}' already exists in config_provider -> skip copying")
                continue
            # filter unnecessary keys
            config = {k: v for k, v in config.items() if self._should_include_key(k)}
            self.config_provider[previous_stage_name] = config
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/default_callbacks/copy_previous_summary_callback.py | src/callbacks/default_callbacks/copy_previous_summary_callback.py | from callbacks.base.callback_base import CallbackBase
from initializers.previous_run_initializer import PreviousRunInitializer
from models.base.composite_model_base import CompositeModelBase
from utils.model_utils import get_named_models
class CopyPreviousSummaryCallback(CallbackBase):
    """Copies the summaries of previous stages (referenced via
    PreviousRunInitializer) into the summary provider of the current stage."""

    @staticmethod
    def _should_include_key(key):
        # profiling/lr values of a previous run are not interesting in the new run
        if key.startswith("profiler/") or key.startswith("profiling/") or key.startswith("lr/"):
            return False
        return True

    def _before_training(self, model, **_):
        # collect summaries
        summaries = {}
        for model_name, model in get_named_models(model).items():
            # composite models have no initializers of their own
            if isinstance(model, CompositeModelBase):
                continue
            for initializer in model.initializers:
                if not isinstance(initializer, PreviousRunInitializer):
                    continue
                summary = self.summary_provider.get_summary_of_previous_stage(
                    stage_name=initializer.stage_name,
                    stage_id=initializer.stage_id,
                )
                if summary is None:
                    self.logger.info(
                        f"no summary found for initializer of {model_name} (stage_name='{initializer.stage_name}' "
                        f"stage_id={initializer.stage_id}) -> don't copy anything"
                    )
                    continue
                # multiple models can be initialized from the same stage -> keep the first summary
                if initializer.stage_name in summaries:
                    self.logger.info(
                        f"duplicate stage_name when copying summaries from {PreviousRunInitializer.__name__} "
                        "-> using first summary"
                    )
                    if summary != summaries[initializer.stage_name]:
                        self.logger.warning(f"summaries are not the same -> only first summary is copied")
                    continue
                summaries[initializer.stage_name] = summary
        # add to summary
        for previous_stage_name, summary in summaries.items():
            # filter unnecessary keys
            summary = {k: v for k, v in summary.items() if self._should_include_key(k)}
            for key, value in summary.items():
                # namespace copied keys under the previous stage name
                new_key = f"{previous_stage_name}/{key}"
                if new_key in self.summary_provider:
                    self.logger.warning(f"'{new_key}' already exists in summary_provider -> skip")
                    continue
                self.summary_provider[new_key] = value
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/default_callbacks/eta_callback.py | src/callbacks/default_callbacks/eta_callback.py | import logging
from datetime import datetime, timedelta
import numpy as np
from callbacks.base.periodic_callback import PeriodicCallback
from distributed.config import is_rank0
from utils.formatting_util import short_number_str, seconds_to_duration_str
class EtaCallback(PeriodicCallback):
    """Prints a continuously-updated console line with training progress, the
    ETA of the next log interval and the ETA of the whole training run."""

    class LoggerWasCalledHandler(logging.Handler):
        # remembers whether anything was logged since the last progress print;
        # if so, the next progress line is printed on a new line instead of in-place
        def __init__(self):
            super().__init__()
            self.was_called = False

        def emit(self, _):
            self.was_called = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # accumulated data+update time over the whole run (seconds)
        self.total_time = 0.
        # accumulated time since the current log interval started (seconds)
        self.time_since_last_log = 0.
        self.handler = self.LoggerWasCalledHandler()
        # format strings are derived from the total counts in _before_training
        self.epoch_format = None
        self.update_format = None
        self._start_time = None

    def _before_training(self, **kwargs):
        assert is_rank0(), "only use EtaCallback on rank0 process"
        # pad counters to the width of their maximum value for stable output
        self.epoch_format = f"{int(np.log10(max(1, self.update_counter.end_checkpoint.epoch))) + 1}d"
        self.update_format = f"{int(np.log10(self.update_counter.end_checkpoint.update)) + 1}d"
        self.every_n_epochs_format = f"{int(np.log10(self.every_n_epochs)) + 1}d" if self.every_n_epochs else None
        self.every_n_updates_format = f"{int(np.log10(self.every_n_updates)) + 1}d" if self.every_n_updates else None
        # width of the "updates per log interval" counter depends on the interval unit
        if self.every_n_epochs:
            self.updates_per_log_interval_format = f"{int(np.log10(self.update_counter.updates_per_epoch)) + 1}d"
        elif self.every_n_updates:
            self.updates_per_log_interval_format = self.every_n_updates_format
        elif self.every_n_samples:
            self.updates_per_every_n_samples = np.ceil(self.every_n_samples / self.update_counter.effective_batch_size)
            self.updates_per_log_interval_format = f"{int(np.log10(self.updates_per_every_n_samples)) + 1}d"
        else:
            self.updates_per_log_interval_format = None
        self._start_time = datetime.now()

    def _track_after_update_step(self, times, **kwargs):
        # progress relative to the start checkpoint (supports resumed runs)
        cur_epoch = self.update_counter.cur_checkpoint.epoch - self.update_counter.start_checkpoint.epoch
        cur_update = self.update_counter.cur_checkpoint.update - self.update_counter.start_checkpoint.update
        cur_sample = self.update_counter.cur_checkpoint.sample - self.update_counter.start_checkpoint.sample
        now = datetime.now()
        # reset time_since_last_log on new log interval
        if self.should_log_after_epoch(self.update_counter.cur_checkpoint) and self.update_counter.is_full_epoch:
            self.time_since_last_log = 0.
        if self.should_log_after_update(self.update_counter.cur_checkpoint):
            self.time_since_last_log = 0.
        if self.should_log_after_sample(self.update_counter.cur_checkpoint, self.update_counter.effective_batch_size):
            self.time_since_last_log = 0.
        # derive how far the current log interval has progressed (in updates)
        if self.every_n_epochs:
            last_epoch = self.every_n_epochs * (cur_epoch // self.every_n_epochs)
            updates_at_last_log = last_epoch * self.update_counter.updates_per_epoch
            updates_since_last_log = cur_update - updates_at_last_log
            updates_per_log_interval = self.every_n_epochs * self.update_counter.updates_per_epoch
            # the last update of an interval should display as "full", not as 0
            if updates_since_last_log == 0:
                updates_since_last_log = updates_per_log_interval
        elif self.every_n_updates:
            updates_since_last_log = cur_update % self.every_n_updates
            updates_per_log_interval = self.every_n_updates
        elif self.every_n_samples:
            # sample intervals generally don't align with update boundaries
            samples_since_last_log = cur_sample % self.every_n_samples
            samples_at_last_log = cur_sample - samples_since_last_log
            updates_at_last_log = samples_at_last_log // self.update_counter.effective_batch_size
            superflous_samples_at_last_log = samples_at_last_log % self.update_counter.effective_batch_size
            updates_since_last_log = cur_update - updates_at_last_log
            samples_for_cur_log_interval = self.every_n_samples - superflous_samples_at_last_log
            updates_per_log_interval = int(
                np.ceil(samples_for_cur_log_interval / self.update_counter.effective_batch_size)
            )
        else:
            updates_since_last_log = None
            updates_per_log_interval = None
        # add time
        time_increment = times["data_time"] + times["update_time"]
        self.total_time += time_increment
        self.time_since_last_log += time_increment
        average_update_time = self.total_time / cur_update
        # training ETA
        progress = (
            (self.update_counter.cur_checkpoint.update - self.update_counter.start_checkpoint.update) /
            (self.update_counter.end_checkpoint.update - self.update_counter.start_checkpoint.update)
        )
        past_training_time = now - self._start_time
        estimated_duration = past_training_time / progress
        training_eta = self._start_time + estimated_duration
        remaining_training_time = training_eta - now
        logstr = (
            f"E {format(cur_epoch, self.epoch_format)}/{self.update_counter.end_checkpoint.epoch} "
            f"U {format(cur_update, self.update_format)}/{self.update_counter.end_checkpoint.update} "
            f"S {short_number_str(cur_sample):>6}/"
            f"{short_number_str(self.update_counter.end_checkpoint.sample)} | "
        )
        # log interval ETA
        if self.updates_per_log_interval_format is not None:
            updates_till_next_log = updates_per_log_interval - updates_since_last_log
            time_till_next_log = timedelta(seconds=updates_till_next_log * average_update_time)
            next_log_eta = now + time_till_next_log
            # convert to datetime for formatting
            past_next_log_time = datetime.utcfromtimestamp(self.time_since_last_log)
            time_till_next_log = datetime.utcfromtimestamp(time_till_next_log.total_seconds())
            logstr += (
                f"next_log {format(updates_since_last_log, self.updates_per_log_interval_format)}/"
                f"{format(updates_per_log_interval, self.updates_per_log_interval_format)} | "
                f"next_log_eta {next_log_eta.strftime('%H:%M:%S')} "
                f"({time_till_next_log.strftime('%M:%S')}->{past_next_log_time.strftime('%M:%S')}) | "
            )
        logstr += (
            f"training_eta {training_eta.strftime('%d-%H:%M:%S')} "
            f"({seconds_to_duration_str(remaining_training_time.total_seconds())}->"
            f"{seconds_to_duration_str(past_training_time.total_seconds())}) | "
            f"avg_update {average_update_time:.2f}s"
        )
        if self.handler.was_called:
            # something was logged since the last print -> keep the progress line
            print(logstr)
            self.handler.was_called = False
        else:
            # overwrite the previous progress line in-place
            print(logstr, end="\r")

    def _periodic_callback(self, **_):
        # finalize the in-place progress line at the end of a log interval
        print()

    def _after_training(self, **_):
        # NOTE(review): the handler is removed here, but its registration is not
        # visible in this file -- presumably added externally; verify against caller
        logging.getLogger().removeHandler(self.handler)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/default_callbacks/online_loss_callback.py | src/callbacks/default_callbacks/online_loss_callback.py | from collections import defaultdict
import numpy as np
from callbacks.base.periodic_callback import PeriodicCallback
from distributed.gather import all_reduce_mean_grad
class OnlineLossCallback(PeriodicCallback):
    """Tracks the per-step training losses and periodically logs their mean
    (reduced over all distributed ranks)."""

    def __init__(self, verbose=False, **kwargs):
        super().__init__(**kwargs)
        self.verbose = verbose
        # loss name -> list of scalar loss values since the last log
        self.tracked_losses = defaultdict(list)

    def _track_after_accumulation_step(self, losses, **kwargs):
        for key, value in losses.items():
            self.tracked_losses[key].append(value.item())

    def _periodic_callback(self, **_):
        maybe_logger = self.logger if self.verbose else None
        interval_str = self.to_short_interval_string()
        for key, values in self.tracked_losses.items():
            # average locally, then reduce the mean across ranks
            reduced_mean = all_reduce_mean_grad(np.mean(values))
            self.writer.add_scalar(
                key=f"loss/online/{key}/{interval_str}",
                value=reduced_mean,
                logger=maybe_logger,
            )
        self.tracked_losses.clear()
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/default_callbacks/dataset_stats_callback.py | src/callbacks/default_callbacks/dataset_stats_callback.py | import torch
from kappadata.utils.class_counts import get_class_counts
from kappadata.wrappers import ModeWrapper, LabelSmoothingWrapper
from callbacks.base.callback_base import CallbackBase
class DatasetStatsCallback(CallbackBase):
    """Logs the size of every dataset and stores it in the summary provider."""

    def _before_training(self, **_):
        for key, ds in self.data_container.datasets.items():
            self._log_size(key, ds)

    def _log_size(self, dataset_key, dataset):
        # record the sample count both in the summary and in the log
        num_samples = len(dataset)
        self.summary_provider[f"ds_stats/{dataset_key}/len"] = num_samples
        self.logger.info(f"{dataset_key}: {num_samples} samples")
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/default_callbacks/train_time_callback.py | src/callbacks/default_callbacks/train_time_callback.py | import numpy as np
from callbacks.base.periodic_callback import PeriodicCallback
from distributed.gather import all_gather_nograd
from utils.formatting_util import list_to_string
class TrainTimeCallback(PeriodicCallback):
    """Tracks data-loading and update times per step and periodically logs the
    per-rank means; totals are reported once after training."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # per-step times since the last log
        self.train_data_times = []
        self.update_times = []
        # accumulated totals over the whole run
        self.total_train_data_time = 0.
        self.total_update_time = 0.

    def _track_after_update_step(self, **kwargs):
        step_times = kwargs["times"]
        self.train_data_times.append(step_times["data_time"])
        self.update_times.append(step_times["update_time"])

    def _periodic_callback(self, interval_type, **_):
        data_time_sum = np.sum(self.train_data_times)
        update_time_sum = np.sum(self.update_times)
        data_time_mean = data_time_sum / len(self.train_data_times)
        update_time_mean = update_time_sum / len(self.update_times)
        self.total_train_data_time += data_time_sum
        self.total_update_time += update_time_sum
        self.train_data_times.clear()
        self.update_times.clear()
        # gather for all devices
        mean_data_times = all_gather_nograd(data_time_mean)
        mean_update_times = all_gather_nograd(update_time_mean)
        for rank, (rank_data_time, rank_update_time) in enumerate(zip(mean_data_times, mean_update_times)):
            # ideally this would have a key like system/<key> but wandb doesn't like that
            self.writer.add_scalar(f"profiling/train_data_time/{rank}/{interval_type}", rank_data_time)
            self.writer.add_scalar(f"profiling/train_update_time/{rank}/{interval_type}", rank_update_time)
        self.logger.info(f"data={list_to_string(mean_data_times)} update={list_to_string(mean_update_times)}")

    def _after_training(self, **_):
        gathered_data_time = all_gather_nograd(self.total_train_data_time)
        gathered_update_time = all_gather_nograd(self.total_update_time)
        self.logger.info("------------------")
        self.logger.info(f"total_train_data_time: {list_to_string(gathered_data_time)}")
        self.logger.info(f"total_update_time: {list_to_string(gathered_update_time)}")
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/default_callbacks/copy_previous_entries_callback.py | src/callbacks/default_callbacks/copy_previous_entries_callback.py | import yaml
from callbacks.base.callback_base import CallbackBase
from initializers.previous_run_initializer import PreviousRunInitializer
from models.base.composite_model_base import CompositeModelBase
from utils.checkpoint import Checkpoint
from utils.model_utils import get_named_models
class CopyPreviousEntriesCallback(CallbackBase):
    """Copies logged entries (metric time series) of previous stages into the
    writer of the current stage, both for model initializers and for a resumed
    trainer."""

    @staticmethod
    def _should_include_key(key):
        # profiling values of a previous run are not interesting
        if key.startswith("profiling/"):
            return False
        return True

    def _before_training(self, model, trainer, **_):
        # collect entries
        all_entries = {}
        for model_name, model in get_named_models(model).items():
            # composite models have no initializers of their own
            if isinstance(model, CompositeModelBase):
                continue
            for initializer in model.initializers:
                if not isinstance(initializer, PreviousRunInitializer):
                    continue
                entries_uri = self.path_provider.get_primitive_entries_uri(
                    stage_name=initializer.stage_name,
                    stage_id=initializer.stage_id,
                )
                if entries_uri is None:
                    self.logger.info(
                        f"no entries found for initializer of {model_name} (stage_name='{initializer.stage_name}' "
                        f"stage_id={initializer.stage_id}) -> don't copy anything"
                    )
                    continue
                if not entries_uri.exists():
                    self.logger.info(f"entries uri {entries_uri.as_posix()} doesn't exist -> don't copy anything")
                    continue
                with open(entries_uri) as f:
                    entries = yaml.safe_load(f)
                # multiple models can be initialized from the same stage -> keep the first entries
                if initializer.stage_name in all_entries:
                    self.logger.info(
                        f"duplicate stage_name when copying entries from {PreviousRunInitializer.__name__} "
                        "-> using first entries"
                    )
                    if entries != all_entries[initializer.stage_name]:
                        self.logger.warning(f"entries are not the same -> only first entries is copied")
                    continue
                all_entries[initializer.stage_name] = entries
        # add to config
        for stage_name, entries in all_entries.items():
            # epoch/sample axes are stored as dicts keyed by the update counter
            epochs = entries.pop("epoch")
            updates = list(epochs.keys())
            samples = entries.pop("sample")
            for update in updates:
                entry = {
                    f"epoch_{stage_name}": epochs[update],
                    f"update_{stage_name}": update,
                    f"sample_{stage_name}": samples[update],
                }
                # namespace copied metrics under the previous stage name
                entry.update({
                    f"{stage_name}/{key}": value[update]
                    for key, value in entries.items()
                    if self._should_include_key(key)
                })
                self.writer.add_previous_entry(entry)
        # when the trainer itself resumes from a previous run, also copy that run's entries
        if trainer.initializer is not None:
            entries_uri = self.path_provider.get_primitive_entries_uri(
                stage_name=trainer.initializer.stage_name,
                stage_id=trainer.initializer.stage_id,
            )
            if entries_uri is None:
                self.logger.info(
                    f"no entries found for trainer.initializer (stage_name='{trainer.initializer.stage_name}' "
                    f"stage_id={trainer.initializer.stage_id}) -> don't copy anything"
                )
                return
            if not entries_uri.exists():
                self.logger.info(f"entries uri {entries_uri.as_posix()} doesn't exist -> don't copy anything")
                return
            with open(entries_uri) as f:
                entries = yaml.safe_load(f)
            epochs = entries.pop("epoch")
            updates = list(epochs.keys())
            samples = entries.pop("sample")
            # sanity check: entries are only compatible with the same effective_batch_size
            if samples[updates[0]] // updates[0] != trainer.effective_batch_size:
                self.logger.warning(
                    f"found different effective_batch_size when resuming trainer "
                    f"(current={trainer.effective_batch_size} old={samples[updates[0]]}) "
                    f"-> don't copy entries"
                )
                return
            for update in updates:
                ckpt = Checkpoint(epoch=epochs[update], update=update, sample=samples[update])
                # only copy entries up to the resume point
                if ckpt > trainer.start_checkpoint:
                    break
                entry = {
                    f"epoch": epochs[update],
                    f"update": update,
                    f"sample": samples[update],
                }
                entry.update({
                    key: value[update]
                    for key, value in entries.items()
                    if self._should_include_key(key)
                })
                self.writer.add_previous_entry(entry)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/default_callbacks/lr_callback.py | src/callbacks/default_callbacks/lr_callback.py | from callbacks.base.periodic_callback import PeriodicCallback
from models.base.composite_model_base import CompositeModelBase
from optimizers.interleaved_optimizer import InterleavedOptimizer
from utils.model_utils import get_named_models
class LrCallback(PeriodicCallback):
    """Logs the scheduled learning rate (and weight decay) of every optimizer
    parameter group."""

    def should_log_after_update(self, checkpoint):
        # make sure the very first update is always logged
        if checkpoint.update == 1:
            return True
        return super().should_log_after_update(checkpoint)

    # noinspection PyMethodOverriding
    def _periodic_callback(self, model, **_):
        for sub_name, sub_model in get_named_models(model).items():
            # composite models have no optimizer of their own
            if isinstance(sub_model, CompositeModelBase) or sub_model.optim is None:
                continue
            if isinstance(sub_model.optim, InterleavedOptimizer):
                optim = sub_model.optim.get_optim_for_previous_step()
            else:
                optim = sub_model.optim
            for param_group in optim.torch_optim.param_groups:
                suffix = f"/{param_group['name']}" if "name" in param_group else ""
                # only log values that are actually driven by a schedule
                if optim.schedule is not None:
                    self.writer.add_scalar(f"optim/lr/{sub_name}{suffix}", param_group["lr"])
                if optim.weight_decay_schedule is not None:
                    self.writer.add_scalar(f"optim/wd/{sub_name}{suffix}", param_group["weight_decay"])
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/default_callbacks/progress_callback.py | src/callbacks/default_callbacks/progress_callback.py | from datetime import datetime
from callbacks.base.periodic_callback import PeriodicCallback
from utils.formatting_util import seconds_to_duration_str
class ProgressCallback(PeriodicCallback):
    """Logs training progress, an ETA for the full run and timing statistics
    at every log interval."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._start_time = None
        self._last_log_time = None
        self._last_log_samples = None

    def _before_training(self, **kwargs):
        self._start_time = self._last_log_time = datetime.now()
        self._last_log_samples = 0

    # noinspection PyMethodOverriding
    def _periodic_callback(self, trainer, interval_type, **_):
        # derive the total number of updates from whichever unit defines the end checkpoint
        if trainer.end_checkpoint.epoch is not None:
            total_updates = trainer.end_checkpoint.epoch * self.update_counter.updates_per_epoch
        elif trainer.end_checkpoint.update is not None:
            total_updates = trainer.end_checkpoint.update
        elif trainer.end_checkpoint.sample is not None:
            # BUGFIX: use end_checkpoint.sample; previously cur_checkpoint.sample
            # was used, which made progress always ~100% and the ETA meaningless
            total_updates = trainer.end_checkpoint.sample // self.update_counter.effective_batch_size
        else:
            raise NotImplementedError
        self.logger.info("------------------")
        if interval_type == "epoch":
            self.logger.info(
                f"Epoch {self.update_counter.cur_checkpoint.epoch}/{trainer.end_checkpoint.epoch} "
                f"({self.update_counter.cur_checkpoint})"
            )
        elif interval_type == "update":
            self.logger.info(
                f"Update {self.update_counter.cur_checkpoint.update}/{total_updates} "
                f"({self.update_counter.cur_checkpoint})"
            )
        elif interval_type == "sample":
            self.logger.info(
                f"Sample {self.update_counter.cur_checkpoint.sample}/{trainer.end_checkpoint.sample} "
                f"({self.update_counter.cur_checkpoint})"
            )
        else:
            raise NotImplementedError
        progress = self.update_counter.cur_checkpoint.update / total_updates
        now = datetime.now()
        estimated_duration = (now - self._start_time) / progress
        seconds_since_last_log = (now - self._last_log_time).total_seconds()
        samples_since_last_log = self.update_counter.cur_checkpoint.sample - self._last_log_samples
        # guard against division by zero if no full update happened since the last log
        updates_since_last_log = max(1, samples_since_last_log // self.update_counter.effective_batch_size)
        self.logger.info(
            f"ETA: {(self._start_time + estimated_duration).strftime('%m.%d %H.%M.%S')} "
            f"estimated_duration: {seconds_to_duration_str(estimated_duration.total_seconds())} "
            f"time_since_last_log: {seconds_to_duration_str(seconds_since_last_log)} "
            f"time_per_update: {seconds_to_duration_str(seconds_since_last_log / updates_since_last_log)} "
        )
        self.writer.add_scalar(f"profiling/time_since_last_log/{interval_type}", seconds_since_last_log)
        self._last_log_time = now
        self._last_log_samples = self.update_counter.cur_checkpoint.sample
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/default_callbacks/__init__.py | src/callbacks/default_callbacks/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/default_callbacks/freezer_callback.py | src/callbacks/default_callbacks/freezer_callback.py | from callbacks.base.periodic_callback import PeriodicCallback
from models.base.composite_model_base import CompositeModelBase
from utils.model_utils import get_named_models
class FreezerCallback(PeriodicCallback):
def should_log_after_update(self, checkpoint):
if checkpoint.update == 1:
return True
return super().should_log_after_update(checkpoint)
# noinspection PyMethodOverriding
def _periodic_callback(self, model, **_):
for model_name, model in get_named_models(model).items():
if isinstance(model, CompositeModelBase) or model.freezers is None:
continue
for freezer in model.freezers:
if freezer.schedule is None:
continue
self.writer.add_scalar(f"freezers/{model_name}/{freezer}/is_frozen", not freezer.requires_grad)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/callbacks/default_callbacks/param_count_callback.py | src/callbacks/default_callbacks/param_count_callback.py | import numpy as np
from callbacks.base.callback_base import CallbackBase
from models.base.composite_model_base import CompositeModelBase
from utils.model_utils import get_trainable_param_count, get_frozen_param_count
from utils.naming_util import join_names, snake_type_name
class ParamCountCallback(CallbackBase):
def __init__(self, verbose=False, **kwargs):
super().__init__(**kwargs)
self.verbose = verbose
@staticmethod
def _get_param_counts(model, trace=None):
if isinstance(model, CompositeModelBase):
result = []
immediate_children = []
for name, submodel in model.submodels.items():
subresult = ParamCountCallback._get_param_counts(submodel, trace=join_names(trace, name))
result += subresult
immediate_children.append(subresult[0])
trainable_sum = sum(count for _, count, _ in immediate_children)
frozen_sum = sum(count for _, _, count in immediate_children)
return [(trace, trainable_sum, frozen_sum)] + result
else:
return [
(
join_names(trace, snake_type_name(model)),
get_trainable_param_count(model),
get_frozen_param_count(model),
)
]
def _before_training(self, model, **_):
param_counts = self._get_param_counts(model)
_, total_trainable, total_frozen = param_counts[0]
max_trainable_digits = int(np.log10(total_trainable)) + 1 if total_trainable > 0 else 1
max_frozen_digits = int(np.log10(total_frozen)) + 1 if total_frozen > 0 else 1
# add space for thousand seperators
max_trainable_digits += int(max_trainable_digits / 3)
max_frozen_digits += int(max_frozen_digits / 3)
# generate format strings
tformat = f">{max_trainable_digits},"
fformat = f">{max_frozen_digits},"
self.logger.info(f"parameter counts (trainable | frozen)")
new_summary_entries = {}
for name, tcount, fcount in param_counts:
name = name or "total"
self.logger.info(f"{format(tcount, tformat)} | {format(fcount, fformat)} | {name}")
new_summary_entries[f"param_count/{name}/trainable"] = tcount
new_summary_entries[f"param_count/{name}/frozen"] = fcount
self.summary_provider.update(new_summary_entries)
# detailed number of params
if self.verbose:
self.logger.info("detailed parameters")
for name, param in model.named_parameters():
self.logger.info(f"{np.prod(param.shape):>10,} {'train' if param.requires_grad else 'frozen'} {name}")
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/providers/dataset_config_provider.py | src/providers/dataset_config_provider.py | import platform
from pathlib import Path
class DatasetConfigProvider:
def __init__(
self,
global_dataset_paths,
local_dataset_path=None,
data_source_modes=None,
):
self.global_dataset_paths = global_dataset_paths
self.local_dataset_path = local_dataset_path
self.data_source_modes = data_source_modes
def get_global_dataset_path(self, dataset_identifier):
path = self.global_dataset_paths[dataset_identifier]
path = Path(path).expanduser()
# enforce path exists (e.g. mnist/cifar are downloaded automatically)
assert path.exists(), f"path to '{dataset_identifier}' doesn't exist ({path})"
return path
def get_local_dataset_path(self):
if self.local_dataset_path is None:
return None
path = Path(self.local_dataset_path).expanduser()
assert path.exists(), f"local_dataset_path '{path}' doesn't exist"
return path
def get_data_source_mode(self, dataset_identifier):
if self.get_local_dataset_path() is None:
return "global"
if self.data_source_modes is None or dataset_identifier not in self.data_source_modes:
return None
data_source_mode = self.data_source_modes[dataset_identifier]
assert data_source_mode in ["global", "local"], \
f'data_source_mode {data_source_mode} not in ["global", "local"]'
return data_source_mode
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/providers/__init__.py | src/providers/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/providers/path_provider.py | src/providers/path_provider.py | from pathlib import Path
class PathProvider:
def __init__(
self,
output_path: Path,
model_path: Path,
stage_name: str,
stage_id: str,
temp_path: Path = None,
):
self.output_path = output_path
self.model_path = model_path
self.stage_name = stage_name
self.stage_id = stage_id
self._temp_path = temp_path
@staticmethod
def _mkdir(path: Path) -> Path:
path.mkdir(exist_ok=True, parents=True)
return path
def get_stage_output_path(self, stage_name: str, stage_id: str, mkdir=True) -> Path:
stage_output_path = self.output_path / stage_name / stage_id
return self._mkdir(stage_output_path) if mkdir else stage_output_path
@property
def stage_output_path(self) -> Path:
return self.get_stage_output_path(stage_name=self.stage_name, stage_id=self.stage_id)
@property
def stage_output_path_exists(self) -> bool:
return self.get_stage_output_path(stage_name=self.stage_name, stage_id=self.stage_id, mkdir=False).exists()
@property
def logfile_uri(self) -> Path:
return self.stage_output_path / "log.txt"
def get_primitive_output_path(self, stage_name: str, stage_id: str) -> Path:
stage_output_path = self.get_stage_output_path(stage_name=stage_name, stage_id=stage_id)
return self._mkdir(stage_output_path / "primitive")
@property
def primitive_output_path(self) -> Path:
return self.get_primitive_output_path(stage_name=self.stage_name, stage_id=self.stage_id)
def get_primitive_config_uri(self, stage_name: str, stage_id: str) -> Path:
return self.get_primitive_output_path(stage_name=stage_name, stage_id=stage_id) / "config.yaml"
@property
def primitive_config_uri(self) -> Path:
return self.get_primitive_config_uri(stage_name=self.stage_name, stage_id=self.stage_id)
def get_primitive_entries_uri(self, stage_name: str, stage_id: str) -> Path:
return self.get_primitive_output_path(stage_name=stage_name, stage_id=stage_id) / "entries.yaml"
@property
def primitive_entries_uri(self) -> Path:
return self.get_primitive_entries_uri(stage_name=self.stage_name, stage_id=self.stage_id)
def get_primitive_summary_uri(self, stage_name: str, stage_id: str) -> Path:
return self.get_primitive_output_path(stage_name=stage_name, stage_id=stage_id) / "summary.yaml"
@property
def primitive_summary_uri(self) -> Path:
return self.get_primitive_summary_uri(stage_name=self.stage_name, stage_id=self.stage_id)
def get_stage_checkpoint_path(self, stage_name: str, stage_id: str):
return self._mkdir(self.get_stage_output_path(stage_name=stage_name, stage_id=stage_id) / "checkpoints")
@property
def checkpoint_path(self):
return self.get_stage_checkpoint_path(stage_name=self.stage_name, stage_id=self.stage_id)
def get_temp_path(self):
self._temp_path.mkdir(exist_ok=True)
return self._temp_path
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/providers/config_providers/wandb_config_provider.py | src/providers/config_providers/wandb_config_provider.py | import wandb
from .base.config_provider_base import ConfigProviderBase
from .primitive_config_provider import PrimitiveConfigProvider
from ..path_provider import PathProvider
class WandbConfigProvider(ConfigProviderBase):
def __init__(self, path_provider: PathProvider):
super().__init__()
self.primitive_config_provider = PrimitiveConfigProvider(path_provider=path_provider)
def update(self, *args, **kwargs):
wandb.config.update(*args, **kwargs)
self.primitive_config_provider.update(*args, **kwargs)
def __setitem__(self, key, value):
wandb.config[key] = value
self.primitive_config_provider[key] = value
def __contains__(self, key):
return key in self.primitive_config_provider
def get_config_of_previous_stage(self, stage_name, stage_id):
return self.primitive_config_provider.get_config_of_previous_stage(stage_name=stage_name, stage_id=stage_id)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/providers/config_providers/primitive_config_provider.py | src/providers/config_providers/primitive_config_provider.py | import yaml
from .base.config_provider_base import ConfigProviderBase
from ..path_provider import PathProvider
class PrimitiveConfigProvider(ConfigProviderBase):
def __init__(self, path_provider: PathProvider):
super().__init__()
self.path_provider = path_provider
self.config = {}
def update(self, *args, **kwargs):
self.config.update(*args, **kwargs)
self._save_config_as_yaml()
def __setitem__(self, key, value):
self.config[key] = value
self._save_config_as_yaml()
def __contains__(self, key):
return key in self.config
def get_config_of_previous_stage(self, stage_name, stage_id):
config_uri = self.path_provider.get_primitive_config_uri(stage_name=stage_name, stage_id=stage_id)
if not config_uri.exists():
return None
with open(config_uri) as f:
return yaml.safe_load(f)
def _save_config_as_yaml(self):
with open(self.path_provider.primitive_config_uri, "w") as f:
yaml.safe_dump(dict(self.config.items()), f)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/providers/config_providers/__init__.py | src/providers/config_providers/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/providers/config_providers/noop_config_provider.py | src/providers/config_providers/noop_config_provider.py | from .base.config_provider_base import ConfigProviderBase
class NoopConfigProvider(ConfigProviderBase):
def update(self, *args, **kwargs):
pass
def __setitem__(self, key, value):
pass
def __contains__(self, key):
return False
def get_config_of_previous_stage(self, stage_name, stage_id):
return {}
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/providers/config_providers/base/config_provider_base.py | src/providers/config_providers/base/config_provider_base.py | class ConfigProviderBase:
def update(self, *args, **kwargs):
raise NotImplementedError
def __setitem__(self, key, value):
raise NotImplementedError
def __contains__(self, key):
raise NotImplementedError
def get_config_of_previous_stage(self, stage_name, stage_id):
raise NotImplementedError
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/providers/config_providers/base/__init__.py | src/providers/config_providers/base/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/providers/summary_providers/wandb_summary_provider.py | src/providers/summary_providers/wandb_summary_provider.py | import wandb
from .base.summary_provider_base import SummaryProviderBase
from .primitive_summary_provider import PrimitiveSummaryProvider
from ..path_provider import PathProvider
class WandbSummaryProvider(SummaryProviderBase):
def __init__(self, path_provider: PathProvider):
super().__init__()
self.primitive_summary_provider = PrimitiveSummaryProvider(path_provider=path_provider)
def update(self, *args, **kwargs):
wandb.run.summary.update(*args, **kwargs)
self.primitive_summary_provider.update(*args, **kwargs)
def __setitem__(self, key, value):
wandb.run.summary[key] = value
self.primitive_summary_provider[key] = value
def __getitem__(self, key):
return self.primitive_summary_provider[key]
def __contains__(self, key):
return key in self.primitive_summary_provider
def keys(self):
return self.primitive_summary_provider.keys()
def get_summary_of_previous_stage(self, stage_name, stage_id):
return self.primitive_summary_provider.get_summary_of_previous_stage(stage_name=stage_name, stage_id=stage_id)
def flush(self):
self.primitive_summary_provider.flush()
def summarize_logvalues(self):
minmax_dict = self.primitive_summary_provider.summarize_logvalues()
self.logger.info(f"pushing summarized logvalues to wandb")
if minmax_dict is not None:
wandb.run.summary.update(minmax_dict)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.