id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
155,372 | from dataclasses import dataclass
from functools import partial, wraps
from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union, cast
import torch
from lightning_utilities.core.apply_func import apply_to_collection
from torch import Tensor
from torchmetrics import Metric
from typing_extensions import TypedDict, override
from lightning.fabric.utilities import move_data_to_device
from lightning.fabric.utilities.apply_func import convert_tensors_to_scalars
from lightning.fabric.utilities.distributed import _distributed_is_initialized
from lightning.fabric.utilities.imports import _TORCH_EQUAL_2_0, _TORCH_GREATER_EQUAL_2_0
from lightning.pytorch.utilities.data import extract_batch_size
from lightning.pytorch.utilities.exceptions import MisconfigurationException
from lightning.pytorch.utilities.imports import _TORCHMETRICS_GREATER_EQUAL_1_0_0
from lightning.pytorch.utilities.memory import recursive_detach
from lightning.pytorch.utilities.rank_zero import WarningCache, rank_zero_warn
from lightning.pytorch.utilities.warnings import PossibleUserWarning
The provided code snippet includes necessary dependencies for implementing the `_get_default_dtype` function. Write a Python function `def _get_default_dtype() -> torch.dtype` to solve the following problem:
The default dtype for new tensors, but no lower than float32.
Here is the function:
def _get_default_dtype() -> torch.dtype:
"""The default dtype for new tensors, but no lower than float32."""
dtype = torch.get_default_dtype()
return dtype if dtype in (torch.float32, torch.float64) else torch.float32 | The default dtype for new tensors, but no lower than float32. |
155,373 | import lightning.pytorch as pl
from lightning.fabric.utilities.warnings import PossibleUserWarning
from lightning.pytorch.trainer.states import TrainerFn
from lightning.pytorch.utilities.exceptions import MisconfigurationException
from lightning.pytorch.utilities.model_helpers import is_overridden
from lightning.pytorch.utilities.rank_zero import rank_zero_deprecation, rank_zero_warn
from lightning.pytorch.utilities.signature_utils import is_param_in_hook_signature
def __verify_train_val_loop_configuration(trainer: "pl.Trainer", model: "pl.LightningModule") -> None:
    """Validate the model before fitting: required hooks exist, val setup is complete, legacy hooks are gone."""
    # minimum training requirements: both `training_step` and `configure_optimizers` must be overridden
    if not is_overridden("training_step", model):
        raise MisconfigurationException(
            "No `training_step()` method defined. Lightning `Trainer` expects as minimum a"
            " `training_step()`, `train_dataloader()` and `configure_optimizers()` to be defined."
        )
    if not is_overridden("configure_optimizers", model):
        raise MisconfigurationException(
            "No `configure_optimizers()` method defined. Lightning `Trainer` expects as minimum a"
            " `training_step()`, `train_dataloader()` and `configure_optimizers()` to be defined."
        )

    # validation runs only when both a dataloader and a `validation_step` exist; warn on half-configured setups
    has_val_loader = trainer.fit_loop.epoch_loop.val_loop._data_source.is_defined()
    has_val_step = is_overridden("validation_step", model)
    if has_val_loader and not has_val_step:
        rank_zero_warn("You passed in a `val_dataloader` but have no `validation_step`. Skipping val loop.")
    if has_val_step and not has_val_loader:
        rank_zero_warn(
            "You defined a `validation_step` but have no `val_dataloader`. Skipping val loop.",
            category=PossibleUserWarning,
        )

    # hooks removed in v2.0 must not be implemented anymore; both checks share the same message shape
    for legacy_hook, replacement in (
        ("training_epoch_end", "on_train_epoch_end"),
        ("validation_epoch_end", "on_validation_epoch_end"),
    ):
        if callable(getattr(model, legacy_hook, None)):
            raise NotImplementedError(
                f"Support for `{legacy_hook}` has been removed in v2.0.0. `{type(model).__name__}` implements this"
                f" method. You can use the `{replacement}` hook instead. To access outputs, save them in-memory as"
                " instance attributes."
                " You can find migration examples in https://github.com/Lightning-AI/lightning/pull/16520."
            )
def __verify_eval_loop_configuration(model: "pl.LightningModule", stage: str) -> None:
    """Check the model defines what is needed to run the given evaluation stage ('val', 'test' or 'predict')."""
    step_name = "validation_step" if stage == "val" else f"{stage}_step"
    has_step = is_overridden(step_name, model)

    if stage == "predict":
        # predict_step is not required to be overridden, but must not be None, and `forward` is the fallback
        if model.predict_step is None:
            raise MisconfigurationException("`predict_step` cannot be None to run `Trainer.predict`")
        if not (has_step or is_overridden("forward", model)):
            raise MisconfigurationException("`Trainer.predict` requires `forward` method to run.")
        return

    # verify minimum evaluation requirements for val/test
    if not has_step:
        trainer_method = "validate" if stage == "val" else stage
        raise MisconfigurationException(f"No `{step_name}()` method defined to run `Trainer.{trainer_method}`.")

    # hooks removed in v2.0 must not be present anymore
    epoch_end_name = "validation_epoch_end" if stage == "val" else "test_epoch_end"
    if callable(getattr(model, epoch_end_name, None)):
        raise NotImplementedError(
            f"Support for `{epoch_end_name}` has been removed in v2.0.0. `{type(model).__name__}` implements this"
            f" method. You can use the `on_{epoch_end_name}` hook instead. To access outputs, save them in-memory"
            " as instance attributes."
            " You can find migration examples in https://github.com/Lightning-AI/lightning/pull/16520."
        )
def __verify_manual_optimization_support(trainer: "pl.Trainer", model: "pl.LightningModule") -> None:
if model.automatic_optimization:
return
if trainer.gradient_clip_val is not None and trainer.gradient_clip_val > 0:
raise MisconfigurationException(
"Automatic gradient clipping is not supported for manual optimization."
f" Remove `Trainer(gradient_clip_val={trainer.gradient_clip_val})`"
" or switch to automatic optimization."
)
if trainer.accumulate_grad_batches != 1:
raise MisconfigurationException(
"Automatic gradient accumulation is not supported for manual optimization."
f" Remove `Trainer(accumulate_grad_batches={trainer.accumulate_grad_batches})`"
" or switch to automatic optimization."
)
def __warn_dataloader_iter_limitations(model: "pl.LightningModule") -> None:
    """Warn once if any step hook explicitly requests the experimental `dataloader_iter` argument."""
    step_hooks = (model.training_step, model.validation_step, model.predict_step, model.test_step)
    uses_dataloader_iter = any(
        step_fn is not None and is_param_in_hook_signature(step_fn, "dataloader_iter", explicit=True)
        for step_fn in step_hooks
    )
    if uses_dataloader_iter:
        rank_zero_warn(
            "You are using the `dataloader_iter` step flavor. If you consume the iterator more than once per step, the"
            " `batch_idx` argument in any hook that takes it will not match with the batch index of the last batch"
            " consumed. This might have unforeseen effects on callbacks or code that expects to get the correct index."
            " This will also not work well with gradient accumulation. This feature is very experimental and subject to"
            " change. Here be dragons.",
            category=PossibleUserWarning,
        )
def __verify_configure_model_configuration(model: "pl.LightningModule") -> None:
    """Handle the deprecated `configure_sharded_model` hook: error if both hooks exist, otherwise deprecate."""
    if not is_overridden("configure_sharded_model", model):
        return
    name = type(model).__name__
    if is_overridden("configure_model", model):
        # defining both the new and the old hook is ambiguous — refuse to continue
        raise RuntimeError(
            f"Both `{name}.configure_model`, and `{name}.configure_sharded_model` are overridden. The latter is"
            f" deprecated and it should be replaced with the former."
        )
    rank_zero_deprecation(
        f"You have overridden `{name}.configure_sharded_model` which is deprecated. Please override the"
        " `configure_model` hook instead. Instantiation with the newer hook will be created on the device right"
        " away and have the right data type depending on the precision setting in the Trainer."
    )
class TrainerFn(LightningEnum):
    """Enum for the user-facing functions of the :class:`~lightning.pytorch.trainer.trainer.Trainer` such as
    :meth:`~lightning.pytorch.trainer.trainer.Trainer.fit` and
    :meth:`~lightning.pytorch.trainer.trainer.Trainer.test`."""

    # values match the names of the corresponding `Trainer` entry-point methods
    FITTING = "fit"
    VALIDATING = "validate"
    TESTING = "test"
    PREDICTING = "predict"
The provided code snippet includes necessary dependencies for implementing the `_verify_loop_configurations` function. Write a Python function `def _verify_loop_configurations(trainer: "pl.Trainer") -> None` to solve the following problem:
r"""Checks that the model is configured correctly before the run is started. Args: trainer: Lightning Trainer. Its `lightning_module` (the model) to check the configuration.
Here is the function:
def _verify_loop_configurations(trainer: "pl.Trainer") -> None:
    r"""Checks that the model is configured correctly before the run is started.

    Args:
        trainer: Lightning Trainer. Its `lightning_module` (the model) to check the configuration.

    """
    model = trainer.lightning_module
    fn = trainer.state.fn
    if fn is None:
        raise ValueError("Unexpected: Trainer state fn must be set before validating loop configuration.")
    if fn == TrainerFn.FITTING:
        __verify_train_val_loop_configuration(trainer, model)
        __verify_manual_optimization_support(trainer, model)
    else:
        # each evaluation entry point maps to the stage name its checks run under
        eval_stages = {TrainerFn.VALIDATING: "val", TrainerFn.TESTING: "test", TrainerFn.PREDICTING: "predict"}
        stage = eval_stages.get(fn)
        if stage is not None:
            __verify_eval_loop_configuration(model, stage)
    # checks that apply regardless of the entry point
    __verify_configure_model_configuration(model)
    __warn_dataloader_iter_limitations(model)
155,374 | import logging
from copy import deepcopy
from typing import Any, Callable, Dict, Optional, Type, Union
from packaging.version import Version
import lightning.pytorch as pl
from lightning.fabric.utilities.device_dtype_mixin import _DeviceDtypeModuleMixin
from lightning.pytorch.callbacks import Checkpoint, EarlyStopping
from lightning.pytorch.trainer.states import TrainerStatus
from lightning.pytorch.utilities.exceptions import _TunerExitException
from lightning.pytorch.utilities.model_helpers import is_overridden
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
def _call_teardown_hook(trainer: "pl.Trainer") -> None:
    """Run the `teardown` hooks (datamodule, callbacks, module), then finalize loggers and the profiler."""
    fn = trainer.state.fn
    assert fn is not None

    if trainer.datamodule is not None:
        _call_lightning_datamodule_hook(trainer, "teardown", stage=fn)
    _call_callback_hooks(trainer, "teardown", stage=fn)
    _call_lightning_module_hook(trainer, "teardown", stage=fn)

    trainer.lightning_module._current_fx_name = None
    # these could have become stale if metrics are defined in `setup`
    trainer.lightning_module._metric_attributes = None

    # todo: TPU 8 cores hangs in flush with TensorBoard. Might do for all loggers.
    # It might be related to xla tensors blocked when moving the cpu kill loggers.
    for logger in trainer.loggers:
        logger.finalize("success")

    # summarize profile results
    trainer.profiler.describe()
def _call_callback_hooks(
    trainer: "pl.Trainer",
    hook_name: str,
    *args: Any,
    monitoring_callbacks: Optional[bool] = None,
    **kwargs: Any,
) -> None:
    """Invoke `hook_name` on the trainer's callbacks (optionally filtered to/away from monitoring callbacks)."""
    log.debug(f"{trainer.__class__.__name__}: calling callback hook: {hook_name}")

    pl_module = trainer.lightning_module
    prev_fx_name = pl_module._current_fx_name if pl_module else None
    if pl_module:
        pl_module._current_fx_name = hook_name

    callbacks = trainer.callbacks
    if monitoring_callbacks is not None:
        # the list of "monitoring callbacks" is hard-coded to these two. we could add an API to define this.
        # True keeps only EarlyStopping/Checkpoint, False excludes exactly those.
        callbacks = [cb for cb in callbacks if isinstance(cb, (EarlyStopping, Checkpoint)) is monitoring_callbacks]

    for callback in callbacks:
        fn = getattr(callback, hook_name)
        if callable(fn):
            with trainer.profiler.profile(f"[Callback]{callback.state_key}.{hook_name}"):
                fn(trainer, trainer.lightning_module, *args, **kwargs)

    if pl_module:
        # restore current_fx when nested context
        pl_module._current_fx_name = prev_fx_name
class TrainerStatus(LightningEnum):
    """Enum for the status of the :class:`~lightning.pytorch.trainer.trainer.Trainer`"""

    INITIALIZING = "initializing"  # trainer creation
    RUNNING = "running"
    FINISHED = "finished"
    INTERRUPTED = "interrupted"

    # NOTE(review): upstream defines this as a `@property`; the decorator appears lost in extraction — confirm
    def stopped(self) -> bool:
        # a trainer is "stopped" once it has reached either terminal state
        return self in (self.FINISHED, self.INTERRUPTED)
class _TunerExitException(Exception):
"""Exception used to exit early while tuning."""
The provided code snippet includes necessary dependencies for implementing the `_call_and_handle_interrupt` function. Write a Python function `def _call_and_handle_interrupt(trainer: "pl.Trainer", trainer_fn: Callable, *args: Any, **kwargs: Any) -> Any` to solve the following problem:
r"""Error handling, intended to be used only for main trainer function entry points (fit, validate, test, predict) as all errors should funnel through them. Args: trainer_fn: one of (fit, validate, test, predict) *args: positional arguments to be passed to the `trainer_fn` **kwargs: keyword arguments to be passed to `trainer_fn`
Here is the function:
def _call_and_handle_interrupt(trainer: "pl.Trainer", trainer_fn: Callable, *args: Any, **kwargs: Any) -> Any:
    r"""Error handling, intended to be used only for main trainer function entry points (fit, validate, test, predict)
    as all errors should funnel through them.

    Args:
        trainer_fn: one of (fit, validate, test, predict)
        *args: positional arguments to be passed to the `trainer_fn`
        **kwargs: keyword arguments to be passed to `trainer_fn`
    """
    try:
        # strategies with a launcher (e.g. spawn-based) run the function through it instead of directly
        if trainer.strategy.launcher is not None:
            return trainer.strategy.launcher.launch(trainer_fn, *args, trainer=trainer, **kwargs)
        return trainer_fn(*args, **kwargs)
    except _TunerExitException:
        # the tuner aborted on purpose: tear down cleanly and mark the run as finished (not re-raised)
        _call_teardown_hook(trainer)
        trainer._teardown()
        trainer.state.status = TrainerStatus.FINISHED
        trainer.state.stage = None
    # TODO: Unify both exceptions below, where `KeyboardError` doesn't re-raise
    except KeyboardInterrupt as exception:
        rank_zero_warn("Detected KeyboardInterrupt, attempting graceful shutdown...")
        # user could press Ctrl+c many times... only shutdown once
        if not trainer.interrupted:
            trainer.state.status = TrainerStatus.INTERRUPTED
            _call_callback_hooks(trainer, "on_exception", exception)
            trainer.strategy.on_exception(exception)
            for logger in trainer.loggers:
                logger.finalize("failed")
    except BaseException as exception:
        # any other failure: notify callbacks/strategy/loggers, tear down, then re-raise to the caller
        trainer.state.status = TrainerStatus.INTERRUPTED
        _call_callback_hooks(trainer, "on_exception", exception)
        trainer.strategy.on_exception(exception)
        for logger in trainer.loggers:
            logger.finalize("failed")
        trainer._teardown()
        # teardown might access the stage so we reset it after
        trainer.state.stage = None
        raise
155,375 | import logging
from copy import deepcopy
from typing import Any, Callable, Dict, Optional, Type, Union
from packaging.version import Version
import lightning.pytorch as pl
from lightning.fabric.utilities.device_dtype_mixin import _DeviceDtypeModuleMixin
from lightning.pytorch.callbacks import Checkpoint, EarlyStopping
from lightning.pytorch.trainer.states import TrainerStatus
from lightning.pytorch.utilities.exceptions import _TunerExitException
from lightning.pytorch.utilities.model_helpers import is_overridden
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
def _call_lightning_module_hook(
trainer: "pl.Trainer",
hook_name: str,
*args: Any,
pl_module: Optional["pl.LightningModule"] = None,
**kwargs: Any,
) -> Any:
def _call_lightning_datamodule_hook(
trainer: "pl.Trainer",
hook_name: str,
*args: Any,
**kwargs: Any,
) -> Any:
def _call_callback_hooks(
trainer: "pl.Trainer",
hook_name: str,
*args: Any,
monitoring_callbacks: Optional[bool] = None,
**kwargs: Any,
) -> None:
class _DeviceDtypeModuleMixin(Module):
def __init__(self) -> None:
def dtype(self) -> Union[str, torch.dtype]:
def dtype(self, new_dtype: Union[str, torch.dtype]) -> None:
def device(self) -> torch.device:
def to(self, *args: Any, **kwargs: Any) -> Self:
def cuda(self, device: Optional[Union[torch.device, int]] = None) -> Self:
def cpu(self) -> Self:
def type(self, dst_type: Union[str, torch.dtype]) -> Self:
def float(self) -> Self:
def double(self) -> Self:
def half(self) -> Self:
def _call_setup_hook(trainer: "pl.Trainer") -> None:
    """Run the `setup` hooks on datamodule, callbacks and module, synchronized with strategy barriers."""
    fn = trainer.state.fn
    assert fn is not None

    # It is too early to move the model to the device, but we fake the `LightningModule.device` property
    # so the user can access it in the `LightningModule.setup` hook
    for module in trainer.lightning_module.modules():
        if isinstance(module, _DeviceDtypeModuleMixin):
            module._device = trainer.strategy.root_device

    # Trigger lazy creation of experiment in loggers so loggers have their metadata available
    for logger in trainer.loggers:
        getattr(logger, "experiment", None)

    trainer.strategy.barrier("pre_setup")

    if trainer.datamodule is not None:
        _call_lightning_datamodule_hook(trainer, "setup", stage=fn)
    _call_callback_hooks(trainer, "setup", stage=fn)
    _call_lightning_module_hook(trainer, "setup", stage=fn)

    trainer.strategy.barrier("post_setup")
155,376 | import logging
from copy import deepcopy
from typing import Any, Callable, Dict, Optional, Type, Union
from packaging.version import Version
import lightning.pytorch as pl
from lightning.fabric.utilities.device_dtype_mixin import _DeviceDtypeModuleMixin
from lightning.pytorch.callbacks import Checkpoint, EarlyStopping
from lightning.pytorch.trainer.states import TrainerStatus
from lightning.pytorch.utilities.exceptions import _TunerExitException
from lightning.pytorch.utilities.model_helpers import is_overridden
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
def _call_lightning_module_hook(
trainer: "pl.Trainer",
hook_name: str,
*args: Any,
pl_module: Optional["pl.LightningModule"] = None,
**kwargs: Any,
) -> Any:
def is_overridden(method_name: str, instance: Optional[object] = None, parent: Optional[Type[object]] = None) -> bool:
def _call_configure_model(trainer: "pl.Trainer") -> None:
    """Call the model-configuration hooks (legacy `configure_sharded_model`, then `configure_model`)."""
    # legacy hook
    if is_overridden("configure_sharded_model", trainer.lightning_module):
        with trainer.strategy.model_sharded_context():
            _call_lightning_module_hook(trainer, "configure_sharded_model")

    # we don't normally check for this before calling the hook. it is done here to avoid instantiating the context
    # managers
    if is_overridden("configure_model", trainer.lightning_module):
        strategy, precision = trainer.strategy, trainer.precision_plugin
        with strategy.tensor_init_context(), strategy.model_sharded_context(), precision.module_init_context():
            _call_lightning_module_hook(trainer, "configure_model")
155,377 | import logging
from copy import deepcopy
from typing import Any, Callable, Dict, Optional, Type, Union
from packaging.version import Version
import lightning.pytorch as pl
from lightning.fabric.utilities.device_dtype_mixin import _DeviceDtypeModuleMixin
from lightning.pytorch.callbacks import Checkpoint, EarlyStopping
from lightning.pytorch.trainer.states import TrainerStatus
from lightning.pytorch.utilities.exceptions import _TunerExitException
from lightning.pytorch.utilities.model_helpers import is_overridden
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
The provided code snippet includes necessary dependencies for implementing the `_call_callbacks_state_dict` function. Write a Python function `def _call_callbacks_state_dict(trainer: "pl.Trainer") -> Dict[str, dict]` to solve the following problem:
Called when saving a model checkpoint, calls and returns every callback's `state_dict`, keyed by `Callback.state_key`.
Here is the function:
def _call_callbacks_state_dict(trainer: "pl.Trainer") -> Dict[str, dict]:
"""Called when saving a model checkpoint, calls and returns every callback's `state_dict`, keyed by
`Callback.state_key`."""
callback_state_dicts = {}
for callback in trainer.callbacks:
state_dict = callback.state_dict()
if state_dict:
callback_state_dicts[callback.state_key] = state_dict
return callback_state_dicts | Called when saving a model checkpoint, calls and returns every callback's `state_dict`, keyed by `Callback.state_key`. |
155,378 | import logging
from copy import deepcopy
from typing import Any, Callable, Dict, Optional, Type, Union
from packaging.version import Version
import lightning.pytorch as pl
from lightning.fabric.utilities.device_dtype_mixin import _DeviceDtypeModuleMixin
from lightning.pytorch.callbacks import Checkpoint, EarlyStopping
from lightning.pytorch.trainer.states import TrainerStatus
from lightning.pytorch.utilities.exceptions import _TunerExitException
from lightning.pytorch.utilities.model_helpers import is_overridden
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
The provided code snippet includes necessary dependencies for implementing the `_call_callbacks_on_save_checkpoint` function. Write a Python function `def _call_callbacks_on_save_checkpoint(trainer: "pl.Trainer", checkpoint: Dict[str, Any]) -> None` to solve the following problem:
Called when saving a model checkpoint, calls every callback's `on_save_checkpoint` hook.
Here is the function:
def _call_callbacks_on_save_checkpoint(trainer: "pl.Trainer", checkpoint: Dict[str, Any]) -> None:
"""Called when saving a model checkpoint, calls every callback's `on_save_checkpoint` hook."""
pl_module = trainer.lightning_module
if pl_module:
prev_fx_name = pl_module._current_fx_name
pl_module._current_fx_name = "on_save_checkpoint"
for callback in trainer.callbacks:
with trainer.profiler.profile(f"[Callback]{callback.state_key}.on_save_checkpoint"):
callback.on_save_checkpoint(trainer, trainer.lightning_module, checkpoint)
if pl_module:
# restore current_fx when nested context
pl_module._current_fx_name = prev_fx_name | Called when saving a model checkpoint, calls every callback's `on_save_checkpoint` hook. |
155,379 | import logging
from copy import deepcopy
from typing import Any, Callable, Dict, Optional, Type, Union
from packaging.version import Version
import lightning.pytorch as pl
from lightning.fabric.utilities.device_dtype_mixin import _DeviceDtypeModuleMixin
from lightning.pytorch.callbacks import Checkpoint, EarlyStopping
from lightning.pytorch.trainer.states import TrainerStatus
from lightning.pytorch.utilities.exceptions import _TunerExitException
from lightning.pytorch.utilities.model_helpers import is_overridden
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
The provided code snippet includes necessary dependencies for implementing the `_call_callbacks_on_load_checkpoint` function. Write a Python function `def _call_callbacks_on_load_checkpoint(trainer: "pl.Trainer", checkpoint: Dict[str, Any]) -> None` to solve the following problem:
Called when loading a model checkpoint. Calls every callback's `on_load_checkpoint` hook. We have a dedicated function for this rather than using `_call_callback_hooks` because we have special logic for getting callback_states.
Here is the function:
def _call_callbacks_on_load_checkpoint(trainer: "pl.Trainer", checkpoint: Dict[str, Any]) -> None:
    """Called when loading a model checkpoint.

    Calls every callback's `on_load_checkpoint` hook. We have a dedicated function for this rather than using
    `_call_callback_hooks` because we have special logic for getting callback_states.
    """
    pl_module = trainer.lightning_module
    if pl_module:
        # remember which hook was marked as running so it can be restored afterwards
        prev_fx_name = pl_module._current_fx_name
        pl_module._current_fx_name = "on_load_checkpoint"
    callback_states: Optional[Dict[Union[Type, str], Dict]] = checkpoint.get("callbacks")
    if callback_states is None:
        # checkpoint was saved without callback state: nothing to restore
        # NOTE(review): this early return skips restoring `_current_fx_name` set above — confirm intended upstream
        return
    # checkpoints written before v1.5 keyed callback state by class rather than by `state_key`
    is_legacy_ckpt = Version(checkpoint["pytorch-lightning_version"]) < Version("1.5.0dev")
    current_callbacks_keys = {cb._legacy_state_key if is_legacy_ckpt else cb.state_key for cb in trainer.callbacks}
    difference = callback_states.keys() - current_callbacks_keys
    if difference:
        # saved state exists for callbacks not configured on this Trainer; that state will not be restored
        rank_zero_warn(
            "Be aware that when using `ckpt_path`,"
            " callbacks used to create the checkpoint need to be provided during `Trainer` instantiation."
            f" Please add the following callbacks: {list(difference)}.",
        )
    for callback in trainer.callbacks:
        with trainer.profiler.profile(f"[Callback]{callback.state_key}.on_load_checkpoint"):
            callback.on_load_checkpoint(trainer, trainer.lightning_module, checkpoint)
    if pl_module:
        # restore current_fx when nested context
        pl_module._current_fx_name = prev_fx_name
155,380 | import logging
from copy import deepcopy
from typing import Any, Callable, Dict, Optional, Type, Union
from packaging.version import Version
import lightning.pytorch as pl
from lightning.fabric.utilities.device_dtype_mixin import _DeviceDtypeModuleMixin
from lightning.pytorch.callbacks import Checkpoint, EarlyStopping
from lightning.pytorch.trainer.states import TrainerStatus
from lightning.pytorch.utilities.exceptions import _TunerExitException
from lightning.pytorch.utilities.model_helpers import is_overridden
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
The provided code snippet includes necessary dependencies for implementing the `_call_callbacks_load_state_dict` function. Write a Python function `def _call_callbacks_load_state_dict(trainer: "pl.Trainer", checkpoint: Dict[str, Any]) -> None` to solve the following problem:
Called when loading a model checkpoint, calls every callback's `load_state_dict`.
Here is the function:
def _call_callbacks_load_state_dict(trainer: "pl.Trainer", checkpoint: Dict[str, Any]) -> None:
"""Called when loading a model checkpoint, calls every callback's `load_state_dict`."""
callback_states: Optional[Dict[Union[Type, str], Dict]] = checkpoint.get("callbacks")
if callback_states is None:
return
for callback in trainer.callbacks:
state = callback_states.get(callback.state_key, callback_states.get(callback._legacy_state_key))
if state:
state = deepcopy(state)
callback.load_state_dict(state) | Called when loading a model checkpoint, calls every callback's `load_state_dict`. |
155,381 | import logging
from copy import deepcopy
from typing import Any, Callable, Dict, Optional, Type, Union
from packaging.version import Version
import lightning.pytorch as pl
from lightning.fabric.utilities.device_dtype_mixin import _DeviceDtypeModuleMixin
from lightning.pytorch.callbacks import Checkpoint, EarlyStopping
from lightning.pytorch.trainer.states import TrainerStatus
from lightning.pytorch.utilities.exceptions import _TunerExitException
from lightning.pytorch.utilities.model_helpers import is_overridden
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
log = logging.getLogger(__name__)
def _call_strategy_hook(
    trainer: "pl.Trainer",
    hook_name: str,
    *args: Any,
    **kwargs: Any,
) -> Any:
    """Call `hook_name` on the trainer's strategy under the profiler, tracking the module's current fx name."""
    log.debug(f"{trainer.__class__.__name__}: calling strategy hook: {hook_name}")

    pl_module = trainer.lightning_module
    prev_fx_name = pl_module._current_fx_name
    pl_module._current_fx_name = hook_name

    fn = getattr(trainer.strategy, hook_name)
    if not callable(fn):
        return None

    strategy_name = trainer.strategy.__class__.__name__
    with trainer.profiler.profile(f"[Strategy]{strategy_name}.{hook_name}"):
        output = fn(*args, **kwargs)

    # restore current_fx when nested context
    pl_module._current_fx_name = prev_fx_name
    return output
155,382 | import time
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Union
import torch
from typing_extensions import override
from lightning.fabric.plugins import Precision as FabricPrecision
from lightning.fabric.utilities.throughput import Throughput, get_available_flops
from lightning.fabric.utilities.throughput import _plugin_to_compute_dtype as fabric_plugin_to_compute_dtype
from lightning.pytorch.callbacks import Callback
from lightning.pytorch.plugins import (
BitsandbytesPrecision,
DeepSpeedPrecision,
DoublePrecision,
FSDPPrecision,
HalfPrecision,
MixedPrecision,
Precision,
TransformerEnginePrecision,
XLAPrecision,
)
from lightning.pytorch.trainer.states import RunningStage, TrainerFn
from lightning.pytorch.utilities.rank_zero import rank_zero_only, rank_zero_warn
def _plugin_to_compute_dtype(plugin: Union[FabricPrecision, Precision]) -> torch.dtype:
    """Return the dtype a precision plugin performs its computation in.

    The `isinstance` checks are ordered so that more specific plugin classes are
    matched before the catch-all `Precision` case at the end.
    """
    # TODO: integrate this into the precision plugins
    if not isinstance(plugin, Precision):
        # not a PyTorch precision plugin: defer to the Fabric mapping
        return fabric_plugin_to_compute_dtype(plugin)
    if isinstance(plugin, BitsandbytesPrecision):
        return plugin.dtype
    if isinstance(plugin, HalfPrecision):
        return plugin._desired_input_dtype
    if isinstance(plugin, MixedPrecision):
        # "bf16-mixed" computes in bfloat16; every other mixed mode uses float16
        return torch.bfloat16 if plugin.precision == "bf16-mixed" else torch.half
    if isinstance(plugin, DoublePrecision):
        return torch.double
    if isinstance(plugin, (XLAPrecision, DeepSpeedPrecision)):
        return plugin._desired_dtype
    if isinstance(plugin, TransformerEnginePrecision):
        return torch.int8
    if isinstance(plugin, FSDPPrecision):
        # uses the configured reduce dtype, falling back to float32 when it is None
        return plugin.mixed_precision_config.reduce_dtype or torch.float32
    if isinstance(plugin, Precision):
        return torch.float32
    # unknown plugin type: fail loudly rather than guessing a dtype
    raise NotImplementedError(plugin)
155,383 | import logging
from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Union
import torch
from torch.nn import Module, ModuleDict
from torch.nn.modules.batchnorm import _BatchNorm
from torch.optim.optimizer import Optimizer
from typing_extensions import override
import lightning.pytorch as pl
from lightning.pytorch.callbacks.callback import Callback
from lightning.pytorch.utilities.exceptions import MisconfigurationException
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
def multiplicative(epoch: int) -> float:
    """Scheduler lambda returning a constant multiplicative factor of 2, independent of the epoch."""
    return 2.0
155,384 | from typing import Any, Dict, Optional
from typing_extensions import override
import lightning.pytorch as pl
from lightning.pytorch.accelerators.cpu import _PSUTIL_AVAILABLE
from lightning.pytorch.callbacks.callback import Callback
from lightning.pytorch.utilities.exceptions import MisconfigurationException
from lightning.pytorch.utilities.types import STEP_OUTPUT
def _prefix_metric_keys(metrics_dict: Dict[str, float], prefix: str, separator: str) -> Dict[str, float]:
return {prefix + separator + k: v for k, v in metrics_dict.items()} | null |
155,385 | import math
from dataclasses import dataclass
from datetime import timedelta
from typing import Any, Dict, Generator, Optional, Union, cast
from lightning_utilities.core.imports import RequirementCache
from typing_extensions import override
import lightning.pytorch as pl
from lightning.pytorch.callbacks.progress.progress_bar import ProgressBar
from lightning.pytorch.utilities.types import STEP_OUTPUT
The provided code snippet includes necessary dependencies for implementing the `_detect_light_colab_theme` function. Write a Python function `def _detect_light_colab_theme() -> bool` to solve the following problem:
Detect if it's light theme in Colab.
Here is the function:
def _detect_light_colab_theme() -> bool:
"""Detect if it's light theme in Colab."""
try:
import get_ipython
except (NameError, ModuleNotFoundError):
return False
ipython = get_ipython()
if "google.colab" in str(ipython.__class__):
try:
from google.colab import output
return output.eval_js('document.documentElement.matches("[theme=light]")')
except ModuleNotFoundError:
return False
return False | Detect if it's light theme in Colab. |
155,386 | import importlib
import math
import os
import sys
from typing import Any, Dict, Optional, Union
from typing_extensions import override
from lightning.pytorch.utilities.types import STEP_OUTPUT
import lightning.pytorch as pl
from lightning.pytorch.callbacks.progress.progress_bar import ProgressBar
from lightning.pytorch.utilities.rank_zero import rank_zero_debug
The provided code snippet includes necessary dependencies for implementing the `convert_inf` function. Write a Python function `def convert_inf(x: Optional[Union[int, float]]) -> Optional[Union[int, float]]` to solve the following problem:
The tqdm doesn't support inf/nan values. We have to convert it to None.
Here is the function:
def convert_inf(x: Optional[Union[int, float]]) -> Optional[Union[int, float]]:
    """Map values tqdm cannot display (``inf``/``nan``) and ``None`` to ``None``; pass anything else through."""
    if x is not None and math.isfinite(x):
        return x
    return None
155,387 | import importlib
import math
import os
import sys
from typing import Any, Dict, Optional, Union
from typing_extensions import override
from lightning.pytorch.utilities.types import STEP_OUTPUT
import lightning.pytorch as pl
from lightning.pytorch.callbacks.progress.progress_bar import ProgressBar
from lightning.pytorch.utilities.rank_zero import rank_zero_debug
def _update_n(bar: _tqdm, value: int) -> None:
    """Set the progress counter of ``bar`` to ``value`` and redraw it, unless the bar is disabled."""
    if bar.disable:
        return
    bar.n = value
    bar.refresh()
155,388 | from typing import Any, Dict, Optional, Union
from typing_extensions import override
import lightning.pytorch as pl
from lightning.pytorch.callbacks import Callback
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
def _version(loggers: List[Any], separator: str = "_") -> Union[int, str]:
if len(loggers) == 1:
return loggers[0].version
# Concatenate versions together, removing duplicates and preserving order
return separator.join(dict.fromkeys(str(logger.version) for logger in loggers))
The provided code snippet includes necessary dependencies for implementing the `get_standard_metrics` function. Write a Python function `def get_standard_metrics(trainer: "pl.Trainer") -> Dict[str, Union[int, str]]` to solve the following problem:
r"""Returns the standard metrics displayed in the progress bar. Currently, it only includes the version of the experiment when using a logger. .. code-block:: Epoch 1: 4%|▎ | 40/1095 [00:03<01:37, 10.84it/s, v_num=10] Return: Dictionary with the standard metrics to be displayed in the progress bar.
Here is the function:
def get_standard_metrics(trainer: "pl.Trainer") -> Dict[str, Union[int, str]]:
    r"""Returns the standard metrics displayed in the progress bar. Currently, it only includes the version of the
    experiment when using a logger.

    .. code-block::

        Epoch 1:   4%|▎         | 40/1095 [00:03<01:37, 10.84it/s, v_num=10]

    Return:
        Dictionary with the standard metrics to be displayed in the progress bar.
    """
    standard_metrics: Dict[str, Union[int, str]] = {}
    if not trainer.loggers:
        return standard_metrics

    from lightning.pytorch.loggers.utilities import _version

    version = _version(trainer.loggers)
    if version in ("", None):
        return standard_metrics
    if isinstance(version, str):
        # show last 4 places of long version strings
        version = version[-4:]
    standard_metrics["v_num"] = version
    return standard_metrics
155,389 | import copy
import inspect
import types
from argparse import Namespace
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Any, Iterator, List, MutableMapping, Optional, Sequence, Union
from lightning.fabric.utilities.data import AttributeDict
from lightning.pytorch.utilities.parsing import save_hyperparameters
_given_hyperparameters: ContextVar = ContextVar("_given_hyperparameters", default=None)
def _given_hyperparameters_context(hparams: dict, instantiator: str) -> Iterator[None]:
hparams = hparams.copy()
hparams["_instantiator"] = instantiator
token = _given_hyperparameters.set(hparams)
try:
yield
finally:
_given_hyperparameters.reset(token) | null |
155,390 | from contextlib import contextmanager
from dataclasses import fields
from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union, overload
from weakref import proxy
import torch
from torch import optim
from torch.optim import Optimizer
from typing_extensions import override
import lightning.pytorch as pl
from lightning.fabric.utilities.types import Optimizable, ReduceLROnPlateau, _Stateful
from lightning.pytorch.utilities.exceptions import MisconfigurationException
from lightning.pytorch.utilities.model_helpers import is_overridden
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
from lightning.pytorch.utilities.signature_utils import is_param_in_hook_signature
from lightning.pytorch.utilities.types import LRSchedulerConfig, LRSchedulerTypeTuple
def do_nothing_closure() -> None:
    """A no-op closure, used where an optimizer ``step`` requires a closure but none is needed."""
    return None
155,391 | from contextlib import contextmanager
from dataclasses import fields
from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union, overload
from weakref import proxy
import torch
from torch import optim
from torch.optim import Optimizer
from typing_extensions import override
import lightning.pytorch as pl
from lightning.fabric.utilities.types import Optimizable, ReduceLROnPlateau, _Stateful
from lightning.pytorch.utilities.exceptions import MisconfigurationException
from lightning.pytorch.utilities.model_helpers import is_overridden
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
from lightning.pytorch.utilities.signature_utils import is_param_in_hook_signature
from lightning.pytorch.utilities.types import LRSchedulerConfig, LRSchedulerTypeTuple
def _configure_optimizers(
    optim_conf: Union[Dict[str, Any], List, Optimizer, Tuple],
) -> Tuple[List, List, Optional[str]]:
    """Normalize whatever ``LightningModule.configure_optimizers`` returned.

    Args:
        optim_conf: A single optimizer, a list/tuple of optimizers, an
            ``([optimizers], [schedulers])`` pair, a single config dict, or a list of
            config dicts.

    Returns:
        A tuple ``(optimizers, lr_schedulers, monitor)`` where ``monitor`` is the metric
        name for ``ReduceLROnPlateau`` (or ``None`` when not provided).

    Raises:
        MisconfigurationException: If the layout of ``optim_conf`` is not supported.
    """

    def _as_scheduler_dict(scheduler: Any) -> Dict[str, Any]:
        # Normalize a scheduler entry: copy dicts, wrap bare schedulers.
        # (Replaces the previous ``lambda`` assignment, discouraged by PEP 8 / E731.)
        return dict(scheduler) if isinstance(scheduler, dict) else {"scheduler": scheduler}

    optimizers, lr_schedulers = [], []
    monitor = None

    # single output, single optimizer
    if isinstance(optim_conf, Optimizable):
        optimizers = [optim_conf]
    # two lists, optimizer + lr schedulers
    elif (
        isinstance(optim_conf, (list, tuple))
        and len(optim_conf) == 2
        and isinstance(optim_conf[0], list)
        and all(isinstance(opt, Optimizable) for opt in optim_conf[0])
    ):
        opt, sch = optim_conf
        optimizers = opt
        lr_schedulers = sch if isinstance(sch, list) else [sch]
    # single dictionary
    elif isinstance(optim_conf, dict):
        _validate_optim_conf(optim_conf)
        optimizers = [optim_conf["optimizer"]]
        monitor = optim_conf.get("monitor", None)
        lr_schedulers = [optim_conf["lr_scheduler"]] if "lr_scheduler" in optim_conf else []
    # multiple dictionaries
    elif isinstance(optim_conf, (list, tuple)) and all(isinstance(d, dict) for d in optim_conf):
        for opt_dict in optim_conf:
            _validate_optim_conf(opt_dict)
        optimizers = [opt_dict["optimizer"] for opt_dict in optim_conf]
        lr_schedulers = [
            _as_scheduler_dict(opt_dict["lr_scheduler"]) for opt_dict in optim_conf if "lr_scheduler" in opt_dict
        ]
    # single list or tuple, multiple optimizer
    elif isinstance(optim_conf, (list, tuple)) and all(isinstance(opt, Optimizable) for opt in optim_conf):
        optimizers = list(optim_conf)
    # unknown configuration
    else:
        raise MisconfigurationException(
            "Unknown configuration for model optimizers."
            " Output from `model.configure_optimizers()` should be one of:\n"
            " * `Optimizer`\n"
            " * [`Optimizer`]\n"
            " * ([`Optimizer`], [`LRScheduler`])\n"
            ' * {"optimizer": `Optimizer`, (optional) "lr_scheduler": `LRScheduler`}\n'
        )
    return optimizers, lr_schedulers, monitor
def _configure_schedulers_automatic_opt(schedulers: list, monitor: Optional[str]) -> List[LRSchedulerConfig]:
    """Convert each scheduler into `LRSchedulerConfig` with relevant information, when using automatic optimization.

    Args:
        schedulers: Raw scheduler entries from ``configure_optimizers`` — either scheduler
            instances or config dicts containing a ``"scheduler"`` key.
        monitor: Metric name from the optimizer config; required when a bare
            ``ReduceLROnPlateau`` instance is given.

    Raises:
        MisconfigurationException: On a malformed scheduler dict, an invalid ``"interval"``,
            or a missing monitor for ``ReduceLROnPlateau``.
    """
    lr_scheduler_configs = []
    for scheduler in schedulers:
        if isinstance(scheduler, dict):
            # check provided keys
            supported_keys = {field.name for field in fields(LRSchedulerConfig)}
            extra_keys = scheduler.keys() - supported_keys
            if extra_keys:
                rank_zero_warn(
                    f"Found unsupported keys in the lr scheduler dict: {extra_keys}."
                    " HINT: remove them from the output of `configure_optimizers`.",
                    category=RuntimeWarning,
                )
                # drop the unsupported keys so the dict can be splatted into LRSchedulerConfig
                scheduler = {k: v for k, v in scheduler.items() if k in supported_keys}
            if "scheduler" not in scheduler:
                raise MisconfigurationException(
                    'The lr scheduler dict must have the key "scheduler" with its item being an lr scheduler'
                )
            if "interval" in scheduler and scheduler["interval"] not in ("step", "epoch"):
                raise MisconfigurationException(
                    'The "interval" key in lr scheduler dict must be "step" or "epoch"'
                    f' but is "{scheduler["interval"]}"'
                )
            # infer reduce_on_plateau from the scheduler type unless the user set it explicitly
            scheduler["reduce_on_plateau"] = scheduler.get(
                "reduce_on_plateau", isinstance(scheduler["scheduler"], optim.lr_scheduler.ReduceLROnPlateau)
            )
            if scheduler["reduce_on_plateau"] and scheduler.get("monitor", None) is None:
                raise MisconfigurationException(
                    "The lr scheduler dict must include a monitor when a `ReduceLROnPlateau` scheduler is used."
                    ' For example: {"optimizer": optimizer, "lr_scheduler":'
                    ' {"scheduler": scheduler, "monitor": "your_loss"}}'
                )
            # OneCycleLR is almost always stepped per batch; warn about the likely mistake
            is_one_cycle = isinstance(scheduler["scheduler"], optim.lr_scheduler.OneCycleLR)
            if is_one_cycle and scheduler.get("interval", "epoch") == "epoch":
                rank_zero_warn(
                    "A `OneCycleLR` scheduler is using 'interval': 'epoch'."
                    " Are you sure you didn't mean 'interval': 'step'?",
                    category=RuntimeWarning,
                )
            config = LRSchedulerConfig(**scheduler)
        elif isinstance(scheduler, ReduceLROnPlateau):
            if monitor is None:
                raise MisconfigurationException(
                    "`configure_optimizers` must include a monitor when a `ReduceLROnPlateau`"
                    " scheduler is used. For example:"
                    ' {"optimizer": optimizer, "lr_scheduler": scheduler, "monitor": "metric_to_track"}'
                )
            config = LRSchedulerConfig(scheduler, reduce_on_plateau=True, monitor=monitor)
        else:
            # a bare scheduler instance gets all-default config values
            config = LRSchedulerConfig(scheduler)
        lr_scheduler_configs.append(config)
    return lr_scheduler_configs
def _configure_schedulers_manual_opt(schedulers: list) -> List[LRSchedulerConfig]:
    """Convert each scheduler into a ``LRSchedulerConfig`` for manual optimization.

    Keys that only make sense under automatic optimization (``reduce_on_plateau``,
    ``monitor``, ``strict``) are dropped with a warning, since the user is expected to call
    ``lr_scheduler.step()`` themselves.
    """
    # ``interval`` is deliberately not ignored: the `LearningRateMonitor` callback reads it
    # to decide when to log the learning rate, even though stepping is manual.
    ignored_keys = {"reduce_on_plateau", "monitor", "strict"}
    configs: List[LRSchedulerConfig] = []
    for entry in schedulers:
        if not isinstance(entry, dict):
            configs.append(LRSchedulerConfig(entry))
            continue
        keys_to_warn = [key for key in entry if key in ignored_keys]
        if keys_to_warn:
            rank_zero_warn(
                f"The lr scheduler dict contains the key(s) {keys_to_warn}, but the keys will be ignored."
                " You need to call `lr_scheduler.step()` manually in manual optimization.",
                category=RuntimeWarning,
            )
        kept = {key: entry[key] for key in entry if key not in ignored_keys}
        configs.append(LRSchedulerConfig(**kept))
    return configs
def _validate_scheduler_api(lr_scheduler_configs: List[LRSchedulerConfig], model: "pl.LightningModule") -> None:
    """Validate that every configured scheduler exposes the API Lightning relies on.

    Raises:
        TypeError: If a scheduler lacks ``state_dict``/``load_state_dict``.
        MisconfigurationException: If a non-PyTorch scheduler is used under automatic
            optimization without overriding ``lr_scheduler_step``.
    """
    for config in lr_scheduler_configs:
        scheduler = config.scheduler
        if not isinstance(scheduler, _Stateful):
            raise TypeError(
                f"The provided lr scheduler `{scheduler.__class__.__name__}` is invalid."
                " It should have `state_dict` and `load_state_dict` methods defined."
            )
        # Custom schedulers are acceptable under manual optimization or when the user
        # overrides `lr_scheduler_step`; checks short-circuit in the same order as before.
        if (
            isinstance(scheduler, LRSchedulerTypeTuple)
            or is_overridden("lr_scheduler_step", model)
            or not model.automatic_optimization
        ):
            continue
        raise MisconfigurationException(
            f"The provided lr scheduler `{scheduler.__class__.__name__}` doesn't follow PyTorch's LRScheduler"
            " API. You should override the `LightningModule.lr_scheduler_step` hook with your own logic if"
            " you are using a custom LR scheduler."
        )
def _validate_multiple_optimizers_support(optimizers: List[Optimizer], model: "pl.LightningModule") -> None:
    """Raise if the configuration requires multiple optimizers under automatic optimization."""
    common = "Training with multiple optimizers is only supported with manual optimization."
    uses_optimizer_idx = is_param_in_hook_signature(model.training_step, "optimizer_idx", explicit=True)
    if uses_optimizer_idx:
        raise RuntimeError(
            common + " Remove the `optimizer_idx`"
            " argument from `training_step`, set `self.automatic_optimization = False` and access your optimizers"
            " in `training_step` with `opt1, opt2, ... = self.optimizers()`."
        )
    if model.automatic_optimization and len(optimizers) > 1:
        raise RuntimeError(
            common + " Set"
            " `self.automatic_optimization = False`, then access your optimizers in `training_step` with"
            " `opt1, opt2, ... = self.optimizers()`."
        )
def _validate_optimizers_attached(optimizers: List[Optimizer], lr_scheduler_configs: List[LRSchedulerConfig]) -> None:
    """Ensure every scheduler is bound to one of the optimizers returned by the user."""
    for cfg in lr_scheduler_configs:
        owning_optimizer = cfg.scheduler.optimizer
        if owning_optimizer not in optimizers:
            raise MisconfigurationException(
                "Some schedulers are attached with an optimizer that wasn't returned from `configure_optimizers`."
            )
class _MockOptimizer(Optimizer):
"""The `_MockOptimizer` will be used inplace of an optimizer in the event that `None` is returned from
:meth:`~lightning.pytorch.core.LightningModule.configure_optimizers`."""
def __init__(self) -> None:
super().__init__([torch.zeros(1)], {})
def add_param_group(self, param_group: Dict[Any, Any]) -> None:
pass # Do Nothing
def load_state_dict(self, state_dict: Dict[Any, Any]) -> None:
pass # Do Nothing
def state_dict(self) -> Dict[str, Any]:
return {} # Return Empty
def step(self, closure: None = ...) -> None: ...
def step(self, closure: Callable[[], float]) -> float: ...
def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
if closure is not None:
return closure()
def zero_grad(self, set_to_none: Optional[bool] = True) -> None:
pass # Do Nothing
def __repr__(self) -> str:
return "No Optimizer"
class LRSchedulerConfig:
    # Normalized description of one LR scheduler as parsed from `configure_optimizers`.
    # NOTE(review): this looks like it was declared as a ``@dataclass`` upstream — every
    # field is a class-level annotation with a default and ``fields(LRSchedulerConfig)``
    # is used elsewhere in this file; the decorator appears to be missing here. Confirm
    # against the original module.
    scheduler: Union[_TORCH_LRSCHEDULER, ReduceLROnPlateau]
    # no custom name
    name: Optional[str] = None
    # after epoch is over
    interval: str = "epoch"
    # every epoch/batch
    frequency: int = 1
    # most often not ReduceLROnPlateau scheduler
    reduce_on_plateau: bool = False
    # value to monitor for ReduceLROnPlateau
    monitor: Optional[str] = None
    # enforce that the monitor exists for ReduceLROnPlateau
    strict: bool = True
The provided code snippet includes necessary dependencies for implementing the `_init_optimizers_and_lr_schedulers` function. Write a Python function `def _init_optimizers_and_lr_schedulers( model: "pl.LightningModule", ) -> Tuple[List[Optimizer], List[LRSchedulerConfig]]` to solve the following problem:
Calls `LightningModule.configure_optimizers` and parses and validates the output.
Here is the function:
def _init_optimizers_and_lr_schedulers(
    model: "pl.LightningModule",
) -> Tuple[List[Optimizer], List[LRSchedulerConfig]]:
    """Calls `LightningModule.configure_optimizers` and parses and validates the output."""
    from lightning.pytorch.trainer import call

    optim_conf = call._call_lightning_module_hook(model.trainer, "configure_optimizers", pl_module=model)
    if optim_conf is None:
        # Training can still proceed without an optimizer; substitute a harmless no-op one.
        rank_zero_warn(
            "`LightningModule.configure_optimizers` returned `None`, this fit will run with no optimizer",
        )
        optim_conf = _MockOptimizer()

    optimizers, lr_schedulers, monitor = _configure_optimizers(optim_conf)
    if model.automatic_optimization:
        lr_scheduler_configs = _configure_schedulers_automatic_opt(lr_schedulers, monitor)
    else:
        lr_scheduler_configs = _configure_schedulers_manual_opt(lr_schedulers)
    _validate_multiple_optimizers_support(optimizers, model)
    _validate_optimizers_attached(optimizers, lr_scheduler_configs)
    _validate_scheduler_api(lr_scheduler_configs, model)
    return optimizers, lr_scheduler_configs
155,392 | import ast
import contextlib
import csv
import inspect
import logging
import os
from argparse import Namespace
from copy import deepcopy
from enum import Enum
from pathlib import Path
from typing import IO, TYPE_CHECKING, Any, Callable, Dict, Optional, Type, Union
from warnings import warn
import torch
import yaml
from lightning_utilities.core.apply_func import apply_to_collection
import lightning.pytorch as pl
from lightning.fabric.utilities.cloud_io import _is_dir, get_filesystem
from lightning.fabric.utilities.cloud_io import _load as pl_load
from lightning.fabric.utilities.data import AttributeDict
from lightning.fabric.utilities.types import _MAP_LOCATION_TYPE, _PATH
from lightning.pytorch.accelerators import CUDAAccelerator, MPSAccelerator, XLAAccelerator
from lightning.pytorch.utilities.imports import _OMEGACONF_AVAILABLE
from lightning.pytorch.utilities.migration import pl_legacy_patch
from lightning.pytorch.utilities.migration.utils import _pl_migrate_checkpoint
from lightning.pytorch.utilities.model_helpers import is_overridden
from lightning.pytorch.utilities.parsing import parse_class_init_keys
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
def _default_map_location(storage: "UntypedStorage", location: str) -> Optional["UntypedStorage"]:
def _load_state(
cls: Union[Type["pl.LightningModule"], Type["pl.LightningDataModule"]],
checkpoint: Dict[str, Any],
strict: Optional[bool] = None,
**cls_kwargs_new: Any,
) -> Union["pl.LightningModule", "pl.LightningDataModule"]:
def load_hparams_from_tags_csv(tags_csv: _PATH) -> Dict[str, Any]:
def load_hparams_from_yaml(config_yaml: _PATH, use_omegaconf: bool = True) -> Dict[str, Any]:
_PATH = Union[str, Path]
_MAP_LOCATION_TYPE = Optional[
Union[_DEVICE, Callable[[UntypedStorage, str], Optional[UntypedStorage]], Dict[_DEVICE, _DEVICE]]
]
def _pl_migrate_checkpoint(checkpoint: _CHECKPOINT, checkpoint_path: Optional[_PATH] = None) -> _CHECKPOINT:
def _load_from_checkpoint(
    cls: Union[Type["pl.LightningModule"], Type["pl.LightningDataModule"]],
    checkpoint_path: Union[_PATH, IO],
    map_location: _MAP_LOCATION_TYPE = None,
    hparams_file: Optional[_PATH] = None,
    strict: Optional[bool] = None,
    **kwargs: Any,
) -> Union["pl.LightningModule", "pl.LightningDataModule"]:
    """Shared implementation behind ``load_from_checkpoint`` for modules and datamodules.

    Args:
        cls: The concrete ``LightningModule`` or ``LightningDataModule`` subclass to build.
        checkpoint_path: Path or file-like object of the checkpoint.
        map_location: Forwarded to the checkpoint loader; defaults to ``_default_map_location``.
        hparams_file: Optional ``.csv``/``.yml``/``.yaml`` file whose contents replace the
            checkpointed hyperparameters.
        strict: Whether ``load_state_dict`` should be strict (modules only).
        **kwargs: Extra hyperparameters overriding the checkpointed ones.

    Raises:
        ValueError: If ``hparams_file`` has an unsupported extension.
        NotImplementedError: If ``cls`` is neither a LightningModule nor a LightningDataModule.
    """
    map_location = map_location or _default_map_location
    # legacy patch makes old pickled classes importable while loading
    with pl_legacy_patch():
        checkpoint = pl_load(checkpoint_path, map_location=map_location)

    # convert legacy checkpoints to the new format
    checkpoint = _pl_migrate_checkpoint(
        checkpoint, checkpoint_path=(checkpoint_path if isinstance(checkpoint_path, (str, Path)) else None)
    )

    if hparams_file is not None:
        extension = str(hparams_file).split(".")[-1]
        if extension.lower() == "csv":
            hparams = load_hparams_from_tags_csv(hparams_file)
        elif extension.lower() in ("yml", "yaml"):
            hparams = load_hparams_from_yaml(hparams_file)
        else:
            raise ValueError(".csv, .yml or .yaml is required for `hparams_file`")

        # overwrite hparams by the given file
        checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY] = hparams

    # TODO: make this a migration:
    # for past checkpoint need to add the new key
    checkpoint.setdefault(cls.CHECKPOINT_HYPER_PARAMS_KEY, {})
    # override the hparams with values that were passed in
    checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY].update(kwargs)

    if issubclass(cls, pl.LightningDataModule):
        return _load_state(cls, checkpoint, **kwargs)
    if issubclass(cls, pl.LightningModule):
        model = _load_state(cls, checkpoint, strict=strict, **kwargs)
        state_dict = checkpoint["state_dict"]
        if not state_dict:
            rank_zero_warn(f"The state dict in {checkpoint_path!r} contains no parameters.")
            return model

        # move the model to the device of the loaded weights; falls back to the device of a
        # dummy CPU tensor when the state dict holds no tensors
        device = next((t for t in state_dict.values() if isinstance(t, torch.Tensor)), torch.tensor(0)).device
        assert isinstance(model, pl.LightningModule)
        return model.to(device)

    raise NotImplementedError(f"Unsupported {cls}")
155,393 | import ast
import contextlib
import csv
import inspect
import logging
import os
from argparse import Namespace
from copy import deepcopy
from enum import Enum
from pathlib import Path
from typing import IO, TYPE_CHECKING, Any, Callable, Dict, Optional, Type, Union
from warnings import warn
import torch
import yaml
from lightning_utilities.core.apply_func import apply_to_collection
import lightning.pytorch as pl
from lightning.fabric.utilities.cloud_io import _is_dir, get_filesystem
from lightning.fabric.utilities.cloud_io import _load as pl_load
from lightning.fabric.utilities.data import AttributeDict
from lightning.fabric.utilities.types import _MAP_LOCATION_TYPE, _PATH
from lightning.pytorch.accelerators import CUDAAccelerator, MPSAccelerator, XLAAccelerator
from lightning.pytorch.utilities.imports import _OMEGACONF_AVAILABLE
from lightning.pytorch.utilities.migration import pl_legacy_patch
from lightning.pytorch.utilities.migration.utils import _pl_migrate_checkpoint
from lightning.pytorch.utilities.model_helpers import is_overridden
from lightning.pytorch.utilities.parsing import parse_class_init_keys
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
The provided code snippet includes necessary dependencies for implementing the `update_hparams` function. Write a Python function `def update_hparams(hparams: dict, updates: dict) -> None` to solve the following problem:
Overrides hparams with new values. >>> hparams = {'c': 4} >>> update_hparams(hparams, {'a': {'b': 2}, 'c': 1}) >>> hparams['a']['b'], hparams['c'] (2, 1) >>> update_hparams(hparams, {'a': {'b': 4}, 'c': 7}) >>> hparams['a']['b'], hparams['c'] (4, 7) Args: hparams: the original params and also target object updates: new params to be used as update
Here is the function:
def update_hparams(hparams: dict, updates: dict) -> None:
    """Overrides hparams with new values.

    >>> hparams = {'c': 4}
    >>> update_hparams(hparams, {'a': {'b': 2}, 'c': 1})
    >>> hparams['a']['b'], hparams['c']
    (2, 1)
    >>> update_hparams(hparams, {'a': {'b': 4}, 'c': 7})
    >>> hparams['a']['b'], hparams['c']
    (4, 7)

    Args:
        hparams: the original params and also target object
        updates: new params to be used as update
    """
    for key, new_value in updates.items():
        # Recurse only when the key already exists and the incoming value is nested;
        # otherwise the new value (nested or not) replaces/creates the entry directly.
        if key in hparams and isinstance(new_value, dict):
            update_hparams(hparams[key], new_value)
        else:
            hparams[key] = new_value
155,394 | import ast
import contextlib
import csv
import inspect
import logging
import os
from argparse import Namespace
from copy import deepcopy
from enum import Enum
from pathlib import Path
from typing import IO, TYPE_CHECKING, Any, Callable, Dict, Optional, Type, Union
from warnings import warn
import torch
import yaml
from lightning_utilities.core.apply_func import apply_to_collection
import lightning.pytorch as pl
from lightning.fabric.utilities.cloud_io import _is_dir, get_filesystem
from lightning.fabric.utilities.cloud_io import _load as pl_load
from lightning.fabric.utilities.data import AttributeDict
from lightning.fabric.utilities.types import _MAP_LOCATION_TYPE, _PATH
from lightning.pytorch.accelerators import CUDAAccelerator, MPSAccelerator, XLAAccelerator
from lightning.pytorch.utilities.imports import _OMEGACONF_AVAILABLE
from lightning.pytorch.utilities.migration import pl_legacy_patch
from lightning.pytorch.utilities.migration.utils import _pl_migrate_checkpoint
from lightning.pytorch.utilities.model_helpers import is_overridden
from lightning.pytorch.utilities.parsing import parse_class_init_keys
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
def get_filesystem(path: _PATH, **kwargs: Any) -> AbstractFileSystem:
    """Return the fsspec filesystem able to handle ``path`` (local, s3, gcs, ...).

    ``kwargs`` are forwarded to the filesystem constructor via ``url_to_fs``.
    """
    fs, _ = url_to_fs(str(path), **kwargs)
    return fs
def _is_dir(fs: AbstractFileSystem, path: Union[str, Path], strict: bool = False) -> bool:
    """Check if a path is directory-like.

    Args:
        fs: The filesystem to check the path against.
        path: The path or URL to be checked.
        strict: Only relevant for object-storage backends. With ``strict=False`` any
            non-existing path counts as a valid directory-like path (it and any missing
            parents are created on the fly); with ``strict=True`` the backend's own
            ``isdir`` answer is used. Defaults to False.
    """
    if not _is_object_storage(fs):
        # Regular filesystems have real directories, so defer to the backend.
        return fs.isdir(path)

    # Object storage fsspec's (s3/gcs/...) have no real directories and `mkdir`/`makedirs`
    # are effectively no-ops there, see e.g.
    # https://gcsfs.readthedocs.io/en/latest/api.html?highlight=makedirs#gcsfs.core.GCSFileSystem.mkdir
    if strict:
        return fs.isdir(path)
    # Non-strict: any path not already taken by a file is a valid directory-like target,
    # because the directory (and all non-existing parents) will be created on the fly.
    return not fs.isfile(path)
_PATH = Union[str, Path]
def save_hparams_to_tags_csv(tags_csv: _PATH, hparams: Union[dict, Namespace]) -> None:
    """Write hyperparameters to a two-column (``key``, ``value``) CSV file.

    Raises:
        RuntimeError: If the parent directory of ``tags_csv`` does not exist.
    """
    fs = get_filesystem(tags_csv)
    if not _is_dir(fs, os.path.dirname(tags_csv)):
        raise RuntimeError(f"Missing folder: {os.path.dirname(tags_csv)}.")

    # ``Namespace`` (e.g. from argparse) is flattened to a plain dict first.
    params = vars(hparams) if isinstance(hparams, Namespace) else hparams

    with fs.open(tags_csv, "w", newline="") as fp:
        writer = csv.DictWriter(fp, fieldnames=["key", "value"])
        # the header row is written manually to keep the historical file layout
        writer.writerow({"key": "key", "value": "value"})
        for key, value in params.items():
            writer.writerow({"key": key, "value": value})
155,395 | import ast
import contextlib
import csv
import inspect
import logging
import os
from argparse import Namespace
from copy import deepcopy
from enum import Enum
from pathlib import Path
from typing import IO, TYPE_CHECKING, Any, Callable, Dict, Optional, Type, Union
from warnings import warn
import torch
import yaml
from lightning_utilities.core.apply_func import apply_to_collection
import lightning.pytorch as pl
from lightning.fabric.utilities.cloud_io import _is_dir, get_filesystem
from lightning.fabric.utilities.cloud_io import _load as pl_load
from lightning.fabric.utilities.data import AttributeDict
from lightning.fabric.utilities.types import _MAP_LOCATION_TYPE, _PATH
from lightning.pytorch.accelerators import CUDAAccelerator, MPSAccelerator, XLAAccelerator
from lightning.pytorch.utilities.imports import _OMEGACONF_AVAILABLE
from lightning.pytorch.utilities.migration import pl_legacy_patch
from lightning.pytorch.utilities.migration.utils import _pl_migrate_checkpoint
from lightning.pytorch.utilities.model_helpers import is_overridden
from lightning.pytorch.utilities.parsing import parse_class_init_keys
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
def get_filesystem(path: _PATH, **kwargs: Any) -> AbstractFileSystem:
    """Return the fsspec filesystem able to handle ``path`` (local, s3, gcs, ...).

    ``kwargs`` are forwarded to the filesystem constructor via ``url_to_fs``.
    """
    fs, _ = url_to_fs(str(path), **kwargs)
    return fs
def _is_dir(fs: AbstractFileSystem, path: Union[str, Path], strict: bool = False) -> bool:
    """Check if a path is directory-like.

    This function determines if a given path is considered directory-like, taking into account the behavior
    specific to object storage platforms. For other filesystems, it behaves similarly to the standard `fs.isdir`
    method.

    Args:
        fs: The filesystem to check the path against.
        path: The path or URL to be checked.
        strict: A flag specific to Object Storage platforms. If set to ``False``, any non-existing path is considered
            as a valid directory-like path. In such cases, the directory (and any non-existing parent directories)
            will be created on the fly. Defaults to False.
    """
    # Object storage fsspec's are inconsistent with other file systems because they do not have real directories,
    # see for instance https://gcsfs.readthedocs.io/en/latest/api.html?highlight=makedirs#gcsfs.core.GCSFileSystem.mkdir
    # In particular, `fs.makedirs` is a no-op so we use `strict=False` to consider any path as valid, except if the
    # path already exists but is a file
    if _is_object_storage(fs):
        if strict:
            return fs.isdir(path)

        # Check if the path is not already taken by a file. If not, it is considered a valid directory-like path
        # because the directory (and all non-existing parent directories) will be created on the fly.
        return not fs.isfile(path)

    return fs.isdir(path)
class AttributeDict(Dict):
    """A dictionary whose entries are also reachable as attributes.

    Drop-in replacement for a plain ``dict`` that additionally supports ``obj.key`` style
    reads, writes and deletes. Use this to define the state of your program, then pass it to
    :meth:`~lightning.fabric.fabric.Fabric.save` and :meth:`~lightning.fabric.fabric.Fabric.load`.

    Example:
        >>> import torch
        >>> model = torch.nn.Linear(2, 2)
        >>> state = AttributeDict(model=model, iter_num=0)
        >>> state.model
        Linear(in_features=2, out_features=2, bias=True)
        >>> state.iter_num += 1
        >>> state.iter_num
        1
        >>> state
        "iter_num": 1
        "model": Linear(in_features=2, out_features=2, bias=True)

    """

    def __getattr__(self, key: str) -> Any:
        try:
            return self[key]
        except KeyError as exc:
            # mirror normal attribute-lookup failures so hasattr()/getattr() behave as expected
            raise AttributeError(f"'{type(self).__name__}' object has no attribute '{key}'") from exc

    def __setattr__(self, key: str, val: Any) -> None:
        self[key] = val

    def __delattr__(self, item: str) -> None:
        if item not in self:
            raise KeyError(item)
        del self[item]

    def __repr__(self) -> str:
        if not self:
            return ""
        # pad each quoted key to the longest key (+3 for the quotes and colon) for alignment
        width = max(len(str(key)) for key in self) + 3
        lines = ("{:{width}s} {}".format(f'"{key}":', self[key], width=width) for key in sorted(self.keys()))
        return "\n".join(lines)
_PATH = Union[str, Path]
_OMEGACONF_AVAILABLE = package_available("omegaconf")
The provided code snippet includes necessary dependencies for implementing the `save_hparams_to_yaml` function. Write a Python function `def save_hparams_to_yaml(config_yaml: _PATH, hparams: Union[dict, Namespace], use_omegaconf: bool = True) -> None` to solve the following problem:
Args: config_yaml: path to new YAML file hparams: parameters to be saved use_omegaconf: If omegaconf is available and ``use_omegaconf=True``, the hparams will be converted to ``DictConfig`` if possible.
Here is the function:
def save_hparams_to_yaml(config_yaml: _PATH, hparams: Union[dict, Namespace], use_omegaconf: bool = True) -> None:
    """
    Args:
        config_yaml: path to new YAML file
        hparams: parameters to be saved
        use_omegaconf: If omegaconf is available and ``use_omegaconf=True``,
            the hparams will be converted to ``DictConfig`` if possible.

    Raises:
        RuntimeError: If the parent folder of ``config_yaml`` does not exist.
        TypeError: If ``hparams`` is not dict-like after conversion.
    """
    fs = get_filesystem(config_yaml)
    if not _is_dir(fs, os.path.dirname(config_yaml)):
        raise RuntimeError(f"Missing folder: {os.path.dirname(config_yaml)}.")

    # convert Namespace or AD to dict
    if isinstance(hparams, Namespace):
        hparams = vars(hparams)
    elif isinstance(hparams, AttributeDict):
        hparams = dict(hparams)

    # saving with OmegaConf objects
    if _OMEGACONF_AVAILABLE and use_omegaconf:
        # imported lazily so the omegaconf dependency stays optional
        from omegaconf import OmegaConf
        from omegaconf.dictconfig import DictConfig
        from omegaconf.errors import UnsupportedValueType, ValidationError

        # deepcopy: hparams from user shouldn't be resolved
        hparams = deepcopy(hparams)
        hparams = apply_to_collection(hparams, DictConfig, OmegaConf.to_container, resolve=True)
        with fs.open(config_yaml, "w", encoding="utf-8") as fp:
            try:
                OmegaConf.save(hparams, fp)
                return
            except (UnsupportedValueType, ValidationError):
                # fall through to the plain-YAML path below
                pass

    if not isinstance(hparams, dict):
        raise TypeError("hparams must be dictionary")

    hparams_allowed = {}
    # drop parameters which contain some strange datatypes as fsspec
    for k, v in hparams.items():
        try:
            # Enum members are serialized by name
            v = v.name if isinstance(v, Enum) else v
            yaml.dump(v)
        except TypeError:
            warn(f"Skipping '{k}' parameter because it is not possible to safely dump to YAML.")
            hparams[k] = type(v).__name__
        else:
            hparams_allowed[k] = v

    # saving the standard way
    with fs.open(config_yaml, "w", newline="") as fp:
        yaml.dump(hparams_allowed, fp)
155,396 | import logging
import numbers
import weakref
from contextlib import contextmanager
from pathlib import Path
from typing import (
IO,
Any,
Callable,
Dict,
Generator,
List,
Literal,
Mapping,
Optional,
Sequence,
Tuple,
Union,
cast,
overload,
)
import torch
from lightning_utilities.core.apply_func import apply_to_collection
from lightning_utilities.core.imports import RequirementCache
from torch import ScriptModule, Tensor
from torch.nn import Module
from torch.optim.optimizer import Optimizer
from torchmetrics import Metric, MetricCollection
from typing_extensions import Self, override
import lightning.fabric as lf
import lightning.pytorch as pl
from lightning.fabric.loggers import Logger as FabricLogger
from lightning.fabric.utilities.apply_func import convert_to_tensors
from lightning.fabric.utilities.cloud_io import get_filesystem
from lightning.fabric.utilities.device_dtype_mixin import _DeviceDtypeModuleMixin
from lightning.fabric.utilities.imports import _IS_WINDOWS, _TORCH_GREATER_EQUAL_2_0, _TORCH_GREATER_EQUAL_2_1
from lightning.fabric.utilities.types import _MAP_LOCATION_TYPE, _PATH
from lightning.fabric.wrappers import _FabricOptimizer
from lightning.pytorch.callbacks.callback import Callback
from lightning.pytorch.core.hooks import CheckpointHooks, DataHooks, ModelHooks
from lightning.pytorch.core.mixins import HyperparametersMixin
from lightning.pytorch.core.optimizer import LightningOptimizer
from lightning.pytorch.core.saving import _load_from_checkpoint
from lightning.pytorch.loggers import Logger
from lightning.pytorch.trainer import call
from lightning.pytorch.trainer.connectors.logger_connector.fx_validator import _FxValidator
from lightning.pytorch.trainer.connectors.logger_connector.result import _get_default_dtype
from lightning.pytorch.utilities import GradClipAlgorithmType
from lightning.pytorch.utilities.exceptions import MisconfigurationException
from lightning.pytorch.utilities.imports import _TORCHMETRICS_GREATER_EQUAL_0_9_1
from lightning.pytorch.utilities.model_helpers import _restricted_classmethod
from lightning.pytorch.utilities.rank_zero import WarningCache, rank_zero_debug, rank_zero_warn
from lightning.pytorch.utilities.signature_utils import is_param_in_hook_signature
from lightning.pytorch.utilities.types import (
_METRIC,
STEP_OUTPUT,
LRSchedulerPLType,
LRSchedulerTypeUnion,
OptimizerLRScheduler,
)
class LightningModule(
    _DeviceDtypeModuleMixin,
    HyperparametersMixin,
    ModelHooks,
    DataHooks,
    CheckpointHooks,
    Module,
):
    """Base class for all models in Lightning, combining device/dtype handling,
    hyperparameter management, and the hook mixins on top of :class:`torch.nn.Module`."""

    # Below is for property support of JIT
    # since none of these are important when using JIT, we are going to ignore them.
    __jit_unused_properties__: List[str] = (
        [
            "example_input_array",
            "on_gpu",
            "current_epoch",
            "global_step",
            "global_rank",
            "local_rank",
            "logger",
            "loggers",
            "automatic_optimization",
            "trainer",
            "fabric",
            "strict_loading",
        ]
        + _DeviceDtypeModuleMixin.__jit_unused_properties__
        + HyperparametersMixin.__jit_unused_properties__
    )
    # flag consulted by properties so they behave safely under TorchScript scripting
    _jit_is_scripting = False
    # keys under which hyperparameters and their metadata are stored in a checkpoint dict
    CHECKPOINT_HYPER_PARAMS_KEY = "hyper_parameters"
    CHECKPOINT_HYPER_PARAMS_NAME = "hparams_name"
    CHECKPOINT_HYPER_PARAMS_TYPE = "hparams_type"
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """Initialize internal Lightning state; extra args/kwargs are forwarded to ``Module.__init__``."""
        super().__init__(*args, **kwargs)
        # pointer to the trainer object
        self._trainer: Optional["pl.Trainer"] = None
        # attributes that can be set by user
        self._example_input_array: Optional[Union[Tensor, Tuple, Dict]] = None
        self._automatic_optimization: bool = True
        self._strict_loading: Optional[bool] = None  # None means "user has not chosen" (treated as True)
        # attributes used internally
        self._current_fx_name: Optional[str] = None  # name of the hook currently being executed
        self._param_requires_grad_state: Dict[str, bool] = {}
        self._metric_attributes: Optional[Dict[int, str]] = None  # id(Metric) -> attribute name cache
        self._register_sharded_tensor_state_dict_hooks_if_available()
        self._compiler_ctx: Optional[Dict[str, Any]] = None
        # attributes only used when using fabric
        self._fabric: Optional["lf.Fabric"] = None
        self._fabric_optimizers: List[_FabricOptimizer] = []
    # NOTE(review): the three signature-only defs below look like typing overloads whose
    # `@overload` decorators were stripped in this view — confirm against upstream.
    def optimizers(
        self, use_pl_optimizer: Literal[True] = True
    ) -> Union[LightningOptimizer, List[LightningOptimizer]]: ...
    def optimizers(self, use_pl_optimizer: Literal[False]) -> Union[Optimizer, List[Optimizer]]: ...
    def optimizers(self, use_pl_optimizer: bool) -> MODULE_OPTIMIZERS: ...
    def optimizers(self, use_pl_optimizer: bool = True) -> MODULE_OPTIMIZERS:
        """Returns the optimizer(s) that are being used during training. Useful for manual optimization.

        Args:
            use_pl_optimizer: If ``True``, will wrap the optimizer(s) in a
                :class:`~lightning.pytorch.core.optimizer.LightningOptimizer` for automatic handling of precision,
                profiling, and counting of step calls for proper logging and checkpointing. It specifically wraps the
                ``step`` method and custom optimizers that don't have this method are not supported.

        Returns:
            A single optimizer, or a list of optimizers in case multiple ones are present.
        """
        # Fabric-managed optimizers take priority when the module is attached to Fabric
        if self._fabric:
            opts: MODULE_OPTIMIZERS = self._fabric_optimizers
        elif use_pl_optimizer:
            opts = self.trainer.strategy._lightning_optimizers
        else:
            opts = self.trainer.optimizers
        # single optimizer: unwrap the one-element list for caller convenience
        if (
            isinstance(opts, list)
            and len(opts) == 1
            and isinstance(opts[0], (Optimizer, LightningOptimizer, _FabricOptimizer))
        ):
            return opts[0]
        # multiple opts
        return opts
def lr_schedulers(self) -> Union[None, List[LRSchedulerPLType], LRSchedulerPLType]:
"""Returns the learning rate scheduler(s) that are being used during training. Useful for manual optimization.
Returns:
A single scheduler, or a list of schedulers in case multiple ones are present, or ``None`` if no
schedulers were returned in :meth:`~lightning.pytorch.core.LightningModule.configure_optimizers`.
"""
if not self.trainer.lr_scheduler_configs:
return None
# ignore other keys "interval", "frequency", etc.
lr_schedulers: List[LRSchedulerPLType] = [config.scheduler for config in self.trainer.lr_scheduler_configs]
# single scheduler
if len(lr_schedulers) == 1:
return lr_schedulers[0]
# multiple schedulers
return lr_schedulers
    # NOTE(review): paired getter/setter pattern — the `@property` decorator appears
    # stripped in this view; confirm against upstream.
    def trainer(self) -> "pl.Trainer":
        """The :class:`~lightning.pytorch.trainer.trainer.Trainer` this module is attached to.

        Raises:
            RuntimeError: If no Trainer is attached (and we are not scripting with JIT).
        """
        # When running under Fabric, present a shim that mimics the Trainer interface
        if self._fabric is not None:
            return _TrainerFabricShim(fabric=self._fabric)  # type: ignore[return-value]
        if not self._jit_is_scripting and self._trainer is None:
            raise RuntimeError(f"{self.__class__.__qualname__} is not attached to a `Trainer`.")
        return self._trainer  # type: ignore[return-value]
    def trainer(self, trainer: Optional["pl.Trainer"]) -> None:
        """Attach (or detach with ``None``) the Trainer, propagating to child LightningModules."""
        for v in self.children():
            if isinstance(v, LightningModule):
                v.trainer = trainer  # type: ignore[assignment]
        # Store a weakref proxy on older PyTorch to break the Trainer<->module reference cycle
        # https://github.com/pytorch/pytorch/issues/95857
        if not _TORCH_GREATER_EQUAL_2_0 and trainer is not None and not isinstance(trainer, weakref.ProxyTypes):
            trainer = weakref.proxy(trainer)
        self._trainer = trainer
    # NOTE(review): `@property` / setter decorators appear stripped in this view.
    def fabric(self) -> Optional["lf.Fabric"]:
        """The :class:`~lightning.fabric.Fabric` this module is attached to, if any."""
        return self._fabric

    def fabric(self, fabric: Optional["lf.Fabric"]) -> None:
        """Attach (or detach with ``None``) Fabric, propagating to child LightningModules."""
        for v in self.children():
            if isinstance(v, LightningModule):
                v.fabric = fabric
        # keep a weakref proxy to avoid a reference cycle between Fabric and the module
        if fabric is not None and not isinstance(fabric, weakref.ProxyTypes):
            fabric = weakref.proxy(fabric)
        self._fabric = fabric
    def example_input_array(self) -> Optional[Union[Tensor, Tuple, Dict]]:
        """The example input array is a specification of what the module can consume in the :meth:`forward` method. The
        return type is interpreted as follows:

        - Single tensor: It is assumed the model takes a single argument, i.e.,
          ``model.forward(model.example_input_array)``
        - Tuple: The input array should be interpreted as a sequence of positional arguments, i.e.,
          ``model.forward(*model.example_input_array)``
        - Dict: The input array represents named keyword arguments, i.e.,
          ``model.forward(**model.example_input_array)``
        """
        return self._example_input_array

    def example_input_array(self, example: Optional[Union[Tensor, Tuple, Dict]]) -> None:
        # plain assignment; validation happens at the point of use (e.g., model summary / ONNX export)
        self._example_input_array = example
    def current_epoch(self) -> int:
        """The current epoch in the ``Trainer``, or 0 if not attached."""
        return self.trainer.current_epoch if self._trainer else 0

    def global_step(self) -> int:
        """Total training batches seen across all epochs.

        If no Trainer is attached, this property is 0.
        """
        return self.trainer.global_step if self._trainer else 0

    def global_rank(self) -> int:
        """The index of the current process across all nodes and devices (0 if no Trainer attached)."""
        return self.trainer.global_rank if self._trainer else 0

    def local_rank(self) -> int:
        """The index of the current process within a single node (0 if no Trainer attached)."""
        return self.trainer.local_rank if self._trainer else 0

    def on_gpu(self) -> bool:
        """Returns ``True`` if this model is currently located on a GPU.

        Useful to set flags around the LightningModule for different CPU vs GPU behavior.
        """
        return self.device.type == "cuda"
    def automatic_optimization(self) -> bool:
        """If set to ``False`` you are responsible for calling ``.backward()``, ``.step()``, ``.zero_grad()``."""
        return self._automatic_optimization

    def automatic_optimization(self, automatic_optimization: bool) -> None:
        self._automatic_optimization = automatic_optimization

    def strict_loading(self) -> bool:
        """Determines how Lightning loads this model using `.load_state_dict(..., strict=model.strict_loading)`."""
        # We use None as the default internally to determine whether the user has set a value;
        # an unset value (None) is reported as strict (True).
        return self._strict_loading in (None, True)

    def strict_loading(self, strict_loading: bool) -> None:
        self._strict_loading = strict_loading
    def logger(self) -> Optional[Union[Logger, FabricLogger]]:
        """Reference to the logger object in the Trainer (or Fabric, when attached to Fabric)."""
        if self._fabric is not None:
            return self._fabric.logger
        return self._trainer.logger if self._trainer is not None else None

    def loggers(self) -> Union[List[Logger], List[FabricLogger]]:
        """Reference to the list of loggers in the Trainer (or Fabric); empty list if unattached."""
        if self._fabric is not None:
            return self._fabric.loggers
        if self._trainer is not None:
            return self._trainer.loggers
        return []
    def _call_batch_hook(self, hook_name: str, *args: Any) -> Any:
        """Dispatch a batch hook to either this module or the datamodule, whichever overrides it.

        Falls back to calling the hook directly on ``self`` when no Trainer is attached.
        """
        trainer = self._trainer
        if trainer:
            datahook_selector = trainer._data_connector._datahook_selector
            assert datahook_selector is not None
            obj = datahook_selector.get_instance(hook_name)
            # route through the Trainer's call machinery so profiling/hook bookkeeping applies
            if isinstance(obj, self.__class__):
                trainer_method = call._call_lightning_module_hook
            else:
                trainer_method = call._call_lightning_datamodule_hook
            return trainer_method(trainer, hook_name, *args)
        hook = getattr(self, hook_name)
        return hook(*args)

    def _on_before_batch_transfer(self, batch: Any, dataloader_idx: int = 0) -> Any:
        # thin wrapper so the hook goes through the module/datamodule dispatch above
        return self._call_batch_hook("on_before_batch_transfer", batch, dataloader_idx)

    def _apply_batch_transfer_handler(
        self, batch: Any, device: Optional[torch.device] = None, dataloader_idx: int = 0
    ) -> Any:
        """Move the batch to ``device`` and run the post-transfer hook, in that order."""
        device = device or self.device
        batch = self._call_batch_hook("transfer_batch_to_device", batch, device, dataloader_idx)
        batch = self._call_batch_hook("on_after_batch_transfer", batch, dataloader_idx)
        return batch
def print(self, *args: Any, **kwargs: Any) -> None:
r"""Prints only from process 0. Use this in any distributed mode to log only once.
Args:
*args: The thing to print. The same as for Python's built-in print function.
**kwargs: The same as for Python's built-in print function.
Example::
def forward(self, x):
self.print(x, 'in forward')
"""
if self.trainer.is_global_zero:
progress_bar = self.trainer.progress_bar_callback
if progress_bar is not None and progress_bar.is_enabled:
progress_bar.print(*args, **kwargs)
else:
print(*args, **kwargs)
    def log(
        self,
        name: str,
        value: _METRIC,
        prog_bar: bool = False,
        logger: Optional[bool] = None,
        on_step: Optional[bool] = None,
        on_epoch: Optional[bool] = None,
        reduce_fx: Union[str, Callable] = "mean",
        enable_graph: bool = False,
        sync_dist: bool = False,
        sync_dist_group: Optional[Any] = None,
        add_dataloader_idx: bool = True,
        batch_size: Optional[int] = None,
        metric_attribute: Optional[str] = None,
        rank_zero_only: bool = False,
    ) -> None:
        """Log a key, value pair.

        Example::

            self.log('train_loss', loss)

        The default behavior per hook is documented here: :ref:`extensions/logging:Automatic Logging`.

        Args:
            name: key to log.
            value: value to log. Can be a ``float``, ``Tensor``, or a ``Metric``.
            prog_bar: if ``True`` logs to the progress bar.
            logger: if ``True`` logs to the logger.
            on_step: if ``True`` logs at this step. The default value is determined by the hook.
                See :ref:`extensions/logging:Automatic Logging` for details.
            on_epoch: if ``True`` logs epoch accumulated metrics. The default value is determined by the hook.
                See :ref:`extensions/logging:Automatic Logging` for details.
            reduce_fx: reduction function over step values for end of epoch. :meth:`torch.mean` by default.
            enable_graph: if ``True``, will not auto detach the graph.
            sync_dist: if ``True``, reduces the metric across devices. Use with care as this may lead to a significant
                communication overhead.
            sync_dist_group: the DDP group to sync across.
            add_dataloader_idx: if ``True``, appends the index of the current dataloader to
                the name (when using multiple dataloaders). If False, user needs to give unique names for
                each dataloader to not mix the values.
            batch_size: Current batch_size. This will be directly inferred from the loaded batch,
                but for some data structures you might need to explicitly provide it.
            metric_attribute: To restore the metric state, Lightning requires the reference of the
                :class:`torchmetrics.Metric` in your model. This is found automatically if it is a model attribute.
            rank_zero_only: Tells Lightning if you are calling ``self.log`` from every process (default) or only from
                rank 0. If ``True``, you won't be able to use this metric as a monitor in callbacks
                (e.g., early stopping). Warning: Improper use can lead to deadlocks! See
                :ref:`Advanced Logging <visualize/logging_advanced:rank_zero_only>` for more details.

        """
        # under Fabric there is no result collection: forward directly to Fabric's loggers
        if self._fabric is not None:
            self._log_dict_through_fabric(dictionary={name: value}, logger=logger)
            return
        # check for invalid values
        apply_to_collection(value, dict, self.__check_not_nested, name)
        apply_to_collection(
            value, object, self.__check_allowed, name, value, wrong_dtype=(numbers.Number, Metric, Tensor)
        )
        trainer = self._trainer
        if trainer is None:
            # not an error to support testing the `*_step` methods without a `Trainer` reference
            rank_zero_warn(
                "You are trying to `self.log()` but the `self.trainer` reference is not registered on the model yet."
                " This is most likely because the model hasn't been passed to the `Trainer`"
            )
            return
        if trainer.barebones:
            # barebones mode disables logging entirely for raw-speed benchmarking
            rank_zero_warn(
                "You are trying to `self.log()` but `Trainer(barebones=True)` is configured."
                " Logging can impact raw speed so it is disabled under this setting."
            )
            return
        results = trainer._results
        if results is None:
            raise MisconfigurationException(
                "You are trying to `self.log()` but the loop's result collection is not registered"
                " yet. This is most likely because you are trying to log in a `predict` hook,"
                " but it doesn't support logging"
            )
        if self._current_fx_name is None:
            raise MisconfigurationException(
                "You are trying to `self.log()` but it is not managed by the `Trainer` control flow"
            )
        # validate the hook supports logging and fill in per-hook on_step/on_epoch defaults
        on_step, on_epoch = _FxValidator.check_logging_and_get_default_levels(
            self._current_fx_name, on_step=on_step, on_epoch=on_epoch
        )
        # make sure user doesn't introduce logic for multi-dataloaders
        if "/dataloader_idx_" in name:
            raise MisconfigurationException(
                f"You called `self.log` with the key `{name}`"
                " but it should not contain information about `dataloader_idx`"
            )
        # convert raw numbers/tensors to detached scalar tensors on the module's device
        value = apply_to_collection(value, (Tensor, numbers.Number), self.__to_tensor, name)
        if trainer._logger_connector.should_reset_tensors(self._current_fx_name):
            # if we started a new epoch (running its first batch) the hook name has changed
            # reset any tensors for the new hook name
            results.reset(metrics=False, fx=self._current_fx_name)
        if metric_attribute is None and isinstance(value, Metric):
            if self._metric_attributes is None:
                # compute once: map id(Metric) -> attribute name for all Metric submodules
                self._metric_attributes = {
                    id(module): name for name, module in self.named_modules() if isinstance(module, Metric)
                }
                if not self._metric_attributes:
                    raise MisconfigurationException(
                        "Could not find the `LightningModule` attribute for the `torchmetrics.Metric` logged."
                        " You can fix this by setting an attribute for the metric in your `LightningModule`."
                    )
            # try to find the passed metric in the LightningModule
            metric_attribute = self._metric_attributes.get(id(value), None)
            if metric_attribute is None:
                raise MisconfigurationException(
                    "Could not find the `LightningModule` attribute for the `torchmetrics.Metric` logged."
                    f" You can fix this by calling `self.log({name}, ..., metric_attribute=name)` where `name` is one"
                    f" of {list(self._metric_attributes.values())}"
                )
        # with `dataloader_iter` the batch size cannot be inferred from the batch: require it explicitly
        if (
            trainer.training
            and is_param_in_hook_signature(self.training_step, "dataloader_iter", explicit=True)
            and batch_size is None
        ):
            raise MisconfigurationException(
                "With `def training_step(self, dataloader_iter)`, `self.log(..., batch_size=...)` should be provided."
            )
        if logger and trainer.logger is None:
            rank_zero_warn(
                f"You called `self.log({name!r}, ..., logger=True)` but have no logger configured. You can enable one"
                " by doing `Trainer(logger=ALogger(...))`"
            )
        if logger is None:
            # we could set false here if there's no configured logger, however, we still need to compute the "logged"
            # metrics anyway because that's what the evaluation loops use as return value
            logger = True
        results.log(
            self._current_fx_name,
            name,
            value,
            prog_bar=prog_bar,
            logger=logger,
            on_step=on_step,
            on_epoch=on_epoch,
            reduce_fx=reduce_fx,  # type: ignore[arg-type]
            enable_graph=enable_graph,
            add_dataloader_idx=add_dataloader_idx,
            batch_size=batch_size,
            sync_dist=sync_dist and trainer._accelerator_connector.is_distributed,
            sync_dist_fn=trainer.strategy.reduce,
            sync_dist_group=sync_dist_group,
            metric_attribute=metric_attribute,
            rank_zero_only=rank_zero_only,
        )
        # remember which hook logged last so the connector can detect hook transitions
        trainer._logger_connector._current_fx = self._current_fx_name
    def log_dict(
        self,
        dictionary: Union[Mapping[str, _METRIC], MetricCollection],
        prog_bar: bool = False,
        logger: Optional[bool] = None,
        on_step: Optional[bool] = None,
        on_epoch: Optional[bool] = None,
        reduce_fx: Union[str, Callable] = "mean",
        enable_graph: bool = False,
        sync_dist: bool = False,
        sync_dist_group: Optional[Any] = None,
        add_dataloader_idx: bool = True,
        batch_size: Optional[int] = None,
        rank_zero_only: bool = False,
    ) -> None:
        """Log a dictionary of values at once.

        Example::

            values = {'loss': loss, 'acc': acc, ..., 'metric_n': metric_n}
            self.log_dict(values)

        Args:
            dictionary: key value pairs.
                The values can be a ``float``, ``Tensor``, ``Metric``, or ``MetricCollection``.
            prog_bar: if ``True`` logs to the progress base.
            logger: if ``True`` logs to the logger.
            on_step: if ``True`` logs at this step.
                ``None`` auto-logs for training_step but not validation/test_step.
                The default value is determined by the hook.
                See :ref:`extensions/logging:Automatic Logging` for details.
            on_epoch: if ``True`` logs epoch accumulated metrics.
                ``None`` auto-logs for val/test step but not ``training_step``.
                The default value is determined by the hook.
                See :ref:`extensions/logging:Automatic Logging` for details.
            reduce_fx: reduction function over step values for end of epoch. :meth:`torch.mean` by default.
            enable_graph: if ``True``, will not auto-detach the graph
            sync_dist: if ``True``, reduces the metric across GPUs/TPUs. Use with care as this may lead to a significant
                communication overhead.
            sync_dist_group: the ddp group to sync across.
            add_dataloader_idx: if ``True``, appends the index of the current dataloader to
                the name (when using multiple). If ``False``, user needs to give unique names for
                each dataloader to not mix values.
            batch_size: Current batch size. This will be directly inferred from the loaded batch,
                but some data structures might need to explicitly provide it.
            rank_zero_only: Tells Lightning if you are calling ``self.log`` from every process (default) or only from
                rank 0. If ``True``, you won't be able to use this metric as a monitor in callbacks
                (e.g., early stopping). Warning: Improper use can lead to deadlocks! See
                :ref:`Advanced Logging <visualize/logging_advanced:rank_zero_only>` for more details.

        """
        # under Fabric, bypass the per-key loop and forward the whole dict at once
        if self._fabric is not None:
            return self._log_dict_through_fabric(dictionary=dictionary, logger=logger)
        kwargs: Dict[str, bool] = {}
        if isinstance(dictionary, MetricCollection):
            kwargs["keep_base"] = False
            # avoid copying shared compute-group state when torchmetrics supports it
            if _TORCHMETRICS_GREATER_EQUAL_0_9_1 and dictionary._enable_compute_groups:
                kwargs["copy_state"] = False
        # delegate each entry to `self.log` so all validation/defaulting happens in one place
        for k, v in dictionary.items(**kwargs):
            self.log(
                name=k,
                value=v,
                prog_bar=prog_bar,
                logger=logger,
                on_step=on_step,
                on_epoch=on_epoch,
                reduce_fx=reduce_fx,
                enable_graph=enable_graph,
                sync_dist=sync_dist,
                sync_dist_group=sync_dist_group,
                add_dataloader_idx=add_dataloader_idx,
                batch_size=batch_size,
                rank_zero_only=rank_zero_only,
            )
        return None
    def _log_dict_through_fabric(
        self, dictionary: Union[Mapping[str, _METRIC], MetricCollection], logger: Optional[bool] = None
    ) -> None:
        """Forward a metrics dict to Fabric's loggers, validating value types first.

        Raises:
            ValueError: If the dictionary contains nested dictionaries or disallowed value types.
        """
        if logger is False:
            # Passing `logger=False` with Fabric does not make much sense because there is no other destination to
            # log to, but we support it in case the original code was written for Trainer use
            return
        if any(isinstance(v, dict) for v in dictionary.values()):
            raise ValueError(f"`self.log_dict({dictionary})` was called, but nested dictionaries cannot be logged")
        for name, value in dictionary.items():
            # reject any leaf that is not a number or tensor
            apply_to_collection(value, object, self.__check_allowed, name, value, wrong_dtype=(numbers.Number, Tensor))
        assert self._fabric is not None
        self._fabric.log_dict(metrics=dictionary)  # type: ignore[arg-type]
    # NOTE(review): both helpers below take no `self` — they look like `@staticmethod`s
    # whose decorators were stripped in this view; confirm against upstream.
    def __check_not_nested(value: dict, name: str) -> None:
        """Raise if a logged dict contains nested dicts (unsupported by design)."""
        # self-imposed restriction. for simplicity
        if any(isinstance(v, dict) for v in value.values()):
            raise ValueError(f"`self.log({name}, {value})` was called, but nested dictionaries cannot be logged")

    def __check_allowed(v: Any, name: str, value: Any) -> None:
        """Unconditional rejection hook: called by apply_to_collection for any disallowed leaf type."""
        raise ValueError(f"`self.log({name}, {value})` was called, but `{type(v).__name__}` values cannot be logged")
def __to_tensor(self, value: Union[Tensor, numbers.Number], name: str) -> Tensor:
value = (
value.clone().detach()
if isinstance(value, Tensor)
else torch.tensor(value, device=self.device, dtype=_get_default_dtype())
)
if not torch.numel(value) == 1:
raise ValueError(
f"`self.log({name}, {value})` was called, but the tensor must have a single element."
f" You can try doing `self.log({name}, {value}.mean())`"
)
value = value.squeeze()
return value
    def all_gather(
        self, data: Union[Tensor, Dict, List, Tuple], group: Optional[Any] = None, sync_grads: bool = False
    ) -> Union[Tensor, Dict, List, Tuple]:
        r"""Gather tensors or collections of tensors from multiple processes.

        This method needs to be called on all processes and the tensors need to have the same shape across all
        processes, otherwise your program will stall forever.

        Args:
            data: int, float, tensor of shape (batch, ...), or a (possibly nested) collection thereof.
            group: the process group to gather results from. Defaults to all processes (world)
            sync_grads: flag that allows users to synchronize gradients for the all_gather operation

        Return:
            A tensor of shape (world_size, batch, ...), or if the input was a collection
            the output will also be a collection with tensors of this shape. For the special case where
            world_size is 1, no additional dimension is added to the tensor(s).

        """
        group = group if group is not None else torch.distributed.group.WORLD
        all_gather = self.trainer.strategy.all_gather
        # numbers are promoted to tensors on the module's device before gathering
        data = convert_to_tensors(data, device=self.device)
        # apply the strategy's all_gather to every tensor leaf in the (possibly nested) collection
        return apply_to_collection(data, Tensor, all_gather, group=group, sync_grads=sync_grads)
    def forward(self, *args: Any, **kwargs: Any) -> Any:
        r"""Same as :meth:`torch.nn.Module.forward`.

        Args:
            *args: Whatever you decide to pass into the forward method.
            **kwargs: Keyword arguments are also possible.

        Return:
            Your model's output

        """
        # default implementation simply defers to the base Module
        return super().forward(*args, **kwargs)
    def training_step(self, *args: Any, **kwargs: Any) -> STEP_OUTPUT:
        r"""Here you compute and return the training loss and some additional metrics for e.g. the progress bar or
        logger.

        Args:
            batch: The output of your data iterable, normally a :class:`~torch.utils.data.DataLoader`.
            batch_idx: The index of this batch.
            dataloader_idx: The index of the dataloader that produced this batch.
                (only if multiple dataloaders used)

        Return:
            - :class:`~torch.Tensor` - The loss tensor
            - ``dict`` - A dictionary which can include any keys, but must include the key ``'loss'`` in the case of
              automatic optimization.
            - ``None`` - In automatic optimization, this will skip to the next batch (but is not supported for
              multi-GPU, TPU, or DeepSpeed). For manual optimization, this has no special meaning, as returning
              the loss is not required.

        In this step you'd normally do the forward pass and calculate the loss for a batch.
        You can also do fancier things like multiple forward passes or something model specific.

        Example::

            def training_step(self, batch, batch_idx):
                x, y, z = batch
                out = self.encoder(x)
                loss = self.loss(out, x)
                return loss

        To use multiple optimizers, you can switch to 'manual optimization' and control their stepping:

        .. code-block:: python

            def __init__(self):
                super().__init__()
                self.automatic_optimization = False


            # Multiple optimizers (e.g.: GANs)
            def training_step(self, batch, batch_idx):
                opt1, opt2 = self.optimizers()

                # do training_step with encoder
                ...
                opt1.step()
                # do training_step with decoder
                ...
                opt2.step()

        Note:
            When ``accumulate_grad_batches`` > 1, the loss returned here will be automatically
            normalized by ``accumulate_grad_batches`` internally.

        """
        # base implementation only warns: subclasses must override to train with the Trainer
        rank_zero_warn("`training_step` must be implemented to be used with the Lightning Trainer")
    def validation_step(self, *args: Any, **kwargs: Any) -> STEP_OUTPUT:
        r"""Operates on a single batch of data from the validation set. In this step you might generate examples or
        calculate anything of interest like accuracy.

        Args:
            batch: The output of your data iterable, normally a :class:`~torch.utils.data.DataLoader`.
            batch_idx: The index of this batch.
            dataloader_idx: The index of the dataloader that produced this batch.
                (only if multiple dataloaders used)

        Return:
            - :class:`~torch.Tensor` - The loss tensor
            - ``dict`` - A dictionary. Can include any keys, but must include the key ``'loss'``.
            - ``None`` - Skip to the next batch.

        .. code-block:: python

            # if you have one val dataloader:
            def validation_step(self, batch, batch_idx): ...


            # if you have multiple val dataloaders:
            def validation_step(self, batch, batch_idx, dataloader_idx=0): ...

        Examples::

            # CASE 1: A single validation dataset
            def validation_step(self, batch, batch_idx):
                x, y = batch

                # implement your own
                out = self(x)
                loss = self.loss(out, y)

                # log 6 example images
                # or generated text... or whatever
                sample_imgs = x[:6]
                grid = torchvision.utils.make_grid(sample_imgs)
                self.logger.experiment.add_image('example_images', grid, 0)

                # calculate acc
                labels_hat = torch.argmax(out, dim=1)
                val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)

                # log the outputs!
                self.log_dict({'val_loss': loss, 'val_acc': val_acc})

        If you pass in multiple val dataloaders, :meth:`validation_step` will have an additional argument. We recommend
        setting the default value of 0 so that you can quickly switch between single and multiple dataloaders.

        .. code-block:: python

            # CASE 2: multiple validation dataloaders
            def validation_step(self, batch, batch_idx, dataloader_idx=0):
                # dataloader_idx tells you which dataset this is.
                ...

        Note:
            If you don't need to validate you don't need to implement this method.

        Note:
            When the :meth:`validation_step` is called, the model has been put in eval mode
            and PyTorch gradients have been disabled. At the end of validation,
            the model goes back to training mode and gradients are enabled.

        """
    def test_step(self, *args: Any, **kwargs: Any) -> STEP_OUTPUT:
        r"""Operates on a single batch of data from the test set. In this step you'd normally generate examples or
        calculate anything of interest such as accuracy.

        Args:
            batch: The output of your data iterable, normally a :class:`~torch.utils.data.DataLoader`.
            batch_idx: The index of this batch.
            dataloader_idx: The index of the dataloader that produced this batch.
                (only if multiple dataloaders used)

        Return:
            - :class:`~torch.Tensor` - The loss tensor
            - ``dict`` - A dictionary. Can include any keys, but must include the key ``'loss'``.
            - ``None`` - Skip to the next batch.

        .. code-block:: python

            # if you have one test dataloader:
            def test_step(self, batch, batch_idx): ...


            # if you have multiple test dataloaders:
            def test_step(self, batch, batch_idx, dataloader_idx=0): ...

        Examples::

            # CASE 1: A single test dataset
            def test_step(self, batch, batch_idx):
                x, y = batch

                # implement your own
                out = self(x)
                loss = self.loss(out, y)

                # log 6 example images
                # or generated text... or whatever
                sample_imgs = x[:6]
                grid = torchvision.utils.make_grid(sample_imgs)
                self.logger.experiment.add_image('example_images', grid, 0)

                # calculate acc
                labels_hat = torch.argmax(out, dim=1)
                test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)

                # log the outputs!
                self.log_dict({'test_loss': loss, 'test_acc': test_acc})

        If you pass in multiple test dataloaders, :meth:`test_step` will have an additional argument. We recommend
        setting the default value of 0 so that you can quickly switch between single and multiple dataloaders.

        .. code-block:: python

            # CASE 2: multiple test dataloaders
            def test_step(self, batch, batch_idx, dataloader_idx=0):
                # dataloader_idx tells you which dataset this is.
                ...

        Note:
            If you don't need to test you don't need to implement this method.

        Note:
            When the :meth:`test_step` is called, the model has been put in eval mode and
            PyTorch gradients have been disabled. At the end of the test epoch, the model goes back
            to training mode and gradients are enabled.

        """
def predict_step(self, *args: Any, **kwargs: Any) -> Any:
"""Step function called during :meth:`~lightning.pytorch.trainer.trainer.Trainer.predict`. By default, it calls
:meth:`~lightning.pytorch.core.LightningModule.forward`. Override to add any processing logic.
The :meth:`~lightning.pytorch.core.LightningModule.predict_step` is used
to scale inference on multi-devices.
To prevent an OOM error, it is possible to use :class:`~lightning.pytorch.callbacks.BasePredictionWriter`
callback to write the predictions to disk or database after each batch or on epoch end.
The :class:`~lightning.pytorch.callbacks.BasePredictionWriter` should be used while using a spawn
based accelerator. This happens for ``Trainer(strategy="ddp_spawn")``
or training on 8 TPU cores with ``Trainer(accelerator="tpu", devices=8)`` as predictions won't be returned.
Args:
batch: The output of your data iterable, normally a :class:`~torch.utils.data.DataLoader`.
batch_idx: The index of this batch.
dataloader_idx: The index of the dataloader that produced this batch.
(only if multiple dataloaders used)
Return:
Predicted output (optional).
Example ::
class MyModel(LightningModule):
def predict_step(self, batch, batch_idx, dataloader_idx=0):
return self(batch)
dm = ...
model = MyModel()
trainer = Trainer(accelerator="gpu", devices=2)
predictions = trainer.predict(model, dm)
"""
# For backwards compatibility
batch = kwargs.get("batch", args[0])
return self(batch)
    def configure_callbacks(self) -> Union[Sequence[Callback], Callback]:
        """Configure model-specific callbacks. When the model gets attached, e.g., when ``.fit()`` or ``.test()`` gets
        called, the list or a callback returned here will be merged with the list of callbacks passed to the Trainer's
        ``callbacks`` argument. If a callback returned here has the same type as one or several callbacks already
        present in the Trainer's callbacks list, it will take priority and replace them. In addition, Lightning will
        make sure :class:`~lightning.pytorch.callbacks.model_checkpoint.ModelCheckpoint` callbacks run last.

        Return:
            A callback or a list of callbacks which will extend the list of callbacks in the Trainer.

        Example::

            def configure_callbacks(self):
                early_stop = EarlyStopping(monitor="val_acc", mode="max")
                checkpoint = ModelCheckpoint(monitor="val_loss")
                return [early_stop, checkpoint]

        """
        # default: no model-specific callbacks
        return []
def configure_optimizers(self) -> OptimizerLRScheduler:
    r"""Choose what optimizers and learning-rate schedulers to use in your optimization.

    Normally you'd need one, but for GANs and similar setups you might have multiple.
    Optimization with multiple optimizers only works in the manual optimization mode.

    Return:
        Any of these 6 options.

        - **Single optimizer**.
        - **List or Tuple** of optimizers.
        - **Two lists** - The first list has multiple optimizers, and the second has multiple LR schedulers
          (or multiple ``lr_scheduler_config``).
        - **Dictionary**, with an ``"optimizer"`` key, and (optionally) a ``"lr_scheduler"``
          key whose value is a single LR scheduler or ``lr_scheduler_config``.
        - **None** - Fit will run without any optimizer.

    An ``lr_scheduler_config`` is a dictionary describing the scheduler and how to run it,
    with the keys ``"scheduler"`` (required, the scheduler instance), ``"interval"``
    (``"epoch"`` or ``"step"``), ``"frequency"`` (how many epochs/steps between
    ``scheduler.step()`` calls), ``"monitor"`` (metric name for schedulers such as
    ``ReduceLROnPlateau``), ``"strict"`` (whether a missing monitored value stops training
    or only warns) and ``"name"`` (custom name for the ``LearningRateMonitor`` callback).

    For schedulers whose ``.step()`` is conditioned on a value, such as
    :class:`torch.optim.lr_scheduler.ReduceLROnPlateau`, Lightning requires the
    ``lr_scheduler_config`` to contain ``"monitor"`` set to the metric name the scheduler
    should be conditioned on. Metrics are made available to monitor by logging them with
    ``self.log('metric_to_track', metric_val)`` in your
    :class:`~lightning.pytorch.core.LightningModule`.

    Note:
        Some things to know:

        - Lightning calls ``.backward()`` and ``.step()`` automatically in case of automatic optimization.
        - If a learning rate scheduler is specified in ``configure_optimizers()`` with key
          ``"interval"`` (default "epoch") in the scheduler configuration, Lightning will call
          the scheduler's ``.step()`` method automatically in case of automatic optimization.
        - If you use 16-bit precision (``precision=16``), Lightning will automatically handle the optimizer.
        - If you use :class:`torch.optim.LBFGS`, Lightning handles the closure function automatically for you.
        - If you use multiple optimizers, you will have to switch to 'manual optimization' mode and step them
          yourself.
        - If you need to control how often the optimizer steps, override the :meth:`optimizer_step` hook.
    """
    # Base implementation is a stub: it only warns, so subclasses that never train
    # (e.g. inference-only use) are not forced to override it.
    rank_zero_warn("`configure_optimizers` must be implemented to be used with the Lightning Trainer")
def manual_backward(self, loss: Tensor, *args: Any, **kwargs: Any) -> None:
    """Run the backward pass yourself during manual optimization.

    Call this directly from your :meth:`training_step` instead of ``loss.backward()``
    so that Lightning can apply the proper scaling when using mixed precision.

    See :ref:`manual optimization<common/optimization:Manual optimization>` for more examples.

    Example::

        def training_step(...):
            opt = self.optimizers()
            loss = ...
            opt.zero_grad()
            # automatically applies scaling, etc...
            self.manual_backward(loss)
            opt.step()

    Args:
        loss: The tensor on which to compute gradients. Must have a graph attached.
        *args: Additional positional arguments to be forwarded to :meth:`~torch.Tensor.backward`
        **kwargs: Additional keyword arguments to be forwarded to :meth:`~torch.Tensor.backward`
    """
    fabric = self._fabric
    if not fabric:
        # Outside Fabric: enforce manual optimization and route through the strategy,
        # which applies precision scaling before calling backward.
        self._verify_is_manual_optimization("manual_backward")
        self.trainer.strategy.backward(loss, None, *args, **kwargs)
    else:
        fabric.backward(loss, *args, **kwargs)
def backward(self, loss: Tensor, *args: Any, **kwargs: Any) -> None:
    """Perform backward on the loss returned in :meth:`training_step`.

    Override this hook with your own implementation if you need to.

    Args:
        loss: The loss tensor returned by :meth:`training_step`. If gradient accumulation is used, the loss here
            holds the normalized value (scaled by 1 / accumulation steps).

    Example::

        def backward(self, loss):
            loss.backward()
    """
    if not self._fabric:
        loss.backward(*args, **kwargs)
        return
    # When running under Fabric, it owns precision scaling and the backward call.
    self._fabric.backward(loss, *args, **kwargs)
def toggle_optimizer(self, optimizer: Union[Optimizer, LightningOptimizer]) -> None:
    """Restrict gradient computation to the parameters of the given optimizer.

    Makes sure only the gradients of the current optimizer's parameters are calculated
    in the training step, to prevent dangling gradients in a multiple-optimizer setup.
    Works together with :meth:`untoggle_optimizer`, which restores the saved
    ``requires_grad`` state.

    Args:
        optimizer: The optimizer to toggle.
    """
    # Snapshot every parameter's `requires_grad` across all optimizers and turn
    # gradients off. First occurrence wins, so flags pre-set during
    # `configure_optimizers` are preserved for parameters shared between optimizers.
    saved_state = {}
    for opt in self.trainer.optimizers:
        for group in opt.param_groups:
            for param in group["params"]:
                if param not in saved_state:
                    saved_state[param] = param.requires_grad
                    param.requires_grad = False

    # Re-enable gradients only for the toggled optimizer's parameters,
    # restoring each parameter's original flag.
    for group in optimizer.param_groups:
        for param in group["params"]:
            param.requires_grad = saved_state[param]
    self._param_requires_grad_state = saved_state
def untoggle_optimizer(self, optimizer: Union[Optimizer, LightningOptimizer]) -> None:
    """Restore the ``requires_grad`` flags that were saved by :meth:`toggle_optimizer`.

    Args:
        optimizer: The optimizer to untoggle.
    """
    saved_state = self._param_requires_grad_state
    for opt in self.trainer.optimizers:
        # Skip the optimizer that is currently toggled (it already has the
        # correct flags); unwrap LightningOptimizer for the identity check.
        is_toggled = opt is optimizer or (
            isinstance(optimizer, LightningOptimizer) and opt is optimizer.optimizer
        )
        if is_toggled:
            continue
        for group in opt.param_groups:
            for param in group["params"]:
                if param in saved_state:
                    param.requires_grad = saved_state[param]
    # Drop the snapshot to save memory.
    self._param_requires_grad_state = {}
def clip_gradients(
    self,
    optimizer: Optimizer,
    gradient_clip_val: Optional[Union[int, float]] = None,
    gradient_clip_algorithm: Optional[str] = None,
) -> None:
    """Handles gradient clipping internally.

    Note:
        - Do not override this method. If you want to customize gradient clipping, consider using
          :meth:`configure_gradient_clipping` method.
        - For manual optimization (``self.automatic_optimization = False``), if you want to use
          gradient clipping, consider calling
          ``self.clip_gradients(opt, gradient_clip_val=0.5, gradient_clip_algorithm="norm")``
          manually in the training step.

    Args:
        optimizer: Current optimizer being used.
        gradient_clip_val: The value at which to clip gradients.
        gradient_clip_algorithm: The gradient clipping algorithm to use. Pass ``gradient_clip_algorithm="value"``
            to clip by value, and ``gradient_clip_algorithm="norm"`` to clip by norm.

    Raises:
        MisconfigurationException: If a value/algorithm is passed here that conflicts with what was
            configured on the Trainer, or if the algorithm name is not supported.
        TypeError: If ``gradient_clip_val`` is not an int or float.
    """
    if self.fabric is not None:
        # Under Fabric, delegate entirely: the "value" algorithm maps to `clip_val`,
        # any other algorithm maps to `max_norm`.
        self.fabric.clip_gradients(
            self,
            optimizer,
            clip_val=gradient_clip_val if gradient_clip_algorithm == GradClipAlgorithmType.VALUE else None,
            max_norm=None if gradient_clip_algorithm == GradClipAlgorithmType.VALUE else gradient_clip_val,
        )
        return

    # Resolve the clip value: fall back to the Trainer's setting (or 0.0) when not
    # given, and reject a value that conflicts with the Trainer's configuration.
    if gradient_clip_val is None:
        gradient_clip_val = self.trainer.gradient_clip_val or 0.0
    elif self.trainer.gradient_clip_val is not None and self.trainer.gradient_clip_val != gradient_clip_val:
        raise MisconfigurationException(
            f"You have set `Trainer(gradient_clip_val={self.trainer.gradient_clip_val!r})`"
            f" and have passed `clip_gradients(gradient_clip_val={gradient_clip_val!r})`."
            " Please use only one of them."
        )

    # Same resolution for the algorithm: Trainer setting (default "norm") vs. the
    # explicit argument, with the same conflict check.
    if gradient_clip_algorithm is None:
        gradient_clip_algorithm = self.trainer.gradient_clip_algorithm or "norm"
    else:
        gradient_clip_algorithm = gradient_clip_algorithm.lower()
        if (
            self.trainer.gradient_clip_algorithm is not None
            and self.trainer.gradient_clip_algorithm != gradient_clip_algorithm
        ):
            raise MisconfigurationException(
                f"You have set `Trainer(gradient_clip_algorithm={self.trainer.gradient_clip_algorithm.value!r})`"
                f" and have passed `clip_gradients(gradient_clip_algorithm={gradient_clip_algorithm!r})"
                " Please use only one of them."
            )

    if not isinstance(gradient_clip_val, (int, float)):
        raise TypeError(f"`gradient_clip_val` should be an int or a float. Got {gradient_clip_val}.")

    if not GradClipAlgorithmType.supported_type(gradient_clip_algorithm.lower()):
        raise MisconfigurationException(
            f"`gradient_clip_algorithm` {gradient_clip_algorithm} is invalid."
            f" Allowed algorithms: {GradClipAlgorithmType.supported_types()}."
        )

    # Hand off the actual clipping to the precision plugin, which knows how to
    # unscale gradients first when mixed precision is active.
    gradient_clip_algorithm = GradClipAlgorithmType(gradient_clip_algorithm)
    self.trainer.precision_plugin.clip_gradients(optimizer, gradient_clip_val, gradient_clip_algorithm)
def configure_gradient_clipping(
    self,
    optimizer: Optimizer,
    gradient_clip_val: Optional[Union[int, float]] = None,
    gradient_clip_algorithm: Optional[str] = None,
) -> None:
    """Perform gradient clipping for the optimizer parameters. Called before :meth:`optimizer_step`.

    Args:
        optimizer: Current optimizer being used.
        gradient_clip_val: The value at which to clip gradients. By default, value passed in Trainer
            will be available here.
        gradient_clip_algorithm: The gradient clipping algorithm to use. By default, value
            passed in Trainer will be available here.

    Example::

        def configure_gradient_clipping(self, optimizer, gradient_clip_val, gradient_clip_algorithm):
            # Implement your own custom logic to clip gradients
            # You can call `self.clip_gradients` with your settings:
            self.clip_gradients(
                optimizer,
                gradient_clip_val=gradient_clip_val,
                gradient_clip_algorithm=gradient_clip_algorithm
            )
    """
    # Default behaviour: delegate to the built-in clipping with the given settings.
    self.clip_gradients(
        optimizer,
        gradient_clip_val=gradient_clip_val,
        gradient_clip_algorithm=gradient_clip_algorithm,
    )
def lr_scheduler_step(self, scheduler: LRSchedulerTypeUnion, metric: Optional[Any]) -> None:
    r"""Override this method to adjust how the Trainer calls each scheduler.

    By default, Lightning calls ``step()`` as shown in the example for each scheduler
    based on its ``interval``.

    Args:
        scheduler: Learning rate scheduler.
        metric: Value of the monitor used for schedulers like ``ReduceLROnPlateau``.

    Examples::

        # DEFAULT
        def lr_scheduler_step(self, scheduler, metric):
            if metric is None:
                scheduler.step()
            else:
                scheduler.step(metric)

        # Alternative way to update schedulers if it requires an epoch value
        def lr_scheduler_step(self, scheduler, metric):
            scheduler.step(epoch=self.current_epoch)
    """
    # Monitored schedulers (e.g. ReduceLROnPlateau) receive the metric value;
    # plain schedulers are stepped without arguments.
    if metric is not None:
        scheduler.step(metric)
    else:
        scheduler.step()  # type: ignore[call-arg]
def optimizer_step(
    self,
    epoch: int,
    batch_idx: int,
    optimizer: Union[Optimizer, LightningOptimizer],
    optimizer_closure: Optional[Callable[[], Any]] = None,
) -> None:
    r"""Override this method to adjust the default way the Trainer calls the optimizer.

    By default, Lightning calls ``step()`` and ``zero_grad()`` as shown in the example.
    This method (and ``zero_grad()``) won't be called during the accumulation phase when
    ``Trainer(accumulate_grad_batches != 1)``. Overriding this hook has no benefit with
    manual optimization.

    Args:
        epoch: Current epoch
        batch_idx: Index of current batch
        optimizer: A PyTorch optimizer
        optimizer_closure: The optimizer closure. This closure must be executed as it includes the
            calls to ``training_step()``, ``optimizer.zero_grad()``, and ``backward()``.

    Examples::

        def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_closure):
            # Add your custom logic to run directly before `optimizer.step()`

            optimizer.step(closure=optimizer_closure)

            # Add your custom logic to run directly after `optimizer.step()`
    """
    # The closure performs training_step/zero_grad/backward, so it must always
    # be handed to the optimizer.
    optimizer.step(closure=optimizer_closure)
def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer) -> None:
    """Override this method to change the default behaviour of ``optimizer.zero_grad()``.

    Args:
        epoch: Current epoch
        batch_idx: Index of current batch
        optimizer: A PyTorch optimizer

    Examples::

        # DEFAULT
        def optimizer_zero_grad(self, epoch, batch_idx, optimizer):
            optimizer.zero_grad()

        # Set gradients to `None` instead of zero to improve performance (not required on `torch>=2.0.0`).
        def optimizer_zero_grad(self, epoch, batch_idx, optimizer):
            optimizer.zero_grad(set_to_none=True)

    See :meth:`torch.optim.Optimizer.zero_grad` for the explanation of the above example.
    """
    optimizer.zero_grad()
def freeze(self) -> None:
    r"""Freeze all parameters and switch to evaluation mode, for inference.

    Example::

        model = MyLightningModule(...)
        model.freeze()
    """
    self.eval()
    # Disable gradient tracking on every parameter.
    for param in self.parameters():
        param.requires_grad = False
def unfreeze(self) -> None:
    """Unfreeze all parameters and switch back to training mode.

    .. code-block:: python

        model = MyLightningModule(...)
        model.unfreeze()
    """
    self.train()
    # Re-enable gradient tracking on every parameter.
    for param in self.parameters():
        param.requires_grad = True
def _verify_is_manual_optimization(self, fn_name: str) -> None:
    """Raise if ``fn_name`` is used while automatic optimization is enabled.

    Args:
        fn_name: Name of the caller, included in the error message.
    """
    if not self.automatic_optimization:
        return
    raise MisconfigurationException(
        f"to use {fn_name}, please disable automatic optimization:"
        " set model property `automatic_optimization` as False"
    )
def to_onnx(self, file_path: Union[str, Path], input_sample: Optional[Any] = None, **kwargs: Any) -> None:
    """Saves the model in ONNX format.

    Args:
        file_path: The path of the file the onnx model should be saved to.
        input_sample: An input for tracing. Default: None (Use self.example_input_array)
        **kwargs: Will be passed to torch.onnx.export function.

    Raises:
        ModuleNotFoundError: If ``torch>=2.0`` is used without the ``onnx`` package installed.
        ValueError: If neither ``input_sample`` nor ``self.example_input_array`` is available.

    Example::

        class SimpleModel(LightningModule):
            def __init__(self):
                super().__init__()
                self.l1 = torch.nn.Linear(in_features=64, out_features=4)

            def forward(self, x):
                return torch.relu(self.l1(x.view(x.size(0), -1)))

        model = SimpleModel()
        input_sample = torch.randn(1, 64)
        model.to_onnx("export.onnx", input_sample, export_params=True)
    """
    if _TORCH_GREATER_EQUAL_2_0 and not _ONNX_AVAILABLE:
        raise ModuleNotFoundError(
            f"`torch>=2.0` requires `onnx` to be installed to use `{type(self).__name__}.to_onnx()`"
        )

    # Remember the current train/eval state so it can be restored after export.
    mode = self.training

    if input_sample is None:
        if self.example_input_array is None:
            raise ValueError(
                "Could not export to ONNX since neither `input_sample` nor"
                " `model.example_input_array` attribute is set."
            )
        input_sample = self.example_input_array

    # Run the sample through the same batch-transfer hooks used during training so
    # it matches what `forward` expects (device placement, custom transforms).
    input_sample = self._on_before_batch_transfer(input_sample)
    input_sample = self._apply_batch_transfer_handler(input_sample)

    torch.onnx.export(self, input_sample, file_path, **kwargs)
    self.train(mode)
def to_torchscript(
    self,
    file_path: Optional[Union[str, Path]] = None,
    method: Optional[str] = "script",
    example_inputs: Optional[Any] = None,
    **kwargs: Any,
) -> Union[ScriptModule, Dict[str, ScriptModule]]:
    """By default compiles the whole model to a :class:`~torch.jit.ScriptModule`. If you want to use tracing,
    please provide the argument ``method='trace'`` and make sure that either the `example_inputs` argument is
    provided, or the model has :attr:`example_input_array` set. If you would like to customize the modules that are
    scripted you should override this method. In case you want to return multiple modules, we recommend using a
    dictionary.

    Args:
        file_path: Path where to save the torchscript. Default: None (no file saved).
        method: Whether to use TorchScript's script or trace method. Default: 'script'
        example_inputs: An input to be used to do tracing when method is set to 'trace'.
            Default: None (uses :attr:`example_input_array`)
        **kwargs: Additional arguments that will be passed to the :func:`torch.jit.script` or
            :func:`torch.jit.trace` function.

    Note:
        - Requires the implementation of the
          :meth:`~lightning.pytorch.core.LightningModule.forward` method.
        - The exported script will be set to evaluation mode.
        - It is recommended that you install the latest supported version of PyTorch
          to use this feature without limitations. See also the :mod:`torch.jit`
          documentation for supported features.

    Example::

        class SimpleModel(LightningModule):
            def __init__(self):
                super().__init__()
                self.l1 = torch.nn.Linear(in_features=64, out_features=4)

            def forward(self, x):
                return torch.relu(self.l1(x.view(x.size(0), -1)))

        model = SimpleModel()
        model.to_torchscript(file_path="model.pt")

        torch.jit.save(model.to_torchscript(
            file_path="model_trace.pt", method='trace', example_inputs=torch.randn(1, 64))
        )

    Raises:
        ValueError: If ``method='trace'`` is chosen without any example inputs available,
            or if ``method`` is neither ``'script'`` nor ``'trace'``.

    Return:
        This LightningModule as a torchscript, regardless of whether `file_path` is
        defined or not.
    """
    # Remember the train/eval state; scripting/tracing is done in eval mode.
    mode = self.training

    if method == "script":
        # `_jit_is_scripting` flags the module so TorchScript-incompatible
        # code paths can be skipped while `torch.jit.script` runs.
        with _jit_is_scripting():
            torchscript_module = torch.jit.script(self.eval(), **kwargs)
    elif method == "trace":
        # if no example inputs are provided, try to see if model has example_input_array set
        if example_inputs is None:
            if self.example_input_array is None:
                raise ValueError(
                    "Choosing method=`trace` requires either `example_inputs`"
                    " or `model.example_input_array` to be defined."
                )
            example_inputs = self.example_input_array

        # automatically send example inputs to the right device and use trace
        example_inputs = self._on_before_batch_transfer(example_inputs)
        example_inputs = self._apply_batch_transfer_handler(example_inputs)
        with _jit_is_scripting():
            torchscript_module = torch.jit.trace(func=self.eval(), example_inputs=example_inputs, **kwargs)
    else:
        raise ValueError(f"The 'method' parameter only supports 'script' or 'trace', but value given was: {method}")

    # Restore the original train/eval state.
    self.train(mode)

    if file_path is not None:
        fs = get_filesystem(file_path)
        with fs.open(file_path, "wb") as f:
            torch.jit.save(torchscript_module, f)

    return torchscript_module
def load_from_checkpoint(
    cls,
    checkpoint_path: Union[_PATH, IO],
    map_location: _MAP_LOCATION_TYPE = None,
    hparams_file: Optional[_PATH] = None,
    strict: Optional[bool] = None,
    **kwargs: Any,
) -> Self:
    r"""Primary way of loading a model from a checkpoint. When Lightning saves a checkpoint it stores the arguments
    passed to ``__init__`` in the checkpoint under ``"hyper_parameters"``.

    Any arguments specified through \*\*kwargs will override args stored in ``"hyper_parameters"``.

    Args:
        checkpoint_path: Path to checkpoint. This can also be a URL, or file-like object
        map_location:
            If your checkpoint saved a GPU model and you now load on CPUs
            or a different number of GPUs, use this to map to the new setup.
            The behaviour is the same as in :func:`torch.load`.
        hparams_file: Optional path to a ``.yaml`` or ``.csv`` file with hierarchical structure
            as in this example::

                drop_prob: 0.2
                dataloader:
                    batch_size: 32

            You most likely won't need this since Lightning will always save the hyperparameters
            to the checkpoint.
            However, if your checkpoint weights don't have the hyperparameters saved,
            use this method to pass in a ``.yaml`` file with the hparams you'd like to use.
            These will be converted into a :class:`~dict` and passed into your
            :class:`LightningModule` for use.

            If your model's ``hparams`` argument is :class:`~argparse.Namespace`
            and ``.yaml`` file has hierarchical structure, you need to refactor your model to treat
            ``hparams`` as :class:`~dict`.
        strict: Whether to strictly enforce that the keys in :attr:`checkpoint_path` match the keys
            returned by this module's state dict. Defaults to ``True`` unless ``LightningModule.strict_loading`` is
            set, in which case it defaults to the value of ``LightningModule.strict_loading``.
        \**kwargs: Any extra keyword args needed to init the model. Can also be used to override saved
            hyperparameter values.

    Return:
        :class:`LightningModule` instance with loaded weights and hyperparameters (if available).

    Note:
        ``load_from_checkpoint`` is a **class** method. You should use your :class:`LightningModule`
        **class** to call it instead of the :class:`LightningModule` instance, or a
        ``TypeError`` will be raised.

    Note:
        To ensure all layers can be loaded from the checkpoint, this function will call
        :meth:`~lightning.pytorch.core.hooks.ModelHooks.configure_model` directly after instantiating the
        model if this hook is overridden in your LightningModule. However, note that ``load_from_checkpoint`` does
        not support loading sharded checkpoints, and you may run out of memory if the model is too large. In this
        case, consider loading through the Trainer via ``.fit(ckpt_path=...)``.

    Example::

        # load weights without mapping ...
        model = MyLightningModule.load_from_checkpoint('path/to/checkpoint.ckpt')

        # or load weights mapping all weights from GPU 1 to GPU 0 ...
        map_location = {'cuda:1':'cuda:0'}
        model = MyLightningModule.load_from_checkpoint(
            'path/to/checkpoint.ckpt',
            map_location=map_location
        )

        # or load weights and hyperparameters from separate files.
        model = MyLightningModule.load_from_checkpoint(
            'path/to/checkpoint.ckpt',
            hparams_file='/path/to/hparams_file.yaml'
        )

        # override some of the params with new values
        model = MyLightningModule.load_from_checkpoint(
            PATH,
            num_layers=128,
            pretrained_ckpt_path=NEW_PATH,
        )

        # predict
        pretrained_model.eval()
        pretrained_model.freeze()
        y_hat = pretrained_model(x)
    """
    # NOTE(review): the first parameter is `cls` and the docstring states this is a
    # class method, but no @classmethod-style decorator is visible here — confirm it
    # wasn't lost when this snippet was extracted.
    # All heavy lifting (checkpoint loading, hparams merging, configure_model call)
    # is delegated to the shared `_load_from_checkpoint` helper.
    loaded = _load_from_checkpoint(
        cls,  # type: ignore[arg-type]
        checkpoint_path,
        map_location,
        hparams_file,
        strict,
        **kwargs,
    )
    return cast(Self, loaded)
def __getstate__(self) -> Dict[str, Any]:
    """Return a picklable copy of the instance state with the trainer detached.

    Copying the instance dict and nulling ``_trainer`` prevents pickling the module
    from dragging the whole Trainer (and its resources) along.
    """
    state = {**self.__dict__}
    state["_trainer"] = None
    return state
def _register_sharded_tensor_state_dict_hooks_if_available(self) -> None:
    """Adds ShardedTensor state dict hooks if ShardedTensors are supported.

    These hooks ensure that ShardedTensors are included when saving, and are loaded the LightningModule correctly.
    """
    if _TORCH_GREATER_EQUAL_2_1:
        # ShardedTensor is deprecated in favor of DistributedTensor
        return
    if _IS_WINDOWS or not torch.distributed.is_available():
        # Hooks require torch.distributed, which is unavailable on Windows builds
        # or distributed-less installs; registration is best-effort.
        rank_zero_debug("Could not register sharded tensor state dict hooks")
        return

    from torch.distributed._shard.sharded_tensor import pre_load_state_dict_hook, state_dict_hook

    # Register the save and load hooks so ShardedTensors round-trip through state_dict.
    self._register_state_dict_hook(state_dict_hook)
    self._register_load_state_dict_pre_hook(pre_load_state_dict_hook, True)
The provided code snippet includes necessary dependencies for implementing the `_jit_is_scripting` function. Write a Python function `def _jit_is_scripting() -> Generator` to solve the following problem:
Workaround for https://github.com/pytorch/pytorch/issues/67146.
Here is the function:
from contextlib import contextmanager


@contextmanager
def _jit_is_scripting() -> Generator:
    """Workaround for https://github.com/pytorch/pytorch/issues/67146.

    Temporarily sets ``LightningModule._jit_is_scripting`` so that
    TorchScript-incompatible code paths can be skipped while scripting/tracing;
    the flag is always reset, even if compilation raises.
    """
    # This function yields, so it must be decorated with @contextmanager:
    # `to_torchscript` enters it via `with _jit_is_scripting():`, which would
    # fail on a bare generator object.
    LightningModule._jit_is_scripting = True
    try:
        yield
    finally:
        LightningModule._jit_is_scripting = False
155,397 | import importlib
import logging
import os
import uuid
from copy import deepcopy
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union, cast
import torch
from lightning_utilities.core.imports import RequirementCache
from typing_extensions import override
import lightning.pytorch as pl
from lightning.fabric.utilities.types import _TORCH_LRSCHEDULER
from lightning.pytorch.callbacks import Callback
from lightning.pytorch.utilities.exceptions import MisconfigurationException
from lightning.pytorch.utilities.parsing import lightning_hasattr, lightning_setattr
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
from lightning.pytorch.utilities.types import STEP_OUTPUT, LRScheduler, LRSchedulerConfig
log = logging.getLogger(__name__)
def _determine_lr_attr_name(model: "pl.LightningModule", attr_name: str = "") -> str:
    """Resolve which attribute on ``model`` (or ``model.hparams``) holds the learning rate.

    Args:
        model: The module whose learning-rate field is being located.
        attr_name: Explicit attribute name; when empty, the default names are probed.

    Raises:
        AttributeError: If the explicit name is missing, or none of the default
            names (``lr``, ``learning_rate``) exist on the model or its hparams.
    """
    if attr_name:
        if lightning_hasattr(model, attr_name):
            return attr_name
        raise AttributeError(
            f"The attribute name for the learning rate was set to {attr_name}, but"
            " could not find this as a field in `model` or `model.hparams`."
        )

    # Probe the conventional names in order of preference.
    attr_options = ("lr", "learning_rate")
    for candidate in attr_options:
        if lightning_hasattr(model, candidate):
            return candidate

    raise AttributeError(
        "When using the learning rate finder, either `model` or `model.hparams` should"
        f" have one of these fields: {attr_options}. If your model has a different name for the learning rate, set"
        f" it with `.lr_find(attr_name=...)`."
    )
class _LRFinder:
    """LR finder object. This object stores the results of lr_find().

    Args:
        mode: either `linear` or `exponential`, how to increase lr after each step
        lr_min: lr to start search from
        lr_max: lr to stop search
        num_training: number of steps to take between lr_min and lr_max

    Example::

        # Run lr finder
        lr_finder = trainer.lr_find(model)

        # Results stored in
        lr_finder.results

        # Plot using
        lr_finder.plot()

        # Get suggestion
        lr = lr_finder.suggestion()
    """

    def __init__(self, mode: str, lr_min: float, lr_max: float, num_training: int) -> None:
        assert mode in ("linear", "exponential"), "mode should be either `linear` or `exponential`"

        self.mode = mode
        self.lr_min = lr_min
        self.lr_max = lr_max
        self.num_training = num_training

        # Filled in by the LR-finder callback during the sweep: {"lr": [...], "loss": [...]}
        self.results: Dict[str, Any] = {}
        self._total_batch_idx = 0  # for debug purpose

    def _exchange_scheduler(self, trainer: "pl.Trainer") -> None:
        # TODO: update docs here
        """Decorate `trainer.strategy.setup_optimizers` method such that it sets the user's originally specified
        optimizer together with a new scheduler that takes care of the learning rate search."""
        from lightning.pytorch.core.optimizer import _validate_optimizers_attached

        optimizers = trainer.strategy.optimizers

        # The sweep drives a single LR schedule, so exactly one optimizer is required.
        if len(optimizers) != 1:
            raise MisconfigurationException(
                f"`model.configure_optimizers()` returned {len(optimizers)}, but"
                " learning rate finder only works with single optimizer"
            )

        optimizer = optimizers[0]

        # Start every param group at lr_min; the replacement scheduler ramps toward lr_max.
        new_lrs = [self.lr_min] * len(optimizer.param_groups)
        for param_group, new_lr in zip(optimizer.param_groups, new_lrs):
            param_group["lr"] = new_lr
            param_group["initial_lr"] = new_lr

        args = (optimizer, self.lr_max, self.num_training)
        scheduler = _LinearLR(*args) if self.mode == "linear" else _ExponentialLR(*args)
        scheduler = cast(LRScheduler, scheduler)

        trainer.strategy.optimizers = [optimizer]
        # Step the search scheduler after every optimizer update, not once per epoch.
        trainer.strategy.lr_scheduler_configs = [LRSchedulerConfig(scheduler, interval="step")]
        _validate_optimizers_attached(trainer.optimizers, trainer.lr_scheduler_configs)

    def plot(self, suggest: bool = False, show: bool = False, ax: Optional["Axes"] = None) -> Optional["plt.Figure"]:
        """Plot results from lr_find run.

        Args:
            suggest: if True, will mark suggested lr to use with a red point
            show: if True, will show figure
            ax: Axes object to which the plot is to be drawn. If not provided, a new figure is created.
        """
        if not _MATPLOTLIB_AVAILABLE:
            raise MisconfigurationException(
                "To use the `plot` method, you must have Matplotlib installed."
                " Install it by running `pip install -U matplotlib`."
            )
        import matplotlib.pyplot as plt

        lrs = self.results["lr"]
        losses = self.results["loss"]

        if ax is None:
            fig, ax = plt.subplots()
        else:
            fig = ax.figure  # type: ignore[assignment]

        # Plot loss as a function of the learning rate
        ax.plot(lrs, losses)
        if self.mode == "exponential":
            ax.set_xscale("log")
        ax.set_xlabel("Learning rate")
        ax.set_ylabel("Loss")

        if suggest:
            # `suggestion()` also records `_optimal_idx` used to mark the point below.
            _ = self.suggestion()
            if self._optimal_idx:
                ax.plot(lrs[self._optimal_idx], losses[self._optimal_idx], markersize=10, marker="o", color="red")

        if show:
            plt.show()

        return fig

    def suggestion(self, skip_begin: int = 10, skip_end: int = 1) -> Optional[float]:
        """This will propose a suggestion for an initial learning rate based on the point with the steepest negative
        gradient.

        Args:
            skip_begin: how many samples to skip in the beginning; helps to avoid too naive estimates
            skip_end: how many samples to skip in the end; helps to avoid too optimistic estimates

        Returns:
            The suggested initial learning rate to use, or `None` if a suggestion is not possible due to too few
            loss samples.
        """
        # Trim unstable warm-up/end samples, then discard NaN/inf losses.
        losses = torch.tensor(self.results["loss"][skip_begin:-skip_end])
        losses = losses[torch.isfinite(losses)]

        if len(losses) < 2:
            # computing np.gradient requires at least 2 points
            log.error(
                "Failed to compute suggestion for learning rate because there are not enough points. Increase the loop"
                " iteration limits or the size of your dataset/dataloader."
            )
            self._optimal_idx = None
            return None

        # TODO: When computing the argmin here, and some losses are non-finite, the expected indices could be
        #  incorrectly shifted by an offset
        gradients = torch.gradient(losses)[0]  # Unpack the tuple
        min_grad = torch.argmin(gradients).item()

        self._optimal_idx = min_grad + skip_begin
        return self.results["lr"][self._optimal_idx]
def __lr_finder_dump_params(trainer: "pl.Trainer") -> Dict[str, Any]:
    """Snapshot every trainer attribute the LR finder mutates, for later restore."""
    strategy = trainer.strategy
    return {
        "optimizers": strategy.optimizers,
        "lr_scheduler_configs": strategy.lr_scheduler_configs,
        "callbacks": trainer.callbacks,
        "loggers": trainer.loggers,
        "max_steps": trainer.fit_loop.max_steps,
        "limit_val_batches": trainer.limit_val_batches,
        # Deep-copied so the finder's fit run cannot mutate the saved loop state.
        "loop_state_dict": deepcopy(trainer.fit_loop.state_dict()),
    }
def __lr_finder_reset_params(trainer: "pl.Trainer", num_training: int, early_stop_threshold: Optional[float]) -> None:
    """Reconfigure ``trainer`` for the LR range test (to be undone afterwards)."""
    from lightning.pytorch.loggers.logger import DummyLogger

    # Drop any user-configured schedulers; the LR finder installs its own.
    trainer.strategy.lr_scheduler_configs = []
    # Track lr/loss per batch via the dedicated callback (replaces user callbacks).
    trainer.callbacks = [_LRCallback(num_training, early_stop_threshold, progress_bar_refresh_rate=1)]
    # Silence logging during the sweep; keep a stand-in only if a logger was configured.
    trainer.logger = None if trainer.logger is None else DummyLogger()
    # Run exactly `num_training` additional steps from wherever training currently is.
    trainer.fit_loop.epoch_loop.max_steps = trainer.global_step + num_training
    trainer.limit_val_batches = num_training
def __lr_finder_restore_params(trainer: "pl.Trainer", params: Dict[str, Any]) -> None:
    """Restore the trainer attributes captured by ``__lr_finder_dump_params``."""
    strategy = trainer.strategy
    strategy.optimizers = params["optimizers"]
    strategy.lr_scheduler_configs = params["lr_scheduler_configs"]
    trainer.callbacks = params["callbacks"]
    trainer.loggers = params["loggers"]
    trainer.limit_val_batches = params["limit_val_batches"]

    fit_loop = trainer.fit_loop
    fit_loop.epoch_loop.max_steps = params["max_steps"]
    # Reload the saved loop state and clear any stop/restart flags the sweep set.
    fit_loop.load_state_dict(deepcopy(params["loop_state_dict"]))
    fit_loop.restarting = False
    trainer.should_stop = False
def _try_loop_run(trainer: "pl.Trainer", params: Dict[str, Any]) -> None:
    """Load the saved fit-loop state into the trainer and run the loop once."""
    fit_loop = trainer.fit_loop
    # Deep copy keeps the snapshot in `params` pristine for a later restore.
    fit_loop.load_state_dict(deepcopy(params["loop_state_dict"]))
    fit_loop.restarting = False
    fit_loop.run()
def lightning_setattr(model: "pl.LightningModule", attribute: str, value: Any) -> None:
    """Special setattr for Lightning. Checks for attribute in model namespace and the old hparams namespace/dict.

    Will also set the attribute on the datamodule, if it exists.

    Raises:
        AttributeError:
            If ``model`` doesn't have ``attribute`` in any of
            model namespace, the hparams namespace/dict, and the datamodule.
    """
    holders = _lightning_get_all_attr_holders(model, attribute)
    if not holders:
        raise AttributeError(
            f"{attribute} is neither stored in the model namespace"
            " nor the `hparams` namespace/dict, nor the datamodule."
        )

    # Write the value everywhere the attribute lives so all views stay consistent;
    # hparams may be a plain dict, which needs item assignment instead of setattr.
    for holder in holders:
        if isinstance(holder, dict):
            holder[attribute] = value
        else:
            setattr(holder, attribute, value)
The provided code snippet includes necessary dependencies for implementing the `_lr_find` function. Write a Python function `def _lr_find( trainer: "pl.Trainer", model: "pl.LightningModule", min_lr: float = 1e-8, max_lr: float = 1, num_training: int = 100, mode: str = "exponential", early_stop_threshold: Optional[float] = 4.0, update_attr: bool = False, attr_name: str = "", ) -> Optional[_LRFinder]` to solve the following problem:
Enables the user to do a range test of good initial learning rates, to reduce the amount of guesswork in picking a good starting learning rate. Args: trainer: A Trainer instance. model: Model to tune. min_lr: minimum learning rate to investigate max_lr: maximum learning rate to investigate num_training: number of learning rates to test mode: Search strategy to update learning rate after each batch: - ``'exponential'``: Increases the learning rate exponentially. - ``'linear'``: Increases the learning rate linearly. early_stop_threshold: Threshold for stopping the search. If the loss at any point is larger than early_stop_threshold*best_loss then the search is stopped. To disable, set to None. update_attr: Whether to update the learning rate attribute or not. attr_name: Name of the attribute which stores the learning rate. The names 'learning_rate' or 'lr' get automatically detected. Otherwise, set the name here.
Here is the function:
def _lr_find(
trainer: "pl.Trainer",
model: "pl.LightningModule",
min_lr: float = 1e-8,
max_lr: float = 1,
num_training: int = 100,
mode: str = "exponential",
early_stop_threshold: Optional[float] = 4.0,
update_attr: bool = False,
attr_name: str = "",
) -> Optional[_LRFinder]:
"""Enables the user to do a range test of good initial learning rates, to reduce the amount of guesswork in picking
a good starting learning rate.
Args:
trainer: A Trainer instance.
model: Model to tune.
min_lr: minimum learning rate to investigate
max_lr: maximum learning rate to investigate
num_training: number of learning rates to test
mode: Search strategy to update learning rate after each batch:
- ``'exponential'``: Increases the learning rate exponentially.
- ``'linear'``: Increases the learning rate linearly.
early_stop_threshold: Threshold for stopping the search. If the
loss at any point is larger than early_stop_threshold*best_loss
then the search is stopped. To disable, set to None.
update_attr: Whether to update the learning rate attribute or not.
attr_name: Name of the attribute which stores the learning rate. The names 'learning_rate' or 'lr' get
automatically detected. Otherwise, set the name here.
"""
if trainer.fast_dev_run:
rank_zero_warn("Skipping learning rate finder since `fast_dev_run` is enabled.")
return None
# Determine lr attr
if update_attr:
attr_name = _determine_lr_attr_name(model, attr_name)
# Save initial model, that is loaded after learning rate is found
ckpt_path = os.path.join(trainer.default_root_dir, f".lr_find_{uuid.uuid4()}.ckpt")
ckpt_path = trainer.strategy.broadcast(ckpt_path)
trainer.save_checkpoint(ckpt_path)
start_steps = trainer.global_step
# Arguments we adjust during the lr finder, save for restoring
params = __lr_finder_dump_params(trainer)
# Set to values that are required by the algorithm
__lr_finder_reset_params(trainer, num_training, early_stop_threshold)
# Disable standard progress bar for fit
if trainer.progress_bar_callback:
trainer.progress_bar_callback.disable()
# Initialize lr finder object (stores results)
lr_finder = _LRFinder(mode, min_lr, max_lr, num_training)
# Configure optimizer and scheduler
lr_finder._exchange_scheduler(trainer)
# Fit, lr & loss logged in callback
_try_loop_run(trainer, params)
# Prompt if we stopped early
if trainer.global_step != num_training + start_steps:
log.info(f"LR finder stopped early after {trainer.global_step} steps due to diverging loss.")
# Transfer results from callback to lr finder object
lr_finder.results.update({"lr": trainer.callbacks[0].lrs, "loss": trainer.callbacks[0].losses})
lr_finder._total_batch_idx = trainer.fit_loop.total_batch_idx # for debug purpose
__lr_finder_restore_params(trainer, params)
if trainer.progress_bar_callback:
trainer.progress_bar_callback.enable()
# Update lr attr if required
lr_finder.results = trainer.strategy.broadcast(lr_finder.results)
if update_attr:
lr = lr_finder.suggestion()
# TODO: log lr.results to self.logger
if lr is not None:
lightning_setattr(model, attr_name, lr)
log.info(f"Learning rate set to {lr}")
# Restore initial state of model
trainer._checkpoint_connector.restore(ckpt_path)
trainer.strategy.remove_checkpoint(ckpt_path)
trainer.fit_loop.restarting = False # reset restarting flag as checkpoint restoring sets it to True
trainer.fit_loop.epoch_loop.val_loop._combined_loader = None
return lr_finder | Enables the user to do a range test of good initial learning rates, to reduce the amount of guesswork in picking a good starting learning rate. Args: trainer: A Trainer instance. model: Model to tune. min_lr: minimum learning rate to investigate max_lr: maximum learning rate to investigate num_training: number of learning rates to test mode: Search strategy to update learning rate after each batch: - ``'exponential'``: Increases the learning rate exponentially. - ``'linear'``: Increases the learning rate linearly. early_stop_threshold: Threshold for stopping the search. If the loss at any point is larger than early_stop_threshold*best_loss then the search is stopped. To disable, set to None. update_attr: Whether to update the learning rate attribute or not. attr_name: Name of the attribute which stores the learning rate. The names 'learning_rate' or 'lr' get automatically detected. Otherwise, set the name here. |
155,398 | from typing import TYPE_CHECKING, Literal, Optional, Union
import lightning.pytorch as pl
from lightning.pytorch.callbacks.callback import Callback
from lightning.pytorch.utilities.exceptions import MisconfigurationException
from lightning.pytorch.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS
TRAIN_DATALOADERS = Any
EVAL_DATALOADERS = Any
def _check_tuner_configuration(
train_dataloaders: Optional[Union[TRAIN_DATALOADERS, "pl.LightningDataModule"]] = None,
val_dataloaders: Optional[EVAL_DATALOADERS] = None,
dataloaders: Optional[EVAL_DATALOADERS] = None,
method: Literal["fit", "validate", "test", "predict"] = "fit",
) -> None:
supported_methods = ("fit", "validate", "test", "predict")
if method not in supported_methods:
raise ValueError(f"method {method!r} is invalid. Should be one of {supported_methods}.")
if method == "fit":
if dataloaders is not None:
raise MisconfigurationException(
f"In tuner with method={method!r}, `dataloaders` argument should be None,"
" please consider setting `train_dataloaders` and `val_dataloaders` instead."
)
else:
if train_dataloaders is not None or val_dataloaders is not None:
raise MisconfigurationException(
f"In tuner with `method`={method!r}, `train_dataloaders` and `val_dataloaders`"
" arguments should be None, please consider setting `dataloaders` instead."
) | null |
155,399 | from typing import TYPE_CHECKING, Literal, Optional, Union
import lightning.pytorch as pl
from lightning.pytorch.callbacks.callback import Callback
from lightning.pytorch.utilities.exceptions import MisconfigurationException
from lightning.pytorch.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS
class LearningRateFinder(Callback):
"""The ``LearningRateFinder`` callback enables the user to do a range test of good initial learning rates, to
reduce the amount of guesswork in picking a good starting learning rate.
.. warning:: This is an :ref:`experimental <versioning:Experimental API>` feature.
Args:
min_lr: Minimum learning rate to investigate
max_lr: Maximum learning rate to investigate
num_training_steps: Number of learning rates to test
mode: Search strategy to update learning rate after each batch:
- ``'exponential'`` (default): Increases the learning rate exponentially.
- ``'linear'``: Increases the learning rate linearly.
early_stop_threshold: Threshold for stopping the search. If the
loss at any point is larger than early_stop_threshold*best_loss
then the search is stopped. To disable, set to None.
update_attr: Whether to update the learning rate attribute or not.
attr_name: Name of the attribute which stores the learning rate. The names 'learning_rate' or 'lr' get
automatically detected. Otherwise, set the name here.
Example::
# Customize LearningRateFinder callback to run at different epochs.
# This feature is useful while fine-tuning models.
from lightning.pytorch.callbacks import LearningRateFinder
class FineTuneLearningRateFinder(LearningRateFinder):
def __init__(self, milestones, *args, **kwargs):
super().__init__(*args, **kwargs)
self.milestones = milestones
def on_fit_start(self, *args, **kwargs):
return
def on_train_epoch_start(self, trainer, pl_module):
if trainer.current_epoch in self.milestones or trainer.current_epoch == 0:
self.lr_find(trainer, pl_module)
trainer = Trainer(callbacks=[FineTuneLearningRateFinder(milestones=(5, 10))])
trainer.fit(...)
Raises:
MisconfigurationException:
If learning rate/lr in ``model`` or ``model.hparams`` isn't overridden, or if you are using more than
one optimizer.
"""
SUPPORTED_MODES = ("linear", "exponential")
def __init__(
self,
min_lr: float = 1e-8,
max_lr: float = 1,
num_training_steps: int = 100,
mode: str = "exponential",
early_stop_threshold: Optional[float] = 4.0,
update_attr: bool = True,
attr_name: str = "",
) -> None:
mode = mode.lower()
if mode not in self.SUPPORTED_MODES:
raise ValueError(f"`mode` should be either of {self.SUPPORTED_MODES}")
self._min_lr = min_lr
self._max_lr = max_lr
self._num_training_steps = num_training_steps
self._mode = mode
self._early_stop_threshold = early_stop_threshold
self._update_attr = update_attr
self._attr_name = attr_name
self._early_exit = False
self.lr_finder: Optional[_LRFinder] = None
def lr_find(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
with isolate_rng():
self.optimal_lr = _lr_find(
trainer,
pl_module,
min_lr=self._min_lr,
max_lr=self._max_lr,
num_training=self._num_training_steps,
mode=self._mode,
early_stop_threshold=self._early_stop_threshold,
update_attr=self._update_attr,
attr_name=self._attr_name,
)
if self._early_exit:
raise _TunerExitException()
def on_fit_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self.lr_find(trainer, pl_module)
def _check_lr_find_configuration(trainer: "pl.Trainer") -> None:
# local import to avoid circular import
from lightning.pytorch.callbacks.lr_finder import LearningRateFinder
configured_callbacks = [cb for cb in trainer.callbacks if isinstance(cb, LearningRateFinder)]
if configured_callbacks:
raise ValueError(
"Trainer is already configured with a `LearningRateFinder` callback."
"Please remove it if you want to use the Tuner."
) | null |
155,400 | from typing import TYPE_CHECKING, Literal, Optional, Union
import lightning.pytorch as pl
from lightning.pytorch.callbacks.callback import Callback
from lightning.pytorch.utilities.exceptions import MisconfigurationException
from lightning.pytorch.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS
class BatchSizeFinder(Callback):
"""Finds the largest batch size supported by a given model before encountering an out of memory (OOM) error.
All you need to do is add it as a callback inside Trainer and call ``trainer.{fit,validate,test,predict}``.
Internally, it calls the respective step function ``steps_per_trial`` times for each batch size until one
of the batch sizes generates an OOM error.
.. warning:: This is an :ref:`experimental <versioning:Experimental API>` feature.
Args:
mode: search strategy to update the batch size:
- ``'power'``: Keep multiplying the batch size by 2, until we get an OOM error.
- ``'binsearch'``: Initially keep multiplying by 2 and after encountering an OOM error
do a binary search between the last successful batch size and the batch size that failed.
steps_per_trial: number of steps to run with a given batch size.
Ideally 1 should be enough to test if an OOM error occurs,
however in practice a few are needed.
init_val: initial batch size to start the search with.
max_trials: max number of increases in batch size done before
algorithm is terminated
batch_arg_name: name of the attribute that stores the batch size.
It is expected that the user has provided a model or datamodule that has a hyperparameter
with that name. We will look for this attribute name in the following places
- ``model``
- ``model.hparams``
- ``trainer.datamodule`` (the datamodule passed to the tune method)
Example::
# 1. Customize the BatchSizeFinder callback to run at different epochs. This feature is
# useful while fine-tuning models since you can't always use the same batch size after
# unfreezing the backbone.
from lightning.pytorch.callbacks import BatchSizeFinder
class FineTuneBatchSizeFinder(BatchSizeFinder):
def __init__(self, milestones, *args, **kwargs):
super().__init__(*args, **kwargs)
self.milestones = milestones
def on_fit_start(self, *args, **kwargs):
return
def on_train_epoch_start(self, trainer, pl_module):
if trainer.current_epoch in self.milestones or trainer.current_epoch == 0:
self.scale_batch_size(trainer, pl_module)
trainer = Trainer(callbacks=[FineTuneBatchSizeFinder(milestones=(5, 10))])
trainer.fit(...)
Example::
# 2. Run batch size finder for validate/test/predict.
from lightning.pytorch.callbacks import BatchSizeFinder
class EvalBatchSizeFinder(BatchSizeFinder):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def on_fit_start(self, *args, **kwargs):
return
def on_test_start(self, trainer, pl_module):
self.scale_batch_size(trainer, pl_module)
trainer = Trainer(callbacks=[EvalBatchSizeFinder()])
trainer.test(...)
"""
SUPPORTED_MODES = ("power", "binsearch")
def __init__(
self,
mode: str = "power",
steps_per_trial: int = 3,
init_val: int = 2,
max_trials: int = 25,
batch_arg_name: str = "batch_size",
) -> None:
mode = mode.lower()
if mode not in self.SUPPORTED_MODES:
raise ValueError(f"`mode` should be either of {self.SUPPORTED_MODES}")
self.optimal_batch_size: Optional[int] = init_val
self._mode = mode
self._steps_per_trial = steps_per_trial
self._init_val = init_val
self._max_trials = max_trials
self._batch_arg_name = batch_arg_name
self._early_exit = False
def setup(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", stage: Optional[str] = None) -> None:
if trainer._accelerator_connector.is_distributed:
raise MisconfigurationException("The Batch size finder is not supported with distributed strategies.")
# TODO: check if this can be enabled (#4040)
if not trainer.fit_loop._data_source.is_module():
raise MisconfigurationException(
"The Batch size finder cannot be used with dataloaders passed directly to `.fit()`. Please disable"
" the feature or incorporate the dataloader into your LightningModule or LightningDataModule."
)
# TODO: Add support for multiple eval dataloader
if stage != "fit":
loop = trainer._active_loop
assert loop is not None
loop.setup_data()
combined_loader = loop._combined_loader
assert combined_loader is not None
if len(combined_loader.flattened) > 1:
stage = trainer.state.stage
assert stage is not None
raise MisconfigurationException(
f"The Batch size finder cannot be used with multiple {stage.dataloader_prefix} dataloaders."
)
if not lightning_hasattr(pl_module, self._batch_arg_name):
raise MisconfigurationException(
f"Field {self._batch_arg_name} not found in `model`, `datamodule`, nor their `hparams` attributes."
)
if (
hasattr(pl_module, self._batch_arg_name)
and hasattr(pl_module, "hparams")
and self._batch_arg_name in pl_module.hparams
):
rank_zero_warn(
f"Field `model.{self._batch_arg_name}` and `model.hparams.{self._batch_arg_name}` are mutually"
f" exclusive! `model.{self._batch_arg_name}` will be used as the initial batch size for scaling."
" If this is not the intended behavior, please remove either one."
)
def scale_batch_size(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
new_size = _scale_batch_size(
trainer,
self._mode,
self._steps_per_trial,
self._init_val,
self._max_trials,
self._batch_arg_name,
)
self.optimal_batch_size = new_size
if self._early_exit:
raise _TunerExitException()
def on_fit_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self.scale_batch_size(trainer, pl_module)
def on_validation_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
if trainer.sanity_checking or trainer.state.fn != "validate":
return
self.scale_batch_size(trainer, pl_module)
def on_test_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self.scale_batch_size(trainer, pl_module)
def on_predict_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self.scale_batch_size(trainer, pl_module)
def _check_scale_batch_size_configuration(trainer: "pl.Trainer") -> None:
if trainer._accelerator_connector.is_distributed:
raise ValueError("Tuning the batch size is currently not supported with distributed strategies.")
# local import to avoid circular import
from lightning.pytorch.callbacks.batch_size_finder import BatchSizeFinder
configured_callbacks = [cb for cb in trainer.callbacks if isinstance(cb, BatchSizeFinder)]
if configured_callbacks:
raise ValueError(
"Trainer is already configured with a `BatchSizeFinder` callback."
"Please remove it if you want to use the Tuner."
) | null |
155,401 | import logging
import os
import uuid
from copy import deepcopy
from typing import Any, Dict, Optional, Tuple
import lightning.pytorch as pl
from lightning.pytorch.utilities.memory import garbage_collection_cuda, is_oom_error
from lightning.pytorch.utilities.parsing import lightning_getattr, lightning_setattr
from lightning.pytorch.utilities.rank_zero import rank_zero_info, rank_zero_warn
log = logging.getLogger(__name__)
def __scale_batch_dump_params(trainer: "pl.Trainer") -> Dict[str, Any]:
dumped_params = {
"loggers": trainer.loggers,
"callbacks": trainer.callbacks,
}
loop = trainer._active_loop
assert loop is not None
if isinstance(loop, pl.loops._FitLoop):
dumped_params["max_steps"] = trainer.max_steps
dumped_params["limit_train_batches"] = trainer.limit_train_batches
dumped_params["limit_val_batches"] = trainer.limit_val_batches
elif isinstance(loop, pl.loops._EvaluationLoop):
stage = trainer.state.stage
assert stage is not None
dumped_params["limit_eval_batches"] = getattr(trainer, f"limit_{stage.dataloader_prefix}_batches")
dumped_params["loop_verbose"] = loop.verbose
dumped_params["loop_state_dict"] = deepcopy(loop.state_dict())
return dumped_params
def __scale_batch_reset_params(trainer: "pl.Trainer", steps_per_trial: int) -> None:
from lightning.pytorch.loggers.logger import DummyLogger
trainer.logger = DummyLogger() if trainer.logger is not None else None
trainer.callbacks = []
loop = trainer._active_loop
assert loop is not None
if isinstance(loop, pl.loops._FitLoop):
trainer.limit_train_batches = 1.0
trainer.limit_val_batches = steps_per_trial
trainer.fit_loop.epoch_loop.max_steps = steps_per_trial
elif isinstance(loop, pl.loops._EvaluationLoop):
stage = trainer.state.stage
assert stage is not None
setattr(trainer, f"limit_{stage.dataloader_prefix}_batches", steps_per_trial)
loop.verbose = False
def __scale_batch_restore_params(trainer: "pl.Trainer", params: Dict[str, Any]) -> None:
# TODO: There are more states that needs to be reset (#4512 and #4870)
trainer.loggers = params["loggers"]
trainer.callbacks = params["callbacks"]
loop = trainer._active_loop
assert loop is not None
if isinstance(loop, pl.loops._FitLoop):
loop.epoch_loop.max_steps = params["max_steps"]
trainer.limit_train_batches = params["limit_train_batches"]
trainer.limit_val_batches = params["limit_val_batches"]
elif isinstance(loop, pl.loops._EvaluationLoop):
stage = trainer.state.stage
assert stage is not None
setattr(trainer, f"limit_{stage.dataloader_prefix}_batches", params["limit_eval_batches"])
loop.load_state_dict(deepcopy(params["loop_state_dict"]))
loop.restarting = False
if isinstance(loop, pl.loops._EvaluationLoop) and "loop_verbose" in params:
loop.verbose = params["loop_verbose"]
# make sure the loop's state is reset
_reset_dataloaders(trainer)
loop.reset()
def _run_power_scaling(
trainer: "pl.Trainer",
new_size: int,
batch_arg_name: str,
max_trials: int,
params: Dict[str, Any],
) -> int:
"""Batch scaling mode where the size is doubled at each iteration until an OOM error is encountered."""
# this flag is used to determine whether the previously scaled batch size, right before OOM, was a success or not
# if it was we exit, else we continue downscaling in case we haven't encountered a single optimal batch size
any_success = False
for _ in range(max_trials):
garbage_collection_cuda()
# reset after each try
_reset_progress(trainer)
try:
_try_loop_run(trainer, params)
new_size, changed = _adjust_batch_size(trainer, batch_arg_name, factor=2.0, desc="succeeded")
if not changed:
break
# Force the train dataloader to reset as the batch size has changed
_reset_dataloaders(trainer)
any_success = True
except RuntimeError as exception:
if is_oom_error(exception):
# If we fail in power mode, half the size and return
garbage_collection_cuda()
new_size, _ = _adjust_batch_size(trainer, batch_arg_name, factor=0.5, desc="failed")
# Force the train dataloader to reset as the batch size has changed
_reset_dataloaders(trainer)
if any_success:
break
else:
raise # some other error not memory related
return new_size
def _run_binary_scaling(
trainer: "pl.Trainer",
new_size: int,
batch_arg_name: str,
max_trials: int,
params: Dict[str, Any],
) -> int:
"""Batch scaling mode where the size is initially is doubled at each iteration until an OOM error is encountered.
Hereafter, the batch size is further refined using a binary search
"""
low = 1
high = None
count = 0
while True:
garbage_collection_cuda()
# reset after each try
_reset_progress(trainer)
try:
# run loop
_try_loop_run(trainer, params)
count += 1
if count > max_trials:
break
# Double in size
low = new_size
if high:
if high - low <= 1:
break
midval = (high + low) // 2
new_size, changed = _adjust_batch_size(trainer, batch_arg_name, value=midval, desc="succeeded")
else:
new_size, changed = _adjust_batch_size(trainer, batch_arg_name, factor=2.0, desc="succeeded")
if not changed:
break
# Force the train dataloader to reset as the batch size has changed
_reset_dataloaders(trainer)
except RuntimeError as exception:
# Only these errors should trigger an adjustment
if is_oom_error(exception):
# If we fail in power mode, half the size and return
garbage_collection_cuda()
high = new_size
midval = (high + low) // 2
new_size, _ = _adjust_batch_size(trainer, batch_arg_name, value=midval, desc="failed")
# Force the train dataloader to reset as the batch size has changed
_reset_dataloaders(trainer)
if high - low <= 1:
break
else:
raise # some other error not memory related
return new_size
def _adjust_batch_size(
trainer: "pl.Trainer",
batch_arg_name: str = "batch_size",
factor: float = 1.0,
value: Optional[int] = None,
desc: Optional[str] = None,
) -> Tuple[int, bool]:
"""Helper function for adjusting the batch size.
Args:
trainer: instance of lightning.pytorch.Trainer
factor: value which the old batch size is multiplied by to get the
new batch size
value: if a value is given, will override the batch size with this value.
Note that the value of `factor` will not have an effect in this case
desc: either ``"succeeded"`` or ``"failed"``. Used purely for logging
Returns:
The new batch size for the next trial and a bool that signals whether the
new value is different than the previous batch size.
"""
model = trainer.lightning_module
batch_size = lightning_getattr(model, batch_arg_name)
assert batch_size is not None
loop = trainer._active_loop
assert loop is not None
loop.setup_data()
combined_loader = loop._combined_loader
assert combined_loader is not None
try:
combined_dataset_length = combined_loader._dataset_length()
if batch_size >= combined_dataset_length:
rank_zero_info(f"The batch size {batch_size} is greater or equal than the length of your dataset.")
return batch_size, False
except NotImplementedError:
# all datasets are iterable style
pass
new_size = value if value is not None else int(batch_size * factor)
if desc:
rank_zero_info(f"Batch size {batch_size} {desc}, trying batch size {new_size}")
changed = new_size != batch_size
lightning_setattr(model, batch_arg_name, new_size)
return new_size, changed
def garbage_collection_cuda() -> None:
"""Garbage collection Torch (CUDA) memory."""
gc.collect()
try:
# This is the last thing that should cause an OOM error, but seemingly it can.
torch.cuda.empty_cache()
except RuntimeError as exception:
if not is_oom_error(exception):
# Only handle OOM errors
raise
The provided code snippet includes necessary dependencies for implementing the `_scale_batch_size` function. Write a Python function `def _scale_batch_size( trainer: "pl.Trainer", mode: str = "power", steps_per_trial: int = 3, init_val: int = 2, max_trials: int = 25, batch_arg_name: str = "batch_size", ) -> Optional[int]` to solve the following problem:
Iteratively try to find the largest batch size for a given model that does not give an out of memory (OOM) error. Args: trainer: A Trainer instance. mode: Search strategy to update the batch size: - ``'power'``: Keep multiplying the batch size by 2, until we get an OOM error. - ``'binsearch'``: Initially keep multiplying by 2 and after encountering an OOM error do a binary search between the last successful batch size and the batch size that failed. steps_per_trial: number of steps to run with a given batch size. Ideally 1 should be enough to test if an OOM error occurs, however in practise a few are needed init_val: initial batch size to start the search with max_trials: max number of increases in batch size done before algorithm is terminated batch_arg_name: name of the attribute that stores the batch size. It is expected that the user has provided a model or datamodule that has a hyperparameter with that name. We will look for this attribute name in the following places - ``model`` - ``model.hparams`` - ``trainer.datamodule`` (the datamodule passed to the tune method)
Here is the function:
def _scale_batch_size(
trainer: "pl.Trainer",
mode: str = "power",
steps_per_trial: int = 3,
init_val: int = 2,
max_trials: int = 25,
batch_arg_name: str = "batch_size",
) -> Optional[int]:
"""Iteratively try to find the largest batch size for a given model that does not give an out of memory (OOM)
error.
Args:
trainer: A Trainer instance.
mode: Search strategy to update the batch size:
- ``'power'``: Keep multiplying the batch size by 2, until we get an OOM error.
- ``'binsearch'``: Initially keep multiplying by 2 and after encountering an OOM error
do a binary search between the last successful batch size and the batch size that failed.
steps_per_trial: number of steps to run with a given batch size.
Ideally 1 should be enough to test if an OOM error occurs,
however in practise a few are needed
init_val: initial batch size to start the search with
max_trials: max number of increases in batch size done before
algorithm is terminated
batch_arg_name: name of the attribute that stores the batch size.
It is expected that the user has provided a model or datamodule that has a hyperparameter
with that name. We will look for this attribute name in the following places
- ``model``
- ``model.hparams``
- ``trainer.datamodule`` (the datamodule passed to the tune method)
"""
if trainer.fast_dev_run:
rank_zero_warn("Skipping batch size scaler since `fast_dev_run` is enabled.")
return None
# Save initial model, that is loaded after batch size is found
ckpt_path = os.path.join(trainer.default_root_dir, f".scale_batch_size_{uuid.uuid4()}.ckpt")
trainer.save_checkpoint(ckpt_path)
# Arguments we adjust during the batch size finder, save for restoring
params = __scale_batch_dump_params(trainer)
# Set to values that are required by the algorithm
__scale_batch_reset_params(trainer, steps_per_trial)
if trainer.progress_bar_callback:
trainer.progress_bar_callback.disable()
new_size, _ = _adjust_batch_size(trainer, batch_arg_name, value=init_val)
if mode == "power":
new_size = _run_power_scaling(trainer, new_size, batch_arg_name, max_trials, params)
elif mode == "binsearch":
new_size = _run_binary_scaling(trainer, new_size, batch_arg_name, max_trials, params)
garbage_collection_cuda()
log.info(f"Finished batch size finder, will continue with full run using batch size {new_size}")
__scale_batch_restore_params(trainer, params)
if trainer.progress_bar_callback:
trainer.progress_bar_callback.enable()
trainer._checkpoint_connector.restore(ckpt_path)
trainer.strategy.remove_checkpoint(ckpt_path)
return new_size | Iteratively try to find the largest batch size for a given model that does not give an out of memory (OOM) error. Args: trainer: A Trainer instance. mode: Search strategy to update the batch size: - ``'power'``: Keep multiplying the batch size by 2, until we get an OOM error. - ``'binsearch'``: Initially keep multiplying by 2 and after encountering an OOM error do a binary search between the last successful batch size and the batch size that failed. steps_per_trial: number of steps to run with a given batch size. Ideally 1 should be enough to test if an OOM error occurs, however in practise a few are needed init_val: initial batch size to start the search with max_trials: max number of increases in batch size done before algorithm is terminated batch_arg_name: name of the attribute that stores the batch size. It is expected that the user has provided a model or datamodule that has a hyperparameter with that name. We will look for this attribute name in the following places - ``model`` - ``model.hparams`` - ``trainer.datamodule`` (the datamodule passed to the tune method) |
155,402 | from contextlib import contextmanager
from typing import Generator
from lightning.fabric.utilities.seed import _collect_rng_states, _set_rng_states
def _collect_rng_states(include_cuda: bool = True) -> Dict[str, Any]:
r"""Collect the global random state of :mod:`torch`, :mod:`torch.cuda`, :mod:`numpy` and Python."""
states = {
"torch": torch.get_rng_state(),
"numpy": np.random.get_state(),
"python": python_get_rng_state(),
}
if include_cuda:
states["torch.cuda"] = torch.cuda.get_rng_state_all() if torch.cuda.is_available() else []
return states
def _set_rng_states(rng_state_dict: Dict[str, Any]) -> None:
r"""Set the global random state of :mod:`torch`, :mod:`torch.cuda`, :mod:`numpy` and Python in the current
process."""
torch.set_rng_state(rng_state_dict["torch"])
# torch.cuda rng_state is only included since v1.8.
if "torch.cuda" in rng_state_dict:
torch.cuda.set_rng_state_all(rng_state_dict["torch.cuda"])
np.random.set_state(rng_state_dict["numpy"])
version, state, gauss = rng_state_dict["python"]
python_set_rng_state((version, tuple(state), gauss))
The provided code snippet includes necessary dependencies for implementing the `isolate_rng` function. Write a Python function `def isolate_rng(include_cuda: bool = True) -> Generator[None, None, None]` to solve the following problem:
A context manager that resets the global random state on exit to what it was before entering. It supports isolating the states for PyTorch, Numpy, and Python built-in random number generators. Args: include_cuda: Whether to allow this function to also control the `torch.cuda` random number generator. Set this to ``False`` when using the function in a forked process where CUDA re-initialization is prohibited. Example: >>> import torch >>> torch.manual_seed(1) # doctest: +ELLIPSIS <torch._C.Generator object at ...> >>> with isolate_rng(): ... [torch.rand(1) for _ in range(3)] [tensor([0.7576]), tensor([0.2793]), tensor([0.4031])] >>> torch.rand(1) tensor([0.7576])
Here is the function:
def isolate_rng(include_cuda: bool = True) -> Generator[None, None, None]:
"""A context manager that resets the global random state on exit to what it was before entering.
It supports isolating the states for PyTorch, Numpy, and Python built-in random number generators.
Args:
include_cuda: Whether to allow this function to also control the `torch.cuda` random number generator.
Set this to ``False`` when using the function in a forked process where CUDA re-initialization is
prohibited.
Example:
>>> import torch
>>> torch.manual_seed(1) # doctest: +ELLIPSIS
<torch._C.Generator object at ...>
>>> with isolate_rng():
... [torch.rand(1) for _ in range(3)]
[tensor([0.7576]), tensor([0.2793]), tensor([0.4031])]
>>> torch.rand(1)
tensor([0.7576])
"""
states = _collect_rng_states(include_cuda)
yield
_set_rng_states(states) | A context manager that resets the global random state on exit to what it was before entering. It supports isolating the states for PyTorch, Numpy, and Python built-in random number generators. Args: include_cuda: Whether to allow this function to also control the `torch.cuda` random number generator. Set this to ``False`` when using the function in a forked process where CUDA re-initialization is prohibited. Example: >>> import torch >>> torch.manual_seed(1) # doctest: +ELLIPSIS <torch._C.Generator object at ...> >>> with isolate_rng(): ... [torch.rand(1) for _ in range(3)] [tensor([0.7576]), tensor([0.2793]), tensor([0.4031])] >>> torch.rand(1) tensor([0.7576]) |
155,403 | import re
from typing import Any, Dict
import torch
from lightning.fabric.utilities.consolidate_checkpoint import _parse_cli_args, _process_cli_args
from lightning.fabric.utilities.load import _load_distributed_checkpoint
The provided code snippet includes necessary dependencies for implementing the `_format_checkpoint` function. Write a Python function `def _format_checkpoint(checkpoint: Dict[str, Any]) -> Dict[str, Any]` to solve the following problem:
Converts the special FSDP checkpoint format to the standard format the Lightning Trainer can load.
Here is the function:
def _format_checkpoint(checkpoint: Dict[str, Any]) -> Dict[str, Any]:
    """Converts the special FSDP checkpoint format to the standard format the Lightning Trainer can load.

    The checkpoint is modified in place: the ``"model"`` key is renamed to ``"state_dict"`` and the
    per-optimizer keys ``optimizer_0``, ``optimizer_1``, ... are merged into one ``"optimizer_states"`` list.
    """
    # Rename the model key (KeyError here means the input was not in the FSDP format)
    checkpoint["state_dict"] = checkpoint.pop("model")
    optimizer_keys = [key for key in checkpoint if re.match("optimizer_[0-9]+", key)]
    if not optimizer_keys:
        return checkpoint
    # Optimizers are saved in special keys named `optimizer_0`, `optimizer_1`, etc.
    # These need to be merged back into a Python list
    checkpoint["optimizer_states"] = [checkpoint.pop(f"optimizer_{opt_idx}") for opt_idx in range(len(optimizer_keys))]
return checkpoint | Converts the special FSDP checkpoint format to the standard format the Lightning Trainer can load. |
155,404 | from typing import Dict, Union
import torch
from torch.nn import Module
The provided code snippet includes necessary dependencies for implementing the `grad_norm` function. Write a Python function `def grad_norm(module: Module, norm_type: Union[float, int, str], group_separator: str = "/") -> Dict[str, float]` to solve the following problem:
Compute each parameter's gradient's norm and their overall norm. The overall norm is computed over all gradients together, as if they were concatenated into a single vector. Args: module: :class:`torch.nn.Module` to inspect. norm_type: The type of the used p-norm, cast to float if necessary. Can be ``'inf'`` for infinity norm. group_separator: The separator string used by the logger to group the gradients norms in their own subfolder instead of the logs one. Return: norms: The dictionary of p-norms of each parameter's gradient and a special entry for the total p-norm of the gradients viewed as a single vector.
Here is the function:
def grad_norm(module: Module, norm_type: Union[float, int, str], group_separator: str = "/") -> Dict[str, float]:
    """Compute each parameter's gradient's norm and their overall norm.

    The overall norm is computed over all gradients together, as if they
    were concatenated into a single vector.

    Args:
        module: :class:`torch.nn.Module` to inspect.
        norm_type: The type of the used p-norm, cast to float if necessary.
            Can be ``'inf'`` for infinity norm.
        group_separator: The separator string used by the logger to group
            the gradients norms in their own subfolder instead of the logs one.

    Return:
        norms: The dictionary of p-norms of each parameter's gradient and
            a special entry for the total p-norm of the gradients viewed
            as a single vector.

    Raises:
        ValueError: If ``norm_type`` is not a positive number (``'inf'`` is allowed).
    """
    # ``float('inf')`` is produced here when the caller passes the string ``'inf'``.
    norm_type = float(norm_type)
    if norm_type <= 0:
        raise ValueError(f"`norm_type` must be a positive number or 'inf' (infinity norm). Got {norm_type}")
    # Parameters whose ``.grad`` is None (frozen or not yet backpropagated) are skipped.
    norms = {
        f"grad_{norm_type}_norm{group_separator}{name}": p.grad.data.norm(norm_type)
        for name, p in module.named_parameters()
        if p.grad is not None
    }
    if norms:
        # The p-norm of the per-parameter norms equals the p-norm of the concatenated gradient vector.
        total_norm = torch.tensor(list(norms.values())).norm(norm_type)
        norms[f"grad_{norm_type}_norm_total"] = total_norm
return norms | Compute each parameter's gradient's norm and their overall norm. The overall norm is computed over all gradients together, as if they were concatenated into a single vector. Args: module: :class:`torch.nn.Module` to inspect. norm_type: The type of the used p-norm, cast to float if necessary. Can be ``'inf'`` for infinity norm. group_separator: The separator string used by the logger to group the gradients norms in their own subfolder instead of the logs one. Return: norms: The dictionary of p-norms of each parameter's gradient and a special entry for the total p-norm of the gradients viewed as a single vector. |
155,405 | import glob
import logging
from argparse import ArgumentParser, Namespace
from pathlib import Path
from shutil import copyfile
from typing import List
import torch
from tqdm import tqdm
from lightning.pytorch.utilities.migration import migrate_checkpoint, pl_legacy_patch
_log = logging.getLogger(__name__)
def _upgrade(args: Namespace) -> None:
    """Upgrade one checkpoint file, or every ``*<extension>`` checkpoint under a directory, in place.

    A ``.bak`` copy of each original file is created (once) before it is overwritten. Exits the
    process with code 1 when the path does not exist or no matching checkpoints are found.
    """
    path = Path(args.path).absolute()
    # Normalize the extension so both ``ckpt`` and ``.ckpt`` are accepted from the CLI.
    extension: str = args.extension if args.extension.startswith(".") else f".{args.extension}"
    files: List[Path] = []
    if not path.exists():
        _log.error(
            f"The path {path} does not exist. Please provide a valid path to a checkpoint file or a directory"
            f" containing checkpoints ending in {extension}."
        )
        exit(1)
    if path.is_file():
        files = [path]
    if path.is_dir():
        # Recursive glob: pick up checkpoints in arbitrarily nested version folders.
        files = [Path(p) for p in glob.glob(str(path / "**" / f"*{extension}"), recursive=True)]
    if not files:
        _log.error(
            f"No checkpoint files with extension {extension} were found in {path}."
            f" HINT: Try setting the `--extension` option to specify the right file extension to look for."
        )
        exit(1)
    _log.info("Creating a backup of the existing checkpoint files before overwriting in the upgrade process.")
    for file in files:
        backup_file = file.with_suffix(".bak")
        if backup_file.exists():
            # never overwrite backup files - they are the original, untouched checkpoints
            continue
        copyfile(file, backup_file)
    _log.info("Upgrading checkpoints ...")
    for file in tqdm(files):
        with pl_legacy_patch():
            checkpoint = torch.load(file, map_location=(torch.device("cpu") if args.map_to_cpu else None))
        migrate_checkpoint(checkpoint)
        torch.save(checkpoint, file)
_log.info("Done.") | null |
155,406 | import inspect
import os
from argparse import Namespace
from ast import literal_eval
from contextlib import suppress
from functools import wraps
from typing import Any, Callable, Type, TypeVar, cast
_T = TypeVar("_T", bound=Callable[..., Any])
def _parse_env_variables(cls: Type, template: str = "PL_%(cls_name)s_%(cls_argument)s") -> Namespace:
    """Parse environment arguments if they are defined.

    Only the parameters of ``cls``'s signature are considered; unset or empty variables are skipped.

    Examples:
        >>> from lightning.pytorch import Trainer
        >>> _parse_env_variables(Trainer)
        Namespace()
        >>> import os
        >>> os.environ["PL_TRAINER_DEVICES"] = '42'
        >>> os.environ["PL_TRAINER_BLABLABLA"] = '1.23'
        >>> _parse_env_variables(Trainer)
        Namespace(devices=42)
        >>> del os.environ["PL_TRAINER_DEVICES"]
    """
    env_args = {}
    for arg_name in inspect.signature(cls).parameters:
        # e.g. "PL_TRAINER_DEVICES" for Trainer(devices=...)
        env = template % {"cls_name": cls.__name__.upper(), "cls_argument": arg_name.upper()}
        val = os.environ.get(env)
        if not (val is None or val == ""):
            # todo: specify the possible exception
            with suppress(Exception):
                # converting to native types like int/float/bool
                val = literal_eval(val)
            # Values that ``literal_eval`` cannot parse are kept as the raw string.
            env_args[arg_name] = val
    return Namespace(**env_args)
def _defaults_from_env_vars(fn: _T) -> _T:
    """Decorator that fills missing ``__init__`` keyword arguments from ``PL_<CLS>_<ARG>`` env variables."""
    @wraps(fn)
    def insert_env_defaults(self: Any, *args: Any, **kwargs: Any) -> Any:
        cls = self.__class__  # get the class
        if args:  # in case any args passed move them to kwargs
            # parse the argument names
            cls_arg_names = inspect.signature(cls).parameters
            # convert args to kwargs
            kwargs.update(dict(zip(cls_arg_names, args)))
        env_variables = vars(_parse_env_variables(cls))
        # update the kwargs by env variables
        # (explicit kwargs come last in the concatenation, so they take precedence over the environment)
        kwargs = dict(list(env_variables.items()) + list(kwargs.items()))
        # all args were already moved to kwargs
        return fn(self, **kwargs)
return cast(_T, insert_env_defaults) | null |
155,407 | import copy
import inspect
import pickle
import types
from dataclasses import fields, is_dataclass
from typing import Any, Dict, List, Literal, MutableMapping, Optional, Sequence, Tuple, Type, Union
from torch import nn
import lightning.pytorch as pl
from lightning.fabric.utilities.data import AttributeDict as _AttributeDict
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
def is_picklable(obj: object) -> bool:
    """Return ``True`` when ``obj`` can be serialized with :mod:`pickle`, ``False`` otherwise."""
    try:
        pickle.dumps(obj)
    except (pickle.PickleError, AttributeError, RuntimeError, TypeError):
        # The set of errors pickle raises for unpicklable objects (lambdas, locks, open files, ...).
        return False
    return True
The provided code snippet includes necessary dependencies for implementing the `clean_namespace` function. Write a Python function `def clean_namespace(hparams: MutableMapping) -> None` to solve the following problem:
Removes all unpicklable entries from hparams.
Here is the function:
def clean_namespace(hparams: MutableMapping) -> None:
    """Removes all unpicklable entries from hparams (mutates ``hparams`` in place)."""
    # Collect the offending keys first so the mapping is not mutated while iterating it.
    del_attrs = [k for k, v in hparams.items() if not is_picklable(v)]
    for k in del_attrs:
        rank_zero_warn(
            f"Attribute '{k}' removed from hparams because it cannot be pickled. You can suppress this warning by"
            f" setting `self.save_hyperparameters(ignore=['{k}'])`.",
        )
del hparams[k] | Removes all unpicklable entries from hparams. |
155,408 | import copy
import inspect
import pickle
import types
from dataclasses import fields, is_dataclass
from typing import Any, Dict, List, Literal, MutableMapping, Optional, Sequence, Tuple, Type, Union
from torch import nn
import lightning.pytorch as pl
from lightning.fabric.utilities.data import AttributeDict as _AttributeDict
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
def _get_init_args(frame: types.FrameType) -> Tuple[Optional[Any], Dict[str, Any]]:
    """Extract, from an ``__init__`` stack frame, the object under construction and its constructor arguments.

    Returns ``(None, {})`` when the frame is not a method frame (no ``__class__`` cell present).
    """
    _, _, _, local_vars = inspect.getargvalues(frame)
    if "__class__" not in local_vars:
        return None, {}
    cls = local_vars["__class__"]
    init_parameters = inspect.signature(cls.__init__).parameters
    # parse_class_init_keys is defined elsewhere — presumably returns the names of the
    # self / *args / **kwargs parameters of ``cls.__init__``; TODO confirm.
    self_var, args_var, kwargs_var = parse_class_init_keys(cls)
    filtered_vars = [n for n in (self_var, args_var, kwargs_var) if n]
    exclude_argnames = (*filtered_vars, "__class__", "frame", "frame_args")
    # only collect variables that appear in the signature
    local_args = {k: local_vars[k] for k in init_parameters}
    # kwargs_var might be None => raised an error by mypy
    if kwargs_var:
        # Merge the captured **kwargs dict into the top-level argument mapping.
        local_args.update(local_args.get(kwargs_var, {}))
    local_args = {k: v for k, v in local_args.items() if k not in exclude_argnames}
    self_arg = local_vars.get(self_var, None)
    return self_arg, local_args
The provided code snippet includes necessary dependencies for implementing the `get_init_args` function. Write a Python function `def get_init_args(frame: types.FrameType) -> Dict[str, Any]` to solve the following problem:
For backwards compatibility: #16369.
Here is the function:
def get_init_args(frame: types.FrameType) -> Dict[str, Any]:  # pragma: no cover
    """For backwards compatibility: #16369."""
    # Thin wrapper that drops the ``self`` half of ``_get_init_args``'s result.
    _, local_args = _get_init_args(frame)
return local_args | For backwards compatibility: #16369. |
155,409 | import copy
import inspect
import pickle
import types
from dataclasses import fields, is_dataclass
from typing import Any, Dict, List, Literal, MutableMapping, Optional, Sequence, Tuple, Type, Union
from torch import nn
import lightning.pytorch as pl
from lightning.fabric.utilities.data import AttributeDict as _AttributeDict
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
def collect_init_args(
    frame: types.FrameType,
    path_args: List[Dict[str, Any]],
    inside: bool = False,
    classes: Tuple[Type, ...] = (),
) -> List[Dict[str, Any]]:
    """Recursively collects the arguments passed to the child constructors in the inheritance tree.

    Args:
        frame: the current stack frame
        path_args: a list of dictionaries containing the constructor args in all parent classes
        inside: track if we are inside inheritance path, avoid terminating too soon
        classes: the classes in which to inspect the frames

    Return:
        A list of dictionaries where each dictionary contains the arguments passed to the
        constructor at that level. The last entry corresponds to the constructor call of the
        most specific class in the hierarchy.
    """
    _, _, _, local_vars = inspect.getargvalues(frame)
    # frame.f_back must be of a type types.FrameType for get_init_args/collect_init_args due to mypy
    if not isinstance(frame.f_back, types.FrameType):
        return path_args
    local_self, local_args = _get_init_args(frame)
    if "__class__" in local_vars and (not classes or isinstance(local_self, classes)):
        # recursive update: walk up through the chained parent-class __init__ frames
        path_args.append(local_args)
        return collect_init_args(frame.f_back, path_args, inside=True, classes=classes)
    if not inside:
        # Not yet inside the inheritance path — keep searching up the call stack.
        return collect_init_args(frame.f_back, path_args, inside=False, classes=classes)
    return path_args
The provided code snippet includes necessary dependencies for implementing the `save_hyperparameters` function. Write a Python function `def save_hyperparameters( obj: Any, *args: Any, ignore: Optional[Union[Sequence[str], str]] = None, frame: Optional[types.FrameType] = None, given_hparams: Optional[Dict[str, Any]] = None, ) -> None` to solve the following problem:
See :meth:`~lightning.pytorch.LightningModule.save_hyperparameters`
Here is the function:
def save_hyperparameters(
    obj: Any,
    *args: Any,
    ignore: Optional[Union[Sequence[str], str]] = None,
    frame: Optional[types.FrameType] = None,
    given_hparams: Optional[Dict[str, Any]] = None,
) -> None:
    """See :meth:`~lightning.pytorch.LightningModule.save_hyperparameters`"""
    # NOTE(review): ``args`` is always a tuple, so ``not isinstance(args, str)`` is always True —
    # this effectively returns early only when a single falsy positional argument was passed.
    if len(args) == 1 and not isinstance(args, str) and not args[0]:
        # args[0] is an empty container
        return
    if not frame:
        current_frame = inspect.currentframe()
        # inspect.currentframe() return type is Optional[types.FrameType]: current_frame.f_back called only if available
        if current_frame:
            frame = current_frame.f_back
    if not isinstance(frame, types.FrameType):
        raise AttributeError("There is no `frame` available while being required.")
    if given_hparams is not None:
        init_args = given_hparams
    elif is_dataclass(obj):
        # Dataclasses expose their constructor arguments directly as fields.
        init_args = {f.name: getattr(obj, f.name) for f in fields(obj)}
    else:
        init_args = {}
        from lightning.pytorch.core.mixins import HyperparametersMixin
        # Walk up the __init__ call chain and merge the arguments of every subclass constructor.
        for local_args in collect_init_args(frame, [], classes=(HyperparametersMixin,)):
            init_args.update(local_args)
    # Normalize ``ignore`` into a deduplicated list of strings.
    if ignore is None:
        ignore = []
    elif isinstance(ignore, str):
        ignore = [ignore]
    elif isinstance(ignore, (list, tuple)):
        ignore = [arg for arg in ignore if isinstance(arg, str)]
    ignore = list(set(ignore))
    init_args = {k: v for k, v in init_args.items() if k not in ignore}
    if not args:
        # take all arguments
        hp = init_args
        obj._hparams_name = "kwargs" if hp else None
    else:
        # take only listed arguments in `save_hparams`
        isx_non_str = [i for i, arg in enumerate(args) if not isinstance(arg, str)]
        if len(isx_non_str) == 1:
            # A single non-string argument is treated as a ready-made hparams container.
            hp = args[isx_non_str[0]]
            cand_names = [k for k, v in init_args.items() if v == hp]
            obj._hparams_name = cand_names[0] if cand_names else None
        else:
            hp = {arg: init_args[arg] for arg in args if isinstance(arg, str)}
            obj._hparams_name = "kwargs"
    # `hparams` are expected here
    obj._set_hparams(hp)
    for k, v in obj._hparams.items():
        if isinstance(v, nn.Module):
            rank_zero_warn(
                f"Attribute {k!r} is an instance of `nn.Module` and is already saved during checkpointing."
                f" It is recommended to ignore them using `self.save_hyperparameters(ignore=[{k!r}])`."
            )
    # make a deep copy so there are no other runtime changes reflected
obj._hparams_initial = copy.deepcopy(obj._hparams) | See :meth:`~lightning.pytorch.LightningModule.save_hyperparameters` |
155,410 | from __future__ import annotations
import os
from typing import Any
import torch
from lightning.fabric.utilities.types import _PATH
from lightning.pytorch.strategies.deepspeed import _DEEPSPEED_AVAILABLE
CPU_DEVICE = torch.device("cpu")
def ds_checkpoint_dir(checkpoint_dir: _PATH, tag: str | None = None) -> str:
    """Resolve the DeepSpeed checkpoint sub-directory for a given tag.

    Args:
        checkpoint_dir: Root checkpoint folder containing the tag sub-folders.
        tag: Checkpoint tag (sub-folder name). If ``None``, it is read from the ``latest``
            file inside ``checkpoint_dir``.

    Returns:
        The joined path ``checkpoint_dir/tag`` as a string.

    Raises:
        ValueError: If ``tag`` is ``None`` and no ``latest`` file exists.
        FileNotFoundError: If the resolved tag directory does not exist.
    """
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, "latest")
        if os.path.isfile(latest_path):
            with open(latest_path) as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")
    directory = os.path.join(checkpoint_dir, tag)
    if not os.path.isdir(directory):
        # Bug fix: the message previously interpolated the function object (`ds_checkpoint_dir`)
        # instead of the missing path.
        raise FileNotFoundError(f"Directory '{directory}' doesn't exist")
    return directory
def _remove_prefix(key: str, prefix: str) -> str:
return key[len(prefix) :] if key.startswith(prefix) else key
_PATH = Union[str, Path]
The provided code snippet includes necessary dependencies for implementing the `convert_zero_checkpoint_to_fp32_state_dict` function. Write a Python function `def convert_zero_checkpoint_to_fp32_state_dict( checkpoint_dir: _PATH, output_file: _PATH, tag: str | None = None ) -> dict[str, Any]` to solve the following problem:
Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. It gets copied into the top level checkpoint dir, so the user can easily do the conversion at any point in the future. Once extracted, the weights don't require DeepSpeed and can be used in any application. Additionally the script has been modified to ensure we keep the lightning state inside the state dict for being able to run ``LightningModule.load_from_checkpoint('...')```. Args: checkpoint_dir: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) output_file: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin) tag: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` Examples:: # Lightning deepspeed has saved a directory instead of a file convert_zero_checkpoint_to_fp32_state_dict( "lightning_logs/version_0/checkpoints/epoch=0-step=0.ckpt/", "lightning_model.pt" )
Here is the function:
def convert_zero_checkpoint_to_fp32_state_dict(
    checkpoint_dir: _PATH, output_file: _PATH, tag: str | None = None
) -> dict[str, Any]:
    """Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be loaded with
    ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. It gets copied into the top
    level checkpoint dir, so the user can easily do the conversion at any point in the future. Once extracted, the
    weights don't require DeepSpeed and can be used in any application. Additionally the script has been modified to
    ensure we keep the lightning state inside the state dict for being able to run
    ``LightningModule.load_from_checkpoint('...')``.

    Args:
        checkpoint_dir: path to the desired checkpoint folder.
            (one that contains the tag-folder, like ``global_step14``)
        output_file: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
        tag: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt
            to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Examples::

        # Lightning deepspeed has saved a directory instead of a file
        convert_zero_checkpoint_to_fp32_state_dict(
            "lightning_logs/version_0/checkpoints/epoch=0-step=0.ckpt/",
            "lightning_model.pt"
        )
    """
    if not _DEEPSPEED_AVAILABLE:
        raise ModuleNotFoundError(str(_DEEPSPEED_AVAILABLE))
    # Imported lazily so this module is importable without deepspeed installed.
    from deepspeed.utils.zero_to_fp32 import (
        get_fp32_state_dict_from_zero_checkpoint,
        get_model_state_file,
        get_optim_files,
    )
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
    # additional logic to ensure we keep the lightning state dict as well from rank 0.
    # The keys below are DeepSpeed-internal and are stripped from the client state.
    deepspeed_states = [
        "module",
        "optimizer",
        "lr_scheduler",
        "csr_tensor_module_names",
        "skipped_steps",
        "global_steps",
        "dp_world_size",
        "mp_world_size",
    ]
    checkpoint_dir = ds_checkpoint_dir(checkpoint_dir)
    optim_files = get_optim_files(checkpoint_dir)
    # The ZeRO stage is read from the first optimizer shard; it determines which model state file to load.
    optim_state = torch.load(optim_files[0], map_location=CPU_DEVICE)
    zero_stage = optim_state["optimizer_state_dict"]["zero_stage"]
    model_file = get_model_state_file(checkpoint_dir, zero_stage)
    client_state = torch.load(model_file, map_location=CPU_DEVICE)
    client_state = {key: value for key, value in client_state.items() if key not in deepspeed_states}
    # State dict keys will include reference to wrapper _LightningModuleWrapperBase in old checkpoints created in
    # Lightning version < 2.1. Delete the `_forward_module` prefix before saving.
    state_dict = {_remove_prefix(k, "_forward_module."): state_dict[k] for k in state_dict}
    client_state["state_dict"] = state_dict
    print(f"Saving fp32 state dict to {output_file}")
    torch.save(client_state, output_file)
return client_state | Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. It gets copied into the top level checkpoint dir, so the user can easily do the conversion at any point in the future. Once extracted, the weights don't require DeepSpeed and can be used in any application. Additionally the script has been modified to ensure we keep the lightning state inside the state dict for being able to run ``LightningModule.load_from_checkpoint('...')```. Args: checkpoint_dir: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) output_file: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin) tag: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` Examples:: # Lightning deepspeed has saved a directory instead of a file convert_zero_checkpoint_to_fp32_state_dict( "lightning_logs/version_0/checkpoints/epoch=0-step=0.ckpt/", "lightning_model.pt" ) |
155,411 | import inspect
from dataclasses import fields
from typing import Any, Dict, Generator, Iterable, Mapping, Optional, Sized, Tuple, Union
import torch
from lightning_utilities.core.apply_func import is_dataclass_instance
from torch import Tensor
from torch.utils.data import BatchSampler, DataLoader, IterableDataset, RandomSampler, Sampler, SequentialSampler
from typing_extensions import TypeGuard
import lightning.pytorch as pl
from lightning.fabric.utilities.data import (
_reinstantiate_wrapped_cls,
_replace_value_in_saved_args,
has_iterable_dataset,
sized_len,
)
from lightning.pytorch.overrides.distributed import _IndexBatchSamplerWrapper
from lightning.pytorch.trainer.states import RunningStage
from lightning.pytorch.utilities.exceptions import MisconfigurationException
from lightning.pytorch.utilities.rank_zero import WarningCache, rank_zero_warn
BType = Union[Tensor, str, Mapping[Any, "BType"], Iterable["BType"]]
warning_cache = WarningCache()
def _extract_batch_size(batch: BType) -> Generator[Optional[int], None, None]:
    """Recursively yield candidate batch sizes found in ``batch``.

    Yields the size of dim 0 for every tensor encountered (1 for scalar tensors), recursing into
    mappings, iterables and dataclass instances; yields ``None`` for anything else.
    """
    if isinstance(batch, Tensor):
        if batch.ndim == 0:
            # A scalar tensor is treated as a batch of one.
            yield 1
        else:
            yield batch.size(0)
    elif isinstance(batch, (Iterable, Mapping)) and not isinstance(batch, str):
        if isinstance(batch, Mapping):
            # Only the values of a mapping can contain tensors.
            batch = batch.values()
        for sample in batch:
            yield from _extract_batch_size(sample)
    elif is_dataclass_instance(batch):
        for field in fields(batch):  # type: ignore[arg-type]
            yield from _extract_batch_size(getattr(batch, field.name))
    else:
        yield None
The provided code snippet includes necessary dependencies for implementing the `extract_batch_size` function. Write a Python function `def extract_batch_size(batch: BType) -> int` to solve the following problem:
Unpack a batch to find a ``torch.Tensor``. Returns: ``len(tensor)`` when found, or ``1`` when it hits an empty or non iterable.
Here is the function:
def extract_batch_size(batch: BType) -> int:
    """Unpack a batch to find a ``torch.Tensor``.

    Returns:
        ``len(tensor)`` when found, or ``1`` when it hits an empty or non iterable.

    Raises:
        RecursionError: If the batch structure is nested too deeply to traverse.
        MisconfigurationException: If no batch size could be inferred at all.
    """
    error_msg = (
        "We could not infer the batch_size from the batch. Either simplify its structure"
        " or provide the batch_size as `self.log(..., batch_size=batch_size)`."
    )
    batch_size = None
    try:
        for bs in _extract_batch_size(batch):
            if batch_size is None:
                # The first candidate wins; later mismatches only produce a warning.
                batch_size = bs
            elif batch_size != bs:
                warning_cache.warn(
                    "Trying to infer the `batch_size` from an ambiguous collection. The batch size we"
                    f" found is {batch_size}. To avoid any miscalculations, use `self.log(..., batch_size=batch_size)`."
                )
                break
    except RecursionError:
        # Re-raise with a message explaining how to avoid the inference entirely.
        raise RecursionError(error_msg)
    if batch_size is None:
        raise MisconfigurationException(error_msg)
return batch_size | Unpack a batch to find a ``torch.Tensor``. Returns: ``len(tensor)`` when found, or ``1`` when it hits an empty or non iterable. |
155,412 | import inspect
from dataclasses import fields
from typing import Any, Dict, Generator, Iterable, Mapping, Optional, Sized, Tuple, Union
import torch
from lightning_utilities.core.apply_func import is_dataclass_instance
from torch import Tensor
from torch.utils.data import BatchSampler, DataLoader, IterableDataset, RandomSampler, Sampler, SequentialSampler
from typing_extensions import TypeGuard
import lightning.pytorch as pl
from lightning.fabric.utilities.data import (
_reinstantiate_wrapped_cls,
_replace_value_in_saved_args,
has_iterable_dataset,
sized_len,
)
from lightning.pytorch.overrides.distributed import _IndexBatchSamplerWrapper
from lightning.pytorch.trainer.states import RunningStage
from lightning.pytorch.utilities.exceptions import MisconfigurationException
from lightning.pytorch.utilities.rank_zero import WarningCache, rank_zero_warn
def has_iterable_dataset(dataloader: object) -> bool:
    """Return ``True`` when ``dataloader`` exposes a ``dataset`` attribute that is an ``IterableDataset``."""
    dataset = getattr(dataloader, "dataset", None)
    return isinstance(dataset, IterableDataset)
def sized_len(dataloader: object) -> Optional[int]:
    """Return ``len(dataloader)`` when the object supports it, otherwise ``None``."""
    try:
        return len(dataloader)  # type: ignore[arg-type]
    except (TypeError, NotImplementedError):
        # Objects without a usable ``__len__`` (e.g. iterable-style loaders) report no length.
        return None
The provided code snippet includes necessary dependencies for implementing the `has_len_all_ranks` function. Write a Python function `def has_len_all_ranks( dataloader: object, strategy: "pl.strategies.Strategy", allow_zero_length_dataloader_with_multiple_devices: bool = False, ) -> TypeGuard[Sized]` to solve the following problem:
Checks if a given object has ``__len__`` method implemented on all ranks.
Here is the function:
def has_len_all_ranks(
    dataloader: object,
    strategy: "pl.strategies.Strategy",
    allow_zero_length_dataloader_with_multiple_devices: bool = False,
) -> TypeGuard[Sized]:
    """Checks if a given object has ``__len__`` method implemented on all ranks."""
    local_length = sized_len(dataloader)
    if local_length is None:
        # __len__ is not defined, skip these checks
        return False
    # Sum the per-rank lengths so every rank agrees on the global length.
    total_length = strategy.reduce(torch.tensor(local_length, device=strategy.root_device), reduce_op="sum")
    if total_length == 0:
        rank_zero_warn(
            f"Total length of `{type(dataloader).__name__}` across ranks is zero."
            " Please make sure this was your intention."
        )
    if total_length > 0 and local_length == 0:
        # Some other rank has data while this one has none — an uneven split.
        dataloader_cls_name = type(dataloader).__name__
        if not allow_zero_length_dataloader_with_multiple_devices:
            raise RuntimeError(
                f"`{dataloader_cls_name}` within local rank has zero length."
                " Please make sure that it returns at least 1 batch."
            )
        rank_zero_warn(
            f"Total length of `{dataloader_cls_name}` across ranks is zero, but local rank has zero"
            " length. Please be cautious of uneven batch length."
        )
    if has_iterable_dataset(dataloader):
        rank_zero_warn(
            "Your `IterableDataset` has `__len__` defined."
            " In combination with multi-process data loading (when num_workers > 1),"
            " `__len__` could be inaccurate if each worker is not configured independently"
            " to avoid having duplicate data."
        )
return True | Checks if a given object has ``__len__`` method implemented on all ranks. |
155,413 | import re
from lightning.fabric.utilities.exceptions import MisconfigurationException
def _augment_message(exception: BaseException, pattern: str, new_message: str) -> None:
    """Replace, in place, every string arg of ``exception`` that matches ``pattern`` with ``new_message``."""
    # ``re.match`` anchors at the start of the string; DOTALL lets the pattern span multi-line messages.
    exception.args = tuple(
        new_message if isinstance(arg, str) and re.match(pattern, arg, re.DOTALL) else arg for arg in exception.args
) | null |
155,414 | import functools
import inspect
import logging
import os
from typing import TYPE_CHECKING, Any, Callable, Dict, Generic, Optional, Type, TypeVar
from lightning_utilities.core.imports import RequirementCache
from torch import nn
from typing_extensions import Concatenate, ParamSpec
import lightning.pytorch as pl
_TORCHVISION_AVAILABLE = RequirementCache("torchvision")
def get_torchvision_model(model_name: str, **kwargs: Any) -> nn.Module:
    """Instantiate a torchvision model by name, forwarding ``kwargs`` to its constructor.

    Raises:
        ModuleNotFoundError: If torchvision is not installed.
    """
    from lightning.pytorch.utilities.imports import _TORCHVISION_AVAILABLE
    if not _TORCHVISION_AVAILABLE:
        raise ModuleNotFoundError(str(_TORCHVISION_AVAILABLE))
    from torchvision import models
    torchvision_greater_equal_0_14 = RequirementCache("torchvision>=0.14.0")
    # TODO: deprecate this function when 0.14 is the minimum supported torchvision
    if torchvision_greater_equal_0_14:
        # ``models.get_model`` is the registry-based factory introduced in torchvision 0.14.
        return models.get_model(model_name, **kwargs)
return getattr(models, model_name)(**kwargs) | null |
155,415 | from typing import Any, List, Tuple
from torch.utils._pytree import SUPPORTED_NODES, LeafSpec, PyTree, TreeSpec, _get_node_type, tree_unflatten
def _is_leaf_or_primitive_container(pytree: PyTree) -> bool:
"""Customized :func:`torch.utils._pytree._is_leaf` to avoid flattening containers of primitives."""
is_leaf = _get_node_type(pytree) not in SUPPORTED_NODES
if is_leaf:
return True
node_type = _get_node_type(pytree)
flatten_fn = SUPPORTED_NODES[node_type].flatten_fn
child_pytrees, _ = flatten_fn(pytree)
return all(isinstance(child, (int, float, str)) for child in child_pytrees)
The provided code snippet includes necessary dependencies for implementing the `_tree_flatten` function. Write a Python function `def _tree_flatten(pytree: PyTree) -> Tuple[List[Any], TreeSpec]` to solve the following problem:
Copy of :func:`torch.utils._pytree.tree_flatten` using our custom leaf function.
Here is the function:
def _tree_flatten(pytree: PyTree) -> Tuple[List[Any], TreeSpec]:
    """Copy of :func:`torch.utils._pytree.tree_flatten` using our custom leaf function."""
    if _is_leaf_or_primitive_container(pytree):
        # Leaves (and containers of primitives) are kept whole.
        return [pytree], LeafSpec()
    node_type = _get_node_type(pytree)
    flatten_fn = SUPPORTED_NODES[node_type].flatten_fn
    child_pytrees, context = flatten_fn(pytree)
    # Recursively flatten each child, accumulating both the values and their specs.
    result: List[Any] = []
    children_specs: List["TreeSpec"] = []
    for child in child_pytrees:
        flat, child_spec = _tree_flatten(child)
        result += flat
        children_specs.append(child_spec)
return result, TreeSpec(node_type, context, children_specs) | Copy of :func:`torch.utils._pytree.tree_flatten` using our custom leaf function. |
155,416 | from typing import Any, List, Tuple
from torch.utils._pytree import SUPPORTED_NODES, LeafSpec, PyTree, TreeSpec, _get_node_type, tree_unflatten
The provided code snippet includes necessary dependencies for implementing the `_map_and_unflatten` function. Write a Python function `def _map_and_unflatten(fn: Any, values: List[Any], spec: TreeSpec) -> PyTree` to solve the following problem:
Utility function to apply a function and unflatten it.
Here is the function:
def _map_and_unflatten(fn: Any, values: List[Any], spec: TreeSpec) -> PyTree:
    """Apply ``fn`` to every flattened value and rebuild the container structure described by ``spec``."""
return tree_unflatten([fn(i) for i in values], spec) | Utility function to apply a function and unflatten it. |
155,417 | from typing import Union
import torch
import lightning.pytorch as pl
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0, _TORCH_GREATER_EQUAL_2_1
from lightning.pytorch.strategies import DDPStrategy, DeepSpeedStrategy, FSDPStrategy, SingleDeviceStrategy, Strategy
from lightning.pytorch.utilities.model_helpers import _check_mixed_imports
_TORCH_GREATER_EQUAL_2_0 = compare_version("torch", operator.ge, "2.0.0")
The provided code snippet includes necessary dependencies for implementing the `to_uncompiled` function. Write a Python function `def to_uncompiled(model: Union["pl.LightningModule", "torch._dynamo.OptimizedModule"]) -> "pl.LightningModule"` to solve the following problem:
Returns an instance of LightningModule without any compilation optimizations from a compiled model. .. warning:: This is an :ref:`experimental <versioning:Experimental API>` feature. This takes either a ``torch._dynamo.OptimizedModule`` returned by ``torch.compile()`` or a ``LightningModule`` returned by ``from_compiled``. Note: this method will in-place modify the ``LightningModule`` that is passed in.
Here is the function:
def to_uncompiled(model: Union["pl.LightningModule", "torch._dynamo.OptimizedModule"]) -> "pl.LightningModule":
    """Returns an instance of LightningModule without any compilation optimizations from a compiled model.

    .. warning:: This is an :ref:`experimental <versioning:Experimental API>` feature.

    This takes either a ``torch._dynamo.OptimizedModule`` returned by ``torch.compile()`` or a ``LightningModule``
    returned by ``from_compiled``.

    Note: this method will in-place modify the ``LightningModule`` that is passed in.

    """
    if not _TORCH_GREATER_EQUAL_2_0:
        raise ModuleNotFoundError("`to_uncompiled` requires torch>=2.0")
    from torch._dynamo import OptimizedModule

    if isinstance(model, OptimizedModule):
        # unwrap the `torch.compile` wrapper and sanity-check what it wraps
        original = model._orig_mod
        if not isinstance(original, pl.LightningModule):
            raise TypeError(
                f"Unexpected error, the wrapped model should be a LightningModule, found {type(model).__name__}"
            )
    elif isinstance(model, pl.LightningModule):
        if model._compiler_ctx is None:
            raise ValueError(
                "`model` is required to be a compiled LightningModule. Found a non-compiled LightningModule instead."
            )
        original = model
    else:
        raise ValueError("`model` must either be an instance of OptimizedModule or LightningModule")

    ctx = original._compiler_ctx
    if ctx is not None:
        # put back the hooks that `from_compiled` replaced with dynamo-wrapped versions
        for hook_name in ("forward", "training_step", "validation_step", "test_step", "predict_step"):
            setattr(original, hook_name, ctx[f"original_{hook_name}"])
        original._compiler_ctx = None

    return original
155,418 | from typing import Union
import torch
import lightning.pytorch as pl
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0, _TORCH_GREATER_EQUAL_2_1
from lightning.pytorch.strategies import DDPStrategy, DeepSpeedStrategy, FSDPStrategy, SingleDeviceStrategy, Strategy
from lightning.pytorch.utilities.model_helpers import _check_mixed_imports
def from_compiled(model: "torch._dynamo.OptimizedModule") -> "pl.LightningModule":
    """Returns an instance LightningModule from the output of ``torch.compile``.

    .. warning:: This is an :ref:`experimental <versioning:Experimental API>` feature.

    The ``torch.compile`` function returns a ``torch._dynamo.OptimizedModule``, which wraps the LightningModule
    passed in as an argument, but doesn't inherit from it. This means that the output of ``torch.compile`` behaves
    like a LightningModule, but it doesn't inherit from it (i.e. `isinstance` will fail).

    Use this method to obtain a LightningModule that still runs with all the optimizations from ``torch.compile``.

    """
    if not _TORCH_GREATER_EQUAL_2_0:
        raise ModuleNotFoundError("`from_compiled` requires torch>=2.0")
    from torch._dynamo import OptimizedModule

    if not isinstance(model, OptimizedModule):
        raise ValueError(f"`model` is required to be a `OptimizedModule`. Found a `{type(model).__name__}` instead.")

    orig_module = model._orig_mod
    if not isinstance(orig_module, pl.LightningModule):
        _check_mixed_imports(model)
        raise ValueError(
            f"`model` is expected to be a compiled LightningModule. Found a `{type(orig_module).__name__}` instead"
        )

    # stash the unwrapped hooks so `to_uncompiled` can restore them later
    orig_module._compiler_ctx = {
        "compiler": "dynamo",
        "dynamo_ctx": model.dynamo_ctx,
        "original_forward": orig_module.forward,
        "original_training_step": orig_module.training_step,
        "original_validation_step": orig_module.validation_step,
        "original_test_step": orig_module.test_step,
        "original_predict_step": orig_module.predict_step,
    }
    # re-bind each hook wrapped in the dynamo context so calls on the LightningModule itself stay optimized
    orig_module.forward = model.dynamo_ctx(orig_module.forward)  # type: ignore[method-assign]
    if not _TORCH_GREATER_EQUAL_2_1:  # https://github.com/pytorch/pytorch/issues/95630
        orig_module.forward._torchdynamo_inline = orig_module.forward
    orig_module.training_step = model.dynamo_ctx(orig_module.training_step)  # type: ignore[method-assign]
    if not _TORCH_GREATER_EQUAL_2_1:  # https://github.com/pytorch/pytorch/issues/95630
        orig_module.training_step._torchdynamo_inline = orig_module.training_step
    orig_module.validation_step = model.dynamo_ctx(orig_module.validation_step)  # type: ignore[method-assign]
    orig_module.test_step = model.dynamo_ctx(orig_module.test_step)  # type: ignore[method-assign]
    orig_module.predict_step = model.dynamo_ctx(orig_module.predict_step)  # type: ignore[method-assign]
    return orig_module
_TORCH_GREATER_EQUAL_2_0 = compare_version("torch", operator.ge, "2.0.0")
def _check_mixed_imports(instance: object) -> None:
old, new = "pytorch_" + "lightning", "lightning." + "pytorch"
klass = type(instance)
module = klass.__module__
if module.startswith(old) and __name__.startswith(new):
pass
elif module.startswith(new) and __name__.startswith(old):
old, new = new, old
else:
return
raise TypeError(
f"You passed a `{old}` object ({type(instance).__qualname__}) to a `{new}`"
" Trainer. Please switch to a single import style."
)
def _maybe_unwrap_optimized(model: object) -> "pl.LightningModule":
    """Accept a ``LightningModule`` or a ``torch.compile``-d one and always return a ``LightningModule``."""
    if not _TORCH_GREATER_EQUAL_2_0:
        # OptimizedModule does not exist before torch 2.0, so only a plain LightningModule is acceptable
        if isinstance(model, pl.LightningModule):
            return model
        _check_mixed_imports(model)
        raise TypeError(f"`model` must be a `LightningModule`, got `{type(model).__qualname__}`")
    from torch._dynamo import OptimizedModule

    if isinstance(model, OptimizedModule):
        return from_compiled(model)
    if isinstance(model, pl.LightningModule):
        return model
    _check_mixed_imports(model)
    raise TypeError(
        f"`model` must be a `LightningModule` or `torch._dynamo.OptimizedModule`, got `{type(model).__qualname__}`"
    )
155,419 | from typing import Union
import torch
import lightning.pytorch as pl
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0, _TORCH_GREATER_EQUAL_2_1
from lightning.pytorch.strategies import DDPStrategy, DeepSpeedStrategy, FSDPStrategy, SingleDeviceStrategy, Strategy
from lightning.pytorch.utilities.model_helpers import _check_mixed_imports
def _verify_strategy_supports_compile(model: "pl.LightningModule", strategy: Strategy) -> None:
    """Raise if ``model`` was ``torch.compile``-d but ``strategy`` cannot run compiled modules."""
    if model._compiler_ctx is None:
        # nothing was compiled, any strategy is fine
        return
    supported_strategies = (SingleDeviceStrategy, DDPStrategy, FSDPStrategy)
    # NOTE(review): DeepSpeed is excluded explicitly even if it matches the tuple —
    # presumably because DeepSpeedStrategy subclasses one of the supported strategies; verify.
    is_supported = isinstance(strategy, supported_strategies) and not isinstance(strategy, DeepSpeedStrategy)
    if not is_supported:
        supported_strategy_names = ", ".join(s.__name__ for s in supported_strategies)
        raise RuntimeError(
            f"Using a compiled model is incompatible with the current strategy: `{type(strategy).__name__}`."
            f" Only {supported_strategy_names} support compilation. Either switch to one of the supported"
            " strategies or avoid passing in compiled model."
        )
155,420 | import contextlib
from collections.abc import Iterable
from typing import Any, Callable, Dict, Iterator, List, Literal, Optional, Tuple, Type, Union
from torch.utils.data.dataloader import _BaseDataLoaderIter, _MultiProcessingDataLoaderIter
from typing_extensions import Self, TypedDict, override
from lightning.fabric.utilities.data import sized_len
from lightning.fabric.utilities.types import _Stateful
from lightning.pytorch.utilities._pytree import _map_and_unflatten, _tree_flatten, tree_unflatten
def _shutdown_workers_and_reset_iterator(dataloader: object) -> None:
if hasattr(dataloader, "_iterator"):
if isinstance(dataloader._iterator, _MultiProcessingDataLoaderIter):
dataloader._iterator._shutdown_workers()
dataloader._iterator = None | null |
155,421 | import contextlib
from collections.abc import Iterable
from typing import Any, Callable, Dict, Iterator, List, Literal, Optional, Tuple, Type, Union
from torch.utils.data.dataloader import _BaseDataLoaderIter, _MultiProcessingDataLoaderIter
from typing_extensions import Self, TypedDict, override
from lightning.fabric.utilities.data import sized_len
from lightning.fabric.utilities.types import _Stateful
from lightning.pytorch.utilities._pytree import _map_and_unflatten, _tree_flatten, tree_unflatten
def sized_len(dataloader: object) -> Optional[int]:
    """Try to get the length of an object, return ``None`` otherwise."""
    try:
        return len(dataloader)  # type: ignore [arg-type]
    except (TypeError, NotImplementedError):
        # the object has no `__len__` or explicitly refuses to provide one
        return None
def _get_iterables_lengths(iterables: List[Iterable]) -> List[Union[int, float]]:
    """Length of each iterable, with ``float('inf')`` standing in for unsized ones."""
    lengths: List[Union[int, float]] = []
    for iterable in iterables:
        length = sized_len(iterable)
        lengths.append(float("inf") if length is None else length)
    return lengths
155,422 | import gc
from typing import Any
import torch
from lightning_utilities.core.apply_func import apply_to_collection
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `recursive_detach` function. Write a Python function `def recursive_detach(in_dict: Any, to_cpu: bool = False) -> Any` to solve the following problem:
Detach all tensors in `in_dict`. May operate recursively if some of the values in `in_dict` are dictionaries which contain instances of `Tensor`. Other types in `in_dict` are not affected by this utility function. Args: in_dict: Dictionary with tensors to detach to_cpu: Whether to move tensor to cpu Return: out_dict: Dictionary with detached tensors
Here is the function:
def recursive_detach(in_dict: Any, to_cpu: bool = False) -> Any:
    """Detach all tensors in `in_dict`.

    May operate recursively if some of the values in `in_dict` are dictionaries which contain instances of
    `Tensor`. Other types in `in_dict` are not affected by this utility function.

    Args:
        in_dict: Dictionary with tensors to detach
        to_cpu: Whether to move tensor to cpu

    Return:
        out_dict: Dictionary with detached tensors

    """

    def _detach(tensor: Tensor, to_cpu: bool) -> Tensor:
        detached = tensor.detach()
        return detached.cpu() if to_cpu else detached

    # `apply_to_collection` walks arbitrarily nested containers and applies `_detach` to every Tensor
    return apply_to_collection(in_dict, Tensor, _detach, to_cpu=to_cpu)
155,423 | import contextlib
import logging
import math
from collections import OrderedDict
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
from torch.utils.hooks import RemovableHandle
import lightning.pytorch as pl
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0
from lightning.pytorch.utilities.model_helpers import _ModuleMode
from lightning.pytorch.utilities.rank_zero import WarningCache
# Placeholder shown in summaries when a shape cannot be determined.
UNKNOWN_SIZE = "?"


def parse_batch_shape(batch: Any) -> Union[str, List]:
    """Return the shape of ``batch`` as a list, recursing into lists/tuples; ``UNKNOWN_SIZE`` otherwise."""
    if hasattr(batch, "shape"):
        return list(batch.shape)
    if not isinstance(batch, (list, tuple)):
        return UNKNOWN_SIZE
    return [parse_batch_shape(element) for element in batch]
155,424 | import contextlib
import logging
import math
from collections import OrderedDict
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
from torch.utils.hooks import RemovableHandle
import lightning.pytorch as pl
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0
from lightning.pytorch.utilities.model_helpers import _ModuleMode
from lightning.pytorch.utilities.rank_zero import WarningCache
def get_formatted_model_size(total_model_size: float) -> str:
    """Format a model size (in MB) with thousands separators and three decimal places."""
    return "{:,.3f}".format(total_model_size)
def get_human_readable_count(number: int) -> str:
    """Abbreviates an integer number with K, M, B, T for thousands, millions, billions and trillions, respectively.

    Examples:
        >>> get_human_readable_count(123)
        '123 '
        >>> get_human_readable_count(1234)  # (one thousand)
        '1.2 K'
        >>> get_human_readable_count(2e6)  # (two million)
        '2.0 M'
        >>> get_human_readable_count(3e9)  # (three billion)
        '3.0 B'
        >>> get_human_readable_count(4e14)  # (four hundred trillion)
        '400 T'
        >>> get_human_readable_count(5e15)  # (more than trillion)
        '5,000 T'

    Args:
        number: a positive integer number

    Return:
        A string formatted according to the pattern described above.

    """
    assert number >= 0
    # labels come from the module-level PARAMETER_NUM_UNITS sequence (one label per 10**3 group)
    labels = PARAMETER_NUM_UNITS
    num_digits = int(math.floor(math.log10(number)) + 1 if number > 0 else 1)
    num_groups = int(math.ceil(num_digits / 3))
    num_groups = min(num_groups, len(labels))  # don't abbreviate beyond trillions
    # scale the number down so that at most three digits remain before the decimal point
    # (unless it exceeds the largest unit, in which case more digits survive, e.g. '5,000 T')
    shift = -3 * (num_groups - 1)
    number = number * (10**shift)
    index = num_groups - 1
    # plain counts and values >= 100 are shown without a decimal; smaller ones get one decimal place
    if index < 1 or number >= 100:
        return f"{int(number):,d} {labels[index]}"
    return f"{number:,.1f} {labels[index]}"
The provided code snippet includes necessary dependencies for implementing the `_format_summary_table` function. Write a Python function `def _format_summary_table( total_parameters: int, trainable_parameters: int, model_size: float, *cols: Tuple[str, List[str]], ) -> str` to solve the following problem:
Takes in a number of arrays, each specifying a column in the summary table, and combines them all into one big string defining the summary table that are nicely formatted.
Here is the function:
def _format_summary_table(
    total_parameters: int,
    trainable_parameters: int,
    model_size: float,
    *cols: Tuple[str, List[str]],
) -> str:
    """Takes in a number of arrays, each specifying a column in the summary table, and combines them all into one big
    string defining the summary table that is nicely formatted."""
    n_rows = len(cols[0][1])
    n_cols = 1 + len(cols)

    # Get formatting width of each column: the widest cell, but at least as wide as the header
    col_widths = []
    for c in cols:
        col_width = max(len(str(a)) for a in c[1]) if n_rows else 0
        col_width = max(col_width, len(c[0]))  # minimum length is header length
        col_widths.append(col_width)

    # Formatting: left-align within the given width
    s = "{:<{}}"
    total_width = sum(col_widths) + 3 * n_cols
    header = [s.format(c[0], w) for c, w in zip(cols, col_widths)]

    # Summary = header + divider + Rest of table
    summary = " | ".join(header) + "\n" + "-" * total_width
    for i in range(n_rows):
        line = []
        for c, w in zip(cols, col_widths):
            line.append(s.format(str(c[1][i]), w))
        summary += "\n" + " | ".join(line)
    summary += "\n" + "-" * total_width

    # footer: each count is left-padded to width 10 so the labels line up
    summary += "\n" + s.format(get_human_readable_count(trainable_parameters), 10)
    summary += "Trainable params"
    summary += "\n" + s.format(get_human_readable_count(total_parameters - trainable_parameters), 10)
    summary += "Non-trainable params"
    summary += "\n" + s.format(get_human_readable_count(total_parameters), 10)
    summary += "Total params"
    summary += "\n" + s.format(get_formatted_model_size(model_size), 10)
    summary += "Total estimated model params size (MB)"
    return summary
155,425 | import contextlib
import logging
import math
from collections import OrderedDict
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
from torch.utils.hooks import RemovableHandle
import lightning.pytorch as pl
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0
from lightning.pytorch.utilities.model_helpers import _ModuleMode
from lightning.pytorch.utilities.rank_zero import WarningCache
warning_cache = WarningCache()
def _is_lazy_weight_tensor(p: Tensor) -> bool:
from torch.nn.parameter import UninitializedParameter
if isinstance(p, UninitializedParameter):
warning_cache.warn(
"The total number of parameters detected may be inaccurate because the model contains"
" an instance of `UninitializedParameter`. To get an accurate number, set `self.example_input_array`"
" in your LightningModule."
)
return True
return False | null |
155,426 | import contextlib
import logging
import math
from collections import OrderedDict
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
from torch.utils.hooks import RemovableHandle
import lightning.pytorch as pl
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0
from lightning.pytorch.utilities.model_helpers import _ModuleMode
from lightning.pytorch.utilities.rank_zero import WarningCache
class ModelSummary:
    """Generates a summary of all layers in a :class:`~lightning.pytorch.core.LightningModule`.

    Args:
        model: The model to summarize (also referred to as the root module).
        max_depth: Maximum depth of modules to show. Use -1 to show all modules or 0 to show no
            summary. Defaults to 1.

    The string representation of this summary prints a table with columns containing
    the name, type and number of parameters for each layer.

    The root module may also have an attribute ``example_input_array`` as shown in the example below.
    If present, the root module will be called with it as input to determine the
    intermediate input- and output shapes of all layers. Supported are tensors and
    nested lists and tuples of tensors. All other types of inputs will be skipped and show as `?`
    in the summary table. The summary will also display `?` for layers not used in the forward pass.

    If there are parameters not associated with any layers or modules, the count of those parameters
    will be displayed in the table under `other params`. The summary will display `n/a` for module type,
    in size, and out size.

    Example::

        >>> import lightning.pytorch as pl
        >>> class LitModel(pl.LightningModule):
        ...
        ...     def __init__(self):
        ...         super().__init__()
        ...         self.net = nn.Sequential(nn.Linear(256, 512), nn.BatchNorm1d(512))
        ...         self.example_input_array = torch.zeros(10, 256)  # optional
        ...
        ...     def forward(self, x):
        ...         return self.net(x)
        ...
        >>> model = LitModel()
        >>> ModelSummary(model, max_depth=1)  # doctest: +NORMALIZE_WHITESPACE
          | Name | Type       | Params | Mode  | In sizes  | Out sizes
        --------------------------------------------------------------------
        0 | net  | Sequential | 132 K  | train | [10, 256] | [10, 512]
        --------------------------------------------------------------------
        132 K     Trainable params
        0         Non-trainable params
        132 K     Total params
        0.530     Total estimated model params size (MB)
        >>> ModelSummary(model, max_depth=-1)  # doctest: +NORMALIZE_WHITESPACE
          | Name  | Type        | Params | Mode  | In sizes  | Out sizes
        ----------------------------------------------------------------------
        0 | net   | Sequential  | 132 K  | train | [10, 256] | [10, 512]
        1 | net.0 | Linear      | 131 K  | train | [10, 256] | [10, 512]
        2 | net.1 | BatchNorm1d | 1.0 K  | train | [10, 512] | [10, 512]
        ----------------------------------------------------------------------
        132 K     Trainable params
        0         Non-trainable params
        132 K     Total params
        0.530     Total estimated model params size (MB)

    """

    def __init__(self, model: "pl.LightningModule", max_depth: int = 1) -> None:
        self._model = model

        if not isinstance(max_depth, int) or max_depth < -1:
            raise ValueError(f"`max_depth` can be -1, 0 or > 0, got {max_depth}.")

        self._max_depth = max_depth
        self._layer_summary = self.summarize()
        # 1 byte -> 8 bits
        # TODO: how do we compute precision_megabytes in case of mixed precision?
        precision_to_bits = {"64": 64, "32": 32, "16": 16, "bf16": 16}
        precision = precision_to_bits.get(self._model.trainer.precision, 32) if self._model._trainer else 32
        self._precision_megabytes = (precision / 8.0) * 1e-6

    # NOTE: the accessors below are properties (not plain methods): `summarize`,
    # `_get_summary_data` and `__str__` all use them as attribute values.
    @property
    def named_modules(self) -> List[Tuple[str, nn.Module]]:
        """The (name, module) pairs that will appear in the summary, depending on ``max_depth``."""
        mods: List[Tuple[str, nn.Module]]
        if self._max_depth == 0:
            mods = []
        elif self._max_depth == 1:
            # the children are the top-level modules
            mods = list(self._model.named_children())
        else:
            mods = self._model.named_modules()
            mods = list(mods)[1:]  # do not include root module (LightningModule)
        return mods

    @property
    def layer_names(self) -> List[str]:
        return list(self._layer_summary.keys())

    @property
    def layer_types(self) -> List[str]:
        return [layer.layer_type for layer in self._layer_summary.values()]

    @property
    def in_sizes(self) -> List:
        return [layer.in_size for layer in self._layer_summary.values()]

    @property
    def out_sizes(self) -> List:
        return [layer.out_size for layer in self._layer_summary.values()]

    @property
    def param_nums(self) -> List[int]:
        return [layer.num_parameters for layer in self._layer_summary.values()]

    @property
    def training_modes(self) -> List[bool]:
        return [layer.training for layer in self._layer_summary.values()]

    @property
    def total_parameters(self) -> int:
        # lazy (uninitialized) parameters have no size yet and are counted as 0
        return sum(p.numel() if not _is_lazy_weight_tensor(p) else 0 for p in self._model.parameters())

    @property
    def trainable_parameters(self) -> int:
        return sum(
            p.numel() if not _is_lazy_weight_tensor(p) else 0 for p in self._model.parameters() if p.requires_grad
        )

    @property
    def total_layer_params(self) -> int:
        return sum(self.param_nums)

    @property
    def model_size(self) -> float:
        """Estimated model size in megabytes, assuming a uniform parameter precision."""
        return self.total_parameters * self._precision_megabytes

    def summarize(self) -> Dict[str, LayerSummary]:
        """Build the per-layer summaries, optionally running the example input to record shapes."""
        summary = OrderedDict((name, LayerSummary(module)) for name, module in self.named_modules)
        if self._model.example_input_array is not None:
            self._forward_example_input()
        # the LayerSummary hooks are only needed during the example forward pass
        for layer in summary.values():
            layer.detach_hook()

        if self._max_depth >= 1:
            # remove summary entries with depth > max_depth
            for k in [k for k in summary if k.count(".") >= self._max_depth]:
                del summary[k]

        return summary

    def _forward_example_input(self) -> None:
        """Run the example input through each layer to get input- and output sizes."""
        model = self._model
        # the summary is supported without a trainer instance so we need to use the underscore property
        trainer = self._model._trainer

        input_ = model.example_input_array
        input_ = model._on_before_batch_transfer(input_)
        input_ = model._apply_batch_transfer_handler(input_)

        # switch to eval mode for the dry-run forward, restoring the original modes afterwards
        mode = _ModuleMode()
        mode.capture(model)
        model.eval()

        forward_context = contextlib.nullcontext() if trainer is None else trainer.precision_plugin.forward_context()
        with torch.no_grad(), forward_context:
            # let the model hooks collect the input- and output shapes
            if isinstance(input_, (list, tuple)):
                model(*input_)
            elif isinstance(input_, dict):
                model(**input_)
            else:
                model(input_)
        mode.restore(model)

    def _get_summary_data(self) -> List[Tuple[str, List[str]]]:
        """Makes a summary listing with:

        Layer Name, Layer Type, Number of Parameters, Input Sizes, Output Sizes, Model Size

        """
        arrays = [
            (" ", list(map(str, range(len(self._layer_summary))))),
            ("Name", self.layer_names),
            ("Type", self.layer_types),
            ("Params", list(map(get_human_readable_count, self.param_nums))),
            ("Mode", ["train" if mode else "eval" for mode in self.training_modes]),
        ]
        if self._model.example_input_array is not None:
            arrays.append(("In sizes", [str(x) for x in self.in_sizes]))
            arrays.append(("Out sizes", [str(x) for x in self.out_sizes]))

        total_leftover_params = self.total_parameters - self.total_layer_params
        if total_leftover_params > 0:
            self._add_leftover_params_to_summary(arrays, total_leftover_params)

        return arrays

    def _add_leftover_params_to_summary(self, arrays: List[Tuple[str, List[str]]], total_leftover_params: int) -> None:
        """Add summary of params not associated with module or layer to model summary."""
        # `dict(arrays)` shares the inner lists with `arrays`, so appending here extends the table rows
        layer_summaries = dict(arrays)
        layer_summaries[" "].append(" ")
        layer_summaries["Name"].append(LEFTOVER_PARAMS_NAME)
        layer_summaries["Type"].append(NOT_APPLICABLE)
        layer_summaries["Params"].append(get_human_readable_count(total_leftover_params))
        layer_summaries["Mode"].append(NOT_APPLICABLE)
        if "In sizes" in layer_summaries:
            layer_summaries["In sizes"].append(NOT_APPLICABLE)
        if "Out sizes" in layer_summaries:
            layer_summaries["Out sizes"].append(NOT_APPLICABLE)

    def __str__(self) -> str:
        arrays = self._get_summary_data()

        total_parameters = self.total_parameters
        trainable_parameters = self.trainable_parameters
        model_size = self.model_size
        return _format_summary_table(total_parameters, trainable_parameters, model_size, *arrays)

    def __repr__(self) -> str:
        return str(self)
The provided code snippet includes necessary dependencies for implementing the `summarize` function. Write a Python function `def summarize(lightning_module: "pl.LightningModule", max_depth: int = 1) -> ModelSummary` to solve the following problem:
Summarize the LightningModule specified by `lightning_module`. Args: lightning_module: `LightningModule` to summarize. max_depth: The maximum depth of layer nesting that the summary will include. A value of 0 turns the layer summary off. Default: 1. Return: The model summary object
Here is the function:
def summarize(lightning_module: "pl.LightningModule", max_depth: int = 1) -> ModelSummary:
    """Summarize the LightningModule specified by `lightning_module`.

    Args:
        lightning_module: `LightningModule` to summarize.
        max_depth: The maximum depth of layer nesting that the summary will include. A value of 0 turns the
            layer summary off. Default: 1.

    Return:
        The model summary object

    """
    summary = ModelSummary(lightning_module, max_depth=max_depth)
    return summary
155,427 | from collections import OrderedDict
from typing import Dict, List, Tuple
import torch
from lightning_utilities.core.imports import RequirementCache
from torch.nn import Parameter
from typing_extensions import override
from lightning.pytorch.utilities.model_summary.model_summary import (
NOT_APPLICABLE,
LayerSummary,
ModelSummary,
_is_lazy_weight_tensor,
get_human_readable_count,
)
def deepspeed_param_size(p: torch.nn.Parameter) -> int:
    """Number of elements of a (possibly partitioned) DeepSpeed parameter, read from its ``ds_numel``."""
    # only DeepSpeed-managed parameters carry this attribute
    assert hasattr(p, "ds_numel")
    return getattr(p, "ds_numel")
155,428 | from typing import Dict, List, Optional
from torch import nn
def _find_shared_parameters(module: nn.Module, tied_parameters: Optional[Dict] = None, prefix: str = "") -> List[str]:
if tied_parameters is None:
tied_parameters = {}
for name, param in module._parameters.items():
param_prefix = prefix + ("." if prefix else "") + name
if param is None:
continue
if param not in tied_parameters:
tied_parameters[param] = []
tied_parameters[param].append(param_prefix)
for name, m in module._modules.items():
if m is None:
continue
submodule_prefix = prefix + ("." if prefix else "") + name
_find_shared_parameters(m, tied_parameters, submodule_prefix)
return [x for x in tied_parameters.values() if len(x) > 1]
The provided code snippet includes necessary dependencies for implementing the `find_shared_parameters` function. Write a Python function `def find_shared_parameters(module: nn.Module) -> List[str]` to solve the following problem:
Returns a list of names of shared parameters set in the module.
Here is the function:
def find_shared_parameters(module: nn.Module) -> List[str]:
    """Returns a list of names of shared parameters set in the module."""
    shared = _find_shared_parameters(module)
    return shared
155,429 | from typing import Dict, List, Optional
from torch import nn
def _get_module_by_path(module: nn.Module, path: str) -> nn.Module:
path = path.split(".")
for name in path:
module = getattr(module, name)
return module
def _set_module_by_path(module: nn.Module, path: str, value: nn.Module) -> None:
path = path.split(".")
for name in path[:-1]:
module = getattr(module, name)
setattr(module, path[-1], value)
def set_shared_parameters(module: nn.Module, shared_params: list) -> nn.Module:
    """Re-tie parameters: every path in a group is pointed at the object found under the group's first path."""
    for group in shared_params:
        canonical = _get_module_by_path(module, group[0])
        for alias_path in group[1:]:
            _set_module_by_path(module, alias_path, canonical)
    return module
155,430 | import contextlib
import logging
import os
from argparse import Namespace
from functools import wraps
from typing import TYPE_CHECKING, Any, Callable, Dict, Generator, List, Optional, Set, Union
from lightning_utilities.core.imports import RequirementCache
from torch import Tensor
from typing_extensions import override
import lightning.pytorch as pl
from lightning.fabric.utilities.logger import _add_prefix, _convert_params, _sanitize_callable_params
from lightning.pytorch.callbacks import Checkpoint
from lightning.pytorch.loggers.logger import Logger, rank_zero_experiment
from lightning.pytorch.utilities.model_summary import ModelSummary
from lightning.pytorch.utilities.rank_zero import rank_zero_only
def _catch_inactive(func: Callable) -> Callable:
    """Decorator that turns neptune's ``InactiveRunException`` into a silent no-op (returning ``None``)."""

    @wraps(func)
    def _safe_call(*args: Any, **kwargs: Any) -> Any:
        # imported lazily so the decorator can be applied without neptune installed
        from neptune.exceptions import InactiveRunException

        try:
            return func(*args, **kwargs)
        except InactiveRunException:
            return None

    return _safe_call
155,431 | from pathlib import Path
from typing import Any, List, Tuple, Union
from torch import Tensor
import lightning.pytorch as pl
from lightning.pytorch.callbacks import Checkpoint
The provided code snippet includes necessary dependencies for implementing the `_scan_checkpoints` function. Write a Python function `def _scan_checkpoints(checkpoint_callback: Checkpoint, logged_model_time: dict) -> List[Tuple[float, str, float, str]]` to solve the following problem:
Return the checkpoints to be logged. Args: checkpoint_callback: Checkpoint callback reference. logged_model_time: dictionary containing the logged model times.
Here is the function:
def _scan_checkpoints(checkpoint_callback: Checkpoint, logged_model_time: dict) -> List[Tuple[float, str, float, str]]:
    """Return the checkpoints to be logged.

    Args:
        checkpoint_callback: Checkpoint callback reference.
        logged_model_time: dictionary containing the logged model times.

    """
    # gather candidate paths with their score and a tag describing why they were kept
    candidates: dict = {}
    if hasattr(checkpoint_callback, "last_model_path") and hasattr(checkpoint_callback, "current_score"):
        candidates[checkpoint_callback.last_model_path] = (checkpoint_callback.current_score, "latest")
    if hasattr(checkpoint_callback, "best_model_path") and hasattr(checkpoint_callback, "best_model_score"):
        candidates[checkpoint_callback.best_model_path] = (checkpoint_callback.best_model_score, "best")
    if hasattr(checkpoint_callback, "best_k_models"):
        for path, score in checkpoint_callback.best_k_models.items():
            candidates[path] = (score, "best_k")

    # drop paths that no longer exist on disk and order by modification time
    existing = sorted(
        (Path(p).stat().st_mtime, p, s, tag) for p, (s, tag) in candidates.items() if Path(p).is_file()
    )
    # keep only checkpoints that were never logged, or changed since they were last logged
    return [c for c in existing if c[1] not in logged_model_time or logged_model_time[c[1]] < c[0]]
155,432 | from pathlib import Path
from typing import Any, List, Tuple, Union
from torch import Tensor
import lightning.pytorch as pl
from lightning.pytorch.callbacks import Checkpoint
def _log_hyperparams(trainer: "pl.Trainer") -> None:
    """Log the hyperparameters of the LightningModule and/or the LightningDataModule to all attached loggers.

    Raises:
        RuntimeError: If the module and datamodule both define the same hyperparameter key with different values.
    """
    if not trainer.loggers:
        return

    pl_module = trainer.lightning_module
    datamodule_log_hyperparams = trainer.datamodule._log_hyperparams if trainer.datamodule is not None else False

    hparams_initial = None
    if pl_module._log_hyperparams and datamodule_log_hyperparams:
        # both sources want their hparams logged: merge them, but overlapping keys must agree
        datamodule_hparams = trainer.datamodule.hparams_initial
        lightning_hparams = pl_module.hparams_initial
        inconsistent_keys = []
        for key in lightning_hparams.keys() & datamodule_hparams.keys():
            lm_val, dm_val = lightning_hparams[key], datamodule_hparams[key]
            # tensors are compared by identity, since `!=` on tensors is elementwise
            # NOTE(review): for the *same* tensor object the final `lm_val != dm_val` still runs and
            # yields a tensor; a multi-element tensor would make this `if` raise — verify intended.
            if (
                type(lm_val) != type(dm_val)
                or (isinstance(lm_val, Tensor) and id(lm_val) != id(dm_val))
                or lm_val != dm_val
            ):
                inconsistent_keys.append(key)
        if inconsistent_keys:
            raise RuntimeError(
                f"Error while merging hparams: the keys {inconsistent_keys} are present "
                "in both the LightningModule's and LightningDataModule's hparams "
                "but have different values."
            )
        hparams_initial = {**lightning_hparams, **datamodule_hparams}
    elif pl_module._log_hyperparams:
        hparams_initial = pl_module.hparams_initial
    elif datamodule_log_hyperparams:
        hparams_initial = trainer.datamodule.hparams_initial

    for logger in trainer.loggers:
        if hparams_initial is not None:
            logger.log_hyperparams(hparams_initial)
        logger.log_graph(pl_module)
        logger.save()
155,433 | import logging
import os
import re
import tempfile
from argparse import Namespace
from pathlib import Path
from time import time
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Literal, Mapping, Optional, Union
import yaml
from lightning_utilities.core.imports import RequirementCache
from torch import Tensor
from typing_extensions import override
from lightning.fabric.utilities.logger import _add_prefix, _convert_params, _flatten_dict
from lightning.pytorch.callbacks.model_checkpoint import ModelCheckpoint
from lightning.pytorch.loggers.logger import Logger, rank_zero_experiment
from lightning.pytorch.loggers.utilities import _scan_checkpoints
from lightning.pytorch.utilities.rank_zero import rank_zero_only, rank_zero_warn
def _get_resolve_tags() -> Callable:
    """Return mlflow's ``resolve_tags`` helper, locating it across mlflow versions.

    Returns:
        A callable mapping a tags dict to a resolved tags dict; falls back to the
        identity function when neither known mlflow location exists.
    """
    from mlflow.tracking import context

    if hasattr(context, "resolve_tags"):
        # mlflow versions before 1.1.0 expose it directly on the ``context`` module
        from mlflow.tracking.context import resolve_tags
    elif hasattr(context, "registry"):
        # mlflow 1.1.0 and later moved it into the context registry
        from mlflow.tracking.context.registry import resolve_tags
    else:
        # unknown layout: pass tags through unchanged
        def resolve_tags(tags):
            return tags

    return resolve_tags
155,434 | import functools
import operator
from abc import ABC
from collections import defaultdict
from typing import Any, Callable, Dict, Mapping, Optional, Sequence
import numpy as np
from typing_extensions import override
from lightning.fabric.loggers import Logger as FabricLogger
from lightning.fabric.loggers.logger import _DummyExperiment as DummyExperiment
from lightning.fabric.loggers.logger import rank_zero_experiment
from lightning.pytorch.callbacks.model_checkpoint import ModelCheckpoint
The provided code snippet includes necessary dependencies for implementing the `merge_dicts` function. Write a Python function `def merge_dicts( # pragma: no cover dicts: Sequence[Mapping], agg_key_funcs: Optional[Mapping] = None, default_func: Callable[[Sequence[float]], float] = np.mean, ) -> Dict` to solve the following problem:
Merge a sequence with dictionaries into one dictionary by aggregating the same keys with some given function. Args: dicts: Sequence of dictionaries to be merged. agg_key_funcs: Mapping from key name to function. This function will aggregate a list of values, obtained from the same key of all dictionaries. If some key has no specified aggregation function, the default one will be used. Default is: ``None`` (all keys will be aggregated by the default function). default_func: Default function to aggregate keys, which are not presented in the `agg_key_funcs` map. Returns: Dictionary with merged values. Examples: >>> import pprint >>> d1 = {'a': 1.7, 'b': 2.0, 'c': 1, 'd': {'d1': 1, 'd3': 3}} >>> d2 = {'a': 1.1, 'b': 2.2, 'v': 1, 'd': {'d1': 2, 'd2': 3}} >>> d3 = {'a': 1.1, 'v': 2.3, 'd': {'d3': 3, 'd4': {'d5': 1}}} >>> dflt_func = min >>> agg_funcs = {'a': np.mean, 'v': max, 'd': {'d1': sum}} >>> pprint.pprint(merge_dicts([d1, d2, d3], agg_funcs, dflt_func)) {'a': 1.3, 'b': 2.0, 'c': 1, 'd': {'d1': 3, 'd2': 3, 'd3': 3, 'd4': {'d5': 1}}, 'v': 2.3}
Here is the function:
def merge_dicts(  # pragma: no cover
    dicts: Sequence[Mapping],
    agg_key_funcs: Optional[Mapping] = None,
    default_func: Callable[[Sequence[float]], float] = np.mean,
) -> Dict:
    """Merge a sequence of dictionaries into one dictionary by aggregating the same keys with some given function.

    Args:
        dicts:
            Sequence of dictionaries to be merged.
        agg_key_funcs:
            Mapping from key name to function. This function will aggregate a
            list of values, obtained from the same key of all dictionaries.
            If some key has no specified aggregation function, the default one
            will be used. Default is: ``None`` (all keys will be aggregated by the
            default function).
        default_func:
            Default function to aggregate keys, which are not presented in the
            ``agg_key_funcs`` map.

    Returns:
        Dictionary with merged values.

    Examples:
        >>> import pprint
        >>> d1 = {'a': 1.7, 'b': 2.0, 'c': 1, 'd': {'d1': 1, 'd3': 3}}
        >>> d2 = {'a': 1.1, 'b': 2.2, 'v': 1, 'd': {'d1': 2, 'd2': 3}}
        >>> d3 = {'a': 1.1, 'v': 2.3, 'd': {'d3': 3, 'd4': {'d5': 1}}}
        >>> dflt_func = min
        >>> agg_funcs = {'a': np.mean, 'v': max, 'd': {'d1': sum}}
        >>> pprint.pprint(merge_dicts([d1, d2, d3], agg_funcs, dflt_func))
        {'a': 1.3,
         'b': 2.0,
         'c': 1,
         'd': {'d1': 3, 'd2': 3, 'd3': 3, 'd4': {'d5': 1}},
         'v': 2.3}

    """
    key_to_fn = agg_key_funcs or {}
    # union of the key sets of all input dictionaries
    all_keys = functools.reduce(operator.or_, (set(d.keys()) for d in dicts))

    merged: Dict = {}
    for key in all_keys:
        agg_fn = key_to_fn.get(key)
        # collect each dict's value for this key, skipping dicts that lack it
        collected = [value for value in (d.get(key) for d in dicts) if value is not None]
        if isinstance(collected[0], dict):
            # nested dictionaries are merged recursively, forwarding the per-key function mapping
            merged[key] = merge_dicts(collected, agg_fn, default_func)
        else:
            merged[key] = (agg_fn or default_func)(collected)
    return merged
155,435 | from typing import Any, Iterator, List, Optional
from typing_extensions import override
from lightning.fabric.utilities.data import sized_len
from lightning.pytorch.utilities.combined_loader import _ITERATOR_RETURN, CombinedLoader
from lightning.pytorch.utilities.exceptions import MisconfigurationException
def _profile_nothing() -> None:
    """No-op placeholder used where a profiling callback is optional."""
    return None
155,436 | import inspect
from contextlib import contextmanager
from typing import Any, Callable, ContextManager, Generator, Optional, Tuple, Type
import torch
import torch.distributed as dist
from torch import Tensor
import lightning.pytorch as pl
from lightning.fabric.utilities.distributed import _distributed_is_initialized
from lightning.fabric.utilities.imports import _TORCH_EQUAL_2_0
from lightning.fabric.utilities.warnings import PossibleUserWarning
from lightning.pytorch.accelerators.xla import XLAAccelerator
from lightning.pytorch.callbacks.timer import Timer
from lightning.pytorch.loops import _Loop
from lightning.pytorch.loops.fetchers import _DataFetcher, _DataLoaderIterDataFetcher, _PrefetchDataFetcher
from lightning.pytorch.loops.progress import _BaseProgress
from lightning.pytorch.strategies import FSDPStrategy
from lightning.pytorch.strategies.parallel import ParallelStrategy
from lightning.pytorch.strategies.strategy import Strategy
from lightning.pytorch.trainer.states import RunningStage
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
from lightning.pytorch.utilities.signature_utils import is_param_in_hook_signature
The provided code snippet includes necessary dependencies for implementing the `check_finite_loss` function. Write a Python function `def check_finite_loss(loss: Optional[Tensor]) -> None` to solve the following problem:
Checks for finite loss value. Args: loss: the loss value to check to be finite
Here is the function:
def check_finite_loss(loss: Optional[Tensor]) -> None:
    """Raise if the given loss contains any non-finite (NaN or +/-inf) value.

    Args:
        loss: the loss value to check; ``None`` is accepted and skipped.

    Raises:
        ValueError: If any element of ``loss`` is not finite.
    """
    if loss is None:
        return
    if not torch.isfinite(loss).all():
        raise ValueError(f"The loss returned in `training_step` is {loss}.")
155,437 | import inspect
from contextlib import contextmanager
from typing import Any, Callable, ContextManager, Generator, Optional, Tuple, Type
import torch
import torch.distributed as dist
from torch import Tensor
import lightning.pytorch as pl
from lightning.fabric.utilities.distributed import _distributed_is_initialized
from lightning.fabric.utilities.imports import _TORCH_EQUAL_2_0
from lightning.fabric.utilities.warnings import PossibleUserWarning
from lightning.pytorch.accelerators.xla import XLAAccelerator
from lightning.pytorch.callbacks.timer import Timer
from lightning.pytorch.loops import _Loop
from lightning.pytorch.loops.fetchers import _DataFetcher, _DataLoaderIterDataFetcher, _PrefetchDataFetcher
from lightning.pytorch.loops.progress import _BaseProgress
from lightning.pytorch.strategies import FSDPStrategy
from lightning.pytorch.strategies.parallel import ParallelStrategy
from lightning.pytorch.strategies.strategy import Strategy
from lightning.pytorch.trainer.states import RunningStage
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
from lightning.pytorch.utilities.signature_utils import is_param_in_hook_signature
class PossibleUserWarning(UserWarning):
    """Warning category for messages that may be false positives and can be safely filtered out by users."""
class Timer(Callback):
    """The Timer callback tracks the time spent in the training, validation, and test loops and interrupts the Trainer
    if the given time limit for the training loop is reached.

    Args:
        duration: A string in the format DD:HH:MM:SS (days, hours, minutes seconds), or a :class:`datetime.timedelta`,
            or a dict containing key-value compatible with :class:`~datetime.timedelta`.
        interval: Determines if the interruption happens on epoch level or mid-epoch.
            Can be either ``"epoch"`` or ``"step"``.
        verbose: Set this to ``False`` to suppress logging messages.

    Raises:
        MisconfigurationException:
            If ``duration`` is not in the expected format.
        MisconfigurationException:
            If ``interval`` is not one of the supported choices.

    Example::

        from lightning.pytorch import Trainer
        from lightning.pytorch.callbacks import Timer

        # stop training after 12 hours
        timer = Timer(duration="00:12:00:00")

        # or provide a datetime.timedelta
        from datetime import timedelta
        timer = Timer(duration=timedelta(weeks=1))

        # or provide a dictionary
        timer = Timer(duration=dict(weeks=4, days=2))

        # force training to stop after given time limit
        trainer = Trainer(callbacks=[timer])

        # query training/validation/test time (in seconds)
        timer.time_elapsed("train")
        timer.start_time("validate")
        timer.end_time("test")

    """

    def __init__(
        self,
        duration: Optional[Union[str, timedelta, Dict[str, int]]] = None,
        interval: str = Interval.step,
        verbose: bool = True,
    ) -> None:
        super().__init__()
        if isinstance(duration, str):
            # parse a "DD:HH:MM:SS" string into a timedelta
            duration_match = re.fullmatch(r"(\d+):(\d\d):(\d\d):(\d\d)", duration.strip())
            if not duration_match:
                raise MisconfigurationException(
                    f"`Timer(duration={duration!r})` is not a valid duration. "
                    "Expected a string in the format DD:HH:MM:SS."
                )
            duration = timedelta(
                days=int(duration_match.group(1)),
                hours=int(duration_match.group(2)),
                minutes=int(duration_match.group(3)),
                seconds=int(duration_match.group(4)),
            )
        elif isinstance(duration, dict):
            # e.g. ``dict(weeks=4, days=2)`` — forwarded directly to ``timedelta``
            duration = timedelta(**duration)
        if interval not in set(Interval):
            raise MisconfigurationException(
                f"Unsupported parameter value `Timer(interval={interval})`. Possible choices are:"
                f" {', '.join(set(Interval))}"
            )
        # time budget in seconds; ``None`` means no limit is enforced
        self._duration = duration.total_seconds() if duration is not None else None
        self._interval = interval
        self._verbose = verbose
        # per-stage start/end timestamps, recorded with ``time.monotonic``
        self._start_time: Dict[RunningStage, Optional[float]] = {stage: None for stage in RunningStage}
        self._end_time: Dict[RunningStage, Optional[float]] = {stage: None for stage in RunningStage}
        # training seconds already consumed by a previous run (restored in ``load_state_dict``)
        self._offset = 0

    def start_time(self, stage: str = RunningStage.TRAINING) -> Optional[float]:
        """Return the start time of a particular stage (in seconds)"""
        stage = RunningStage(stage)
        return self._start_time[stage]

    def end_time(self, stage: str = RunningStage.TRAINING) -> Optional[float]:
        """Return the end time of a particular stage (in seconds)"""
        stage = RunningStage(stage)
        return self._end_time[stage]

    def time_elapsed(self, stage: str = RunningStage.TRAINING) -> float:
        """Return the time elapsed for a particular stage (in seconds)"""
        start = self.start_time(stage)
        end = self.end_time(stage)
        # only the training stage carries over time from a restored checkpoint
        offset = self._offset if stage == RunningStage.TRAINING else 0
        if start is None:
            return offset
        if end is None:
            # stage still running: measure against the current clock
            return time.monotonic() - start + offset
        return end - start + offset

    def time_remaining(self, stage: str = RunningStage.TRAINING) -> Optional[float]:
        """Return the time remaining for a particular stage (in seconds)"""
        if self._duration is not None:
            return self._duration - self.time_elapsed(stage)
        return None

    def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        """Record the training start timestamp."""
        self._start_time[RunningStage.TRAINING] = time.monotonic()

    def on_train_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        """Record the training end timestamp."""
        self._end_time[RunningStage.TRAINING] = time.monotonic()

    def on_validation_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        """Record the validation start timestamp."""
        self._start_time[RunningStage.VALIDATING] = time.monotonic()

    def on_validation_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        """Record the validation end timestamp."""
        self._end_time[RunningStage.VALIDATING] = time.monotonic()

    def on_test_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        """Record the test start timestamp."""
        self._start_time[RunningStage.TESTING] = time.monotonic()

    def on_test_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        """Record the test end timestamp."""
        self._end_time[RunningStage.TESTING] = time.monotonic()

    def on_fit_start(self, trainer: "pl.Trainer", *args: Any, **kwargs: Any) -> None:
        """Check the time budget immediately after (possibly) restoring state."""
        # this checks the time after the state is reloaded, regardless of the interval.
        # this is necessary in case we load a state whose timer is already depleted
        if self._duration is None:
            return
        self._check_time_remaining(trainer)

    def on_train_batch_end(self, trainer: "pl.Trainer", *args: Any, **kwargs: Any) -> None:
        """Check the time budget after each batch when running with step-level interval."""
        if self._interval != Interval.step or self._duration is None:
            return
        self._check_time_remaining(trainer)

    def on_train_epoch_end(self, trainer: "pl.Trainer", *args: Any, **kwargs: Any) -> None:
        """Check the time budget after each epoch when running with epoch-level interval."""
        if self._interval != Interval.epoch or self._duration is None:
            return
        self._check_time_remaining(trainer)

    def state_dict(self) -> Dict[str, Any]:
        """Serialize the elapsed time of every stage for checkpointing."""
        return {"time_elapsed": {stage.value: self.time_elapsed(stage) for stage in RunningStage}}

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """Restore the previously elapsed training time as an offset."""
        time_elapsed = state_dict.get("time_elapsed", {})
        self._offset = time_elapsed.get(RunningStage.TRAINING.value, 0)

    def _check_time_remaining(self, trainer: "pl.Trainer") -> None:
        # signal the trainer to stop once the elapsed training time exceeds the budget
        assert self._duration is not None
        should_stop = self.time_elapsed() >= self._duration
        # broadcast the decision so all processes agree on stopping together
        should_stop = trainer.strategy.broadcast(should_stop)
        trainer.should_stop = trainer.should_stop or should_stop
        if should_stop and self._verbose:
            elapsed = timedelta(seconds=int(self.time_elapsed(RunningStage.TRAINING)))
            rank_zero_info(f"Time limit reached. Elapsed time is {elapsed}. Signaling Trainer to stop.")
The provided code snippet includes necessary dependencies for implementing the `_parse_loop_limits` function. Write a Python function `def _parse_loop_limits( min_steps: Optional[int], max_steps: int, min_epochs: Optional[int], max_epochs: Optional[int], trainer: "pl.Trainer", ) -> Tuple[int, int]` to solve the following problem:
This utility computes the default values for the minimum and maximum number of steps and epochs given the values the user has selected. Args: min_steps: Minimum number of steps. max_steps: Maximum number of steps. min_epochs: Minimum number of epochs. max_epochs: Maximum number of epochs. trainer: Trainer instance. Returns: The parsed limits, with default values being set for the ones that the user did not specify.
Here is the function:
def _parse_loop_limits(
    min_steps: Optional[int],
    max_steps: int,
    min_epochs: Optional[int],
    max_epochs: Optional[int],
    trainer: "pl.Trainer",
) -> Tuple[int, int]:
    """Compute default values for the minimum and maximum number of epochs from the user-selected limits.

    Args:
        min_steps: Minimum number of steps.
        max_steps: Maximum number of steps.
        min_epochs: Minimum number of epochs.
        max_epochs: Maximum number of epochs.
        trainer: Trainer instance.

    Returns:
        The parsed limits, with default values being set for the ones that the user did not specify.
    """
    if max_epochs is None:
        timer_configured = any(isinstance(cb, Timer) for cb in trainer.callbacks)
        if max_steps == -1 and not timer_configured:
            # nothing would ever stop training: cap at 1000 epochs and tell the user how to opt out
            rank_zero_warn(
                "`max_epochs` was not set. Setting it to 1000 epochs. To train without an epoch limit,"
                " set `max_epochs=-1`.",
                category=PossibleUserWarning,
            )
            max_epochs = 1000
        else:
            max_epochs = -1

    if min_epochs is None:
        # a `min_steps` requirement implies at least one epoch so that FitLoop.done can re-evaluate
        # `should_stop` when it gets triggered `on_fit_start`; otherwise default to 0 so no training
        # is done when `should_stop` is triggered `on_fit_start`
        min_epochs = 1 if min_steps is not None else 0

    return min_epochs, max_epochs
155,438 | import inspect
from contextlib import contextmanager
from typing import Any, Callable, ContextManager, Generator, Optional, Tuple, Type
import torch
import torch.distributed as dist
from torch import Tensor
import lightning.pytorch as pl
from lightning.fabric.utilities.distributed import _distributed_is_initialized
from lightning.fabric.utilities.imports import _TORCH_EQUAL_2_0
from lightning.fabric.utilities.warnings import PossibleUserWarning
from lightning.pytorch.accelerators.xla import XLAAccelerator
from lightning.pytorch.callbacks.timer import Timer
from lightning.pytorch.loops import _Loop
from lightning.pytorch.loops.fetchers import _DataFetcher, _DataLoaderIterDataFetcher, _PrefetchDataFetcher
from lightning.pytorch.loops.progress import _BaseProgress
from lightning.pytorch.strategies import FSDPStrategy
from lightning.pytorch.strategies.parallel import ParallelStrategy
from lightning.pytorch.strategies.strategy import Strategy
from lightning.pytorch.trainer.states import RunningStage
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
from lightning.pytorch.utilities.signature_utils import is_param_in_hook_signature
class ParallelStrategy(Strategy, ABC):
    """Strategy for training with multiple processes in parallel."""

    # NOTE(review): several defs below look like @property/@abstractmethod/@x.setter/@contextmanager
    # pairs whose decorators were stripped during extraction (duplicate ``parallel_devices`` defs,
    # bodies containing only a docstring, a bare ``yield``). Restore the decorators before reuse.

    def __init__(
        self,
        accelerator: Optional["pl.accelerators.Accelerator"] = None,
        parallel_devices: Optional[List[torch.device]] = None,
        cluster_environment: Optional[ClusterEnvironment] = None,
        checkpoint_io: Optional[CheckpointIO] = None,
        precision_plugin: Optional[Precision] = None,
    ):
        super().__init__(accelerator=accelerator, checkpoint_io=checkpoint_io, precision_plugin=precision_plugin)
        self.parallel_devices = parallel_devices
        self.cluster_environment: Optional[ClusterEnvironment] = cluster_environment
        self._layer_sync: Optional[LayerSync] = None

    def root_device(self) -> torch.device:
        """Return the root device."""

    def global_rank(self) -> int:
        # rank of this process across all nodes; 0 when no cluster environment is set
        return self.cluster_environment.global_rank() if self.cluster_environment is not None else 0

    def local_rank(self) -> int:
        # rank of this process within its node; 0 when no cluster environment is set
        return self.cluster_environment.local_rank() if self.cluster_environment is not None else 0

    def node_rank(self) -> int:
        # rank of this node; 0 when no cluster environment is set
        return self.cluster_environment.node_rank() if self.cluster_environment is not None else 0

    def world_size(self) -> int:
        # total number of processes; 1 when no cluster environment is set
        return self.cluster_environment.world_size() if self.cluster_environment is not None else 1

    def is_global_zero(self) -> bool:
        return self.global_rank == 0

    def parallel_devices(self) -> Optional[List[torch.device]]:
        return self._parallel_devices

    def parallel_devices(self, parallel_devices: Optional[List[torch.device]]) -> None:
        self._parallel_devices = parallel_devices

    def distributed_sampler_kwargs(self) -> Dict[str, Any]:
        # presumably consumed by a DistributedSampler-style constructor — TODO confirm at call site
        return {
            "num_replicas": len(self.parallel_devices) if self.parallel_devices is not None else 0,
            "rank": self.global_rank,
        }

    def all_gather(self, tensor: Tensor, group: Optional[Any] = None, sync_grads: bool = False) -> Tensor:
        """Perform an all_gather on all processes."""
        return _all_gather_ddp_if_available(tensor, group=group, sync_grads=sync_grads)

    def reduce_boolean_decision(self, decision: bool, all: bool = True) -> bool:
        """Reduces a boolean decision over distributed processes. By default is analogous to ``all`` from the standard
        library, returning ``True`` only if all input decisions evaluate to ``True``. If ``all`` is set to ``False``,
        it behaves like ``any`` instead.

        Args:
            decision: A single input decision.
            all: Whether to logically emulate ``all`` or ``any``. Defaults to True.

        Returns:
            bool: The reduced boolean decision.

        """
        decision = torch.tensor(int(decision), device=self.root_device)
        decision = self.reduce(
            decision,
            reduce_op=ReduceOp.SUM,  # type: ignore[arg-type]
        )
        # with SUM-reduction: all ranks True <=> sum == world_size; any rank True <=> sum > 0
        decision = bool(decision == self.world_size) if all else bool(decision)
        return decision

    def block_backward_sync(self) -> Generator:
        """Blocks ddp sync gradients behaviour on backwards pass.

        This is useful for skipping sync when accumulating gradients, reducing communication overhead

        Returns: context manager with sync behaviour off

        """
        if isinstance(self.model, pl.utilities.types.DistributedDataParallel):
            with self.model.no_sync():
                yield None
        else:
            # not DDP-wrapped: nothing to block, act as a no-op context
            yield None

    def teardown(self) -> None:
        assert self.cluster_environment is not None
        self.cluster_environment.teardown()
        super().teardown()
class Strategy(ABC):
"""Base class for all strategies that change the behaviour of the training, validation and test- loop."""
def __init__(
self,
accelerator: Optional["pl.accelerators.Accelerator"] = None,
checkpoint_io: Optional[CheckpointIO] = None,
precision_plugin: Optional[Precision] = None,
) -> None:
self._accelerator: Optional["pl.accelerators.Accelerator"] = accelerator
self._checkpoint_io: Optional[CheckpointIO] = checkpoint_io
self._precision_plugin: Optional[Precision] = None
# Call the precision setter for input validation
self.precision_plugin = precision_plugin # type: ignore[assignment]
self._lightning_module: Optional[pl.LightningModule] = None
self._model: Optional[Module] = None
self._launcher: Optional[_Launcher] = None
self._forward_redirection: _ForwardRedirection = _ForwardRedirection()
self._optimizers: List[Optimizer] = []
self._lightning_optimizers: List[LightningOptimizer] = []
self.lr_scheduler_configs: List[LRSchedulerConfig] = []
def launcher(self) -> Optional[_Launcher]:
return self._launcher
def accelerator(self) -> Optional["pl.accelerators.Accelerator"]:
return self._accelerator
def accelerator(self, accelerator: "pl.accelerators.Accelerator") -> None:
self._accelerator = accelerator
def checkpoint_io(self) -> CheckpointIO:
if self._checkpoint_io is None:
self._checkpoint_io = TorchCheckpointIO()
elif isinstance(self._checkpoint_io, _WrappingCheckpointIO):
self._checkpoint_io.checkpoint_io = TorchCheckpointIO()
return self._checkpoint_io
def checkpoint_io(self, io: CheckpointIO) -> None:
self._checkpoint_io = io
def precision_plugin(self) -> Precision:
return self._precision_plugin if self._precision_plugin is not None else Precision()
def precision_plugin(self, precision_plugin: Optional[Precision]) -> None:
self._precision_plugin = precision_plugin
def optimizers(self) -> List[Optimizer]:
return self._optimizers
def optimizers(self, optimizers: List[Optimizer]) -> None:
self._optimizers = optimizers
self._lightning_optimizers = [LightningOptimizer._to_lightning_optimizer(opt, self) for opt in optimizers]
def connect(self, model: "pl.LightningModule") -> None:
"""Called by the Trainer to connect the strategy with the model."""
# model conversions cannot be applied at this point because `LightningModule.{setup,configure_model}` haven't
# run yet
self._lightning_module = model
self.model = model
def _configure_launcher(self) -> None:
"""Attach the launcher based on Strategy."""
def setup_environment(self) -> None:
"""Setup any processes or distributed connections.
This is called before the LightningModule/DataModule setup hook which allows the user to access the accelerator
environment before setup is complete.
"""
assert self.accelerator is not None
self.accelerator.setup_device(self.root_device)
def setup_optimizers(self, trainer: "pl.Trainer") -> None:
"""Creates optimizers and schedulers.
Args:
trainer: the Trainer, these optimizers should be connected to
"""
assert self.lightning_module is not None
self.optimizers, self.lr_scheduler_configs = _init_optimizers_and_lr_schedulers(self.lightning_module)
def setup(self, trainer: "pl.Trainer") -> None:
"""Sets up the accelerator, plugins and initializes the optimizers (if needed).
Args:
trainer: the trainer instance
"""
assert self.accelerator is not None
self.accelerator.setup(trainer)
assert self.model is not None
# let the precision plugin convert the module here so that this strategy hook can decide the order
# of operations
self.model = self.precision_plugin.convert_module(self.model)
self.model_to_device()
self.model = self._setup_model(self.model)
if trainer.state.fn == TrainerFn.FITTING:
self.setup_optimizers(trainer)
self.setup_precision_plugin()
if trainer.state.fn == TrainerFn.FITTING:
_optimizers_to_device(self.optimizers, self.root_device)
def setup_precision_plugin(self) -> None:
"""Attaches the precision plugin to the strategy."""
assert self.model is not None
model, optimizers, lr_scheduler_configs = self.precision_plugin.connect(
self.model, self.optimizers, self.lr_scheduler_configs
)
self.model = model
self.optimizers = optimizers
self.lr_scheduler_configs = lr_scheduler_configs
def optimizer_state(self, optimizer: Optimizer) -> Dict[str, Tensor]:
"""Returns state of an optimizer.
Allows for syncing/collating optimizer state from processes in custom strategies.
"""
if isinstance(optimizer, LightningOptimizer):
optimizer = optimizer._optimizer
if hasattr(optimizer, "consolidate_state_dict"):
# there are optimizers like PyTorch's ZeroRedundancyOptimizer that shard their
# states, and to avoid OOM we consolidate the full state on rank 0 only
optimizer.consolidate_state_dict()
return optimizer.state_dict() if self.is_global_zero else {}
# for optimizers that are not sharded, we return the state dict on all ranks
return optimizer.state_dict()
def backward(
self,
closure_loss: Tensor,
optimizer: Optional[Optimizer],
*args: Any,
**kwargs: Any,
) -> Tensor:
r"""Forwards backward-calls to the precision plugin.
Args:
closure_loss: a tensor holding the loss value to backpropagate
optimizer: An optional optimizer that gets passed down to the precision plugin's backward
\*args: Positional arguments that get passed down to the precision plugin's backward, intended as arguments
for the actual function that performs the backward, like :meth:`~torch.Tensor.backward`.
\**kwargs: Keyword arguments for the same purpose as ``*args``.
"""
self.pre_backward(closure_loss)
assert self.lightning_module is not None
closure_loss = self.precision_plugin.pre_backward(closure_loss, self.lightning_module)
self.precision_plugin.backward(closure_loss, self.lightning_module, optimizer, *args, **kwargs)
closure_loss = self.precision_plugin.post_backward(closure_loss, self.lightning_module)
self.post_backward(closure_loss)
return closure_loss
def optimizer_step(
self,
optimizer: Optimizer,
closure: Callable[[], Any],
model: Optional[Union["pl.LightningModule", Module]] = None,
**kwargs: Any,
) -> Any:
r"""Performs the actual optimizer step.
Args:
optimizer: the optimizer performing the step
closure: closure calculating the loss value
model: reference to the model, optionally defining optimizer step related hooks
\**kwargs: Keyword arguments to ``optimizer.step``
"""
model = model or self.lightning_module
# TODO(fabric): remove assertion once strategy's optimizer_step typing is fixed
assert isinstance(model, pl.LightningModule)
return self.precision_plugin.optimizer_step(optimizer, model=model, closure=closure, **kwargs)
def _setup_model_and_optimizers(self, model: Module, optimizers: List[Optimizer]) -> Tuple[Module, List[Optimizer]]:
"""Setup a model and multiple optimizers together.
The returned objects are expected to be in the same order they were passed in. The default implementation will
call :meth:`_setup_model` and :meth:`_setup_optimizer` on the inputs.
"""
# TODO: standardize this across all plugins in Lightning and Fabric. Related refactor: #7324
model = self._setup_model(model)
optimizers = [self._setup_optimizer(optimizer) for optimizer in optimizers]
return model, optimizers
def _setup_model(self, model: Module) -> Module:
"""Performs setup for the model, e.g., by wrapping it by another class."""
# TODO: standardize this across all plugins in Lightning and Fabric. Related refactor: #7324
return model
def _setup_optimizer(self, optimizer: Optimizer) -> Optimizer:
"""Performs setup for the optimizer, e.g., by wrapping it by another class."""
# TODO: standardize this across all plugins in Lightning and Fabric. Related refactor: #7324
return optimizer
def batch_to_device(self, batch: Any, device: Optional[torch.device] = None, dataloader_idx: int = 0) -> Any:
"""Moves the batch to the correct device.
The returned batch is of the same type as the input batch, just
having all tensors on the correct device.
Args:
batch: The batch of samples to move to the correct device
device: The target device
dataloader_idx: The index of the dataloader to which the batch belongs.
"""
model = self.lightning_module
device = device or self.root_device
if model is not None:
return model._apply_batch_transfer_handler(batch, device=device, dataloader_idx=dataloader_idx)
return move_data_to_device(batch, device)
def root_device(self) -> torch.device:
"""Returns the root device."""
def model_to_device(self) -> None:
"""Moves the model to the correct device."""
def is_global_zero(self) -> bool:
"""Whether the current process is the rank zero process not only on the local node, but for all nodes."""
def reduce(
self,
tensor: Union[Tensor, Any],
group: Optional[Any] = None,
reduce_op: Optional[Union[ReduceOp, str]] = "mean",
) -> Union[Tensor, Any]:
"""Reduces the given tensor (e.g. across GPUs/processes).
Args:
tensor: the tensor to sync and reduce
group: the process group to reduce
reduce_op: the reduction operation. Defaults to 'mean'.
Can also be a string 'sum' or ReduceOp.
"""
def barrier(self, name: Optional[str] = None) -> None:
"""Synchronizes all processes which blocks processes until the whole group enters this function.
Args:
name: an optional name to pass into barrier.
"""
def broadcast(self, obj: TBroadcast, src: int = 0) -> TBroadcast:
"""Broadcasts an object to all processes.
Args:
obj: the object to broadcast
src: source rank
"""
def all_gather(self, tensor: Tensor, group: Optional[Any] = None, sync_grads: bool = False) -> Tensor:
"""Perform an all_gather on all processes.
Args:
tensor: the tensor to all_gather
group: the process group to gather results from
sync_grads: flag that allows users to synchronize gradients for all_gather op
"""
def reduce_boolean_decision(self, decision: bool, all: bool = True) -> bool:
"""Reduce a boolean decision across all processes."""
return decision
def pre_backward(self, closure_loss: Tensor) -> None:
"""Run before precision plugin executes backward."""
def post_backward(self, closure_loss: Tensor) -> None:
"""Run after precision plugin executes backward."""
def model(self) -> Optional[Module]:
"""Returns the potentially wrapped LightningModule."""
return self._model if self._model is not None else self._lightning_module
def model(self, new_model: Optional[Module]) -> None:
self._model = new_model
def lightning_module(self) -> Optional["pl.LightningModule"]:
"""Returns the pure LightningModule without potential wrappers."""
return self._lightning_module
def load_checkpoint(self, checkpoint_path: _PATH) -> Dict[str, Any]:
torch.cuda.empty_cache()
return self.checkpoint_io.load_checkpoint(checkpoint_path)
def load_model_state_dict(self, checkpoint: Mapping[str, Any], strict: bool = True) -> None:
assert self.lightning_module is not None
self.lightning_module.load_state_dict(checkpoint["state_dict"], strict=strict)
def load_optimizer_state_dict(self, checkpoint: Mapping[str, Any]) -> None:
optimizer_states = checkpoint["optimizer_states"]
for optimizer, opt_state in zip(self.optimizers, optimizer_states):
optimizer.load_state_dict(opt_state)
_optimizer_to_device(optimizer, self.root_device)
def training_step(self, *args: Any, **kwargs: Any) -> STEP_OUTPUT:
"""The actual training step.
See :meth:`~lightning.pytorch.core.LightningModule.training_step` for more details
"""
assert self.lightning_module is not None
assert self.model is not None
with self.precision_plugin.train_step_context():
if self.model != self.lightning_module:
return self._forward_redirection(self.model, self.lightning_module, "training_step", *args, **kwargs)
return self.lightning_module.training_step(*args, **kwargs)
def post_training_step(self) -> None:
"""This hook is deprecated.
Override :meth:`training_step` instead.
"""
pass
def validation_step(self, *args: Any, **kwargs: Any) -> STEP_OUTPUT:
"""The actual validation step.
See :meth:`~lightning.pytorch.core.LightningModule.validation_step` for more details
"""
assert self.lightning_module is not None
assert self.model is not None
with self.precision_plugin.val_step_context():
if self.model != self.lightning_module:
return self._forward_redirection(self.model, self.lightning_module, "validation_step", *args, **kwargs)
return self.lightning_module.validation_step(*args, **kwargs)
def test_step(self, *args: Any, **kwargs: Any) -> STEP_OUTPUT:
"""The actual test step.
See :meth:`~lightning.pytorch.core.LightningModule.test_step` for more details
"""
assert self.lightning_module is not None
assert self.model is not None
with self.precision_plugin.test_step_context():
if self.model != self.lightning_module:
return self._forward_redirection(self.model, self.lightning_module, "test_step", *args, **kwargs)
return self.lightning_module.test_step(*args, **kwargs)
def predict_step(self, *args: Any, **kwargs: Any) -> Any:
"""The actual predict step.
See :meth:`~lightning.pytorch.core.LightningModule.predict_step` for more details
"""
assert self.lightning_module is not None
assert self.model is not None
with self.precision_plugin.predict_step_context():
if self.model != self.lightning_module:
return self._forward_redirection(self.model, self.lightning_module, "predict_step", *args, **kwargs)
return self.lightning_module.predict_step(*args, **kwargs)
def process_dataloader(self, dataloader: object) -> object:
"""Wraps the dataloader if necessary.
Args:
dataloader: iterable. Ideally of type: :class:`torch.utils.data.DataLoader`
"""
return dataloader
def restore_checkpoint_after_setup(self) -> bool:
"""Override to delay restoring from checkpoint till after the setup phase has completed. This is useful when
the strategy requires all the setup hooks to run before loading checkpoint.
Returns:
If ``True``, restore checkpoint after strategy setup.
"""
return False
def lightning_restore_optimizer(self) -> bool:
"""Override to disable Lightning restoring optimizers/schedulers.
This is useful for strategies which manage restoring optimizers/schedulers.
"""
return True
def handles_gradient_accumulation(self) -> bool:
"""Whether the strategy handles gradient accumulation internally."""
return False
def lightning_module_state_dict(self) -> Dict[str, Any]:
"""Returns model state."""
assert self.lightning_module is not None
return self.lightning_module.state_dict()
def save_checkpoint(
self, checkpoint: Dict[str, Any], filepath: _PATH, storage_options: Optional[Any] = None
) -> None:
"""Save model/training states as a checkpoint file through state-dump and file-write.
Args:
checkpoint: dict containing model and trainer state
filepath: write-target file's path
storage_options: parameter for how to save to storage, passed to ``CheckpointIO`` plugin
"""
if self.is_global_zero:
self.checkpoint_io.save_checkpoint(checkpoint, filepath, storage_options=storage_options)
def remove_checkpoint(self, filepath: _PATH) -> None:
"""Remove checkpoint filepath from the filesystem.
Args:
filepath: Path to checkpoint
"""
if self.is_global_zero:
self.checkpoint_io.remove_checkpoint(filepath)
def tensor_init_context(self, empty_init: Optional[bool] = None) -> Generator[None, None, None]:
"""Controls how tensors get created (device, dtype).
Args:
empty_init: Whether to initialize the model with empty weights (uninitialized memory).
If ``None``, the strategy will decide. Some strategies may not support all options.
"""
device_context = self.root_device if _TORCH_GREATER_EQUAL_2_0 else nullcontext()
empty_init_context = _EmptyInit(enabled=bool(empty_init))
with empty_init_context, device_context, self.precision_plugin.tensor_init_context():
yield
def model_sharded_context(self) -> Generator[None, None, None]:
"""Provide hook to create modules in a distributed aware context. This is useful for when we'd like to shard
the model instantly, which is useful for extremely large models which can save memory and initialization time.
Returns: Model parallel context.
"""
yield
def teardown(self) -> None:
"""This method is called to teardown the training process.
It is the right place to release memory and free other resources.
"""
_optimizers_to_device(self.optimizers, torch.device("cpu"))
if self.lightning_module is not None:
log.debug(f"{self.__class__.__name__}: moving model to CPU")
self.lightning_module.cpu()
self.precision_plugin.teardown()
assert self.accelerator is not None
self.accelerator.teardown()
self.checkpoint_io.teardown()
def register_strategies(cls, strategy_registry: _StrategyRegistry) -> None:
pass
def on_train_start(self) -> None:
"""Called when train begins."""
pass
def on_validation_start(self) -> None:
"""Called when validation begins."""
pass
def on_test_start(self) -> None:
"""Called when test begins."""
pass
def on_predict_start(self) -> None:
"""Called when predict begins."""
pass
def on_train_end(self) -> None:
"""Called when train ends."""
pass
def on_validation_end(self) -> None:
"""Called when validation ends."""
pass
def on_test_end(self) -> None:
"""Called when test end."""
pass
def on_predict_end(self) -> None:
"""Called when predict ends."""
pass
def on_train_batch_start(self, batch: Any, batch_idx: int) -> None:
"""Called in the training loop before anything happens for that batch."""
pass
def on_exception(self, exception: BaseException) -> None:
"""Called when the trainer execution is interrupted by an exception."""
pass
def _reset_optimizers_and_schedulers(self) -> None:
self._optimizers = []
self._lightning_optimizers = []
self.lr_scheduler_configs = []
def __getstate__(self) -> Dict:
# `LightningOptimizer` overrides `self.__class__` so they cannot be pickled
state = dict(vars(self)) # copy
state["_lightning_optimizers"] = []
return state
def __setstate__(self, state: Dict) -> None:
self.__dict__ = state
self.optimizers = self.optimizers # re-create the `_lightning_optimizers`
The provided code snippet includes necessary dependencies for implementing the `_block_parallel_sync_behavior` function. Write a Python function `def _block_parallel_sync_behavior(strategy: Strategy, block: bool = True) -> Generator[None, None, None]` to solve the following problem:
Blocks synchronization in :class:`~lightning.pytorch.strategies.parallel.ParallelStrategy`. This is useful for example when accumulating gradients to reduce communication when it is not needed. Args: strategy: the strategy instance to use. block: whether the context manager is enabled or not Returns: context manager with sync behaviour off
Here is the function:
def _block_parallel_sync_behavior(strategy: Strategy, block: bool = True) -> Generator[None, None, None]:
"""Blocks synchronization in :class:`~lightning.pytorch.strategies.parallel.ParallelStrategy`. This is useful for
example when accumulating gradients to reduce communication when it is not needed.
Args:
strategy: the strategy instance to use.
block: whether the context manager is enabled or not
Returns:
context manager with sync behaviour off
"""
if isinstance(strategy, ParallelStrategy) and block:
with strategy.block_backward_sync():
yield None
else:
yield None | Blocks synchronization in :class:`~lightning.pytorch.strategies.parallel.ParallelStrategy`. This is useful for example when accumulating gradients to reduce communication when it is not needed. Args: strategy: the strategy instance to use. block: whether the context manager is enabled or not Returns: context manager with sync behaviour off |
155,439 | import inspect
from contextlib import contextmanager
from typing import Any, Callable, ContextManager, Generator, Optional, Tuple, Type
import torch
import torch.distributed as dist
from torch import Tensor
import lightning.pytorch as pl
from lightning.fabric.utilities.distributed import _distributed_is_initialized
from lightning.fabric.utilities.imports import _TORCH_EQUAL_2_0
from lightning.fabric.utilities.warnings import PossibleUserWarning
from lightning.pytorch.accelerators.xla import XLAAccelerator
from lightning.pytorch.callbacks.timer import Timer
from lightning.pytorch.loops import _Loop
from lightning.pytorch.loops.fetchers import _DataFetcher, _DataLoaderIterDataFetcher, _PrefetchDataFetcher
from lightning.pytorch.loops.progress import _BaseProgress
from lightning.pytorch.strategies import FSDPStrategy
from lightning.pytorch.strategies.parallel import ParallelStrategy
from lightning.pytorch.strategies.strategy import Strategy
from lightning.pytorch.trainer.states import RunningStage
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
from lightning.pytorch.utilities.signature_utils import is_param_in_hook_signature
The provided code snippet includes necessary dependencies for implementing the `_is_max_limit_reached` function. Write a Python function `def _is_max_limit_reached(current: int, maximum: int = -1) -> bool` to solve the following problem:
Check if the limit has been reached (if enabled). Args: current: the current value maximum: the maximum value (or -1 to disable limit) Returns: bool: whether the limit has been reached
Here is the function:
def _is_max_limit_reached(current: int, maximum: int = -1) -> bool:
"""Check if the limit has been reached (if enabled).
Args:
current: the current value
maximum: the maximum value (or -1 to disable limit)
Returns:
bool: whether the limit has been reached
"""
return maximum != -1 and current >= maximum | Check if the limit has been reached (if enabled). Args: current: the current value maximum: the maximum value (or -1 to disable limit) Returns: bool: whether the limit has been reached |
155,440 | import inspect
from contextlib import contextmanager
from typing import Any, Callable, ContextManager, Generator, Optional, Tuple, Type
import torch
import torch.distributed as dist
from torch import Tensor
import lightning.pytorch as pl
from lightning.fabric.utilities.distributed import _distributed_is_initialized
from lightning.fabric.utilities.imports import _TORCH_EQUAL_2_0
from lightning.fabric.utilities.warnings import PossibleUserWarning
from lightning.pytorch.accelerators.xla import XLAAccelerator
from lightning.pytorch.callbacks.timer import Timer
from lightning.pytorch.loops import _Loop
from lightning.pytorch.loops.fetchers import _DataFetcher, _DataLoaderIterDataFetcher, _PrefetchDataFetcher
from lightning.pytorch.loops.progress import _BaseProgress
from lightning.pytorch.strategies import FSDPStrategy
from lightning.pytorch.strategies.parallel import ParallelStrategy
from lightning.pytorch.strategies.strategy import Strategy
from lightning.pytorch.trainer.states import RunningStage
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
from lightning.pytorch.utilities.signature_utils import is_param_in_hook_signature
class _BaseProgress:
"""Mixin that implements state-loading utilities for dataclasses."""
def state_dict(self) -> dict:
return asdict(self)
def load_state_dict(self, state_dict: dict) -> None:
self.__dict__.update(state_dict)
def from_state_dict(cls, state_dict: dict) -> "_BaseProgress":
obj = cls()
obj.load_state_dict(state_dict)
return obj
def reset(self) -> None:
"""Reset the object's state."""
raise NotImplementedError
def _reset_progress(loop: _Loop) -> None:
for v in vars(loop).values():
if isinstance(v, _BaseProgress):
v.reset()
elif isinstance(v, _Loop):
_reset_progress(v) | null |
155,441 | import inspect
from contextlib import contextmanager
from typing import Any, Callable, ContextManager, Generator, Optional, Tuple, Type
import torch
import torch.distributed as dist
from torch import Tensor
import lightning.pytorch as pl
from lightning.fabric.utilities.distributed import _distributed_is_initialized
from lightning.fabric.utilities.imports import _TORCH_EQUAL_2_0
from lightning.fabric.utilities.warnings import PossibleUserWarning
from lightning.pytorch.accelerators.xla import XLAAccelerator
from lightning.pytorch.callbacks.timer import Timer
from lightning.pytorch.loops import _Loop
from lightning.pytorch.loops.fetchers import _DataFetcher, _DataLoaderIterDataFetcher, _PrefetchDataFetcher
from lightning.pytorch.loops.progress import _BaseProgress
from lightning.pytorch.strategies import FSDPStrategy
from lightning.pytorch.strategies.parallel import ParallelStrategy
from lightning.pytorch.strategies.strategy import Strategy
from lightning.pytorch.trainer.states import RunningStage
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
from lightning.pytorch.utilities.signature_utils import is_param_in_hook_signature
class _DataFetcher(Iterator):
def __init__(self) -> None:
self._combined_loader: Optional[CombinedLoader] = None
self.iterator: Optional[Iterator] = None
self.fetched: int = 0
self.done: bool = False
self.length: Optional[int] = None
self._start_profiler = _profile_nothing
self._stop_profiler = _profile_nothing
def combined_loader(self) -> CombinedLoader:
if self._combined_loader is None:
raise MisconfigurationException(
f"`{self.__class__.__name__}` should have been `setup` with a `CombinedLoader`."
)
return self._combined_loader
def setup(self, combined_loader: CombinedLoader) -> None:
self._combined_loader = combined_loader
def __iter__(self) -> "_DataFetcher":
self.iterator = iter(self.combined_loader)
self.reset()
return self
def __next__(self) -> _ITERATOR_RETURN:
assert self.iterator is not None
self._start_profiler()
try:
batch = next(self.iterator)
except StopIteration:
self.done = True
raise
finally:
self._stop_profiler()
self.fetched += 1
if self.length is not None:
self.done = self.fetched >= self.length
return batch
def reset(self) -> None:
self.fetched = 0
# teardown calls `reset()`, and if it happens early, `combined_loader` can still be None
if self._combined_loader is not None:
self.length = sized_len(self.combined_loader)
self.done = self.length == 0
def teardown(self) -> None:
self.reset()
if self._combined_loader is not None:
self._combined_loader.reset()
self.iterator = None
class _PrefetchDataFetcher(_DataFetcher):
"""This class is used to control batch fetching flow.
Args:
prefetch_batches: Number of batches to pre-fetch. Pre-fetching at least 1 batch is necessary to properly track
whether a batch is the last one (available with :attr:`self.done`) when the length is not available. The
value of this argument is ignored when the length is available.
"""
def __init__(self, prefetch_batches: int = 1) -> None:
super().__init__()
if prefetch_batches < 0:
raise ValueError("`prefetch_batches` should at least be 0.")
self.prefetch_batches = prefetch_batches
self.batches: List[Any] = []
def __iter__(self) -> "_PrefetchDataFetcher":
super().__iter__()
if self.length is not None:
# ignore pre-fetching, it's not necessary
return self
# prefetch batches to know when the iterator will be exhausted in advance
for _ in range(self.prefetch_batches):
try:
batch = super().__next__()
self.batches.append(batch)
except StopIteration:
# this would only happen when prefetch_batches > the number of batches available and makes
# `__next__` jump directly to the empty iterator case without trying to fetch again
break
return self
def __next__(self) -> _ITERATOR_RETURN:
if self.batches:
# there are pre-fetched batches already from a previous `prefetching` call.
# consume one
batch = self.batches.pop(0)
try:
# refill the consumed batch
self.batches.append(super().__next__())
except StopIteration:
# no more batches to fetch. we are done only if all pre-fetched batches were returned
self.done = not self.batches
elif not self.done:
# this will run only when no pre-fetching was done.
batch = super().__next__()
else:
# the iterator is empty
raise StopIteration
return batch
def reset(self) -> None:
super().reset()
self.batches = []
class _DataLoaderIterDataFetcher(_DataFetcher):
"""This class is used to return directly the `dataloader_iter` to the ``LightningModule`` training_step for users
to implement their own pre-fetching logic. This feature can be activated as follows:
Example::
Class MyModel(LightningModule):
def training_step(self, dataloader_iter: Iterator) -> None:
# it is the user responsibility to fetch and move the batch to the right device.
batch, batch_idx, dataloader_idx = next(dataloader_iter)
batch = batch.to(self.device)
...
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self._batch: Any = None
self._batch_idx: int = 0
self._dataloader_idx: int = 0
def __iter__(self) -> "_DataLoaderIterDataFetcher":
super().__iter__()
self.iterator_wrapper = iter(_DataFetcherWrapper(self))
return self
def __next__(self) -> Iterator["_DataFetcherWrapper"]: # type: ignore[override]
if self.done:
raise StopIteration
return self.iterator_wrapper
def reset(self) -> None:
super().reset()
self._batch = None
self._batch_idx = 0
self._dataloader_idx = 0
class RunningStage(LightningEnum):
"""Enum for the current running stage.
This stage complements :class:`TrainerFn` by specifying the current running stage for each function.
More than one running stage value can be set while a :class:`TrainerFn` is running:
- ``TrainerFn.FITTING`` - ``RunningStage.{SANITY_CHECKING,TRAINING,VALIDATING}``
- ``TrainerFn.VALIDATING`` - ``RunningStage.VALIDATING``
- ``TrainerFn.TESTING`` - ``RunningStage.TESTING``
- ``TrainerFn.PREDICTING`` - ``RunningStage.PREDICTING``
"""
TRAINING = "train"
SANITY_CHECKING = "sanity_check"
VALIDATING = "validate"
TESTING = "test"
PREDICTING = "predict"
def evaluating(self) -> bool:
return self in (self.VALIDATING, self.TESTING, self.SANITY_CHECKING)
def dataloader_prefix(self) -> Optional[str]:
if self in (self.VALIDATING, self.SANITY_CHECKING):
return "val"
return self.value
def is_param_in_hook_signature(
hook_fx: Callable, param: str, explicit: bool = False, min_args: Optional[int] = None
) -> bool:
"""
Args:
hook_fx: the hook callable
param: the name of the parameter to check
explicit: whether the parameter has to be explicitly declared
min_args: whether the `signature` has at least `min_args` parameters
"""
if hasattr(hook_fx, "__wrapped__"):
# in case the hook has a decorator
hook_fx = hook_fx.__wrapped__
parameters = inspect.getfullargspec(hook_fx)
args = parameters.args[1:] # ignore `self`
return (
param in args
or (not explicit and (parameters.varargs is not None))
or (isinstance(min_args, int) and len(args) >= min_args)
)
def _select_data_fetcher(trainer: "pl.Trainer", stage: RunningStage) -> _DataFetcher:
lightning_module = trainer.lightning_module
if stage == RunningStage.TESTING:
step_fx_name = "test_step"
elif stage == RunningStage.TRAINING:
step_fx_name = "training_step"
elif stage in (RunningStage.VALIDATING, RunningStage.SANITY_CHECKING):
step_fx_name = "validation_step"
elif stage == RunningStage.PREDICTING:
step_fx_name = "predict_step"
else:
raise RuntimeError(f"DataFetcher is unsupported for {trainer.state.stage}")
step_fx = getattr(lightning_module, step_fx_name)
if is_param_in_hook_signature(step_fx, "dataloader_iter", explicit=True):
rank_zero_warn(
f"Found `dataloader_iter` argument in the `{step_fx_name}`. Note that the support for "
"this signature is experimental and the behavior is subject to change."
)
return _DataLoaderIterDataFetcher()
return _PrefetchDataFetcher() | null |
155,442 | import inspect
from contextlib import contextmanager
from typing import Any, Callable, ContextManager, Generator, Optional, Tuple, Type
import torch
import torch.distributed as dist
from torch import Tensor
import lightning.pytorch as pl
from lightning.fabric.utilities.distributed import _distributed_is_initialized
from lightning.fabric.utilities.imports import _TORCH_EQUAL_2_0
from lightning.fabric.utilities.warnings import PossibleUserWarning
from lightning.pytorch.accelerators.xla import XLAAccelerator
from lightning.pytorch.callbacks.timer import Timer
from lightning.pytorch.loops import _Loop
from lightning.pytorch.loops.fetchers import _DataFetcher, _DataLoaderIterDataFetcher, _PrefetchDataFetcher
from lightning.pytorch.loops.progress import _BaseProgress
from lightning.pytorch.strategies import FSDPStrategy
from lightning.pytorch.strategies.parallel import ParallelStrategy
from lightning.pytorch.strategies.strategy import Strategy
from lightning.pytorch.trainer.states import RunningStage
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
from lightning.pytorch.utilities.signature_utils import is_param_in_hook_signature
def _distributed_is_initialized() -> bool:
# `is_initialized` is only defined conditionally
# https://github.com/pytorch/pytorch/blob/v2.1.0/torch/distributed/__init__.py#L25
# this might happen to MacOS builds from source (default) or any build from source that sets `USE_DISTRIBUTED=0`
return torch.distributed.is_available() and torch.distributed.is_initialized()
_TORCH_EQUAL_2_0 = _TORCH_GREATER_EQUAL_2_0 and not _TORCH_GREATER_EQUAL_2_1
class XLAAccelerator(Accelerator, FabricXLAAccelerator):
"""Accelerator for XLA devices, normally TPUs.
.. warning:: Use of this accelerator beyond import and instantiation is experimental.
"""
def get_device_stats(self, device: _DEVICE) -> Dict[str, Any]:
"""Gets stats for the given XLA device.
Args:
device: XLA device for which to get stats
Returns:
A dictionary mapping the metrics (free memory and peak memory) to their values.
"""
import torch_xla.core.xla_model as xm
memory_info = xm.get_memory_info(device)
free_memory = memory_info["kb_free"]
peak_memory = memory_info["kb_total"] - free_memory
return {
"avg. free memory (MB)": free_memory,
"avg. peak memory (MB)": peak_memory,
}
def register_accelerators(cls, accelerator_registry: _AcceleratorRegistry) -> None:
accelerator_registry.register("tpu", cls, description=cls.__name__)
def _no_grad_context(loop_run: Callable) -> Callable:
def _decorator(self: _Loop, *args: Any, **kwargs: Any) -> Any:
if not isinstance(self, _Loop):
raise TypeError(f"`{type(self).__name__}` needs to be a Loop.")
if not hasattr(self, "inference_mode"):
raise TypeError(f"`{type(self).__name__}.inference_mode` needs to be defined")
context_manager: Type[ContextManager]
if _distributed_is_initialized() and dist.get_backend() == "gloo":
# gloo backend does not work properly.
# https://github.com/Lightning-AI/lightning/pull/12715/files#r854569110
# TODO: explore why and possibly open an issue in PyTorch repository
context_manager = torch.no_grad
elif isinstance(self.trainer.accelerator, XLAAccelerator):
context_manager = torch.no_grad
elif isinstance(self.trainer.strategy, FSDPStrategy):
# https://github.com/pytorch/pytorch/issues/95957
context_manager = torch.no_grad
elif _TORCH_EQUAL_2_0 and self.trainer.lightning_module._compiler_ctx is not None:
# avoid: `RuntimeError: Inference tensors do not track version counter` fixed in v2.1
context_manager = torch.no_grad
elif self.inference_mode:
context_manager = torch.inference_mode
else:
context_manager = torch.no_grad
with context_manager():
return loop_run(self, *args, **kwargs)
return _decorator | null |
155,443 | import inspect
from contextlib import contextmanager
from typing import Any, Callable, ContextManager, Generator, Optional, Tuple, Type
import torch
import torch.distributed as dist
from torch import Tensor
import lightning.pytorch as pl
from lightning.fabric.utilities.distributed import _distributed_is_initialized
from lightning.fabric.utilities.imports import _TORCH_EQUAL_2_0
from lightning.fabric.utilities.warnings import PossibleUserWarning
from lightning.pytorch.accelerators.xla import XLAAccelerator
from lightning.pytorch.callbacks.timer import Timer
from lightning.pytorch.loops import _Loop
from lightning.pytorch.loops.fetchers import _DataFetcher, _DataLoaderIterDataFetcher, _PrefetchDataFetcher
from lightning.pytorch.loops.progress import _BaseProgress
from lightning.pytorch.strategies import FSDPStrategy
from lightning.pytorch.strategies.parallel import ParallelStrategy
from lightning.pytorch.strategies.strategy import Strategy
from lightning.pytorch.trainer.states import RunningStage
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
from lightning.pytorch.utilities.signature_utils import is_param_in_hook_signature
class RunningStage(LightningEnum):
"""Enum for the current running stage.
This stage complements :class:`TrainerFn` by specifying the current running stage for each function.
More than one running stage value can be set while a :class:`TrainerFn` is running:
- ``TrainerFn.FITTING`` - ``RunningStage.{SANITY_CHECKING,TRAINING,VALIDATING}``
- ``TrainerFn.VALIDATING`` - ``RunningStage.VALIDATING``
- ``TrainerFn.TESTING`` - ``RunningStage.TESTING``
- ``TrainerFn.PREDICTING`` - ``RunningStage.PREDICTING``
"""
TRAINING = "train"
SANITY_CHECKING = "sanity_check"
VALIDATING = "validate"
TESTING = "test"
PREDICTING = "predict"
def evaluating(self) -> bool:
return self in (self.VALIDATING, self.TESTING, self.SANITY_CHECKING)
def dataloader_prefix(self) -> Optional[str]:
if self in (self.VALIDATING, self.SANITY_CHECKING):
return "val"
return self.value
def is_param_in_hook_signature(
hook_fx: Callable, param: str, explicit: bool = False, min_args: Optional[int] = None
) -> bool:
"""
Args:
hook_fx: the hook callable
param: the name of the parameter to check
explicit: whether the parameter has to be explicitly declared
min_args: whether the `signature` has at least `min_args` parameters
"""
if hasattr(hook_fx, "__wrapped__"):
# in case the hook has a decorator
hook_fx = hook_fx.__wrapped__
parameters = inspect.getfullargspec(hook_fx)
args = parameters.args[1:] # ignore `self`
return (
param in args
or (not explicit and (parameters.varargs is not None))
or (isinstance(min_args, int) and len(args) >= min_args)
)
def _verify_dataloader_idx_requirement(
hooks: Tuple[str, ...], is_expected: bool, stage: RunningStage, pl_module: "pl.LightningModule"
) -> None:
for hook in hooks:
fx = getattr(pl_module, hook)
# this validation only works if "dataloader_idx" is used, no other names such as "dl_idx"
param_present = is_param_in_hook_signature(fx, "dataloader_idx")
if not is_expected:
if param_present:
params = inspect.signature(fx).parameters
if "dataloader_idx" in params and params["dataloader_idx"].default is inspect.Parameter.empty:
raise RuntimeError(
f"You provided only a single `{stage.dataloader_prefix}_dataloader`, but have included "
f"`dataloader_idx` in `{type(pl_module).__name__}.{hook}()`. Either remove the"
" argument or give it a default value i.e. `dataloader_idx=0`."
)
elif not param_present:
raise RuntimeError(
f"You provided multiple `{stage.dataloader_prefix}_dataloader`, but no `dataloader_idx`"
f" argument in `{type(pl_module).__name__}.{hook}()`. Try adding `dataloader_idx=0` to its"
" signature."
) | null |
155,444 | import argparse
import json
import logging
import os
import platform
from collections import OrderedDict
from contextlib import contextmanager
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, Generator, List, Mapping, Optional, Tuple, Union
import torch
from torch.nn import Module
from torch.optim import Optimizer
from typing_extensions import override
import lightning.pytorch as pl
from lightning.fabric.plugins import ClusterEnvironment
from lightning.fabric.strategies import _StrategyRegistry
from lightning.fabric.strategies.deepspeed import (
_DEEPSPEED_AVAILABLE,
_format_precision_config,
_validate_checkpoint_directory,
_validate_device_index_selection,
)
from lightning.fabric.utilities.optimizer import _optimizers_to_device
from lightning.fabric.utilities.seed import reset_seed
from lightning.fabric.utilities.types import _PATH, LRScheduler, ReduceLROnPlateau
from lightning.pytorch.accelerators.cuda import CUDAAccelerator
from lightning.pytorch.core.optimizer import _init_optimizers_and_lr_schedulers
from lightning.pytorch.plugins.precision import Precision
from lightning.pytorch.strategies.ddp import DDPStrategy
from lightning.pytorch.trainer.states import TrainerFn
from lightning.pytorch.utilities import GradClipAlgorithmType
from lightning.pytorch.utilities.exceptions import MisconfigurationException
from lightning.pytorch.utilities.model_helpers import is_overridden
from lightning.pytorch.utilities.rank_zero import WarningCache, rank_zero_info, rank_zero_warn
from lightning.pytorch.utilities.types import LRSchedulerConfig
def remove_module_hooks(model: torch.nn.Module) -> None:
# todo (tchaton) awaiting this feature to move upstream to DeepSpeed
for module in model.modules():
module._backward_hooks = OrderedDict()
module._is_full_backward_hook = None
module._forward_hooks = OrderedDict()
module._forward_pre_hooks = OrderedDict()
module._state_dict_hooks = OrderedDict()
module._load_state_dict_pre_hooks = OrderedDict() | null |
155,445 | import math
import os
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from lightning_utilities.core.imports import RequirementCache
from torch import Tensor
from torch.nn.modules import MultiheadAttention
from torch.utils.data import DataLoader, Dataset
from lightning.pytorch import LightningModule
class Dictionary:
def __init__(self) -> None:
self.word2idx: Dict[str, int] = {}
self.idx2word: List[str] = []
def add_word(self, word: str) -> int:
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
return self.word2idx[word]
def __len__(self) -> int:
return len(self.idx2word)
def tokenize(path: Path) -> Tuple[Tensor, Dictionary]:
dictionary = Dictionary()
assert os.path.exists(path)
# Add words to the dictionary
with open(path, encoding="utf8") as f:
for line in f:
words = line.split() + ["<eos>"]
for word in words:
dictionary.add_word(word)
# Tokenize file content
with open(path, encoding="utf8") as f:
idss: List[Tensor] = []
for line in f:
words = line.split() + ["<eos>"]
ids: List[int] = []
for word in words:
ids.append(dictionary.word2idx[word])
idss.append(torch.tensor(ids).type(torch.int64))
return torch.cat(idss), dictionary | null |
155,446 | import logging
import os
import random
import time
import urllib
from typing import Any, Callable, Optional, Sized, Tuple, Union
from urllib.error import HTTPError
from warnings import warn
import torch
from torch import Tensor
from torch.utils.data import DataLoader, Dataset, random_split
from lightning.fabric.utilities.imports import _IS_WINDOWS
from lightning.pytorch import LightningDataModule
from lightning.pytorch.utilities.imports import _TORCHVISION_AVAILABLE
_DATASETS_PATH = "./data"
class _MNIST(Dataset):
    """Carbon copy of ``tests_pytorch.helpers.datasets.MNIST``.

    We cannot import the tests as they are not distributed with the package.
    See https://github.com/Lightning-AI/lightning/pull/7614#discussion_r671183652 for more context.

    .. warning:: This is meant for testing/debugging and is experimental.

    """

    RESOURCES = (
        "https://pl-public-data.s3.amazonaws.com/MNIST/processed/training.pt",
        "https://pl-public-data.s3.amazonaws.com/MNIST/processed/test.pt",
    )

    TRAIN_FILE_NAME = "training.pt"
    TEST_FILE_NAME = "test.pt"
    cache_folder_name = "complete"

    def __init__(
        self, root: str, train: bool = True, normalize: tuple = (0.1307, 0.3081), download: bool = True, **kwargs: Any
    ) -> None:
        super().__init__()
        self.root = root
        self.train = train  # training set or test set
        self.normalize = normalize

        self.prepare_data(download)

        data_file = self.TRAIN_FILE_NAME if self.train else self.TEST_FILE_NAME
        self.data, self.targets = self._try_load(os.path.join(self.cached_folder_path, data_file))

    def __getitem__(self, idx: int) -> Tuple[Tensor, int]:
        img = self.data[idx].float().unsqueeze(0)
        target = int(self.targets[idx])

        if self.normalize is not None and len(self.normalize) == 2:
            img = self.normalize_tensor(img, *self.normalize)

        return img, target

    def __len__(self) -> int:
        return len(self.data)

    @property
    def cached_folder_path(self) -> str:
        # Restored `@property`: `__init__` and `prepare_data` read this as an attribute,
        # which would otherwise pass a bound method into `os.path.join`.
        return os.path.join(self.root, "MNIST", self.cache_folder_name)

    def _check_exists(self, data_folder: str) -> bool:
        """Return True only when both the train and test files are present."""
        existing = True
        for fname in (self.TRAIN_FILE_NAME, self.TEST_FILE_NAME):
            existing = existing and os.path.isfile(os.path.join(data_folder, fname))
        return existing

    def prepare_data(self, download: bool = True) -> None:
        """Download the dataset if requested and verify the cached files exist."""
        if download and not self._check_exists(self.cached_folder_path):
            self._download(self.cached_folder_path)
        if not self._check_exists(self.cached_folder_path):
            raise RuntimeError("Dataset not found.")

    def _download(self, data_folder: str) -> None:
        """Fetch both resource files into ``data_folder``."""
        # `urllib.request` is not imported by the top-level `import urllib`; import it
        # here explicitly so the attribute access cannot fail.
        import urllib.request

        os.makedirs(data_folder, exist_ok=True)
        for url in self.RESOURCES:
            logging.info(f"Downloading {url}")
            fpath = os.path.join(data_folder, os.path.basename(url))
            urllib.request.urlretrieve(url, fpath)  # noqa: S310

    @staticmethod
    def _try_load(path_data: str, trials: int = 30, delta: float = 1.0) -> Tuple[Tensor, Tensor]:
        """Retry loading a file that may be written concurrently by other processes."""
        res, exception = None, None
        assert trials, "at least some trial has to be set"
        assert os.path.isfile(path_data), f"missing file: {path_data}"
        for _ in range(trials):
            try:
                res = torch.load(path_data)
            # todo: specify the possible exception
            except Exception as ex:
                exception = ex
                time.sleep(delta * random.random())  # noqa: S311
            else:
                break
        assert res is not None
        if exception is not None:
            # raise the caught exception
            raise exception
        return res

    @staticmethod
    def normalize_tensor(tensor: Tensor, mean: Union[int, float] = 0.0, std: Union[int, float] = 1.0) -> Tensor:
        """Return ``(tensor - mean) / std`` with mean/std cast to the tensor's dtype/device."""
        mean = torch.as_tensor(mean, dtype=tensor.dtype, device=tensor.device)
        std = torch.as_tensor(std, dtype=tensor.dtype, device=tensor.device)
        return tensor.sub(mean).div(std)


# Module-level aliases kept for any callers that used the previously dedented copies.
_try_load = _MNIST._try_load
normalize_tensor = _MNIST.normalize_tensor
def MNIST(*args: Any, **kwargs: Any) -> Dataset:
    """Return ``torchvision``'s MNIST when reachable, else the hosted ``_MNIST`` fallback."""
    dataset_cls = None
    # `PL_USE_MOCKED_MNIST` forces the hosted fallback (e.g. in CI).
    if not bool(os.getenv("PL_USE_MOCKED_MNIST", False)):
        try:
            from torchvision.datasets import MNIST as TorchvisionMNIST

            # Probe download to verify the upstream mirror is reachable.
            TorchvisionMNIST(_DATASETS_PATH, download=True)
            dataset_cls = TorchvisionMNIST
        except HTTPError as ex:
            print(f"Error {ex} downloading `torchvision.datasets.MNIST`")
    if dataset_cls is None:
        print("`torchvision.datasets.MNIST` not available. Using our hosted version")
        dataset_cls = _MNIST
    return dataset_cls(*args, **kwargs)
155,447 | from typing import Callable
import torchmetrics
from lightning_utilities.core.imports import compare_version as _compare_version
from lightning.pytorch.utilities.imports import _TORCHMETRICS_GREATER_EQUAL_0_8_0
from lightning.pytorch.utilities.migration.utils import _patch_pl_to_mirror_if_necessary
def _patch_pl_to_mirror_if_necessary(module: str) -> str:
    """Rewrite a ``pytorch_lightning`` module path to its ``lightning.pytorch`` mirror.

    Paths outside the standalone package are returned unchanged.
    """
    # Built from two pieces so the mirror-package generator does not rewrite it.
    standalone_prefix = "pytorch_" + "lightning"
    if not module.startswith(standalone_prefix):
        return module
    # For the standalone package this is a no-op; for the unified mirror package
    # it redirects the import path.
    return "lightning.pytorch" + module[len(standalone_prefix):]
def compare_version(package: str, op: Callable, version: str, use_base_version: bool = False) -> bool:
    """Compare an installed package's version, resolving the pl mirror name first."""
    resolved_package = _patch_pl_to_mirror_if_necessary(package)
    return _compare_version(resolved_package, op, version, use_base_version)
155,448 | import sys
from typing import TYPE_CHECKING, Any, Literal, Optional
import lightning.pytorch as pl
from lightning.fabric.utilities.rank_zero import rank_zero_deprecation
from lightning.pytorch.plugins.precision import (
BitsandbytesPrecision,
DeepSpeedPrecision,
DoublePrecision,
FSDPPrecision,
HalfPrecision,
MixedPrecision,
Precision,
TransformerEnginePrecision,
XLAPrecision,
)
def _patch_sys_modules() -> None:
    """Alias the removed ``precision_plugin`` module path to the renamed ``precision`` module."""
    legacy_name = "lightning.pytorch.plugins.precision.precision_plugin"
    current_name = "lightning.pytorch.plugins.precision.precision"
    sys.modules[legacy_name] = sys.modules[current_name]
155,449 | import sys
from typing import TYPE_CHECKING, Any, Literal, Optional
import lightning.pytorch as pl
from lightning.fabric.utilities.rank_zero import rank_zero_deprecation
from lightning.pytorch.plugins.precision import (
BitsandbytesPrecision,
DeepSpeedPrecision,
DoublePrecision,
FSDPPrecision,
HalfPrecision,
MixedPrecision,
Precision,
TransformerEnginePrecision,
XLAPrecision,
)
# NOTE(review): the bodies of `FSDPMixedPrecisionPlugin.__init__` and `_create_class`
# appear to have been lost during extraction -- only the signatures remain here.
# Recover the implementations from the upstream source before relying on this block.
class FSDPMixedPrecisionPlugin(FSDPPrecision):
    def __init__(
        self, precision: Literal["16-mixed", "bf16-mixed"], device: str, scaler: Optional["ShardedGradScaler"] = None
    ) -> None:


def _create_class(deprecated_name: str, new_class: type) -> type:
def _patch_classes() -> None:
    """Install deprecated ``*Plugin`` aliases on the ``lightning.pytorch.plugins`` namespaces."""
    classes_map = (
        # module name, old name, new class
        ("bitsandbytes", "BitsandbytesPrecisionPlugin", BitsandbytesPrecision),
        ("deepspeed", "DeepSpeedPrecisionPlugin", DeepSpeedPrecision),
        ("double", "DoublePrecisionPlugin", DoublePrecision),
        ("fsdp", "FSDPPrecisionPlugin", FSDPPrecision),
        ("fsdp", "FSDPMixedPrecisionPlugin", FSDPPrecision),
        ("half", "HalfPrecisionPlugin", HalfPrecision),
        ("amp", "MixedPrecisionPlugin", MixedPrecision),
        ("precision", "PrecisionPlugin", Precision),
        ("transformer_engine", "TransformerEnginePrecisionPlugin", TransformerEnginePrecision),
        ("xla", "XLAPrecisionPlugin", XLAPrecision),
    )

    for module_name, deprecated_name, new_class in classes_map:
        deprecated_class = _create_class(deprecated_name, new_class)
        # Expose each alias at the submodule, `plugins.precision`, and `plugins` levels.
        setattr(getattr(pl.plugins.precision, module_name), deprecated_name, deprecated_class)
        setattr(pl.plugins.precision, deprecated_name, deprecated_class)
        setattr(pl.plugins, deprecated_name, deprecated_class)

    # special treatment for `FSDPMixedPrecisionPlugin` because it has a different signature
    setattr(pl.plugins.precision.fsdp, "FSDPMixedPrecisionPlugin", FSDPMixedPrecisionPlugin)
    setattr(pl.plugins.precision, "FSDPMixedPrecisionPlugin", FSDPMixedPrecisionPlugin)
    setattr(pl.plugins, "FSDPMixedPrecisionPlugin", FSDPMixedPrecisionPlugin)
155,450 | import sys
from typing import Any
import lightning.pytorch as pl
from lightning.fabric.strategies import _StrategyRegistry
from lightning.pytorch.accelerators.xla import XLAAccelerator
from lightning.pytorch.plugins.precision import XLAPrecision
from lightning.pytorch.strategies.single_xla import SingleDeviceXLAStrategy
from lightning.pytorch.utilities.rank_zero import rank_zero_deprecation
def _patch_sys_modules() -> None:
    """Alias removed TPU/XLA module paths to this module so old imports keep working."""
    current_module = sys.modules[__name__]
    legacy_names = (
        "lightning.pytorch.strategies.single_tpu",
        "lightning.pytorch.accelerators.tpu",
        "lightning.pytorch.plugins.precision.tpu",
        "lightning.pytorch.plugins.precision.tpu_bf16",
        "lightning.pytorch.plugins.precision.xlabf16",
    )
    for legacy_name in legacy_names:
        sys.modules[legacy_name] = current_module
155,451 | import sys
from typing import Any
import lightning.pytorch as pl
from lightning.fabric.strategies import _StrategyRegistry
from lightning.pytorch.accelerators.xla import XLAAccelerator
from lightning.pytorch.plugins.precision import XLAPrecision
from lightning.pytorch.strategies.single_xla import SingleDeviceXLAStrategy
from lightning.pytorch.utilities.rank_zero import rank_zero_deprecation
class SingleTPUStrategy(SingleDeviceXLAStrategy):
    """Legacy class.

    Use :class:`~lightning.pytorch.strategies.single_xla.SingleDeviceXLAStrategy` instead.

    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Warn once, then behave exactly like the renamed strategy.
        rank_zero_deprecation("The 'single_tpu' strategy is deprecated. Use 'single_xla' instead.")
        super().__init__(*args, **kwargs)

    # NOTE(review): first parameter is `cls` -- presumably decorated with `@classmethod`
    # upstream; the decorator appears to have been dropped during extraction. TODO confirm.
    def register_strategies(cls, strategy_registry: _StrategyRegistry) -> None:
        if "single_tpu" not in strategy_registry:
            strategy_registry.register("single_tpu", cls, description="Legacy class. Use `single_xla` instead.")
class TPUAccelerator(XLAAccelerator):
    """Legacy class.

    Use :class:`~lightning.pytorch.accelerators.xla.XLAAccelerator` instead.

    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Warn, then forward all arguments unchanged to the renamed accelerator.
        rank_zero_deprecation(
            "The `TPUAccelerator` class is deprecated. Use `lightning.pytorch.accelerators.XLAAccelerator` instead."
        )
        super().__init__(*args, **kwargs)
class TPUPrecisionPlugin(XLAPrecision):
    """Legacy class.

    Use :class:`~lightning.pytorch.plugins.precision.xla.XLAPrecision` instead.

    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        rank_zero_deprecation(
            "The `TPUPrecisionPlugin` class is deprecated. Use `lightning.pytorch.plugins.precision.XLAPrecision`"
            " instead."
        )
        # `*args`/`**kwargs` are accepted for signature compatibility but ignored:
        # the legacy plugin always meant full 32-bit precision.
        super().__init__(precision="32-true")
class TPUBf16PrecisionPlugin(XLAPrecision):
    """Legacy class.

    Use :class:`~lightning.pytorch.plugins.precision.xlabf16.XLAPrecision` instead.

    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        rank_zero_deprecation(
            "The `TPUBf16PrecisionPlugin` class is deprecated. Use"
            " `lightning.pytorch.plugins.precision.XLAPrecision` instead."
        )
        # `*args`/`**kwargs` are accepted for signature compatibility but ignored:
        # the legacy plugin always meant bfloat16 precision.
        super().__init__(precision="bf16-true")
class XLABf16PrecisionPlugin(XLAPrecision):
    """Legacy class.

    Use :class:`~lightning.pytorch.plugins.precision.xlabf16.XLAPrecision` instead.

    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        rank_zero_deprecation(
            "The `XLABf16PrecisionPlugin` class is deprecated. Use"
            " `lightning.pytorch.plugins.precision.XLAPrecision` instead."
        )
        # `*args`/`**kwargs` are accepted for signature compatibility but ignored:
        # the legacy plugin always meant bfloat16 precision.
        super().__init__(precision="bf16-true")
# Register the legacy strategy name with the global registry at import time.
SingleTPUStrategy.register_strategies(pl.strategies.StrategyRegistry)


def _patch_classes() -> None:
    """Expose the legacy TPU/XLA class names on the public ``lightning.pytorch`` namespaces."""
    setattr(pl.strategies, "SingleTPUStrategy", SingleTPUStrategy)
    setattr(pl.accelerators, "TPUAccelerator", TPUAccelerator)
    setattr(pl.plugins, "TPUPrecisionPlugin", TPUPrecisionPlugin)
    setattr(pl.plugins.precision, "TPUPrecisionPlugin", TPUPrecisionPlugin)
    setattr(pl.plugins, "TPUBf16PrecisionPlugin", TPUBf16PrecisionPlugin)
    setattr(pl.plugins.precision, "TPUBf16PrecisionPlugin", TPUBf16PrecisionPlugin)
    setattr(pl.plugins, "XLABf16PrecisionPlugin", XLABf16PrecisionPlugin)
    setattr(pl.plugins.precision, "XLABf16PrecisionPlugin", XLABf16PrecisionPlugin)
155,452 | import sys
from typing import Any
import lightning.pytorch as pl
def _patch_sys_modules() -> None:
    """Alias removed HPU module paths to this module so old imports keep working."""
    current_module = sys.modules[__name__]
    legacy_names = (
        "lightning.pytorch.accelerators.hpu",
        "lightning.pytorch.strategies.hpu_parallel",
        "lightning.pytorch.strategies.single_hpu",
        "lightning.pytorch.plugins.io.hpu_plugin",
        "lightning.pytorch.plugins.precision.hpu",
    )
    for legacy_name in legacy_names:
        sys.modules[legacy_name] = current_module
155,453 | import sys
from typing import Any
import lightning.pytorch as pl
class HPUAccelerator:
    """Stub raising at construction: the HPU accelerator now lives in ``lightning-habana``."""

    # Placeholder attributes of the original accelerator interface.
    auto_device_count = ...
    get_parallel_devices = ...
    is_available = ...
    parse_devices = ...
    setup_device = ...
    teardown = ...

    def __init__(self, *_: Any, **__: Any) -> None:
        message = (
            "The `HPUAccelerator` class has been moved to an external package."
            " Install the extension package as `pip install lightning-habana`"
            " and import with `from lightning_habana import HPUAccelerator`."
            " Please see: https://github.com/Lightning-AI/lightning-Habana for more details."
        )
        raise NotImplementedError(message)
class HPUParallelStrategy:
    """Stub raising at construction: the HPU parallel strategy now lives in ``lightning-habana``."""

    def __init__(self, *_: Any, **__: Any) -> None:
        message = (
            "The `HPUParallelStrategy` class has been moved to an external package."
            " Install the extension package as `pip install lightning-habana`"
            " and import with `from lightning_habana import HPUParallelStrategy`."
            " Please see: https://github.com/Lightning-AI/lightning-Habana for more details."
        )
        raise NotImplementedError(message)

    def setup(self, *_: Any, **__: Any) -> None:
        raise NotImplementedError

    def get_device_stats(self, *_: Any, **__: Any) -> dict:
        raise NotImplementedError
class SingleHPUStrategy:
    """Stub raising at construction: the single-HPU strategy now lives in ``lightning-habana``."""

    def __init__(self, *_: Any, **__: Any) -> None:
        message = (
            "The `SingleHPUStrategy` class has been moved to an external package."
            " Install the extension package as `pip install lightning-habana`"
            " and import with `from lightning_habana import SingleHPUStrategy`."
            " Please see: https://github.com/Lightning-AI/lightning-Habana for more details."
        )
        raise NotImplementedError(message)
class HPUCheckpointIO:
    """Stub raising at construction: the HPU checkpoint IO now lives in ``lightning-habana``."""

    def __init__(self, *_: Any, **__: Any) -> None:
        message = (
            "The `HPUCheckpointIO` class has been moved to an external package."
            " Install the extension package as `pip install lightning-habana`"
            " and import with `from lightning_habana import HPUCheckpointIO`."
            " Please see: https://github.com/Lightning-AI/lightning-Habana for more details."
        )
        raise NotImplementedError(message)
class HPUPrecisionPlugin:
    """Stub raising at construction: the HPU precision plugin now lives in ``lightning-habana``."""

    def __init__(self, *_: Any, **__: Any) -> None:
        message = (
            "The `HPUPrecisionPlugin` class has been moved to an external package."
            " Install the extension package as `pip install lightning-habana`"
            " and import with `from lightning_habana import HPUPrecisionPlugin`."
            " Please see: https://github.com/Lightning-AI/lightning-Habana for more details."
        )
        raise NotImplementedError(message)
def _patch_classes() -> None:
    """Expose the HPU stub classes on the public ``lightning.pytorch`` namespaces."""
    setattr(pl.accelerators, "HPUAccelerator", HPUAccelerator)
    setattr(pl.strategies, "HPUParallelStrategy", HPUParallelStrategy)
    setattr(pl.strategies, "SingleHPUStrategy", SingleHPUStrategy)
    # The IO and precision plugins are exported at both the `plugins` and submodule levels.
    setattr(pl.plugins, "HPUCheckpointIO", HPUCheckpointIO)
    setattr(pl.plugins.io, "HPUCheckpointIO", HPUCheckpointIO)
    setattr(pl.plugins, "HPUPrecisionPlugin", HPUPrecisionPlugin)
    setattr(pl.plugins.precision, "HPUPrecisionPlugin", HPUPrecisionPlugin)
155,454 | from typing import Any, Dict, List, Union
import torch
from lightning_utilities.core.imports import RequirementCache
from typing_extensions import override
from lightning.fabric.accelerators import _AcceleratorRegistry
from lightning.fabric.accelerators.cpu import _parse_cpu_cores
from lightning.fabric.utilities.types import _DEVICE
from lightning.pytorch.accelerators.accelerator import Accelerator
from lightning.pytorch.utilities.exceptions import MisconfigurationException
# Keys used in the stats dictionary returned by `get_cpu_stats`.
_CPU_VM_PERCENT = "cpu_vm_percent"
_CPU_PERCENT = "cpu_percent"
_CPU_SWAP_PERCENT = "cpu_swap_percent"

# Lazily evaluated availability check for the optional `psutil` dependency.
_PSUTIL_AVAILABLE = RequirementCache("psutil")
def get_cpu_stats() -> Dict[str, float]:
    """Collect current virtual-memory, CPU and swap utilization percentages via psutil.

    Raises:
        ModuleNotFoundError: If ``psutil`` is not installed.

    """
    if not _PSUTIL_AVAILABLE:
        raise ModuleNotFoundError(
            f"Fetching CPU device stats requires `psutil` to be installed. {str(_PSUTIL_AVAILABLE)}"
        )
    import psutil

    stats = {
        _CPU_VM_PERCENT: psutil.virtual_memory().percent,
        _CPU_PERCENT: psutil.cpu_percent(),
        _CPU_SWAP_PERCENT: psutil.swap_memory().percent,
    }
    return stats
155,455 | from typing import Any, Dict, List, Optional, Union
import torch
from typing_extensions import override
from lightning.fabric.accelerators import _AcceleratorRegistry
from lightning.fabric.accelerators.mps import MPSAccelerator as _MPSAccelerator
from lightning.fabric.utilities.device_parser import _parse_gpu_ids
from lightning.fabric.utilities.types import _DEVICE
from lightning.pytorch.accelerators.accelerator import Accelerator
from lightning.pytorch.accelerators.cpu import _PSUTIL_AVAILABLE
from lightning.pytorch.utilities.exceptions import MisconfigurationException
# Keys used in the stats dictionary returned by `get_device_stats`.
_VM_PERCENT = "M1_vm_percent"
_PERCENT = "M1_percent"
_SWAP_PERCENT = "M1_swap_percent"
# NOTE: `_PSUTIL_AVAILABLE` is imported from `lightning.pytorch.accelerators.cpu`
# above. The previous re-assignment here referenced `RequirementCache` without
# importing it (a NameError at import time), so it was removed.
def get_device_stats() -> Dict[str, float]:
    """Collect host memory/CPU/swap utilization percentages via psutil (proxy for MPS usage).

    Raises:
        ModuleNotFoundError: If ``psutil`` is not installed.

    """
    if not _PSUTIL_AVAILABLE:
        raise ModuleNotFoundError(
            f"Fetching MPS device stats requires `psutil` to be installed. {str(_PSUTIL_AVAILABLE)}"
        )
    import psutil

    stats = {
        _VM_PERCENT: psutil.virtual_memory().percent,
        _PERCENT: psutil.cpu_percent(),
        _SWAP_PERCENT: psutil.swap_memory().percent,
    }
    return stats
155,456 | import logging
import os
import shutil
import subprocess
from typing import Any, Dict, List, Optional, Union
import torch
from typing_extensions import override
import lightning.pytorch as pl
from lightning.fabric.accelerators import _AcceleratorRegistry
from lightning.fabric.accelerators.cuda import _check_cuda_matmul_precision, _clear_cuda_memory, num_cuda_devices
from lightning.fabric.utilities.device_parser import _parse_gpu_ids
from lightning.fabric.utilities.types import _DEVICE
from lightning.pytorch.accelerators.accelerator import Accelerator
from lightning.pytorch.utilities.exceptions import MisconfigurationException
def _get_gpu_id(device_id: int) -> str:
    """Get the unmasked real GPU ID for a (possibly masked) logical device index.

    Args:
        device_id: Index into the list of visible devices.

    Returns:
        The real GPU id as a string, stripped of surrounding whitespace.

    """
    visible = os.getenv("CUDA_VISIBLE_DEVICES")
    if visible is None:
        # All devices are visible when `CUDA_VISIBLE_DEVICES` is unset. Computed
        # lazily so the (potentially expensive) CUDA device-count query only runs
        # when it is actually needed.
        visible = ",".join(str(i) for i in range(num_cuda_devices()))
    return visible.split(",")[device_id].strip()
_DEVICE = Union[torch.device, str, int]  # NOTE(review): re-declares the `_DEVICE` alias imported above; presumably kept for self-containment
The code snippet above provides the dependencies needed to implement the `get_nvidia_gpu_stats` function. Write a Python function `def get_nvidia_gpu_stats(device: _DEVICE) -> Dict[str, float]` that solves the following problem:
Get GPU stats including memory, fan speed, and temperature from nvidia-smi. Args: device: GPU device for which to get stats. Returns: A dictionary mapping the metrics to their values. Raises: FileNotFoundError: If the nvidia-smi installation is not found.
Here is the function:
def get_nvidia_gpu_stats(device: _DEVICE) -> Dict[str, float]:  # pragma: no cover
    """Get GPU stats including memory, fan speed, and temperature from nvidia-smi.

    Args:
        device: GPU device for which to get stats

    Returns:
        A dictionary mapping the metrics to their values.

    Raises:
        FileNotFoundError:
            If nvidia-smi installation not found

    """
    nvidia_smi_path = shutil.which("nvidia-smi")
    if nvidia_smi_path is None:
        raise FileNotFoundError("nvidia-smi: command not found")

    # (query field, display unit) pairs; order must match the CSV columns returned.
    gpu_stat_metrics = [
        ("utilization.gpu", "%"),
        ("memory.used", "MB"),
        ("memory.free", "MB"),
        ("utilization.memory", "%"),
        ("fan.speed", "%"),
        ("temperature.gpu", "°C"),
        ("temperature.memory", "°C"),
    ]
    gpu_stat_keys = [k for k, _ in gpu_stat_metrics]
    gpu_query = ",".join(gpu_stat_keys)

    # Translate the torch device index to the real (unmasked) GPU id for nvidia-smi.
    index = torch._utils._get_device_index(device)
    gpu_id = _get_gpu_id(index)
    result = subprocess.run(
        [nvidia_smi_path, f"--query-gpu={gpu_query}", "--format=csv,nounits,noheader", f"--id={gpu_id}"],
        encoding="utf-8",
        capture_output=True,
        check=True,
    )

    def _to_float(x: str) -> float:
        # Falls back to 0.0 for non-numeric fields -- presumably values nvidia-smi
        # reports as "[N/A]" on unsupported hardware. TODO confirm.
        try:
            return float(x)
        except ValueError:
            return 0.0

    s = result.stdout.strip()
    stats = [_to_float(x) for x in s.split(", ")]
    return {f"{x} ({unit})": stat for (x, unit), stat in zip(gpu_stat_metrics, stats)}
155,457 | import inspect
from copy import deepcopy
from functools import partial, wraps
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
Iterator,
List,
Mapping,
Optional,
Tuple,
TypeVar,
Union,
overload,
)
import torch
from lightning_utilities import is_overridden
from lightning_utilities.core.apply_func import apply_to_collection
from torch import Tensor
from torch import nn as nn
from torch.nn.modules.module import _IncompatibleKeys
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from typing_extensions import override
from lightning.fabric.plugins import Precision
from lightning.fabric.strategies import Strategy
from lightning.fabric.utilities import move_data_to_device
from lightning.fabric.utilities.data import _set_sampler_epoch
from lightning.fabric.utilities.device_dtype_mixin import _DeviceDtypeModuleMixin
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0
from lightning.fabric.utilities.types import Optimizable
class _FabricOptimizer:
    def __init__(self, optimizer: Optimizer, strategy: Strategy, callbacks: Optional[List[Callable]] = None) -> None:
        """FabricOptimizer is a thin wrapper around the :class:`~torch.optim.Optimizer` that delegates the optimizer
        step calls to the strategy.

        The underlying wrapped optimizer object can be accessed via the property :attr:`optimizer`.

        Args:
            optimizer: The optimizer to wrap
            strategy: Reference to the strategy for handling the optimizer step

        """
        self._optimizer = optimizer
        self._strategy = strategy
        self._callbacks = callbacks or []
        # imitate the class of the wrapped object to make isinstance checks work
        self.__class__ = type("Fabric" + optimizer.__class__.__name__, (self.__class__, optimizer.__class__), {})

    @property
    def optimizer(self) -> Optimizer:
        # Restored `@property`: the methods below read `self.optimizer` as an
        # attribute, which would otherwise yield a bound method object.
        return self._optimizer

    def state_dict(self) -> Dict[str, Tensor]:
        """Fetch the optimizer state via the strategy (may consolidate sharded state)."""
        return self._strategy.get_optimizer_state(self.optimizer)

    def load_state_dict(self, state_dict: Dict[str, Tensor]) -> None:
        """Restore the wrapped optimizer's state."""
        self.optimizer.load_state_dict(state_dict)

    def step(self, closure: Optional[Callable] = None) -> Any:
        """Run the optimizer step through the strategy, then fire `on_after_optimizer_step` hooks."""
        kwargs = {"closure": closure} if closure is not None else {}
        if hasattr(self._strategy, "model") and isinstance(self._strategy.model, Optimizable):
            # only DeepSpeed defines this
            optimizer = self._strategy.model
        else:
            optimizer = self.optimizer
        output = self._strategy.optimizer_step(
            optimizer,
            **kwargs,
        )
        for callback in self._callbacks:
            hook = getattr(callback, "on_after_optimizer_step", None)
            if callable(hook):
                hook(strategy=self._strategy, optimizer=optimizer)
        return output

    def __getattr__(self, item: Any) -> Any:
        # Fall through to the wrapped optimizer for any attribute not defined here.
        return getattr(self._optimizer, item)
class _FabricModule(_DeviceDtypeModuleMixin):
    def __init__(
        self, forward_module: nn.Module, strategy: Strategy, original_module: Optional[nn.Module] = None
    ) -> None:
        """The FabricModule is a thin wrapper around the :class:`torch.nn.Module` and handles precision / autocast
        automatically for the forward pass.

        The underlying wrapped module can be accessed via the property :attr:`module`.

        Args:
            forward_module: The module to wrap the ``forward`` method on.
            strategy: Reference to the strategy for handling precision etc.
            original_module: The original, unmodified module as passed into the
                :meth:`lightning.fabric.fabric.Fabric.setup` method. This is needed when attribute lookup
                on this wrapper should pass through to the original module.

        """
        super().__init__()
        self._forward_module = forward_module
        self._original_module = original_module or forward_module
        self._strategy = strategy
        # Must be set last: once this flag exists, __setattr__ starts redirecting writes.
        self._fabric_module_initialized = True

    # NOTE(review): presumably decorated with `@property` upstream -- the decorator
    # appears to have been stripped during extraction. TODO confirm.
    def module(self) -> nn.Module:
        return self._original_module or self._forward_module

    def forward(self, *args: Any, **kwargs: Any) -> Any:
        """Casts all inputs to the right precision and handles autocast for operations in the module forward method."""
        precision = self._strategy.precision
        args, kwargs = precision.convert_input((args, kwargs))

        with precision.forward_context():
            output = self._forward_module(*args, **kwargs)

        output = precision.convert_output(output)

        # Attach a guard hook to every grad-requiring tensor in the output.
        apply_to_collection(output, dtype=Tensor, function=self._register_backward_hook)
        return output

    # NOTE(review): the next two signatures look like `@overload` stubs whose decorators
    # were lost during extraction (`T_destination` is a TypeVar defined upstream). TODO confirm.
    def state_dict(self, *, destination: T_destination, prefix: str = ..., keep_vars: bool = ...) -> T_destination: ...

    def state_dict(self, *, prefix: str = ..., keep_vars: bool = ...) -> Dict[str, Any]: ...

    def state_dict(
        self, destination: Optional[T_destination] = None, prefix: str = "", keep_vars: bool = False
    ) -> Optional[Dict[str, Any]]:
        # Delegate to the original (user-visible) module so keys match what the user defined.
        return self._original_module.state_dict(
            destination=destination,  # type: ignore[type-var]
            prefix=prefix,
            keep_vars=keep_vars,
        )

    def load_state_dict(  # type: ignore[override]
        self, state_dict: Mapping[str, Any], strict: bool = True, **kwargs: Any
    ) -> _IncompatibleKeys:
        # Same delegation as `state_dict` above.
        return self._original_module.load_state_dict(state_dict=state_dict, strict=strict, **kwargs)

    def _redirection_through_forward(self, method_name: str) -> Callable:
        """Return a callable that routes `method_name` through this wrapper's `forward`."""
        assert method_name != "forward"
        original_forward = self._original_module.forward

        def wrapped_forward(*args: Any, **kwargs: Any) -> Any:
            # Unpatch ourselves immediately before calling the method `method_name`
            # because itself may want to call the real `forward`
            self._original_module.forward = original_forward
            # Call the actual method e.g. `.training_step(...)`
            method = getattr(self._original_module, method_name)
            return method(*args, **kwargs)

        # We make the caller "unknowingly" send their arguments through the forward_module's `__call__`.
        # We expect that the `forward_module` will eventually call `original_module.forward`, which we
        # have patched to redirect back to `original_module.method_name()`.
        def call_forward_module(*args: Any, **kwargs: Any) -> Any:
            # Patch the original_module's forward, so we can redirect the arguments back to the real method
            self._original_module.forward = wrapped_forward
            return self.forward(*args, **kwargs)

        return call_forward_module

    def _wrap_method_with_module_call_tracker(self, method: Callable, name: str) -> Callable:
        """Tracks whether any submodule in ``self._original_module`` was called during the execution of ``method`` by
        registering forward hooks on all submodules."""
        module_called = False

        def hook(*_: Any, **__: Any) -> None:
            nonlocal module_called
            module_called = True

        def _wrapped_method(*args: Any, **kwargs: Any) -> Any:
            handles = []
            for module in self._original_module.modules():
                handles.append(module.register_forward_hook(hook))

            output = method(*args, **kwargs)

            if module_called:
                raise RuntimeError(
                    f"You are calling the method `{type(self._original_module).__name__}.{name}()` from outside the"
                    " model. This will bypass the wrapper from the strategy and result in incorrect behavior in"
                    " `.backward()`. You should pass your inputs through `forward()`.",
                )
            # NOTE: hooks are only removed on the success path; an exception above
            # leaves them registered.
            for handle in handles:
                handle.remove()
            return output

        return _wrapped_method

    def _register_backward_hook(self, tensor: Tensor) -> Tensor:
        """Attach a hook that errors on direct `.backward()` if the strategy must manage it."""
        if not tensor.requires_grad:
            return tensor

        strategy_requires = is_overridden("backward", self._strategy, parent=Strategy)
        precision_requires = any(
            is_overridden(method, self._strategy.precision, parent=Precision)
            for method in ("pre_backward", "backward", "post_backward")
        )
        hook = partial(_backward_hook, (strategy_requires or precision_requires))
        tensor.register_hook(hook)
        return tensor

    def __getattr__(self, item: Any) -> Any:
        # NOTE(review): `_LIGHTNING_MODULE_STEP_METHODS` is defined upstream, outside this chunk.
        if item in _LIGHTNING_MODULE_STEP_METHODS and self._forward_module != self._original_module:
            # Special support for `LightningModule`, to prevent bypassing DDP's forward
            return self._redirection_through_forward(item)

        try:
            # __getattr__ gets called as a last resort if the attribute does not exist
            # call nn.Module's implementation first
            return super().__getattr__(item)
        except AttributeError:
            # If the attribute is not available on the _FabricModule wrapper, redirect to the wrapped nn.Module
            original_module = super().__getattr__("_original_module")
            attr = getattr(original_module, item)

            if inspect.ismethod(attr) and self._forward_module != self._original_module:
                attr = self._wrap_method_with_module_call_tracker(attr, item)
            return attr

    def __setattr__(self, name: str, value: Any) -> None:
        if not getattr(self, "_fabric_module_initialized", False):
            super().__setattr__(name, value)
            return

        # Get the _original_module attribute
        original_module = self._original_module
        original_has_attr = hasattr(original_module, name)
        # Can't use super().__getattr__ because nn.Module only checks _parameters, _buffers, and _modules
        # Can't use self.__getattr__ because it would pass through to the original module
        fabric_has_attr = name in self.__dict__

        if not (original_has_attr or fabric_has_attr):
            setattr(original_module, name, value)
            return

        # The original module can also inherit from _DeviceDtypeModuleMixin,
        # in this case, both the Fabric module and original module have attributes like _dtype
        # set attribute on both
        if original_has_attr:
            setattr(original_module, name, value)
        if fabric_has_attr:
            super().__setattr__(name, value)
class _FabricDataLoader:
    def __init__(self, dataloader: DataLoader, device: Optional[torch.device] = None) -> None:
        """The FabricDataLoader is a wrapper for the :class:`~torch.utils.data.DataLoader`. It moves the data to the
        device automatically if the device is specified.

        Args:
            dataloader: The dataloader to wrap
            device: The device to which the data should be moved. By default the device is `None` and no data
                transfers will be made (identical behavior as :class:`~torch.utils.data.DataLoader`).

        """
        # Mirror the wrapped dataloader's attributes so the wrapper quacks like a DataLoader.
        self.__dict__.update(dataloader.__dict__)
        self._dataloader = dataloader
        self._device = device
        # Counts completed `__iter__` calls; doubles as the epoch passed to the sampler.
        self._num_iter_calls = 0

    # NOTE(review): presumably decorated with `@property` upstream -- the decorator
    # appears to have been stripped during extraction. TODO confirm.
    def device(self) -> Optional[torch.device]:
        return self._device

    def __len__(self) -> int:
        return len(self._dataloader)

    def __iter__(self) -> Union[Iterator[Any], Generator[Any, None, None]]:
        # Without setting the epoch, the distributed sampler would return the same indices every time, even when
        # shuffling is enabled. In PyTorch, the user would normally have to call `.set_epoch()` on the sampler.
        # In Fabric, we take care of this boilerplate code.
        _set_sampler_epoch(self._dataloader, self._num_iter_calls)
        self._num_iter_calls += 1

        if self._device is None:
            yield from iter(self._dataloader)
        else:
            for item in self._dataloader:
                yield move_data_to_device(item, self._device)
def _unwrap_compiled(obj: Union[Any, "OptimizedModule"]) -> Tuple[Union[Any, nn.Module], Optional[Dict[str, Any]]]:
    """Removes the :class:`torch._dynamo.OptimizedModule` around the object if it is wrapped.

    Use this function before instance checks against e.g. :class:`_FabricModule`.

    """
    if not _TORCH_GREATER_EQUAL_2_0:
        # obj can't be an `OptimizedModule` anyway
        return obj, None

    from torch._dynamo import OptimizedModule

    if not isinstance(obj, OptimizedModule):
        return obj, None

    compile_kwargs = getattr(obj, "_compile_kwargs", None)
    if compile_kwargs is None:
        raise RuntimeError(
            "Failed to determine the arguments that were used to compile the module. Make sure to import"
            " lightning before `torch.compile` is used."
        )
    return obj._orig_mod, compile_kwargs
# NOTE(review): the next statements look like extraction residue stitched together from
# different parts of the original file -- `_capture_compile_kwargs` and `operator` are
# not defined/imported in this chunk, and the flag is re-assigned after being used.
if _TORCH_GREATER_EQUAL_2_0:
    torch.compile = _capture_compile_kwargs(torch.compile)
_TORCH_GREATER_EQUAL_2_0 = compare_version("torch", operator.ge, "2.0.0")
def _unwrap_objects(collection: Any) -> Any:
    """Recursively strip Fabric (and torch.compile) wrappers from all objects in a collection."""

    def _strip(
        obj: Union[_FabricModule, _FabricOptimizer, _FabricDataLoader],
    ) -> Union[nn.Module, Optimizer, DataLoader]:
        unwrapped = _unwrap_compiled(obj)[0]
        if isinstance(unwrapped, _FabricModule):
            # The forward module may itself be compiled; unwrap that too.
            return _unwrap_compiled(unwrapped._forward_module)[0]
        if isinstance(obj, _FabricOptimizer):
            return obj.optimizer
        if isinstance(obj, _FabricDataLoader):
            return obj._dataloader
        return obj

    wrapper_types = [_FabricModule, _FabricOptimizer, _FabricDataLoader]
    if _TORCH_GREATER_EQUAL_2_0:
        from torch._dynamo import OptimizedModule

        wrapper_types.append(OptimizedModule)

    return apply_to_collection(collection, dtype=tuple(wrapper_types), function=_strip)
155,458 | import inspect
from copy import deepcopy
from functools import partial, wraps
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
Iterator,
List,
Mapping,
Optional,
Tuple,
TypeVar,
Union,
overload,
)
import torch
from lightning_utilities import is_overridden
from lightning_utilities.core.apply_func import apply_to_collection
from torch import Tensor
from torch import nn as nn
from torch.nn.modules.module import _IncompatibleKeys
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from typing_extensions import override
from lightning.fabric.plugins import Precision
from lightning.fabric.strategies import Strategy
from lightning.fabric.utilities import move_data_to_device
from lightning.fabric.utilities.data import _set_sampler_epoch
from lightning.fabric.utilities.device_dtype_mixin import _DeviceDtypeModuleMixin
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0
from lightning.fabric.utilities.types import Optimizable
# NOTE(review): extraction residue (duplicated from another chunk) --
# `_capture_compile_kwargs` and `operator` are not defined/imported here.
if _TORCH_GREATER_EQUAL_2_0:
    torch.compile = _capture_compile_kwargs(torch.compile)
_TORCH_GREATER_EQUAL_2_0 = compare_version("torch", operator.ge, "2.0.0")
def _to_compiled(module: nn.Module, compile_kwargs: Dict[str, Any]) -> "OptimizedModule":
    """Re-apply ``torch.compile`` to ``module`` with the previously captured keyword arguments.

    Raises:
        RuntimeError: If the installed PyTorch version is older than 2.0.0.
    """
    if _TORCH_GREATER_EQUAL_2_0:
        return torch.compile(module, **compile_kwargs)  # type: ignore[return-value]
    raise RuntimeError("Converting to a compiled module is only supported in PyTorch >= 2.0.0")
155,459 | import inspect
from copy import deepcopy
from functools import partial, wraps
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
Iterator,
List,
Mapping,
Optional,
Tuple,
TypeVar,
Union,
overload,
)
import torch
from lightning_utilities import is_overridden
from lightning_utilities.core.apply_func import apply_to_collection
from torch import Tensor
from torch import nn as nn
from torch.nn.modules.module import _IncompatibleKeys
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from typing_extensions import override
from lightning.fabric.plugins import Precision
from lightning.fabric.strategies import Strategy
from lightning.fabric.utilities import move_data_to_device
from lightning.fabric.utilities.data import _set_sampler_epoch
from lightning.fabric.utilities.device_dtype_mixin import _DeviceDtypeModuleMixin
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0
from lightning.fabric.utilities.types import Optimizable
_in_fabric_backward: bool = False
def _backward_hook(requires_backward: bool, *_: Any) -> None:
if requires_backward and not _in_fabric_backward:
raise RuntimeError(
"The current strategy and precision selection requires you to call `fabric.backward(loss)`"
" instead of `loss.backward()`."
) | null |
155,460 | import inspect
from copy import deepcopy
from functools import partial, wraps
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
Iterator,
List,
Mapping,
Optional,
Tuple,
TypeVar,
Union,
overload,
)
import torch
from lightning_utilities import is_overridden
from lightning_utilities.core.apply_func import apply_to_collection
from torch import Tensor
from torch import nn as nn
from torch.nn.modules.module import _IncompatibleKeys
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from typing_extensions import override
from lightning.fabric.plugins import Precision
from lightning.fabric.strategies import Strategy
from lightning.fabric.utilities import move_data_to_device
from lightning.fabric.utilities.data import _set_sampler_epoch
from lightning.fabric.utilities.device_dtype_mixin import _DeviceDtypeModuleMixin
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0
from lightning.fabric.utilities.types import Optimizable
class _FabricOptimizer:
    def __init__(self, optimizer: Optimizer, strategy: Strategy, callbacks: Optional[List[Callable]] = None) -> None:
        """FabricOptimizer is a thin wrapper around the :class:`~torch.optim.Optimizer` that delegates the optimizer
        step calls to the strategy.

        The underlying wrapped optimizer object can be accessed via the property :attr:`optimizer`.

        Args:
            optimizer: The optimizer to wrap
            strategy: Reference to the strategy for handling the optimizer step
            callbacks: Objects that may implement an ``on_after_optimizer_step`` hook, invoked after each step.
        """
        self._optimizer = optimizer
        self._strategy = strategy
        self._callbacks = callbacks or []
        # imitate the class of the wrapped object to make isinstance checks work
        self.__class__ = type("Fabric" + optimizer.__class__.__name__, (self.__class__, optimizer.__class__), {})
    def optimizer(self) -> Optimizer:
        # NOTE(review): reads as a plain accessor for the wrapped optimizer -- upstream this is
        # presumably decorated with @property (decorator appears stripped in this extract); confirm.
        return self._optimizer
    def state_dict(self) -> Dict[str, Tensor]:
        """Return the optimizer state as produced by the strategy's ``get_optimizer_state``."""
        return self._strategy.get_optimizer_state(self.optimizer)
    def load_state_dict(self, state_dict: Dict[str, Tensor]) -> None:
        """Load ``state_dict`` directly into the wrapped optimizer (the strategy is not involved)."""
        self.optimizer.load_state_dict(state_dict)
    def step(self, closure: Optional[Callable] = None) -> Any:
        """Run the optimizer step through the strategy, then fire ``on_after_optimizer_step`` callbacks."""
        kwargs = {"closure": closure} if closure is not None else {}
        if hasattr(self._strategy, "model") and isinstance(self._strategy.model, Optimizable):
            # only DeepSpeed defines this
            optimizer = self._strategy.model
        else:
            optimizer = self.optimizer
        output = self._strategy.optimizer_step(
            optimizer,
            **kwargs,
        )
        for callback in self._callbacks:
            # Callbacks are duck-typed: only call the hook if the callback defines it.
            hook = getattr(callback, "on_after_optimizer_step", None)
            if callable(hook):
                hook(strategy=self._strategy, optimizer=optimizer)
        return output
    def __getattr__(self, item: Any) -> Any:
        # Any attribute not found on the wrapper falls through to the wrapped optimizer.
        return getattr(self._optimizer, item)
class _FabricModule(_DeviceDtypeModuleMixin):
    def __init__(
        self, forward_module: nn.Module, strategy: Strategy, original_module: Optional[nn.Module] = None
    ) -> None:
        """The FabricModule is a thin wrapper around the :class:`torch.nn.Module` and handles precision / autocast
        automatically for the forward pass.

        The underlying wrapped module can be accessed via the property :attr:`module`.

        Args:
            forward_module: The module to wrap the ``forward`` method on.
            strategy: Reference to the strategy for handling precision etc.
            original_module: The original, unmodified module as passed into the
                :meth:`lightning.fabric.fabric.Fabric.setup` method. This is needed when attribute lookup
                on this wrapper should pass through to the original module.
        """
        super().__init__()
        self._forward_module = forward_module
        self._original_module = original_module or forward_module
        self._strategy = strategy
        # Sentinel checked by __setattr__; attributes set before this point land on the wrapper itself.
        self._fabric_module_initialized = True
    def module(self) -> nn.Module:
        # NOTE(review): reads as a plain accessor -- upstream this is presumably decorated with
        # @property (decorator appears stripped in this extract); confirm.
        return self._original_module or self._forward_module
    def forward(self, *args: Any, **kwargs: Any) -> Any:
        """Casts all inputs to the right precision and handles autocast for operations in the module forward method."""
        precision = self._strategy.precision
        args, kwargs = precision.convert_input((args, kwargs))
        with precision.forward_context():
            output = self._forward_module(*args, **kwargs)
        output = precision.convert_output(output)
        # Register a hook on every output tensor so a later direct `.backward()` can be intercepted.
        apply_to_collection(output, dtype=Tensor, function=self._register_backward_hook)
        return output
    # NOTE(review): the next two one-line defs read as typing overload stubs for the real
    # state_dict below -- the @overload decorators appear stripped in this extract; confirm.
    def state_dict(self, *, destination: T_destination, prefix: str = ..., keep_vars: bool = ...) -> T_destination: ...
    def state_dict(self, *, prefix: str = ..., keep_vars: bool = ...) -> Dict[str, Any]: ...
    def state_dict(
        self, destination: Optional[T_destination] = None, prefix: str = "", keep_vars: bool = False
    ) -> Optional[Dict[str, Any]]:
        """Delegate state-dict extraction to the original (unwrapped) module."""
        return self._original_module.state_dict(
            destination=destination,  # type: ignore[type-var]
            prefix=prefix,
            keep_vars=keep_vars,
        )
    def load_state_dict(  # type: ignore[override]
        self, state_dict: Mapping[str, Any], strict: bool = True, **kwargs: Any
    ) -> _IncompatibleKeys:
        """Delegate state-dict loading to the original (unwrapped) module."""
        return self._original_module.load_state_dict(state_dict=state_dict, strict=strict, **kwargs)
    def _redirection_through_forward(self, method_name: str) -> Callable:
        """Build a callable that routes a step method (e.g. ``training_step``) through ``forward``."""
        assert method_name != "forward"
        original_forward = self._original_module.forward
        def wrapped_forward(*args: Any, **kwargs: Any) -> Any:
            # Unpatch ourselves immediately before calling the method `method_name`
            # because itself may want to call the real `forward`
            self._original_module.forward = original_forward
            # Call the actual method e.g. `.training_step(...)`
            method = getattr(self._original_module, method_name)
            return method(*args, **kwargs)
        # We make the caller "unknowingly" send their arguments through the forward_module's `__call__`.
        # We expect that the `forward_module` will eventually call `original_module.forward`, which we
        # have patched to redirect back to `original_module.method_name()`.
        def call_forward_module(*args: Any, **kwargs: Any) -> Any:
            # Patch the original_module's forward, so we can redirect the arguments back to the real method
            self._original_module.forward = wrapped_forward
            return self.forward(*args, **kwargs)
        return call_forward_module
    def _wrap_method_with_module_call_tracker(self, method: Callable, name: str) -> Callable:
        """Tracks whether any submodule in ``self._original_module`` was called during the execution of ``method`` by
        registering forward hooks on all submodules."""
        module_called = False
        def hook(*_: Any, **__: Any) -> None:
            nonlocal module_called
            module_called = True
        def _wrapped_method(*args: Any, **kwargs: Any) -> Any:
            handles = []
            for module in self._original_module.modules():
                handles.append(module.register_forward_hook(hook))
            output = method(*args, **kwargs)
            if module_called:
                raise RuntimeError(
                    f"You are calling the method `{type(self._original_module).__name__}.{name}()` from outside the"
                    " model. This will bypass the wrapper from the strategy and result in incorrect behavior in"
                    " `.backward()`. You should pass your inputs through `forward()`.",
                )
            for handle in handles:
                handle.remove()
            return output
        return _wrapped_method
    def _register_backward_hook(self, tensor: Tensor) -> Tensor:
        """Attach the fabric-backward guard hook to ``tensor`` if the strategy/precision customizes backward."""
        if not tensor.requires_grad:
            return tensor
        strategy_requires = is_overridden("backward", self._strategy, parent=Strategy)
        precision_requires = any(
            is_overridden(method, self._strategy.precision, parent=Precision)
            for method in ("pre_backward", "backward", "post_backward")
        )
        hook = partial(_backward_hook, (strategy_requires or precision_requires))
        tensor.register_hook(hook)
        return tensor
    def __getattr__(self, item: Any) -> Any:
        if item in _LIGHTNING_MODULE_STEP_METHODS and self._forward_module != self._original_module:
            # Special support for `LightningModule`, to prevent bypassing DDP's forward
            return self._redirection_through_forward(item)
        try:
            # __getattr__ gets called as a last resort if the attribute does not exist
            # call nn.Module's implementation first
            return super().__getattr__(item)
        except AttributeError:
            # If the attribute is not available on the _FabricModule wrapper, redirect to the wrapped nn.Module
            original_module = super().__getattr__("_original_module")
            attr = getattr(original_module, item)
            if inspect.ismethod(attr) and self._forward_module != self._original_module:
                attr = self._wrap_method_with_module_call_tracker(attr, item)
            return attr
    def __setattr__(self, name: str, value: Any) -> None:
        if not getattr(self, "_fabric_module_initialized", False):
            # During __init__, write attributes on the wrapper itself.
            super().__setattr__(name, value)
            return
        # Get the _original_module attribute
        original_module = self._original_module
        original_has_attr = hasattr(original_module, name)
        # Can't use super().__getattr__ because nn.Module only checks _parameters, _buffers, and _modules
        # Can't use self.__getattr__ because it would pass through to the original module
        fabric_has_attr = name in self.__dict__
        if not (original_has_attr or fabric_has_attr):
            # New attribute: place it on the original module so user code sees it there.
            setattr(original_module, name, value)
            return
        # The original module can also inherit from _DeviceDtypeModuleMixin,
        # in this case, both the Fabric module and original module have attributes like _dtype
        # set attribute on both
        if original_has_attr:
            setattr(original_module, name, value)
        if fabric_has_attr:
            super().__setattr__(name, value)
class _FabricDataLoader:
    def __init__(self, dataloader: DataLoader, device: Optional[torch.device] = None) -> None:
        """The FabricDataLoader is a wrapper for the :class:`~torch.utils.data.DataLoader`. It moves the data to the
        device automatically if the device is specified.

        Args:
            dataloader: The dataloader to wrap
            device: The device to which the data should be moved. By default the device is `None` and no data
                transfers will be made (identical behavior as :class:`~torch.utils.data.DataLoader`).
        """
        # Copy the wrapped loader's attributes onto the wrapper so attribute access matches the original.
        self.__dict__.update(dataloader.__dict__)
        self._dataloader = dataloader
        self._device = device
        # Counts __iter__ calls; used as the epoch number for the distributed sampler below.
        self._num_iter_calls = 0
    def device(self) -> Optional[torch.device]:
        # NOTE(review): reads as a plain accessor -- upstream this is presumably decorated with
        # @property (decorator appears stripped in this extract); confirm.
        return self._device
    def __len__(self) -> int:
        return len(self._dataloader)
    def __iter__(self) -> Union[Iterator[Any], Generator[Any, None, None]]:
        # Without setting the epoch, the distributed sampler would return the same indices every time, even when
        # shuffling is enabled. In PyTorch, the user would normally have to call `.set_epoch()` on the sampler.
        # In Fabric, we take care of this boilerplate code.
        _set_sampler_epoch(self._dataloader, self._num_iter_calls)
        self._num_iter_calls += 1
        if self._device is None:
            yield from iter(self._dataloader)
        else:
            # Move each batch to the configured device as it is produced.
            for item in self._dataloader:
                yield move_data_to_device(item, self._device)
def _unwrap_compiled(obj: Union[Any, "OptimizedModule"]) -> Tuple[Union[Any, nn.Module], Optional[Dict[str, Any]]]:
    """Strip a :class:`torch._dynamo.OptimizedModule` wrapper, returning the inner object and its compile kwargs.

    Returns ``(obj, None)`` unchanged when ``obj`` is not a compiled module (or PyTorch < 2.0 is installed).
    Use this function before instance checks against e.g. :class:`_FabricModule`.

    Raises:
        RuntimeError: If the compile kwargs were never captured on the wrapper.
    """
    if not _TORCH_GREATER_EQUAL_2_0:
        # obj can't be an `OptimizedModule` anyway
        return obj, None
    from torch._dynamo import OptimizedModule

    if not isinstance(obj, OptimizedModule):
        return obj, None
    compile_kwargs = getattr(obj, "_compile_kwargs", None)
    if compile_kwargs is None:
        raise RuntimeError(
            "Failed to determine the arguments that were used to compile the module. Make sure to import"
            " lightning before `torch.compile` is used."
        )
    return obj._orig_mod, compile_kwargs
The provided code snippet includes necessary dependencies for implementing the `is_wrapped` function. Write a Python function `def is_wrapped(obj: object) -> bool` to solve the following problem:
Checks if an object was set up by Fabric. A :class:`~torch.nn.Module` may be wrapped by a :class:`_FabricModule`, a :class:`~torch.optim.Optimizer` may be wrapped by a :class:`_FabricOptimizer`, or a :class:`~torch.utils.data.DataLoader` may be wrapped by :class:`_FabricDataLoader`. Args: obj: The object to test.
Here is the function:
def is_wrapped(obj: object) -> bool:
    """Checks if an object was set up by Fabric.

    A :class:`~torch.nn.Module` may be wrapped by a :class:`_FabricModule`, a :class:`~torch.optim.Optimizer`
    may be wrapped by a :class:`_FabricOptimizer`, or a :class:`~torch.utils.data.DataLoader` may be wrapped by
    :class:`_FabricDataLoader`.

    Args:
        obj: The object to test.
    """
    # Strip any `torch.compile` wrapper first, then test against the known Fabric wrapper types.
    inner, _ = _unwrap_compiled(obj)
    fabric_wrappers = (_FabricModule, _FabricOptimizer, _FabricDataLoader)
    return isinstance(inner, fabric_wrappers)
155,461 | import inspect
from copy import deepcopy
from functools import partial, wraps
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
Iterator,
List,
Mapping,
Optional,
Tuple,
TypeVar,
Union,
overload,
)
import torch
from lightning_utilities import is_overridden
from lightning_utilities.core.apply_func import apply_to_collection
from torch import Tensor
from torch import nn as nn
from torch.nn.modules.module import _IncompatibleKeys
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from typing_extensions import override
from lightning.fabric.plugins import Precision
from lightning.fabric.strategies import Strategy
from lightning.fabric.utilities import move_data_to_device
from lightning.fabric.utilities.data import _set_sampler_epoch
from lightning.fabric.utilities.device_dtype_mixin import _DeviceDtypeModuleMixin
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0
from lightning.fabric.utilities.types import Optimizable
The provided code snippet includes necessary dependencies for implementing the `_capture_compile_kwargs` function. Write a Python function `def _capture_compile_kwargs(compile_fn: Callable) -> Callable` to solve the following problem:
Wraps the ``torch.compile`` function and captures the compile arguments. We extract the compile arguments so that we can reapply ``torch.compile`` in ``Fabric.setup()`` with the same arguments as the user passed to the original call. The arguments get stored in a dictionary ``_compile_kwargs`` on the returned compiled module.
Here is the function:
def _capture_compile_kwargs(compile_fn: Callable) -> Callable:
"""Wraps the ``torch.compile`` function and captures the compile arguments.
We extract the compile arguments so that we can reapply ``torch.compile`` in ``Fabric.setup()`` with the
same arguments as the user passed to the original call. The arguments get stored in a dictionary
``_compile_kwargs`` on the returned compiled module.
"""
# Limitation: Currently, the global compile config does not get captured on a per-model basis.
# PyTorch will resolve this in the future: https://github.com/pytorch/pytorch/issues/116575
@wraps(compile_fn)
def _capture(*args: Any, **kwargs: Any) -> Any:
if not args or not isinstance(args[0], nn.Module):
# either torch.compile is being applied as a decorator or we're compiling something else
return compile_fn(*args, **kwargs)
model = args[0]
compiled_model = compile_fn(model, **kwargs)
compiled_model._compile_kwargs = deepcopy(kwargs)
return compiled_model
return _capture | Wraps the ``torch.compile`` function and captures the compile arguments. We extract the compile arguments so that we can reapply ``torch.compile`` in ``Fabric.setup()`` with the same arguments as the user passed to the original call. The arguments get stored in a dictionary ``_compile_kwargs`` on the returned compiled module. |
155,462 | import logging
import os
import re
import subprocess
import sys
from argparse import Namespace
from typing import Any, List, Optional
import torch
from lightning_utilities.core.imports import RequirementCache
from typing_extensions import get_args
from lightning.fabric.accelerators import CPUAccelerator, CUDAAccelerator, MPSAccelerator
from lightning.fabric.plugins.precision.precision import _PRECISION_INPUT_STR, _PRECISION_INPUT_STR_ALIAS
from lightning.fabric.strategies import STRATEGY_REGISTRY
from lightning.fabric.utilities.consolidate_checkpoint import _process_cli_args
from lightning.fabric.utilities.device_parser import _parse_gpu_ids
from lightning.fabric.utilities.distributed import _suggested_max_num_threads
from lightning.fabric.utilities.load import _load_distributed_checkpoint
def _set_env_variables(args: Namespace) -> None:
    """Set the environment variables for the new processes.

    The Fabric connector will parse the arguments set here.
    """
    # Marker so the connector knows these values were set by the CLI.
    os.environ["LT_CLI_USED"] = "1"
    # Optional settings are only exported when the user provided them.
    if args.accelerator is not None:
        os.environ["LT_ACCELERATOR"] = str(args.accelerator)
    if args.strategy is not None:
        os.environ["LT_STRATEGY"] = str(args.strategy)
    os.environ["LT_DEVICES"] = str(args.devices)
    os.environ["LT_NUM_NODES"] = str(args.num_nodes)
    if args.precision is not None:
        os.environ["LT_PRECISION"] = str(args.precision)
STRATEGY_REGISTRY = _StrategyRegistry()
The provided code snippet includes necessary dependencies for implementing the `_get_supported_strategies` function. Write a Python function `def _get_supported_strategies() -> List[str]` to solve the following problem:
Returns strategy choices from the registry, with the ones removed that are incompatible to be launched from the CLI or ones that require further configuration by the user.
Here is the function:
def _get_supported_strategies() -> List[str]:
    """Returns strategy choices from the registry, with the ones removed that are incompatible to be launched from the
    CLI or ones that require further configuration by the user."""
    # Strategies matching any of these tokens either cannot be launched from the CLI
    # or need extra user configuration first.
    excluded = re.compile(r".*(spawn|fork|notebook|xla|tpu|offload).*")
    return [name for name in STRATEGY_REGISTRY.available_strategies() if excluded.match(name) is None]
155,463 | import logging
import os
import re
import subprocess
import sys
from argparse import Namespace
from typing import Any, List, Optional
import torch
from lightning_utilities.core.imports import RequirementCache
from typing_extensions import get_args
from lightning.fabric.accelerators import CPUAccelerator, CUDAAccelerator, MPSAccelerator
from lightning.fabric.plugins.precision.precision import _PRECISION_INPUT_STR, _PRECISION_INPUT_STR_ALIAS
from lightning.fabric.strategies import STRATEGY_REGISTRY
from lightning.fabric.utilities.consolidate_checkpoint import _process_cli_args
from lightning.fabric.utilities.device_parser import _parse_gpu_ids
from lightning.fabric.utilities.distributed import _suggested_max_num_threads
from lightning.fabric.utilities.load import _load_distributed_checkpoint
_LIGHTNING_SDK_AVAILABLE = RequirementCache("lightning_sdk")
if _CLICK_AVAILABLE:
import click
def _main() -> None:
    """CLI entry point; the body is intentionally empty.

    NOTE(review): subcommand wiring (click decorators) appears stripped in this extract -- confirm upstream.
    """
    pass
"run",
context_settings={
"ignore_unknown_options": True,
},
)
"script",
type=click.Path(exists=True),
)
"--accelerator",
type=click.Choice(_SUPPORTED_ACCELERATORS),
default=None,
help="The hardware accelerator to run on.",
)
"--strategy",
type=click.Choice(_get_supported_strategies()),
default=None,
help="Strategy for how to run across multiple devices.",
)
"--devices",
type=str,
default="1",
help=(
"Number of devices to run on (``int``), which devices to run on (``list`` or ``str``), or ``'auto'``."
" The value applies per node."
),
)
"--num-nodes",
"--num_nodes",
type=int,
default=1,
help="Number of machines (nodes) for distributed execution.",
)
"--node-rank",
"--node_rank",
type=int,
default=0,
help=(
"The index of the machine (node) this command gets started on. Must be a number in the range"
" 0, ..., num_nodes - 1."
),
)
"--main-address",
"--main_address",
type=str,
default="127.0.0.1",
help="The hostname or IP address of the main machine (usually the one with node_rank = 0).",
)
"--main-port",
"--main_port",
type=int,
default=29400,
help="The main port to connect to the main machine.",
)
"--precision",
type=click.Choice(get_args(_PRECISION_INPUT_STR) + get_args(_PRECISION_INPUT_STR_ALIAS)),
default=None,
help=(
"Double precision (``64-true`` or ``64``), full precision (``32-true`` or ``32``), "
"half precision (``16-mixed`` or ``16``) or bfloat16 precision (``bf16-mixed`` or ``bf16``)"
),
)
)
"checkpoint_folder",
type=click.Path(exists=True),
)
"--output_file",
type=click.Path(exists=True),
default=None,
help=(
"Path to the file where the converted checkpoint should be saved. The file should not already exist."
" If no path is provided, the file will be saved next to the input checkpoint folder with the same name"
" and a '.consolidated' suffix."
),
)
def _set_env_variables(args: Namespace) -> None:
    """Set the environment variables for the new processes.

    The Fabric connector will parse the arguments set here.
    """
    # Marker so the connector knows these values were set by the CLI.
    os.environ["LT_CLI_USED"] = "1"
    # Optional settings are only exported when the user provided them.
    if args.accelerator is not None:
        os.environ["LT_ACCELERATOR"] = str(args.accelerator)
    if args.strategy is not None:
        os.environ["LT_STRATEGY"] = str(args.strategy)
    os.environ["LT_DEVICES"] = str(args.devices)
    os.environ["LT_NUM_NODES"] = str(args.num_nodes)
    if args.precision is not None:
        os.environ["LT_PRECISION"] = str(args.precision)
The provided code snippet includes necessary dependencies for implementing the `_legacy_main` function. Write a Python function `def _legacy_main() -> None` to solve the following problem:
Legacy CLI handler for fabric. Raises deprecation warning and runs through fabric cli if necessary, else runs the entrypoint directly
Here is the function:
def _legacy_main() -> None:
    """Legacy CLI handler for fabric.

    Raises deprecation warning and runs through fabric cli if necessary, else runs the entrypoint directly
    """
    print(
        "`lightning run model` is deprecated and will be removed in future versions."
        " Please call `fabric run` instead."
    )
    args = sys.argv[1:]
    # Bug fix: guard the length before indexing -- a bare `lightning run` (one argument)
    # previously raised IndexError on `args[1]`.
    if len(args) >= 2 and args[0] == "run" and args[1] == "model":
        _main()
        return
    if _LIGHTNING_SDK_AVAILABLE:
        # Best-effort delegation to the SDK entry point for all other invocations.
        subprocess.run([sys.executable, "-m", "lightning_sdk.cli.entrypoint"] + args)
        return
155,464 | import logging
import os
import re
import subprocess
import sys
from argparse import Namespace
from typing import Any, List, Optional
import torch
from lightning_utilities.core.imports import RequirementCache
from typing_extensions import get_args
from lightning.fabric.accelerators import CPUAccelerator, CUDAAccelerator, MPSAccelerator
from lightning.fabric.plugins.precision.precision import _PRECISION_INPUT_STR, _PRECISION_INPUT_STR_ALIAS
from lightning.fabric.strategies import STRATEGY_REGISTRY
from lightning.fabric.utilities.consolidate_checkpoint import _process_cli_args
from lightning.fabric.utilities.device_parser import _parse_gpu_ids
from lightning.fabric.utilities.distributed import _suggested_max_num_threads
from lightning.fabric.utilities.load import _load_distributed_checkpoint
def _set_env_variables(args: Namespace) -> None:
    """Set the environment variables for the new processes.

    The Fabric connector will parse the arguments set here.
    """
    # Marker so the connector knows these values were set by the CLI.
    os.environ["LT_CLI_USED"] = "1"
    # Optional settings are only exported when the user provided them.
    if args.accelerator is not None:
        os.environ["LT_ACCELERATOR"] = str(args.accelerator)
    if args.strategy is not None:
        os.environ["LT_STRATEGY"] = str(args.strategy)
    os.environ["LT_DEVICES"] = str(args.devices)
    os.environ["LT_NUM_NODES"] = str(args.num_nodes)
    if args.precision is not None:
        os.environ["LT_PRECISION"] = str(args.precision)
def main(args: Namespace, script_args: Optional[List[str]] = None) -> None:
_set_env_variables(args)
_torchrun_launch(args, script_args or [])
The provided code snippet includes necessary dependencies for implementing the `_run` function. Write a Python function `def _run(**kwargs: Any) -> None` to solve the following problem:
Run a Lightning Fabric script. SCRIPT is the path to the Python script with the code to run. The script must contain a Fabric object. SCRIPT_ARGS are the remaining arguments that you can pass to the script itself and are expected to be parsed there.
Here is the function:
def _run(**kwargs: Any) -> None:
    """Run a Lightning Fabric script.

    SCRIPT is the path to the Python script with the code to run. The script must contain a Fabric object.
    SCRIPT_ARGS are the remaining arguments that you can pass to the script itself and are expected to be parsed
    there.
    """
    # Separate the script's own arguments from the launcher options before building the Namespace.
    remaining = kwargs.pop("script_args", [])
    main(args=Namespace(**kwargs), script_args=list(remaining))
155,465 | import logging
import os
import re
import subprocess
import sys
from argparse import Namespace
from typing import Any, List, Optional
import torch
from lightning_utilities.core.imports import RequirementCache
from typing_extensions import get_args
from lightning.fabric.accelerators import CPUAccelerator, CUDAAccelerator, MPSAccelerator
from lightning.fabric.plugins.precision.precision import _PRECISION_INPUT_STR, _PRECISION_INPUT_STR_ALIAS
from lightning.fabric.strategies import STRATEGY_REGISTRY
from lightning.fabric.utilities.consolidate_checkpoint import _process_cli_args
from lightning.fabric.utilities.device_parser import _parse_gpu_ids
from lightning.fabric.utilities.distributed import _suggested_max_num_threads
from lightning.fabric.utilities.load import _load_distributed_checkpoint
def _set_env_variables(args: Namespace) -> None:
    """Set the environment variables for the new processes.

    The Fabric connector will parse the arguments set here.
    """
    # Marker so the connector knows these values were set by the CLI.
    os.environ["LT_CLI_USED"] = "1"
    # Optional settings are only exported when the user provided them.
    if args.accelerator is not None:
        os.environ["LT_ACCELERATOR"] = str(args.accelerator)
    if args.strategy is not None:
        os.environ["LT_STRATEGY"] = str(args.strategy)
    os.environ["LT_DEVICES"] = str(args.devices)
    os.environ["LT_NUM_NODES"] = str(args.num_nodes)
    if args.precision is not None:
        os.environ["LT_PRECISION"] = str(args.precision)
def _process_cli_args(args: Namespace) -> Namespace:
    """Validate the consolidate-checkpoint CLI arguments and resolve the output path.

    Exits the process with status 1 (after logging an error) on any invalid input.
    """
    # Distributed checkpoint loading needs torch.distributed.checkpoint APIs from 2.3+.
    if not _TORCH_GREATER_EQUAL_2_3:
        _log.error("Processing distributed checkpoints requires PyTorch >= 2.3.")
        exit(1)
    checkpoint_folder = Path(args.checkpoint_folder)
    if not checkpoint_folder.exists():
        _log.error(f"The provided checkpoint folder does not exist: {checkpoint_folder}")
        exit(1)
    if not checkpoint_folder.is_dir():
        _log.error(
            f"The provided checkpoint path must be a folder, containing the checkpoint shards: {checkpoint_folder}"
        )
        exit(1)
    # The metadata file is written by Lightning alongside the shards; its absence means the
    # folder is not a Lightning FSDP checkpoint.
    if not (checkpoint_folder / _METADATA_FILENAME).is_file():
        _log.error(
            "Only FSDP-sharded checkpoints saved with Lightning are supported for consolidation. The provided folder"
            f" is not in that format: {checkpoint_folder}"
        )
        exit(1)
    if args.output_file is None:
        # Default output: same name as the input folder with a '.consolidated' suffix appended.
        output_file = checkpoint_folder.with_suffix(checkpoint_folder.suffix + ".consolidated")
    else:
        output_file = Path(args.output_file)
    # Refuse to overwrite an existing file.
    if output_file.exists():
        _log.error(
            "The path for the converted checkpoint already exists. Choose a different path by providing"
            f" `--output_file` or move/delete the file first: {output_file}"
        )
        exit(1)
    return Namespace(checkpoint_folder=checkpoint_folder, output_file=output_file)
def _load_distributed_checkpoint(checkpoint_folder: Path) -> Dict[str, Any]:
    """Loads a sharded checkpoint saved with the `torch.distributed.checkpoint` into a full state dict.

    The current implementation assumes that the entire checkpoint fits in CPU memory.

    Raises:
        ImportError: If PyTorch < 2.3 is installed.
    """
    if not _TORCH_GREATER_EQUAL_2_3:
        raise ImportError("Processing distributed checkpoints requires PyTorch >= 2.3.")
    from torch.distributed.checkpoint import FileSystemReader
    from torch.distributed.checkpoint.format_utils import _EmptyStateDictLoadPlanner
    from torch.distributed.checkpoint.state_dict_loader import _load_state_dict
    checkpoint: Dict[str, Any] = {}
    # `no_dist=True` loads without an initialized process group; the empty planner fills the
    # dict from the checkpoint metadata instead of an existing state dict.
    _load_state_dict(
        checkpoint,
        storage_reader=FileSystemReader(checkpoint_folder),
        planner=_EmptyStateDictLoadPlanner(),
        no_dist=True,
    )
    # This is the extra file saved by Fabric, with user data separate from weights and optimizer states
    extra_file = checkpoint_folder / _METADATA_FILENAME
    # NOTE(review): torch.load here unpickles without weights_only -- only consolidate checkpoints
    # from trusted sources.
    extra = torch.load(extra_file, map_location="cpu") if extra_file.is_file() else {}
    checkpoint.update(extra)
    return checkpoint
The provided code snippet includes necessary dependencies for implementing the `_consolidate` function. Write a Python function `def _consolidate(checkpoint_folder: str, output_file: Optional[str]) -> None` to solve the following problem:
Convert a distributed/sharded checkpoint into a single file that can be loaded with `torch.load()`. Only supports FSDP sharded checkpoints at the moment.
Here is the function:
def _consolidate(checkpoint_folder: str, output_file: Optional[str]) -> None:
    """Convert a distributed/sharded checkpoint into a single file that can be loaded with `torch.load()`.

    Only supports FSDP sharded checkpoints at the moment.
    """
    cli_args = Namespace(checkpoint_folder=checkpoint_folder, output_file=output_file)
    # Validation + output-path resolution happens in the shared CLI-args helper.
    resolved = _process_cli_args(cli_args)
    full_state = _load_distributed_checkpoint(resolved.checkpoint_folder)
    torch.save(full_state, resolved.output_file)
155,466 | import logging
import os
import re
import subprocess
import sys
from argparse import Namespace
from typing import Any, List, Optional
import torch
from lightning_utilities.core.imports import RequirementCache
from typing_extensions import get_args
from lightning.fabric.accelerators import CPUAccelerator, CUDAAccelerator, MPSAccelerator
from lightning.fabric.plugins.precision.precision import _PRECISION_INPUT_STR, _PRECISION_INPUT_STR_ALIAS
from lightning.fabric.strategies import STRATEGY_REGISTRY
from lightning.fabric.utilities.consolidate_checkpoint import _process_cli_args
from lightning.fabric.utilities.device_parser import _parse_gpu_ids
from lightning.fabric.utilities.distributed import _suggested_max_num_threads
from lightning.fabric.utilities.load import _load_distributed_checkpoint
def _set_env_variables(args: Namespace) -> None:
    """Set the environment variables for the new processes.

    The Fabric connector will parse the arguments set here.
    """
    # Marker so the connector knows these values were set by the CLI.
    os.environ["LT_CLI_USED"] = "1"
    # Optional settings are only exported when the user provided them.
    if args.accelerator is not None:
        os.environ["LT_ACCELERATOR"] = str(args.accelerator)
    if args.strategy is not None:
        os.environ["LT_STRATEGY"] = str(args.strategy)
    os.environ["LT_DEVICES"] = str(args.devices)
    os.environ["LT_NUM_NODES"] = str(args.num_nodes)
    if args.precision is not None:
        os.environ["LT_PRECISION"] = str(args.precision)
# Package-level bootstrap: resolve version metadata, then tweak the environment.
import os
# Load package metadata (star-import of __about__) when the checkout ships it.
if os.path.isfile(os.path.join(os.path.dirname(__file__), "__about__.py")):
    from lightning.fabric.__about__ import *  # noqa: F403
# Prefer a generated __version__.py; otherwise fall back to the installed
# "lightning" distribution for the version string.
if os.path.isfile(os.path.join(os.path.dirname(__file__), "__version__.py")):
    from lightning.fabric.__version__ import version as __version__
elif package_available("lightning"):
    from lightning import __version__  # noqa: F401
# NOTE(review): presumably switches torch's CUDA-availability probing to NVML so
# the parent process does not initialize CUDA — confirm against torch docs.
os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = "1"
# Users can opt out of PossibleUserWarning emission via POSSIBLE_USER_WARNINGS=0/off.
if os.environ.get("POSSIBLE_USER_WARNINGS", "").lower() in ("0", "off"):
    disable_possible_user_warnings()
The provided code snippet includes necessary dependencies for implementing the `_set_env_variables` function. Write a Python function `def _set_env_variables(args: Namespace) -> None` to solve the following problem:
Set the environment variables for the new processes. The Fabric connector will parse the arguments set here.
Here is the function:
def _set_env_variables(args: Namespace) -> None:
    """Set the environment variables for the new processes.
    The Fabric connector will parse the arguments set here.
    """
    # Marker telling the connector that configuration comes from the CLI.
    os.environ["LT_CLI_USED"] = "1"
    # Accelerator, strategy and precision are optional; export only when supplied.
    if args.accelerator is not None:
        os.environ["LT_ACCELERATOR"] = str(args.accelerator)
    if args.strategy is not None:
        os.environ["LT_STRATEGY"] = str(args.strategy)
    # Devices and node count always carry a value and are exported unconditionally.
    os.environ["LT_DEVICES"] = str(args.devices)
    os.environ["LT_NUM_NODES"] = str(args.num_nodes)
    if args.precision is not None:
        # NOTE(review): the trailing "| ... |" text below is scraping residue
        # from the dataset dump, not executable code.
        os.environ["LT_PRECISION"] = str(args.precision) | Set the environment variables for the new processes. The Fabric connector will parse the arguments set here. |
155,467 | import logging
import os
import re
import subprocess
import sys
from argparse import Namespace
from typing import Any, List, Optional
import torch
from lightning_utilities.core.imports import RequirementCache
from typing_extensions import get_args
from lightning.fabric.accelerators import CPUAccelerator, CUDAAccelerator, MPSAccelerator
from lightning.fabric.plugins.precision.precision import _PRECISION_INPUT_STR, _PRECISION_INPUT_STR_ALIAS
from lightning.fabric.strategies import STRATEGY_REGISTRY
from lightning.fabric.utilities.consolidate_checkpoint import _process_cli_args
from lightning.fabric.utilities.device_parser import _parse_gpu_ids
from lightning.fabric.utilities.distributed import _suggested_max_num_threads
from lightning.fabric.utilities.load import _load_distributed_checkpoint
def _set_env_variables(args: Namespace) -> None:
    """Set the environment variables for the new processes.
    The Fabric connector will parse the arguments set here.
    """
    # Marker telling the connector that configuration comes from the CLI.
    os.environ["LT_CLI_USED"] = "1"
    # Accelerator, strategy and precision are optional; export only when supplied.
    if args.accelerator is not None:
        os.environ["LT_ACCELERATOR"] = str(args.accelerator)
    if args.strategy is not None:
        os.environ["LT_STRATEGY"] = str(args.strategy)
    # Devices and node count always carry a value and are exported unconditionally.
    os.environ["LT_DEVICES"] = str(args.devices)
    os.environ["LT_NUM_NODES"] = str(args.num_nodes)
    if args.precision is not None:
        os.environ["LT_PRECISION"] = str(args.precision)
def _get_num_processes(accelerator: str, devices: str) -> int:
    """Translate the ``devices`` CLI value into a per-machine process count.

    Args:
        accelerator: One of ``"gpu"``, ``"cuda"``, ``"mps"``, ``"tpu"`` or a
            CPU-style accelerator name (the fallback branch).
        devices: The raw ``--devices`` string from the command line.

    Raises:
        ValueError: if ``accelerator == "tpu"`` — TPU launching is not
            supported through the CLI.
    """
    # TPU is rejected up front; no device parsing applies.
    if accelerator == "tpu":
        raise ValueError("Launching processes for TPU through the CLI is not supported.")
    if accelerator == "gpu":
        device_ids = _parse_gpu_ids(devices, include_cuda=True, include_mps=True)
    elif accelerator == "cuda":
        device_ids = CUDAAccelerator.parse_devices(devices)
    elif accelerator == "mps":
        device_ids = MPSAccelerator.parse_devices(devices)
    else:
        # CPU fallback: ``parse_devices`` already yields the count here
        # (this function returns an int), so no ``len()`` is needed.
        return CPUAccelerator.parse_devices(devices)
    # One process per parsed device index; no devices parsed means no processes.
    return 0 if device_ids is None else len(device_ids)
def main(args: Namespace, script_args: Optional[List[str]] = None) -> None:
    """CLI entry point: export launch settings, then spawn the worker processes.

    Args:
        args: Parsed launcher options (accelerator, devices, addresses, ...).
        script_args: Extra arguments forwarded verbatim to the user script;
            ``None`` is treated as no extra arguments.
    """
    _set_env_variables(args)
    _torchrun_launch(args, script_args or [])
# Package-level bootstrap: resolve version metadata, then tweak the environment.
import os
# Load package metadata (star-import of __about__) when the checkout ships it.
if os.path.isfile(os.path.join(os.path.dirname(__file__), "__about__.py")):
    from lightning.fabric.__about__ import *  # noqa: F403
# Prefer a generated __version__.py; otherwise fall back to the installed
# "lightning" distribution for the version string.
if os.path.isfile(os.path.join(os.path.dirname(__file__), "__version__.py")):
    from lightning.fabric.__version__ import version as __version__
elif package_available("lightning"):
    from lightning import __version__  # noqa: F401
# NOTE(review): presumably switches torch's CUDA-availability probing to NVML so
# the parent process does not initialize CUDA — confirm against torch docs.
os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = "1"
# Users can opt out of PossibleUserWarning emission via POSSIBLE_USER_WARNINGS=0/off.
if os.environ.get("POSSIBLE_USER_WARNINGS", "").lower() in ("0", "off"):
    disable_possible_user_warnings()
def _suggested_max_num_threads(num_processes: int = 1) -> int:
    """Return a sensible per-process thread count.

    Splits the available CPUs evenly across ``num_processes``, never going
    below one thread per process.

    Raises:
        ValueError: if ``num_processes`` is smaller than 1.
    """
    if num_processes >= 1:
        threads_per_process = _num_cpus_available() // num_processes
        return threads_per_process if threads_per_process > 1 else 1
    raise ValueError(f"`num_processes` should be >= 1, got {num_processes}.")
The provided code snippet includes necessary dependencies for implementing the `_torchrun_launch` function. Write a Python function `def _torchrun_launch(args: Namespace, script_args: List[str]) -> None` to solve the following problem:
This will invoke `torchrun` programmatically to launch the given script in new processes.
Here is the function:
def _torchrun_launch(args: Namespace, script_args: List[str]) -> None:
    """This will invoke `torchrun` programmatically to launch the given script in new processes.

    Args:
        args: Parsed launcher options (strategy, accelerator, devices, node
            rank, main address/port, and the target script path).
        script_args: Extra arguments appended after the script path and passed
            through to the user script.
    """
    # Imported lazily so merely importing this module does not pull in
    # torch.distributed's launcher machinery.
    import torch.distributed.run as torchrun
    # "dp" keeps all devices inside one process, so only a single worker is
    # launched; otherwise one process per parsed device.
    num_processes = 1 if args.strategy == "dp" else _get_num_processes(args.accelerator, args.devices)
    # Assemble the torchrun command line; the script path must come last so
    # that everything after it is forwarded to the script itself.
    torchrun_args = [
        f"--nproc_per_node={num_processes}",
        f"--nnodes={args.num_nodes}",
        f"--node_rank={args.node_rank}",
        f"--master_addr={args.main_address}",
        f"--master_port={args.main_port}",
        args.script,
    ]
    torchrun_args.extend(script_args)
    # set a good default number of threads for OMP to avoid warnings being emitted to the user
    # (setdefault: an explicit user-provided OMP_NUM_THREADS always wins)
    os.environ.setdefault("OMP_NUM_THREADS", str(_suggested_max_num_threads()))
    # NOTE(review): the trailing "| ... |" text below is scraping residue from
    # the dataset dump, not executable code.
    torchrun.main(torchrun_args) | This will invoke `torchrun` programmatically to launch the given script in new processes. |
155,468 | import os
from collections import Counter
from typing import Any, Dict, List, Optional, Union, cast
import torch
from typing_extensions import get_args
from lightning.fabric.accelerators import ACCELERATOR_REGISTRY
from lightning.fabric.accelerators.accelerator import Accelerator
from lightning.fabric.accelerators.cuda import CUDAAccelerator
from lightning.fabric.accelerators.mps import MPSAccelerator
from lightning.fabric.accelerators.xla import XLAAccelerator
from lightning.fabric.plugins import (
BitsandbytesPrecision,
CheckpointIO,
DeepSpeedPrecision,
HalfPrecision,
MixedPrecision,
Precision,
TransformerEnginePrecision,
XLAPrecision,
)
from lightning.fabric.plugins.environments import (
ClusterEnvironment,
LightningEnvironment,
LSFEnvironment,
MPIEnvironment,
SLURMEnvironment,
TorchElasticEnvironment,
)
from lightning.fabric.plugins.precision.double import DoublePrecision
from lightning.fabric.plugins.precision.fsdp import FSDPPrecision
from lightning.fabric.plugins.precision.precision import (
_PRECISION_INPUT,
_PRECISION_INPUT_INT,
_PRECISION_INPUT_STR,
_PRECISION_INPUT_STR_ALIAS,
_PRECISION_INPUT_STR_ALIAS_CONVERSION,
)
from lightning.fabric.strategies import (
STRATEGY_REGISTRY,
DeepSpeedStrategy,
ParallelStrategy,
SingleDeviceStrategy,
SingleDeviceXLAStrategy,
Strategy,
XLAFSDPStrategy,
XLAStrategy,
)
from lightning.fabric.strategies.ddp import _DDP_FORK_ALIASES
from lightning.fabric.strategies.fsdp import _FSDP_ALIASES, FSDPStrategy
from lightning.fabric.utilities import rank_zero_info, rank_zero_warn
from lightning.fabric.utilities.device_parser import _determine_root_gpu_device
from lightning.fabric.utilities.imports import _IS_INTERACTIVE
# Legacy integer precision flags (e.g. ``precision=16``), kept for backward
# compatibility.
_PRECISION_INPUT_INT = Literal[64, 32, 16]
# Mapping from the legacy string aliases to the canonical "<bits>-<mode>" form.
_PRECISION_INPUT_STR_ALIAS_CONVERSION = {"64": "64-true", "32": "32-true", "16": "16-mixed", "bf16": "bf16-mixed"}
# Legacy string aliases accepted for backward compatibility.
_PRECISION_INPUT_STR_ALIAS = Literal["64", "32", "16", "bf16"]
# The canonical precision identifiers.
_PRECISION_INPUT_STR = Literal[
    "transformer-engine",
    "transformer-engine-float16",
    "16-true",
    "16-mixed",
    "bf16-true",
    "bf16-mixed",
    "32-true",
    "64-true",
]
# Union of every form a user may pass for ``precision``.
_PRECISION_INPUT = Union[_PRECISION_INPUT_INT, _PRECISION_INPUT_STR, _PRECISION_INPUT_STR_ALIAS]
def _convert_precision_to_unified_args(precision: Optional[_PRECISION_INPUT]) -> Optional[_PRECISION_INPUT_STR]:
    """Validate a user-supplied precision flag and normalize it to the canonical string form.

    Args:
        precision: Any accepted precision input (int flag, legacy alias, or
            canonical string), or ``None`` to leave the precision unset.

    Raises:
        ValueError: if ``precision`` is not among the supported values.
    """
    if precision is None:
        return None
    # All accepted spellings: canonical strings, legacy ints, legacy aliases.
    supported_precision = (
        get_args(_PRECISION_INPUT_STR) + get_args(_PRECISION_INPUT_INT) + get_args(_PRECISION_INPUT_STR_ALIAS)
    )
    if precision not in supported_precision:
        raise ValueError(f"Precision {repr(precision)} is invalid. Allowed precision values: {supported_precision}")
    precision = str(precision)  # convert int flags to str here to enable the legacy-conversion below
    if precision in get_args(_PRECISION_INPUT_STR_ALIAS):
        # Warn only for the ambiguous "16"/"bf16" aliases; "32"/"64" convert
        # trivially to their "-true" forms without a deprecation message.
        if str(precision)[:2] not in ("32", "64"):
            rank_zero_warn(
                f"`precision={precision}` is supported for historical reasons but its usage is discouraged. "
                f"Please set your precision to {_PRECISION_INPUT_STR_ALIAS_CONVERSION[precision]} instead!"
            )
        precision = _PRECISION_INPUT_STR_ALIAS_CONVERSION[precision]
    # NOTE(review): the trailing "| null" below is scraping residue from the
    # dataset dump, not executable code.
    return cast(_PRECISION_INPUT_STR, precision) | null
155,469 | import os
from collections import Counter
from typing import Any, Dict, List, Optional, Union, cast
import torch
from typing_extensions import get_args
from lightning.fabric.accelerators import ACCELERATOR_REGISTRY
from lightning.fabric.accelerators.accelerator import Accelerator
from lightning.fabric.accelerators.cuda import CUDAAccelerator
from lightning.fabric.accelerators.mps import MPSAccelerator
from lightning.fabric.accelerators.xla import XLAAccelerator
from lightning.fabric.plugins import (
BitsandbytesPrecision,
CheckpointIO,
DeepSpeedPrecision,
HalfPrecision,
MixedPrecision,
Precision,
TransformerEnginePrecision,
XLAPrecision,
)
from lightning.fabric.plugins.environments import (
ClusterEnvironment,
LightningEnvironment,
LSFEnvironment,
MPIEnvironment,
SLURMEnvironment,
TorchElasticEnvironment,
)
from lightning.fabric.plugins.precision.double import DoublePrecision
from lightning.fabric.plugins.precision.fsdp import FSDPPrecision
from lightning.fabric.plugins.precision.precision import (
_PRECISION_INPUT,
_PRECISION_INPUT_INT,
_PRECISION_INPUT_STR,
_PRECISION_INPUT_STR_ALIAS,
_PRECISION_INPUT_STR_ALIAS_CONVERSION,
)
from lightning.fabric.strategies import (
STRATEGY_REGISTRY,
DeepSpeedStrategy,
ParallelStrategy,
SingleDeviceStrategy,
SingleDeviceXLAStrategy,
Strategy,
XLAFSDPStrategy,
XLAStrategy,
)
from lightning.fabric.strategies.ddp import _DDP_FORK_ALIASES
from lightning.fabric.strategies.fsdp import _FSDP_ALIASES, FSDPStrategy
from lightning.fabric.utilities import rank_zero_info, rank_zero_warn
from lightning.fabric.utilities.device_parser import _determine_root_gpu_device
from lightning.fabric.utilities.imports import _IS_INTERACTIVE
# Package-level bootstrap: resolve version metadata, then tweak the environment.
import os
# Load package metadata (star-import of __about__) when the checkout ships it.
if os.path.isfile(os.path.join(os.path.dirname(__file__), "__about__.py")):
    from lightning.fabric.__about__ import *  # noqa: F403
# Prefer a generated __version__.py; otherwise fall back to the installed
# "lightning" distribution for the version string.
if os.path.isfile(os.path.join(os.path.dirname(__file__), "__version__.py")):
    from lightning.fabric.__version__ import version as __version__
elif package_available("lightning"):
    from lightning import __version__  # noqa: F401
# NOTE(review): presumably switches torch's CUDA-availability probing to NVML so
# the parent process does not initialize CUDA — confirm against torch docs.
os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = "1"
# Users can opt out of PossibleUserWarning emission via POSSIBLE_USER_WARNINGS=0/off.
if os.environ.get("POSSIBLE_USER_WARNINGS", "").lower() in ("0", "off"):
    disable_possible_user_warnings()
def _is_using_cli() -> bool:
    """Whether this process was configured through the CLI (``LT_CLI_USED`` env flag)."""
    # NOTE(review): the trailing "| null" below is scraping residue from the
    # dataset dump, not executable code.
    return bool(int(os.environ.get("LT_CLI_USED", "0"))) | null
155,470 | import inspect
import os
from contextlib import contextmanager, nullcontext
from functools import partial
from pathlib import Path
from typing import (
Any,
Callable,
ContextManager,
Dict,
Generator,
List,
Mapping,
Optional,
Sequence,
Tuple,
Union,
cast,
overload,
)
import torch
import torch.nn as nn
from lightning_utilities.core.apply_func import apply_to_collection
from lightning_utilities.core.overrides import is_overridden
from torch import Tensor
from torch.optim import Optimizer
from torch.utils.data import BatchSampler, DataLoader, DistributedSampler, RandomSampler, SequentialSampler
import lightning.fabric
from lightning.fabric.accelerators.accelerator import Accelerator
from lightning.fabric.connector import _PLUGIN_INPUT, _PRECISION_INPUT, _Connector, _is_using_cli
from lightning.fabric.loggers import Logger
from lightning.fabric.plugins import Precision
from lightning.fabric.strategies import (
DataParallelStrategy,
DeepSpeedStrategy,
FSDPStrategy,
SingleDeviceStrategy,
Strategy,
XLAFSDPStrategy,
XLAStrategy,
)
from lightning.fabric.strategies.fsdp import _has_meta_device_parameters
from lightning.fabric.strategies.launchers import _MultiProcessingLauncher, _XLALauncher
from lightning.fabric.strategies.strategy import TBroadcast, _Sharded
from lightning.fabric.utilities import move_data_to_device
from lightning.fabric.utilities.apply_func import convert_tensors_to_scalars, convert_to_tensors
from lightning.fabric.utilities.data import (
_auto_add_worker_init_fn,
_replace_dunder_methods,
_update_dataloader,
has_iterable_dataset,
)
from lightning.fabric.utilities.device_dtype_mixin import _update_properties
from lightning.fabric.utilities.distributed import DistributedSamplerWrapper, _InfiniteBarrier
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0
from lightning.fabric.utilities.rank_zero import rank_zero_deprecation, rank_zero_warn
from lightning.fabric.utilities.registry import _load_external_callbacks
from lightning.fabric.utilities.seed import seed_everything
from lightning.fabric.utilities.types import ReduceOp
from lightning.fabric.utilities.warnings import PossibleUserWarning
from lightning.fabric.wrappers import (
_FabricDataLoader,
_FabricModule,
_FabricOptimizer,
_to_compiled,
_unwrap_compiled,
_unwrap_objects,
)
def _do_nothing(*_: Any) -> None:
    """No-op accepting any positional arguments.

    Presumably used as a placeholder callback where a callable is required —
    confirm at the call sites.
    """
    # NOTE(review): the trailing "| null" below is scraping residue from the
    # dataset dump, not executable code.
    pass | null
155,471 | import logging
import os
import random
from random import getstate as python_get_rng_state
from random import setstate as python_set_rng_state
from typing import Any, Dict, Optional
import numpy as np
import torch
from lightning.fabric.utilities.rank_zero import _get_rank, rank_prefixed_message, rank_zero_only, rank_zero_warn
def seed_everything(seed: Optional[int] = None, workers: bool = False) -> int:
    r"""Function that sets the seed for pseudo-random number generators in: torch, numpy, and Python's random module.
    In addition, sets the following environment variables:
    - ``PL_GLOBAL_SEED``: will be passed to spawned subprocesses (e.g. ddp_spawn backend).
    - ``PL_SEED_WORKERS``: (optional) is set to 1 if ``workers=True``.
    Args:
        seed: the integer value seed for global random state in Lightning.
            If ``None``, it will read the seed from ``PL_GLOBAL_SEED`` env variable. If ``None`` and the
            ``PL_GLOBAL_SEED`` env variable is not set, then the seed defaults to 0.
        workers: if set to ``True``, will properly configure all dataloaders passed to the
            Trainer with a ``worker_init_fn``. If the user already provides such a function
            for their dataloaders, setting this argument will have no influence. See also:
            :func:`~lightning.fabric.utilities.seed.pl_worker_init_function`.
    Returns:
        The seed that was effectively applied.
    """
    if seed is None:
        # No explicit seed: fall back to the value exported by a previous call
        # (or by a parent process); default to 0 when nothing was exported.
        env_seed = os.environ.get("PL_GLOBAL_SEED")
        if env_seed is None:
            seed = 0
            rank_zero_warn(f"No seed found, seed set to {seed}")
        else:
            try:
                seed = int(env_seed)
            except ValueError:
                # A non-integer env value is ignored with a warning, not raised on.
                seed = 0
                rank_zero_warn(f"Invalid seed found: {repr(env_seed)}, seed set to {seed}")
    elif not isinstance(seed, int):
        # Coerce e.g. numpy integers; may raise for non-numeric input.
        seed = int(seed)
    # min_seed_value / max_seed_value are module-level bounds defined elsewhere
    # in this file; out-of-range seeds are clamped to 0 with a warning.
    if not (min_seed_value <= seed <= max_seed_value):
        rank_zero_warn(f"{seed} is not in bounds, numpy accepts from {min_seed_value} to {max_seed_value}")
        seed = 0
    log.info(rank_prefixed_message(f"Seed set to {seed}", _get_rank()))
    # Export so spawned subprocesses re-apply the same seed.
    os.environ["PL_GLOBAL_SEED"] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Consumed by the dataloader worker_init_fn machinery (see docstring).
    os.environ["PL_SEED_WORKERS"] = f"{int(workers)}"
    return seed
The provided code snippet includes necessary dependencies for implementing the `reset_seed` function. Write a Python function `def reset_seed() -> None` to solve the following problem:
r"""Reset the seed to the value that :func:`~lightning.fabric.utilities.seed.seed_everything` previously set. If :func:`~lightning.fabric.utilities.seed.seed_everything` is unused, this function will do nothing.
Here is the function:
def reset_seed() -> None:
    r"""Reset the seed to the value that :func:`~lightning.fabric.utilities.seed.seed_everything` previously set.
    If :func:`~lightning.fabric.utilities.seed.seed_everything` is unused, this function will do nothing.
    """
    # PL_GLOBAL_SEED is only present after a prior seed_everything() call.
    seed = os.environ.get("PL_GLOBAL_SEED", None)
    if seed is None:
        return
    # Restore the workers flag as well so worker seeding behaves identically.
    workers = os.environ.get("PL_SEED_WORKERS", "0")
    # NOTE(review): the trailing "| r\"\"\"..." text below is scraping residue
    # from the dataset dump, not executable code.
    seed_everything(int(seed), workers=bool(int(workers))) | r"""Reset the seed to the value that :func:`~lightning.fabric.utilities.seed.seed_everything` previously set. If :func:`~lightning.fabric.utilities.seed.seed_everything` is unused, this function will do nothing. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.