id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
155,472 | import itertools
from typing import Any, Callable, Dict, Optional, Sequence
import torch
from torch.overrides import TorchFunctionMode
from typing_extensions import override
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_1
from lightning.fabric.utilities.types import _DEVICE
def _materialize(module: torch.nn.Module, device: _DEVICE) -> None:
    """Materialize a single module's own parameters and buffers on ``device``.

    Requires torch >= 2.1 (for ``to_empty(recurse=False)``) and that the module
    implements ``reset_parameters`` so the freshly allocated storage can be
    re-initialized.
    """
    if not _TORCH_GREATER_EQUAL_2_1:
        raise RuntimeError("recurse=False requires torch 2.1")
    # Allocate (uninitialized) storage for this module only; children are handled by the caller.
    module.to_empty(device=device, recurse=False)
    if not hasattr(module, "reset_parameters"):
        raise TypeError(
            f"Materialization requires that the `{type(module).__name__}.reset_parameters` method is implemented."
            " This method is used to initialize any children parameters or buffers in this module."
        )
    module.reset_parameters()
_DEVICE = Union[torch.device, str, int]
The provided code snippet includes necessary dependencies for implementing the `_materialize_meta_tensors` function. Write a Python function `def _materialize_meta_tensors(module: torch.nn.Module, device: _DEVICE) -> None` to solve the following problem:
Materialize all tensors in a given module.
Here is the function:
def _materialize_meta_tensors(module: torch.nn.Module, device: _DEVICE) -> None:
    """Materialize all tensors in a given module.

    Walks the module tree and materializes every submodule that still holds
    meta-device parameters or buffers.

    Args:
        module: The root module to scan (included in the traversal itself).
        device: The device to materialize the tensors on.
    """
    # NOTE: the loop variable used to shadow the `module` parameter; renamed for clarity.
    for submodule in module.modules():
        # Only this submodule's own tensors (recurse=False) — children are visited by `modules()` anyway.
        own_tensors = itertools.chain(submodule.parameters(recurse=False), submodule.buffers(recurse=False))
        if any(t.is_meta for t in own_tensors):
            _materialize(submodule, device)
155,473 | import io
import logging
from pathlib import Path
from typing import IO, Any, Dict, Union
import fsspec
import fsspec.utils
import torch
from fsspec.core import url_to_fs
from fsspec.implementations.local import AbstractFileSystem
from lightning_utilities.core.imports import module_available
from lightning.fabric.utilities.types import _MAP_LOCATION_TYPE, _PATH
def get_filesystem(path: _PATH, **kwargs: Any) -> AbstractFileSystem:
    """Return the fsspec filesystem able to handle ``path`` (local, s3://, gs://, ...)."""
    filesystem, _ = url_to_fs(str(path), **kwargs)
    return filesystem
_PATH = Union[str, Path]
_MAP_LOCATION_TYPE = Optional[
Union[_DEVICE, Callable[[UntypedStorage, str], Optional[UntypedStorage]], Dict[_DEVICE, _DEVICE]]
]
The provided code snippet includes necessary dependencies for implementing the `_load` function. Write a Python function `def _load( path_or_url: Union[IO, _PATH], map_location: _MAP_LOCATION_TYPE = None, ) -> Any` to solve the following problem:
Loads a checkpoint. Args: path_or_url: Path or URL of the checkpoint. map_location: a function, ``torch.device``, string or a dict specifying how to remap storage locations.
Here is the function:
def _load(
    path_or_url: Union[IO, _PATH],
    map_location: _MAP_LOCATION_TYPE = None,
) -> Any:
    """Loads a checkpoint.

    Args:
        path_or_url: Path or URL of the checkpoint.
        map_location: a function, ``torch.device``, string or a dict specifying how to remap storage locations.

    """
    if not isinstance(path_or_url, (str, Path)):
        # File-like objects (BytesIO and friends) go straight to torch.load.
        return torch.load(
            path_or_url,
            map_location=map_location,  # type: ignore[arg-type] # upstream annotation is not correct
        )
    if str(path_or_url).startswith("http"):
        # Remote checkpoints are fetched (and cached) through torch.hub.
        return torch.hub.load_state_dict_from_url(
            str(path_or_url),
            map_location=map_location,  # type: ignore[arg-type]
        )
    # Everything else: resolve the filesystem via fsspec and stream the file.
    filesystem = get_filesystem(path_or_url)
    with filesystem.open(path_or_url, "rb") as checkpoint_file:
        return torch.load(checkpoint_file, map_location=map_location)  # type: ignore[arg-type]
155,474 | import io
import logging
from pathlib import Path
from typing import IO, Any, Dict, Union
import fsspec
import fsspec.utils
import torch
from fsspec.core import url_to_fs
from fsspec.implementations.local import AbstractFileSystem
from lightning_utilities.core.imports import module_available
from lightning.fabric.utilities.types import _MAP_LOCATION_TYPE, _PATH
log = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `_atomic_save` function. Write a Python function `def _atomic_save(checkpoint: Dict[str, Any], filepath: Union[str, Path]) -> None` to solve the following problem:
Saves a checkpoint atomically, avoiding the creation of incomplete checkpoints. Args: checkpoint: The object to save. Built to be used with the ``dump_checkpoint`` method, but can deal with anything which ``torch.save`` accepts. filepath: The path to which the checkpoint will be saved. This points to the file that the checkpoint will be stored in.
Here is the function:
def _atomic_save(checkpoint: Dict[str, Any], filepath: Union[str, Path]) -> None:
    """Saves a checkpoint atomically, avoiding the creation of incomplete checkpoints.

    Args:
        checkpoint: The object to save. Built to be used with the ``dump_checkpoint`` method,
            but can deal with anything which ``torch.save`` accepts.
        filepath: The path to which the checkpoint will be saved. This points to the file that
            the checkpoint will be stored in.
    """
    # Serialize fully into memory first, so the target file is written in a single pass
    # and never observed half-written.
    buffer = io.BytesIO()
    log.debug(f"Saving checkpoint: {filepath}")
    torch.save(checkpoint, buffer)
    with fsspec.open(filepath, "wb") as f:
        f.write(buffer.getvalue())
155,475 | import logging
from argparse import ArgumentParser, Namespace
from pathlib import Path
import torch
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_3
from lightning.fabric.utilities.load import _METADATA_FILENAME, _load_distributed_checkpoint
def _parse_cli_args() -> Namespace:
parser = ArgumentParser(
description=(
"Converts a distributed/sharded checkpoint into a single file that can be loaded with `torch.load()`."
" Only supports FSDP sharded checkpoints at the moment."
),
)
parser.add_argument(
"checkpoint_folder",
type=str,
help=(
"Path to a checkpoint folder, containing the sharded checkpoint files saved using the"
" `torch.distributed.checkpoint` API."
),
)
parser.add_argument(
"--output_file",
type=str,
help=(
"Path to the file where the converted checkpoint should be saved. The file should not already exist."
" If no path is provided, the file will be saved next to the input checkpoint folder with the same name"
" and a '.consolidated' suffix."
),
)
return parser.parse_args() | null |
155,476 | from typing import Any, List, Optional, Union
import torch
from torch.nn import Module
from typing_extensions import Self, override
class _DeviceDtypeModuleMixin(Module):
__jit_unused_properties__: List[str] = ["device", "dtype"]
def __init__(self) -> None:
super().__init__()
self._dtype: Union[str, torch.dtype] = torch.get_default_dtype()
self._device = torch.device("cpu")
def dtype(self) -> Union[str, torch.dtype]:
return self._dtype
def dtype(self, new_dtype: Union[str, torch.dtype]) -> None:
# necessary to avoid infinite recursion
raise RuntimeError("Cannot set the dtype explicitly. Please use module.to(new_dtype).")
def device(self) -> torch.device:
device = self._device
# make this more explicit to always include the index
if device.type == "cuda" and device.index is None:
return torch.device(f"cuda:{torch.cuda.current_device()}")
return device
def to(self, *args: Any, **kwargs: Any) -> Self:
"""See :meth:`torch.nn.Module.to`."""
# this converts `str` device to `torch.device`
device, dtype = torch._C._nn._parse_to(*args, **kwargs)[:2]
_update_properties(self, device=device, dtype=dtype)
return super().to(*args, **kwargs)
def cuda(self, device: Optional[Union[torch.device, int]] = None) -> Self:
"""Moves all model parameters and buffers to the GPU. This also makes associated parameters and buffers
different objects. So it should be called before constructing optimizer if the module will live on GPU while
being optimized.
Arguments:
device: If specified, all parameters will be copied to that device. If `None`, the current CUDA device
index will be used.
Returns:
Module: self
"""
if device is None:
device = torch.device("cuda", torch.cuda.current_device())
elif isinstance(device, int):
device = torch.device("cuda", index=device)
_update_properties(self, device=device)
return super().cuda(device=device)
def cpu(self) -> Self:
"""See :meth:`torch.nn.Module.cpu`."""
_update_properties(self, device=torch.device("cpu"))
return super().cpu()
def type(self, dst_type: Union[str, torch.dtype]) -> Self:
"""See :meth:`torch.nn.Module.type`."""
_update_properties(self, dtype=dst_type)
return super().type(dst_type=dst_type)
def float(self) -> Self:
"""See :meth:`torch.nn.Module.float`."""
_update_properties(self, dtype=torch.float)
return super().float()
def double(self) -> Self:
"""See :meth:`torch.nn.Module.double`."""
_update_properties(self, dtype=torch.double)
return super().double()
def half(self) -> Self:
"""See :meth:`torch.nn.Module.half`."""
_update_properties(self, dtype=torch.half)
return super().half()
def _update_properties(
    root: torch.nn.Module, device: Optional[torch.device] = None, dtype: Optional[Union[str, torch.dtype]] = None
) -> None:
    """Record ``device``/``dtype`` on every ``_DeviceDtypeModuleMixin`` in the tree rooted at ``root``.

    Deliberately does NOT call ``module.to()``: the model must not actually be moved,
    e.g. when only some of the parameters live on the meta device.
    """

    def _sync(module: Union[_DeviceDtypeModuleMixin, Module]) -> None:
        if isinstance(module, _DeviceDtypeModuleMixin):
            if device is not None:
                module._device = device
            if dtype is not None:
                module._dtype = dtype

    root.apply(_sync)
155,477 | import warnings
from pathlib import Path
from typing import Optional, Type, Union
from lightning.fabric.utilities.rank_zero import LightningDeprecationWarning
warnings.simplefilter("default", category=LightningDeprecationWarning)
warnings.formatwarning = _custom_format_warning
class PossibleUserWarning(UserWarning):
    """Warning category for messages that may be false positives."""
The provided code snippet includes necessary dependencies for implementing the `disable_possible_user_warnings` function. Write a Python function `def disable_possible_user_warnings(module: str = "") -> None` to solve the following problem:
Ignore warnings of the category ``PossibleUserWarning`` from Lightning. For more granular control over which warnings to ignore, use :func:`warnings.filterwarnings` directly. Args: module: Name of the module for which the warnings should be ignored (e.g., ``'lightning.pytorch.strategies'``). Default: Disables warnings from all modules.
Here is the function:
def disable_possible_user_warnings(module: str = "") -> None:
    """Ignore warnings of the category ``PossibleUserWarning`` from Lightning.

    For more granular control over which warnings to ignore, use :func:`warnings.filterwarnings` directly.

    Args:
        module: Name of the module for which the warnings should be ignored
            (e.g., ``'lightning.pytorch.strategies'``). Default: disables warnings from all modules.

    """
    warnings.filterwarnings("ignore", category=PossibleUserWarning, module=module)
155,478 | import warnings
from pathlib import Path
from typing import Optional, Type, Union
from lightning.fabric.utilities.rank_zero import LightningDeprecationWarning
_default_format_warning = warnings.formatwarning
def _is_path_in_lightning(path: Path) -> bool:
"""Naive check whether the path looks like a path from the lightning package."""
return "lightning" in str(path.absolute())
The provided code snippet includes necessary dependencies for implementing the `_custom_format_warning` function. Write a Python function `def _custom_format_warning( message: Union[Warning, str], category: Type[Warning], filename: str, lineno: int, line: Optional[str] = None ) -> str` to solve the following problem:
Custom formatting that avoids an extra line in case warnings are emitted from the `rank_zero`-functions.
Here is the function:
def _custom_format_warning(
    message: Union[Warning, str], category: Type[Warning], filename: str, lineno: int, line: Optional[str] = None
) -> str:
    """Custom formatting that avoids an extra line in case warnings are emitted from the `rank_zero`-functions."""
    if not _is_path_in_lightning(Path(filename)):
        # Not a Lightning-internal warning: defer to Python's default formatting.
        return _default_format_warning(message, category, filename, lineno, line)
    # The warning originates from the Lightning package: use a compact one-line format.
    # NOTE(review): "(unknown)" looks like a redacted filename placeholder — upstream likely
    # interpolates the actual filename here; verify against the original source.
    return f"(unknown):{lineno}: {message}\n"
155,479 | import logging
from inspect import getmembers, isclass
from types import ModuleType
from typing import Any, List, Type, Union
from lightning_utilities import is_overridden
from lightning.fabric.utilities.imports import _PYTHON_GREATER_EQUAL_3_8_0, _PYTHON_GREATER_EQUAL_3_10_0
_log = logging.getLogger(__name__)
_PYTHON_GREATER_EQUAL_3_8_0 = (sys.version_info.major, sys.version_info.minor) >= (3, 8)
_PYTHON_GREATER_EQUAL_3_10_0 = (sys.version_info.major, sys.version_info.minor) >= (3, 10)
The provided code snippet includes necessary dependencies for implementing the `_load_external_callbacks` function. Write a Python function `def _load_external_callbacks(group: str) -> List[Any]` to solve the following problem:
Collect external callbacks registered through entry points. The entry points are expected to be functions returning a list of callbacks. Args: group: The entry point group name to load callbacks from. Return: A list of all callbacks collected from external factories.
Here is the function:
def _load_external_callbacks(group: str) -> List[Any]:
    """Collect external callbacks registered through entry points.

    The entry points are expected to be functions returning a list of callbacks.

    Args:
        group: The entry point group name to load callbacks from.

    Return:
        A list of all callbacks collected from external factories.

    """
    # Entry-point discovery differs across Python versions; pick the right API.
    if not _PYTHON_GREATER_EQUAL_3_8_0:
        from pkg_resources import iter_entry_points

        factories = iter_entry_points(group)  # type: ignore[assignment]
    else:
        from importlib.metadata import entry_points

        if _PYTHON_GREATER_EQUAL_3_10_0:
            factories = entry_points(group=group)
        else:
            factories = entry_points().get(group, {})  # type: ignore[arg-type]

    collected: List[Any] = []
    for factory in factories:
        produced: Union[List[Any], Any] = factory.load()()
        if not isinstance(produced, list):
            # Factories may return a single callback instead of a list.
            produced = [produced]
        if produced:
            _log.info(
                f"Adding {len(produced)} callbacks from entry point '{factory.name}':"
                f" {', '.join(type(cb).__name__ for cb in produced)}"
            )
            collected.extend(produced)
    return collected
155,480 | from typing import List, MutableSequence, Optional, Tuple, Union
import torch
from lightning.fabric.utilities.exceptions import MisconfigurationException
from lightning.fabric.utilities.types import _DEVICE
_DEVICE = Union[torch.device, str, int]
The provided code snippet includes necessary dependencies for implementing the `_determine_root_gpu_device` function. Write a Python function `def _determine_root_gpu_device(gpus: List[_DEVICE]) -> Optional[_DEVICE]` to solve the following problem:
Args: gpus: Non-empty list of ints representing which GPUs to use Returns: Designated root GPU device id Raises: TypeError: If ``gpus`` is not a list AssertionError: If GPU list is empty
Here is the function:
def _determine_root_gpu_device(gpus: List[_DEVICE]) -> Optional[_DEVICE]:
    """
    Args:
        gpus: Non-empty list of ints representing which GPUs to use

    Returns:
        Designated root GPU device id

    Raises:
        TypeError:
            If ``gpus`` is not a list
        AssertionError:
            If GPU list is empty

    """
    if gpus is None:
        return None
    if not isinstance(gpus, list):
        raise TypeError("GPUs should be a list")
    assert gpus, "GPUs should be a non-empty list"
    # The first entry is designated as the root device.
    return gpus[0]
155,481 | import functools
import inspect
import os
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from typing import Any, Callable, Dict, Generator, Iterable, Optional, Sized, Tuple, Type, Union
from lightning_utilities.core.inheritance import get_all_subclasses
from torch.utils.data import BatchSampler, DataLoader, IterableDataset, Sampler
from typing_extensions import TypeGuard
from lightning.fabric.utilities.enums import LightningEnum
from lightning.fabric.utilities.exceptions import MisconfigurationException
from lightning.fabric.utilities.rank_zero import rank_zero_warn
from lightning.fabric.utilities.seed import pl_worker_init_function
def has_iterable_dataset(dataloader: object) -> bool:
    """Return ``True`` if the object exposes a ``dataset`` attribute holding an ``IterableDataset``."""
    dataset = getattr(dataloader, "dataset", None)
    return isinstance(dataset, IterableDataset)
def sized_len(dataloader: object) -> Optional[int]:
    """Try to get the length of an object, return ``None`` otherwise."""
    try:
        return len(dataloader)  # type: ignore [arg-type]
    except (TypeError, NotImplementedError):
        # The object has no (usable) `__len__`.
        return None
The provided code snippet includes necessary dependencies for implementing the `has_len` function. Write a Python function `def has_len(dataloader: object) -> TypeGuard[Sized]` to solve the following problem:
Checks if a given object has ``__len__`` method implemented.
Here is the function:
def has_len(dataloader: object) -> TypeGuard[Sized]:
    """Checks if a given object has ``__len__`` method implemented.

    Also emits rank-zero warnings for two suspicious (but legal) cases:
    a reported length of 0, and an ``IterableDataset`` that defines ``__len__``.
    """
    length = sized_len(dataloader)
    if length == 0:
        # a 0-length dataloader still "has a length", but is almost always a configuration mistake
        rank_zero_warn(
            f"`{dataloader.__class__.__name__}` returned 0 length. Please make sure this was your intention."
        )
    if length is not None and has_iterable_dataset(dataloader):
        # IterableDatasets stream data; a declared __len__ can disagree with the actual number of
        # samples under multi-worker loading
        rank_zero_warn(
            "Your `IterableDataset` has `__len__` defined."
            " In combination with multi-process data loading (when num_workers > 1),"
            " `__len__` could be inaccurate if each worker is not configured independently"
            " to avoid having duplicate data."
        )
    return length is not None
155,482 | from collections import deque
from typing import TYPE_CHECKING, Any, Callable, Deque, Dict, List, Optional, TypeVar, Union
import torch
from typing_extensions import override
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_1
from lightning.fabric.utilities.rank_zero import rank_zero_only, rank_zero_warn
_TORCH_GREATER_EQUAL_2_1 = compare_version("torch", operator.ge, "2.1.0")
The provided code snippet includes necessary dependencies for implementing the `measure_flops` function. Write a Python function `def measure_flops( model: torch.nn.Module, forward_fn: Callable[[], torch.Tensor], loss_fn: Optional[Callable[[torch.Tensor], torch.Tensor]] = None, ) -> int` to solve the following problem:
Utility to compute the total number of FLOPs used by a module during training or during inference. It's recommended to create a meta-device model for this: Example:: with torch.device("meta"): model = MyModel() x = torch.randn(2, 32) model_fwd = lambda: model(x) fwd_flops = measure_flops(model, model_fwd) model_loss = lambda y: y.sum() fwd_and_bwd_flops = measure_flops(model, model_fwd, model_loss) Args: model: The model whose FLOPs should be measured. forward_fn: A function that runs ``forward`` on the model and returns the result. loss_fn: A function that computes the loss given the ``forward_fn`` output. If provided, the loss and `backward` FLOPs will be included in the result.
Here is the function:
def measure_flops(
    model: torch.nn.Module,
    forward_fn: Callable[[], torch.Tensor],
    loss_fn: Optional[Callable[[torch.Tensor], torch.Tensor]] = None,
) -> int:
    """Utility to compute the total number of FLOPs used by a module during training or during inference.

    It's recommended to create a meta-device model for this:

    Example::

        with torch.device("meta"):
            model = MyModel()
            x = torch.randn(2, 32)

        model_fwd = lambda: model(x)
        fwd_flops = measure_flops(model, model_fwd)

        model_loss = lambda y: y.sum()
        fwd_and_bwd_flops = measure_flops(model, model_fwd, model_loss)

    Args:
        model: The model whose FLOPs should be measured.
        forward_fn: A function that runs ``forward`` on the model and returns the result.
        loss_fn: A function that computes the loss given the ``forward_fn`` output. If provided, the loss and
            `backward` FLOPs will be included in the result.

    """
    if not _TORCH_GREATER_EQUAL_2_1:
        raise ImportError("`measure_flops` requires PyTorch >= 2.1.")
    from torch.utils.flop_counter import FlopCounterMode

    counter = FlopCounterMode(model, display=False)
    with counter:
        output = forward_fn()
        if loss_fn is not None:
            # Including the loss + backward pass adds the training FLOPs.
            loss_fn(output).backward()
    return counter.get_total_flops()
155,483 | from collections import deque
from typing import TYPE_CHECKING, Any, Callable, Deque, Dict, List, Optional, TypeVar, Union
import torch
from typing_extensions import override
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_1
from lightning.fabric.utilities.rank_zero import rank_zero_only, rank_zero_warn
_CUDA_FLOPS: Dict[str, Dict[Union[str, torch.dtype], float]] = {
# Hopper
# source: https://resources.nvidia.com/en-us-tensor-core
"h100 nvl": {
torch.float64: 67e12,
torch.float32: 133.8e12,
"tfloat32": 989.4e12,
torch.bfloat16: 1978.8e12,
torch.float16: 1978.8e12,
torch.int8: 3957.8e12,
},
"h100 sxm": {
torch.float64: 33.5e12,
torch.float32: 66.9e12,
"tfloat32": 494.7e12,
torch.bfloat16: 989.4e12,
torch.float16: 989.4e12,
torch.int8: 1978.9e12,
},
"h100 pcie": {
torch.float64: 25.6e12,
torch.float32: 51.2e12,
"tfloat32": 378e12,
torch.bfloat16: 756e12,
torch.float16: 756e12,
torch.int8: 1513e12,
},
# Ada
# source: https://images.nvidia.com/aem-dam/Solutions/Data-Center/l4/nvidia-ada-gpu-architecture-whitepaper-v2.1.pdf
"rtx 4090": {
torch.float32: 82.6e12,
"tfloat32": 82.6e12,
torch.bfloat16: 82.6e12,
torch.float16: 82.6e12,
torch.int8: 660.6e12,
"int4": 1321.2e12,
},
"rtx 4080": {
torch.float32: 48.7e12,
"tfloat32": 48.7e12,
torch.bfloat16: 48.7e12,
torch.float16: 48.7e12,
torch.int8: 389.9e12,
"int4": 779.8e12,
},
"l4": {
torch.float32: 30.3e12,
"tfloat32": 60e12,
torch.bfloat16: 121e12,
torch.float16: 121e12,
torch.int8: 242e12,
"int4": 484e12,
},
"l40": {
torch.float32: 90.5e12,
"tfloat32": 90.5e12,
torch.bfloat16: 181e12,
torch.float16: 181e12,
torch.int8: 362e12,
"int4": 724e12,
},
# Ampere
# source: https://www.nvidia.com/content/dam/en-zz/Solutions/Data-Center/a100/pdf/nvidia-a100-datasheet-us-nvidia-1758950-r4-web.pdf
# sxm and pcie have same flop counts
"a100": {
torch.float64: 9.7e12,
torch.float32: 19.5e12,
"tfloat32": 156e12,
torch.bfloat16: 312e12,
torch.float16: 312e12,
torch.int8: 624e12,
},
"a6000": {
torch.float32: 38.7e12,
"tfloat32": 77.4e12,
torch.bfloat16: 38.7e12,
torch.float16: 38.7e12,
torch.int8: 309.7e12,
"int4": 619.3e12,
},
"a40": {
torch.float32: 37.4e12,
"tfloat32": 74.8e12,
torch.bfloat16: 37.4e12,
torch.float16: 37.4e12,
torch.int8: 299.3e12,
"int4": 598.7e12,
},
# source: https://www.nvidia.com/content/dam/en-zz/Solutions/Data-Center/a10/pdf/a10-datasheet.pdf
"a10g": {
torch.float32: 31.2e12,
"tfloat32": 62.5e12,
torch.bfloat16: 125e12,
torch.float16: 125e12,
torch.int8: 250e12,
"int4": 500e12,
},
"rtx 3090 ti": {
torch.float32: 40e12,
"tfloat32": 40e12,
torch.bfloat16: 40e12,
torch.float16: 40e12,
torch.int8: 320e12,
"int4": 640e12,
},
"rtx 3090": {
torch.float32: 35.6e12,
"tfloat32": 35.6e12,
torch.bfloat16: 35.6e12,
torch.float16: 35.6e12,
torch.int8: 284e12,
"int4": 568e12,
},
"rtx 3080 ti": {
torch.float32: 34.1e12,
"tfloat32": 34.1e12,
torch.bfloat16: 34.1e12,
torch.float16: 34.1e12,
torch.int8: 272.8e12,
"int4": 546.6e12,
},
"rtx 3080": {
torch.float32: 29.8e12,
"tfloat32": 29.8e12,
torch.bfloat16: 29.8e12,
torch.float16: 29.8e12,
torch.int8: 238e12,
"int4": 476e12,
},
"rtx 3070": {
torch.float32: 20.3e12,
"tfloat32": 20.3e12,
torch.bfloat16: 20.3e12,
torch.float16: 20.3e12,
torch.int8: 162.6e12,
"int4": 325.2e12,
},
# Turing
# source: https://www.nvidia.com/content/dam/en-zz/Solutions/Data-Center/tesla-t4/t4-tensor-core-datasheet-951643.pdf
# sxm and pcie have same flop counts
"t4": {
torch.float32: 8.1e12,
torch.float16: 65e12,
torch.int8: 130e12,
"int4": 260e12,
},
# https://www.nvidia.com/content/dam/en-zz/Solutions/design-visualization/quadro-product-literature/quadro-rtx-5000-data-sheet-us-nvidia-704120-r4-web.pdf
"quadro rtx 5000": {
torch.float32: 11.2e12,
torch.float16: 89.2e12,
},
"rtx 2080 super": {
torch.float32: 11.2e12,
torch.float16: 22.3e12,
torch.int8: 178.4e12,
"int4": 356.8e12,
},
"rtx 2080 ti": {
torch.float32: 14.2e12,
torch.float16: 28.5e12,
torch.int8: 227.7e12,
"int4": 455.4e12,
},
"rtx 2080": {
torch.float32: 10.6e12,
torch.float16: 21.2e12,
torch.int8: 169.6e12,
"int4": 339.1e12,
},
# https://www.nvidia.com/content/PDF/nvidia-ampere-ga-102-gpu-architecture-whitepaper-v2.pdf
"rtx 2070 super": {
torch.float32: 9.1e12,
torch.float16: 18.1e12,
torch.int8: 145e12,
"int4": 290e12,
},
"titan rtx": {
torch.float32: 16.3e12,
torch.float16: 32.6e12,
torch.int8: 261e12,
"int4": 522e12,
},
# Volta
# source: https://images.nvidia.com/content/technologies/volta/pdf/volta-v100-datasheet-update-us-1165301-r5.pdf
"v100 sxm": {
torch.float64: 7.8e12,
torch.float32: 15.7e12,
torch.float16: 125e12,
},
"v100 pcie": {
torch.float64: 7e12,
torch.float32: 14e12,
torch.float16: 112e12,
},
"v100s pcie": {
torch.float64: 8.2e12,
torch.float32: 16.4e12,
torch.float16: 130e12,
},
}
_TPU_FLOPS = {
# flop count for each TPU generation is the same for all precisions
# since bfloat16 precision is always used for performing matrix operations
# for more info: https://cloud.google.com/tpu/docs/bfloat16#choosing_bfloat16
# source: https://arxiv.org/pdf/1907.10701.pdf
"v2": 45e12,
# source: https://cloud.google.com/tpu/docs/system-architecture-tpu-vm#tpu_v3
"v3": 123e12,
# source: https://cloud.google.com/tpu/docs/system-architecture-tpu-vm#tpu_v4
"v4": 275e12,
# source: https://cloud.google.com/tpu/docs/v5e-training
"v5litepod": 197e12,
}
def _is_ampere_or_later(device: Optional[torch.device] = None) -> bool:
    """Whether the given (or current) CUDA device has compute capability >= 8.0."""
    capability = torch.cuda.get_device_capability(device)
    # Ampere and later leverage tensor cores, where this setting becomes useful
    return capability[0] >= 8
_XLA_GREATER_EQUAL_2_1 = RequirementCache("torch_xla>=2.1")
The provided code snippet includes necessary dependencies for implementing the `get_available_flops` function. Write a Python function `def get_available_flops(device: torch.device, dtype: Union[torch.dtype, str]) -> Optional[int]` to solve the following problem:
Returns the available theoretical FLOPs. This is an optimistic upper limit that could only be achievable if only thick matmuls were run in a benchmark environment.
Here is the function:
def get_available_flops(device: torch.device, dtype: Union[torch.dtype, str]) -> Optional[int]:
    """Returns the available theoretical FLOPs.

    This is an optimistic upper limit that could only be achievable if only thick matmuls were run in a benchmark
    environment.

    Args:
        device: The device to look up. Only ``cuda`` and ``xla`` device types are handled; anything else
            returns ``None``.
        dtype: The compute dtype; the strings ``"tfloat32"``/``"int4"`` are accepted in addition to torch dtypes.

    Returns:
        The theoretical peak FLOP/s for this device/dtype, or ``None`` (with a warning) when unknown.
    """
    if device.type == "cuda":
        device_name = torch.cuda.get_device_name(device)
        chip = device_name.lower()
        if "h100" in chip:
            if "hbm3" in chip:
                chip = "h100 sxm"
            elif "nvl" in chip:
                chip = "h100 nvl"
            elif "pcie" in chip or "hbm2e" in chip:
                chip = "h100 pcie"
        # BUGFIX: test "l40" before "l4" — every L40 device name also contains the substring "l4",
        # so checking `"l4" in chip` first misclassified L40 cards as L4.
        elif "l40" in chip:
            chip = "l40"
        elif "l4" in chip:
            chip = "l4"
        elif "geforce rtx" in chip:
            # assumes names of the form "NVIDIA GeForce RTX 3090 Ti" — TODO confirm for all SKUs
            number = chip.split(" ")[3]
            extra = ""
            if "super" in chip:
                extra = " super"
            elif "ti" in chip:
                extra = " ti"
            chip = f"rtx {number}{extra}"
        elif "a6000" in chip:
            chip = "a6000"
        elif "a100" in chip:
            chip = "a100"
        elif "a40" in chip:
            chip = "a40"
        elif "a10g" in chip:
            chip = "a10g"
        elif "t4" in chip:
            chip = "t4"
        elif "quadro rtx 5000" in chip:
            chip = "quadro rtx 5000"
        elif "titan rtx" in chip:
            chip = "titan rtx"
        elif "v100-sxm" in chip:
            chip = "v100 sxm"
        elif "v100-pcie" in chip:
            chip = "v100 pcie"
        elif "v100s-pcie" in chip:
            chip = "v100s pcie"
        else:
            # the flops list is not exhaustive, return with a warning
            rank_zero_warn(f"FLOPs not found for {device_name!r}")
            return None
        if chip not in _CUDA_FLOPS:
            # parsing is implemented but we don't have the stats
            rank_zero_warn(f"FLOPs not found for {device_name!r}, chip is {chip!r}")
            return None
        dtype_to_flops = _CUDA_FLOPS[chip]
        if dtype is torch.float32:
            from lightning.fabric.accelerators.cuda import _is_ampere_or_later

            # with TF32 enabled, float32 matmuls run on tensor cores at the "tfloat32" rate
            if _is_ampere_or_later() and torch.get_float32_matmul_precision() != "highest":
                dtype = "tfloat32"
        if dtype not in dtype_to_flops:
            # for example, T4 doesn't support bfloat16. it might also be that we are missing this dtype from the list
            rank_zero_warn(f"{device_name!r} does not support {dtype}")
            return None
        return int(dtype_to_flops[dtype])
    if device.type == "xla":
        from lightning.fabric.accelerators.xla import _XLA_GREATER_EQUAL_2_1

        if _XLA_GREATER_EQUAL_2_1:
            from torch_xla._internal import tpu
        else:
            from torch_xla.experimental import tpu
        tpu_env = tpu.get_tpu_env()
        # not all TPU generations define the "TYPE" envar. example: TYPE="V4", ACCELERATOR_TYPE="v4-8"
        device_name = tpu_env.get("TYPE") or tpu_env["ACCELERATOR_TYPE"].split("-")[0]
        chip = device_name.lower()
        assert isinstance(device_name, str)
        if chip not in _TPU_FLOPS:
            rank_zero_warn(f"FLOPs not found for TPU {device_name!r} with {dtype}")
            return None
        return int(_TPU_FLOPS[chip])
155,484 | from collections import deque
from typing import TYPE_CHECKING, Any, Callable, Deque, Dict, List, Optional, TypeVar, Union
import torch
from typing_extensions import override
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_1
from lightning.fabric.utilities.rank_zero import rank_zero_only, rank_zero_warn
def _plugin_to_compute_dtype(plugin: "Precision") -> torch.dtype:
    """Return the dtype in which a given Fabric precision plugin performs its compute.

    The ``isinstance`` checks are order-sensitive: the initial ``Precision`` check rejects
    non-plugins, and since every handled class passes that check, the plain ``Precision``
    case must stay last as the float32 catch-all.
    """
    # TODO: integrate this into the precision plugins
    from lightning.fabric.plugins import (
        BitsandbytesPrecision,
        DeepSpeedPrecision,
        DoublePrecision,
        FSDPPrecision,
        HalfPrecision,
        MixedPrecision,
        Precision,
        TransformerEnginePrecision,
        XLAPrecision,
    )
    # Reject anything that is not a precision plugin at all.
    if not isinstance(plugin, Precision):
        raise RuntimeError(f"Expected a precision plugin, got {plugin}")
    if isinstance(plugin, BitsandbytesPrecision):
        return plugin.dtype
    if isinstance(plugin, (HalfPrecision, MixedPrecision)):
        return plugin._desired_input_dtype
    if isinstance(plugin, DoublePrecision):
        return torch.double
    if isinstance(plugin, (XLAPrecision, DeepSpeedPrecision)):
        return plugin._desired_dtype
    if isinstance(plugin, TransformerEnginePrecision):
        # NOTE(review): int8 presumably stands in for TransformerEngine's 8-bit compute — confirm
        return torch.int8
    if isinstance(plugin, FSDPPrecision):
        # fall back to full precision when no reduce dtype is configured
        return plugin.mixed_precision_config.reduce_dtype or torch.float32
    if isinstance(plugin, Precision):
        return torch.float32
    # Unreachable given the `Precision` check above; kept as a guard.
    raise NotImplementedError(plugin)
155,485 | import os
import pickle
import warnings
from functools import partial
from io import BytesIO
from pathlib import Path
from typing import IO, TYPE_CHECKING, Any, Callable, Dict, Optional, OrderedDict, Sequence, Set, Union
import torch
from lightning_utilities.core.apply_func import apply_to_collection
from torch import Tensor
from torch._C import _TensorMeta
from torch.nn import Parameter
from typing_extensions import override
from lightning.fabric.utilities.imports import (
_TORCH_GREATER_EQUAL_2_0,
_TORCH_GREATER_EQUAL_2_3,
)
from lightning.fabric.utilities.types import _PATH, _Stateful
class _NotYetLoadedTensor:
def __init__(
self,
metatensor: Tensor,
archiveinfo: "_LazyLoadingUnpickler",
storageinfo: tuple,
rebuild_args: tuple,
) -> None:
self.metatensor = metatensor
self.archiveinfo = archiveinfo
self.storageinfo = storageinfo
self.rebuild_args = rebuild_args
def rebuild_from_type_v2(
cls,
func: Callable,
new_type: _TensorMeta,
args: tuple,
state: dict,
*,
archiveinfo: Optional["_LazyLoadingUnpickler"] = None,
) -> Any:
ret = func(*args)
if isinstance(ret, _NotYetLoadedTensor):
old_lt = ret._load_tensor
def _load_tensor() -> Any:
t = old_lt()
return torch._tensor._rebuild_from_type_v2(lambda: t, new_type, (), state)
ret._load_tensor = _load_tensor # type: ignore[method-assign]
return ret
return torch._tensor._rebuild_from_type_v2(func, new_type, args, state)
def rebuild_parameter(
cls,
data: Any,
requires_grad: bool,
backward_hooks: OrderedDict,
*,
archiveinfo: Optional["_LazyLoadingUnpickler"] = None,
) -> Union[Tensor, "_NotYetLoadedTensor"]:
if isinstance(data, _NotYetLoadedTensor):
old_lt = data._load_tensor
def _load_tensor() -> Parameter:
t = old_lt()
return torch._utils._rebuild_parameter(t, requires_grad, backward_hooks)
data._load_tensor = _load_tensor # type: ignore[method-assign]
return data
return torch._utils._rebuild_parameter(data, requires_grad, backward_hooks)
def rebuild_tensor_v2(
cls,
storage: "TypedStorage",
storage_offset: int,
size: tuple,
stride: tuple,
requires_grad: bool,
backward_hooks: OrderedDict,
metadata: Optional[Any] = None,
*,
archiveinfo: "_LazyLoadingUnpickler",
) -> "_NotYetLoadedTensor":
rebuild_args = (storage_offset, size, stride, requires_grad, backward_hooks, metadata)
metatensor = torch._utils._rebuild_tensor_v2(
storage, storage_offset, size, stride, requires_grad, backward_hooks, metadata
)
storageinfo = storage.archiveinfo
return _NotYetLoadedTensor(metatensor, archiveinfo, storageinfo, rebuild_args)
def _load_tensor(self) -> Tensor:
from torch.storage import TypedStorage, UntypedStorage
_, _, fn, _, size = self.storageinfo
dtype = self.metatensor.dtype
storage = self.archiveinfo.file_reader.get_storage_from_record(
f"data/{fn}", size * torch._utils._element_size(dtype), UntypedStorage
)
uts = storage._typed_storage()._untyped_storage
with warnings.catch_warnings():
# The TypedStorage APIs have heavy deprecations in torch, suppress all these warnings for now
warnings.simplefilter("ignore")
storage = TypedStorage(wrap_storage=uts, dtype=dtype, _internal=True)
return torch._utils._rebuild_tensor_v2(storage, *self.rebuild_args)
def __torch_function__(
cls,
func: Callable,
types: Sequence,
args: Sequence[Any] = (),
kwargs: Optional[Dict] = None,
) -> Any:
kwargs = kwargs or {}
loaded_args = [(arg._load_tensor() if isinstance(arg, _NotYetLoadedTensor) else arg) for arg in args]
return func(*loaded_args, **kwargs)
def __getattr__(self, name: str) -> Any:
# These properties don't require materialization and can be accessed through the meta tensor directly
if name in {
"dtype",
"grad",
"grad_fn",
"is_meta",
"layout",
"names",
"ndim",
"output_nr",
"requires_grad",
"retains_grad",
"size",
"shape",
"volatile",
}:
return getattr(self.metatensor, name)
# materializing these is needed for quantization (see lit-gpt)
if name in {"contiguous", "cuda", "half"}:
return getattr(self._load_tensor(), name)
raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
def __repr__(self) -> str:
return f"{self.__class__.__name__}({repr(self.metatensor)})"
def _materialize_tensors(collection: Any) -> Any:
def _load_tensor(t: _NotYetLoadedTensor) -> Tensor:
return t._load_tensor()
return apply_to_collection(collection, dtype=_NotYetLoadedTensor, function=_load_tensor) | null |
155,486 | import os
import pickle
import warnings
from functools import partial
from io import BytesIO
from pathlib import Path
from typing import IO, TYPE_CHECKING, Any, Callable, Dict, Optional, OrderedDict, Sequence, Set, Union
import torch
from lightning_utilities.core.apply_func import apply_to_collection
from torch import Tensor
from torch._C import _TensorMeta
from torch.nn import Parameter
from typing_extensions import override
from lightning.fabric.utilities.imports import (
_TORCH_GREATER_EQUAL_2_0,
_TORCH_GREATER_EQUAL_2_3,
)
from lightning.fabric.utilities.types import _PATH, _Stateful
class _Stateful(Protocol[_DictKey]):
"""This class is used to detect if an object is stateful using `isinstance(obj, _Stateful)`."""
def state_dict(self) -> Dict[_DictKey, Any]: ...
def load_state_dict(self, state_dict: Dict[_DictKey, Any]) -> None: ...
The provided code snippet includes necessary dependencies for implementing the `_move_state_into` function. Write a Python function `def _move_state_into( source: Dict[str, Any], destination: Dict[str, Union[Any, _Stateful]], keys: Optional[Set[str]] = None ) -> None` to solve the following problem:
Takes the state from the source destination and moves it into the destination dictionary. If an object in the destination follows the stateful protocol, it loads the source state via ``load_state_dict``.
Here is the function:
def _move_state_into(
source: Dict[str, Any], destination: Dict[str, Union[Any, _Stateful]], keys: Optional[Set[str]] = None
) -> None:
"""Takes the state from the source destination and moves it into the destination dictionary.
If an object in the destination follows the stateful protocol, it loads the source state via ``load_state_dict``.
"""
keys = set(source) if keys is None else keys & set(source)
for key in keys:
state = source.pop(key)
if key in destination and isinstance(destination[key], _Stateful):
destination[key].load_state_dict(state)
else:
destination[key] = state | Takes the state from the source destination and moves it into the destination dictionary. If an object in the destination follows the stateful protocol, it loads the source state via ``load_state_dict``. |
155,487 | from typing import Iterable
from lightning_utilities.core.apply_func import apply_to_collection
from torch import Tensor
from torch.optim import Optimizer
from lightning.fabric.utilities.apply_func import move_data_to_device
from lightning.fabric.utilities.types import _DEVICE
def _optimizer_to_device(optimizer: Optimizer, device: _DEVICE) -> None:
"""Moves the state of a single optimizer to the device."""
for p, v in optimizer.state.items():
optimizer.state[p] = apply_to_collection(v, Tensor, move_data_to_device, device, allow_frozen=True)
_DEVICE = Union[torch.device, str, int]
The provided code snippet includes necessary dependencies for implementing the `_optimizers_to_device` function. Write a Python function `def _optimizers_to_device(optimizers: Iterable[Optimizer], device: _DEVICE) -> None` to solve the following problem:
Moves optimizer states for a sequence of optimizers to the device.
Here is the function:
def _optimizers_to_device(optimizers: Iterable[Optimizer], device: _DEVICE) -> None:
"""Moves optimizer states for a sequence of optimizers to the device."""
for opt in optimizers:
_optimizer_to_device(opt, device) | Moves optimizer states for a sequence of optimizers to the device. |
155,488 | import contextlib
import logging
import os
import time
from contextlib import nullcontext
from datetime import timedelta
from pathlib import Path
from typing import TYPE_CHECKING, Any, Iterable, Iterator, List, Optional, Sized, Union
import torch
import torch.nn.functional as F
from lightning_utilities.core.imports import package_available
from torch import Tensor
from torch.utils.data import Dataset, DistributedSampler, Sampler
from typing_extensions import Self, override
from lightning.fabric.utilities.cloud_io import _is_local_file_protocol
from lightning.fabric.utilities.data import _num_cpus_available
from lightning.fabric.utilities.rank_zero import rank_zero_info
from lightning.fabric.utilities.types import _PATH, ReduceOp
def _is_local_file_protocol(path: _PATH) -> bool:
return fsspec.utils.get_protocol(str(path)) == "file"
_PATH = Union[str, Path]
The provided code snippet includes necessary dependencies for implementing the `is_shared_filesystem` function. Write a Python function `def is_shared_filesystem(strategy: "Strategy", path: Optional[_PATH] = None, timeout: int = 3) -> bool` to solve the following problem:
Checks whether the filesystem under the given path is shared across all processes. This function should only be used in a context where distributed is initialized. Args: strategy: The strategy being used, either from Fabric (``fabric.strategy``) or from Trainer (``trainer.strategy``). path: The path to check. Defaults to the current working directory. The user must have permissions to write to this path or the parent folder, and the filesystem must be writable. timeout: If any of the processes can't list the file created by rank 0 within this many seconds, the filesystem is determined to be not shared.
Here is the function:
def is_shared_filesystem(strategy: "Strategy", path: Optional[_PATH] = None, timeout: int = 3) -> bool:
"""Checks whether the filesystem under the given path is shared across all processes.
This function should only be used in a context where distributed is initialized.
Args:
strategy: The strategy being used, either from Fabric (``fabric.strategy``) or from Trainer
(``trainer.strategy``).
path: The path to check. Defaults to the current working directory. The user must have permissions to write
to this path or the parent folder, and the filesystem must be writable.
timeout: If any of the processes can't list the file created by rank 0 within this many seconds, the
filesystem is determined to be not shared.
"""
# Fast path: Any non-local filesystem is considered shared (e.g., S3)
if path is not None and not _is_local_file_protocol(path):
return True
path = Path(Path.cwd() if path is None else path).resolve()
# Fast path: Only distributed strategies can detect shared filesystems
if not hasattr(strategy, "world_size") or strategy.world_size == 1:
return True
# Fast path: If the path is not the same on all ranks we know it's not a shared filesystem
rank_zero_path = strategy.broadcast(path)
if not strategy.reduce_boolean_decision(rank_zero_path == path, all=True):
return False
if not strategy.reduce_boolean_decision(path.exists(), all=True):
raise FileNotFoundError(
f"Unable to determine if the path belongs to a shared filesystem. The path does not exist: {path}"
)
path = path.parent if path.is_file() else path
check_file = path / ".lightning_shared_fs_check"
check_file.unlink(missing_ok=True)
strategy.barrier()
if strategy.is_global_zero:
# Rank 0 creates the file
check_file.touch()
found = True
else:
# All other ranks will wait until they find the file or timeout
start = time.perf_counter()
found = False
while not found and (time.perf_counter() - start) < timeout:
found = check_file.exists()
strategy.barrier()
all_found = strategy.reduce_boolean_decision(found, all=True)
with contextlib.suppress(OSError): # handle race condition on deletion
check_file.unlink()
return all_found | Checks whether the filesystem under the given path is shared across all processes. This function should only be used in a context where distributed is initialized. Args: strategy: The strategy being used, either from Fabric (``fabric.strategy``) or from Trainer (``trainer.strategy``). path: The path to check. Defaults to the current working directory. The user must have permissions to write to this path or the parent folder, and the filesystem must be writable. timeout: If any of the processes can't list the file created by rank 0 within this many seconds, the filesystem is determined to be not shared. |
155,489 | import contextlib
import logging
import os
import time
from contextlib import nullcontext
from datetime import timedelta
from pathlib import Path
from typing import TYPE_CHECKING, Any, Iterable, Iterator, List, Optional, Sized, Union
import torch
import torch.nn.functional as F
from lightning_utilities.core.imports import package_available
from torch import Tensor
from torch.utils.data import Dataset, DistributedSampler, Sampler
from typing_extensions import Self, override
from lightning.fabric.utilities.cloud_io import _is_local_file_protocol
from lightning.fabric.utilities.data import _num_cpus_available
from lightning.fabric.utilities.rank_zero import rank_zero_info
from lightning.fabric.utilities.types import _PATH, ReduceOp
if torch.distributed.is_available():
from torch.distributed import group
else:
def _simple_gather_all_tensors(result: Tensor, group: Any, world_size: int) -> List[Tensor]:
gathered_result = [torch.zeros_like(result) for _ in range(world_size)]
torch.distributed.all_gather(gathered_result, result, group)
return gathered_result
The provided code snippet includes necessary dependencies for implementing the `_gather_all_tensors` function. Write a Python function `def _gather_all_tensors(result: Tensor, group: Optional[Any] = None) -> List[Tensor]` to solve the following problem:
Function to gather all tensors from several DDP processes onto a list that is broadcasted to all processes. Works on tensors that have the same number of dimensions, but where each dimension may differ. In this case tensors are padded, gathered and then trimmed to secure equal workload for all processes. Args: result: The value to sync group: The process group to gather results from. Defaults to all processes (world) Return: gathered_result: List with size equal to the process group where gathered_result[i] corresponds to result tensor from process i
Here is the function:
def _gather_all_tensors(result: Tensor, group: Optional[Any] = None) -> List[Tensor]:
"""Function to gather all tensors from several DDP processes onto a list that is broadcasted to all processes.
Works on tensors that have the same number of dimensions, but where each dimension may differ. In this case
tensors are padded, gathered and then trimmed to secure equal workload for all processes.
Args:
result: The value to sync
group: The process group to gather results from. Defaults to all processes (world)
Return:
gathered_result: List with size equal to the process group where
gathered_result[i] corresponds to result tensor from process i
"""
if group is None:
group = torch.distributed.group.WORLD
# Convert tensors to contiguous format
result = result.contiguous()
world_size = torch.distributed.get_world_size(group)
torch.distributed.barrier(group=group)
# If the tensor is scalar, things are easy
if result.ndim == 0:
return _simple_gather_all_tensors(result, group, world_size)
# 1. Gather sizes of all tensors
local_size = torch.tensor(result.shape, device=result.device)
local_sizes = [torch.zeros_like(local_size) for _ in range(world_size)]
torch.distributed.all_gather(local_sizes, local_size, group=group)
max_size = torch.stack(local_sizes).max(dim=0).values
all_sizes_equal = all(all(ls == max_size) for ls in local_sizes)
# 2. If shapes are all the same, then do a simple gather:
if all_sizes_equal:
return _simple_gather_all_tensors(result, group, world_size)
# 3. If not, we need to pad each local tensor to maximum size, gather and then truncate
pad_dims = []
pad_by = (max_size - local_size).detach().cpu()
for val in reversed(pad_by):
pad_dims.append(0)
pad_dims.append(val.item())
result_padded = F.pad(result, pad_dims)
gathered_result = [torch.zeros_like(result_padded) for _ in range(world_size)]
torch.distributed.all_gather(gathered_result, result_padded, group)
for idx, item_size in enumerate(local_sizes):
slice_param = [slice(dim_size) for dim_size in item_size]
gathered_result[idx] = gathered_result[idx][slice_param]
return gathered_result | Function to gather all tensors from several DDP processes onto a list that is broadcasted to all processes. Works on tensors that have the same number of dimensions, but where each dimension may differ. In this case tensors are padded, gathered and then trimmed to secure equal workload for all processes. Args: result: The value to sync group: The process group to gather results from. Defaults to all processes (world) Return: gathered_result: List with size equal to the process group where gathered_result[i] corresponds to result tensor from process i |
155,490 | import contextlib
import logging
import os
import time
from contextlib import nullcontext
from datetime import timedelta
from pathlib import Path
from typing import TYPE_CHECKING, Any, Iterable, Iterator, List, Optional, Sized, Union
import torch
import torch.nn.functional as F
from lightning_utilities.core.imports import package_available
from torch import Tensor
from torch.utils.data import Dataset, DistributedSampler, Sampler
from typing_extensions import Self, override
from lightning.fabric.utilities.cloud_io import _is_local_file_protocol
from lightning.fabric.utilities.data import _num_cpus_available
from lightning.fabric.utilities.rank_zero import rank_zero_info
from lightning.fabric.utilities.types import _PATH, ReduceOp
def _sync_ddp(result: Tensor, group: Optional[Any] = None, reduce_op: Optional[Union[ReduceOp, str]] = None) -> Tensor:
"""Reduces a tensor across several distributed processes.
This operation is performed in-place, meaning the result will be placed back into the input tensor on all processes.
Args:
result: The value to sync and reduce (typically tensor or number)
group: The process group to gather results from. Defaults to all processes (world)
reduce_op: The reduction operation. Defaults to sum.
Can also be a string of 'avg', 'mean' to calculate the mean during reduction.
Return:
The reduced value.
"""
divide_by_world_size = False
group = torch.distributed.group.WORLD if group is None else group
op: Optional[ReduceOp]
if isinstance(reduce_op, str):
reduce_op = "avg" if reduce_op == "mean" else reduce_op
if reduce_op.lower() == "avg" and torch.distributed.get_backend(group) == "gloo":
# The GLOO backend does not support the `ReduceOp.AVG` operation
op = ReduceOp.SUM # type: ignore[assignment]
divide_by_world_size = True
else:
op = getattr(ReduceOp, reduce_op.upper())
else:
op = reduce_op
# HPU doesn't support Long types, forcefully set it to float
# TODO: move this to the `lightning_habana` package
if (
package_available("habana_frameworks")
and os.environ.get("HCCL_DISTRIBUTED_BACKEND") == "1"
and result.type()
in (
"torch.LongTensor",
"torch.hpu.LongTensor",
)
):
rank_zero_info("Long tensor unsupported on HPU, casting to float")
result = result.float()
# Sync all processes before reduction
torch.distributed.barrier(group=group)
torch.distributed.all_reduce(result, op=op, group=group, async_op=False)
world_size = torch.distributed.get_world_size(group)
if not divide_by_world_size:
return result
# `torch.distributed.all_reduce` is in-place, so we should do the division in-place to leave the modified tensors
# with the expected value
if not torch.is_floating_point(result):
return result.copy_(result / world_size)
return result.div_(world_size)
def _distributed_is_initialized() -> bool:
# `is_initialized` is only defined conditionally
# https://github.com/pytorch/pytorch/blob/v2.1.0/torch/distributed/__init__.py#L25
# this might happen to MacOS builds from source (default) or any build from source that sets `USE_DISTRIBUTED=0`
return torch.distributed.is_available() and torch.distributed.is_initialized()
The provided code snippet includes necessary dependencies for implementing the `_sync_ddp_if_available` function. Write a Python function `def _sync_ddp_if_available( result: Tensor, group: Optional[Any] = None, reduce_op: Optional[Union[ReduceOp, str]] = None ) -> Tensor` to solve the following problem:
Function to reduce a tensor across worker processes during distributed training. Args: result: The value to sync and reduce (typically tensor or number) group: The process group to gather results from. Defaults to all processes (world) reduce_op: The reduction operation. Defaults to sum. Can also be a string of 'avg', 'mean' to calculate the mean during reduction. Return: reduced value
Here is the function:
def _sync_ddp_if_available(
result: Tensor, group: Optional[Any] = None, reduce_op: Optional[Union[ReduceOp, str]] = None
) -> Tensor:
"""Function to reduce a tensor across worker processes during distributed training.
Args:
result: The value to sync and reduce (typically tensor or number)
group: The process group to gather results from. Defaults to all processes (world)
reduce_op: The reduction operation. Defaults to sum.
Can also be a string of 'avg', 'mean' to calculate the mean during reduction.
Return:
reduced value
"""
if _distributed_is_initialized():
return _sync_ddp(result, group=group, reduce_op=reduce_op)
return result | Function to reduce a tensor across worker processes during distributed training. Args: result: The value to sync and reduce (typically tensor or number) group: The process group to gather results from. Defaults to all processes (world) reduce_op: The reduction operation. Defaults to sum. Can also be a string of 'avg', 'mean' to calculate the mean during reduction. Return: reduced value |
155,491 | import contextlib
import logging
import os
import time
from contextlib import nullcontext
from datetime import timedelta
from pathlib import Path
from typing import TYPE_CHECKING, Any, Iterable, Iterator, List, Optional, Sized, Union
import torch
import torch.nn.functional as F
from lightning_utilities.core.imports import package_available
from torch import Tensor
from torch.utils.data import Dataset, DistributedSampler, Sampler
from typing_extensions import Self, override
from lightning.fabric.utilities.cloud_io import _is_local_file_protocol
from lightning.fabric.utilities.data import _num_cpus_available
from lightning.fabric.utilities.rank_zero import rank_zero_info
from lightning.fabric.utilities.types import _PATH, ReduceOp
if torch.distributed.is_available():
from torch.distributed import group
else:
def _distributed_is_initialized() -> bool:
# `is_initialized` is only defined conditionally
# https://github.com/pytorch/pytorch/blob/v2.1.0/torch/distributed/__init__.py#L25
# this might happen to MacOS builds from source (default) or any build from source that sets `USE_DISTRIBUTED=0`
return torch.distributed.is_available() and torch.distributed.is_initialized()
The provided code snippet includes necessary dependencies for implementing the `_all_gather_ddp_if_available` function. Write a Python function `def _all_gather_ddp_if_available( tensor: Tensor, group: Optional["torch.distributed.ProcessGroup"] = None, sync_grads: bool = False ) -> Tensor` to solve the following problem:
Function to gather a tensor from several distributed processes. Args: tensor: Tensor of shape (batch, ...) group: The process group to gather results from. Defaults to all processes (world) sync_grads: Flag that allows users to synchronize gradients for all_gather op Return: A tensor of shape (world_size, batch, ...)
Here is the function:
def _all_gather_ddp_if_available(
tensor: Tensor, group: Optional["torch.distributed.ProcessGroup"] = None, sync_grads: bool = False
) -> Tensor:
"""Function to gather a tensor from several distributed processes.
Args:
tensor: Tensor of shape (batch, ...)
group: The process group to gather results from. Defaults to all processes (world)
sync_grads: Flag that allows users to synchronize gradients for all_gather op
Return:
A tensor of shape (world_size, batch, ...)
"""
if not _distributed_is_initialized():
return tensor
from torch.distributed.nn.functional import all_gather
tensor = tensor.contiguous() # https://github.com/pytorch/pytorch/issues/73515
with nullcontext() if sync_grads else torch.no_grad():
gathered_tensors = all_gather(tensor, group)
return torch.stack(gathered_tensors) | Function to gather a tensor from several distributed processes. Args: tensor: Tensor of shape (batch, ...) group: The process group to gather results from. Defaults to all processes (world) sync_grads: Flag that allows users to synchronize gradients for all_gather op Return: A tensor of shape (world_size, batch, ...) |
155,492 | import contextlib
import logging
import os
import time
from contextlib import nullcontext
from datetime import timedelta
from pathlib import Path
from typing import TYPE_CHECKING, Any, Iterable, Iterator, List, Optional, Sized, Union
import torch
import torch.nn.functional as F
from lightning_utilities.core.imports import package_available
from torch import Tensor
from torch.utils.data import Dataset, DistributedSampler, Sampler
from typing_extensions import Self, override
from lightning.fabric.utilities.cloud_io import _is_local_file_protocol
from lightning.fabric.utilities.data import _num_cpus_available
from lightning.fabric.utilities.rank_zero import rank_zero_info
from lightning.fabric.utilities.types import _PATH, ReduceOp
if torch.distributed.is_available():
from torch.distributed import group
else:
log = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `_init_dist_connection` function. Write a Python function `def _init_dist_connection( cluster_environment: "ClusterEnvironment", torch_distributed_backend: str, global_rank: Optional[int] = None, world_size: Optional[int] = None, **kwargs: Any, ) -> None` to solve the following problem:
Utility function to initialize distributed connection by setting env variables and initializing the distributed process group. Args: cluster_environment: ``ClusterEnvironment`` instance torch_distributed_backend: Backend to use (includes `nccl` and `gloo`) global_rank: Rank of the current process world_size: Number of processes in the group kwargs: Kwargs for ``init_process_group`` Raises: RuntimeError: If ``torch.distributed`` is not available
Here is the function:
def _init_dist_connection(
cluster_environment: "ClusterEnvironment",
torch_distributed_backend: str,
global_rank: Optional[int] = None,
world_size: Optional[int] = None,
**kwargs: Any,
) -> None:
"""Utility function to initialize distributed connection by setting env variables and initializing the distributed
process group.
Args:
cluster_environment: ``ClusterEnvironment`` instance
torch_distributed_backend: Backend to use (includes `nccl` and `gloo`)
global_rank: Rank of the current process
world_size: Number of processes in the group
kwargs: Kwargs for ``init_process_group``
Raises:
RuntimeError:
If ``torch.distributed`` is not available
"""
if not torch.distributed.is_available():
raise RuntimeError("torch.distributed is not available. Cannot initialize distributed process group")
if torch.distributed.is_initialized():
log.debug("torch.distributed is already initialized. Exiting early")
return
global_rank = global_rank if global_rank is not None else cluster_environment.global_rank()
world_size = world_size if world_size is not None else cluster_environment.world_size()
os.environ["MASTER_ADDR"] = cluster_environment.main_address
os.environ["MASTER_PORT"] = str(cluster_environment.main_port)
log.info(f"Initializing distributed: GLOBAL_RANK: {global_rank}, MEMBER: {global_rank + 1}/{world_size}")
torch.distributed.init_process_group(torch_distributed_backend, rank=global_rank, world_size=world_size, **kwargs)
# On rank=0 let everyone know training is starting
rank_zero_info(
f"{'-' * 100}\n"
f"distributed_backend={torch_distributed_backend}\n"
f"All distributed processes registered. Starting with {world_size} processes\n"
f"{'-' * 100}\n"
) | Utility function to initialize distributed connection by setting env variables and initializing the distributed process group. Args: cluster_environment: ``ClusterEnvironment`` instance torch_distributed_backend: Backend to use (includes `nccl` and `gloo`) global_rank: Rank of the current process world_size: Number of processes in the group kwargs: Kwargs for ``init_process_group`` Raises: RuntimeError: If ``torch.distributed`` is not available |
155,493 | import contextlib
import logging
import os
import time
from contextlib import nullcontext
from datetime import timedelta
from pathlib import Path
from typing import TYPE_CHECKING, Any, Iterable, Iterator, List, Optional, Sized, Union
import torch
import torch.nn.functional as F
from lightning_utilities.core.imports import package_available
from torch import Tensor
from torch.utils.data import Dataset, DistributedSampler, Sampler
from typing_extensions import Self, override
from lightning.fabric.utilities.cloud_io import _is_local_file_protocol
from lightning.fabric.utilities.data import _num_cpus_available
from lightning.fabric.utilities.rank_zero import rank_zero_info
from lightning.fabric.utilities.types import _PATH, ReduceOp
if torch.distributed.is_available():
from torch.distributed import group
else:
def _get_default_process_group_backend_for_device(device: torch.device) -> str:
return "nccl" if device.type == "cuda" else "gloo" | null |
155,494 | import contextlib
import logging
import os
import time
from contextlib import nullcontext
from datetime import timedelta
from pathlib import Path
from typing import TYPE_CHECKING, Any, Iterable, Iterator, List, Optional, Sized, Union
import torch
import torch.nn.functional as F
from lightning_utilities.core.imports import package_available
from torch import Tensor
from torch.utils.data import Dataset, DistributedSampler, Sampler
from typing_extensions import Self, override
from lightning.fabric.utilities.cloud_io import _is_local_file_protocol
from lightning.fabric.utilities.data import _num_cpus_available
from lightning.fabric.utilities.rank_zero import rank_zero_info
from lightning.fabric.utilities.types import _PATH, ReduceOp
if torch.distributed.is_available():
from torch.distributed import group
else:
def _suggested_max_num_threads(num_processes: int = 1) -> int:
if num_processes < 1:
raise ValueError(f"`num_processes` should be >= 1, got {num_processes}.")
return max(1, _num_cpus_available() // num_processes)
def _set_num_threads_if_needed(num_processes: int = 1) -> None:
if "OMP_NUM_THREADS" not in os.environ:
num_threads = _suggested_max_num_threads(num_processes)
torch.set_num_threads(num_threads)
os.environ["OMP_NUM_THREADS"] = str(num_threads) | null |
155,495 | from abc import ABC
from functools import partial
from typing import Any, Callable, List, Tuple, Union
import numpy as np
import torch
from lightning_utilities.core.apply_func import apply_to_collection
from torch import Tensor
from lightning.fabric.utilities.types import _DEVICE
_DEVICE = Union[torch.device, str, int]
def _from_numpy(value: np.ndarray, device: _DEVICE) -> Tensor:
return torch.from_numpy(value).to(device) | null |
155,496 | from abc import ABC
from functools import partial
from typing import Any, Callable, List, Tuple, Union
import numpy as np
import torch
from lightning_utilities.core.apply_func import apply_to_collection
from torch import Tensor
from lightning.fabric.utilities.types import _DEVICE
CONVERSION_DTYPES: List[Tuple[Any, Callable[[Any, Any], Tensor]]] = [
# bool -> uint8 as bool -> torch.bool triggers RuntimeError: Unsupported data type for NCCL process group
(bool, partial(torch.tensor, dtype=torch.uint8)),
(int, partial(torch.tensor, dtype=torch.int)),
(float, partial(torch.tensor, dtype=torch.float)),
(np.ndarray, _from_numpy),
]
def move_data_to_device(batch: Any, device: _DEVICE) -> Any:
    """Transfers a collection of data to the given device. Any object that defines a method ``to(device)`` will be
    moved and all other objects in the collection will be left untouched.

    Args:
        batch: A tensor or collection of tensors or anything that has a method ``.to(...)``.
            See :func:`apply_to_collection` for a list of supported collection types.
        device: The device to which the data should be moved

    Return:
        the same collection but with all contained tensors residing on the new device.

    See Also:
        - :meth:`torch.Tensor.to`
        - :class:`torch.device`
    """
    target = torch.device(device) if isinstance(device, str) else device

    def _transfer(item: Any) -> Any:
        to_kwargs = {}
        # Non-blocking copies are only issued for non-CPU targets; MPS is also excluded due to a
        # race condition bug: https://github.com/pytorch/pytorch/issues/83015
        if isinstance(item, Tensor) and isinstance(target, torch.device) and target.type not in _BLOCKING_DEVICE_TYPES:
            to_kwargs["non_blocking"] = True
        moved = item.to(target, **to_kwargs)
        if moved is None:
            # A faulty `_TransferableDataType` implementation may forget to return `self`.
            return item
        return moved

    return apply_to_collection(batch, dtype=_TransferableDataType, function=_transfer)
# Anything accepted as a device specifier by torch.
_DEVICE = Union[torch.device, str, int]


def convert_to_tensors(data: Any, device: _DEVICE) -> Any:
    """Convert supported non-tensor leaves in ``data`` to tensors and move the whole collection to ``device``."""
    # One conversion pass per supported source type, then a single device transfer.
    for source_type, to_tensor in CONVERSION_DTYPES:
        data = apply_to_collection(data, source_type, to_tensor, device=device)
    return move_data_to_device(data, device)
155,497 | from abc import ABC
from functools import partial
from typing import Any, Callable, List, Tuple, Union
import numpy as np
import torch
from lightning_utilities.core.apply_func import apply_to_collection
from torch import Tensor
from lightning.fabric.utilities.types import _DEVICE
The provided code snippet includes necessary dependencies for implementing the `convert_tensors_to_scalars` function. Write a Python function `def convert_tensors_to_scalars(data: Any) -> Any` to solve the following problem:
Recursively walk through a collection and convert single-item tensors to scalar values. Raises: ValueError: If tensors inside ``metrics`` contains multiple elements, hence preventing conversion to a scalar.
Here is the function:
def convert_tensors_to_scalars(data: Any) -> Any:
    """Recursively walk through a collection and convert single-item tensors to scalar values.

    Raises:
        ValueError:
            If tensors inside ``metrics`` contains multiple elements, hence preventing conversion to a scalar.
    """

    def _squeeze_to_scalar(value: Tensor) -> Union[int, float, bool]:
        if value.numel() == 1:
            return value.item()
        raise ValueError(
            f"The metric `{value}` does not contain a single element, thus it cannot be converted to a scalar."
        )

    return apply_to_collection(data, Tensor, _squeeze_to_scalar)
155,498 | from argparse import Namespace
from dataclasses import asdict, is_dataclass
from typing import Any, Dict, Mapping, MutableMapping, Optional, Union
import numpy as np
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `_convert_params` function. Write a Python function `def _convert_params(params: Optional[Union[Dict[str, Any], Namespace]]) -> Dict[str, Any]` to solve the following problem:
Ensure parameters are a dict or convert to dict if necessary. Args: params: Target to be converted to a dictionary Returns: params as a dictionary
Here is the function:
def _convert_params(params: Optional[Union[Dict[str, Any], Namespace]]) -> Dict[str, Any]:
"""Ensure parameters are a dict or convert to dict if necessary.
Args:
params: Target to be converted to a dictionary
Returns:
params as a dictionary
"""
# in case converting from namespace
if isinstance(params, Namespace):
params = vars(params)
if params is None:
params = {}
return params | Ensure parameters are a dict or convert to dict if necessary. Args: params: Target to be converted to a dictionary Returns: params as a dictionary |
155,499 | from argparse import Namespace
from dataclasses import asdict, is_dataclass
from typing import Any, Dict, Mapping, MutableMapping, Optional, Union
import numpy as np
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `_sanitize_callable_params` function. Write a Python function `def _sanitize_callable_params(params: Dict[str, Any]) -> Dict[str, Any]` to solve the following problem:
Sanitize callable params dict, e.g. ``{'a': <function_**** at 0x****>} -> {'a': 'function_****'}``. Args: params: Dictionary containing the hyperparameters Returns: dictionary with all callables sanitized
Here is the function:
def _sanitize_callable_params(params: Dict[str, Any]) -> Dict[str, Any]:
"""Sanitize callable params dict, e.g. ``{'a': <function_**** at 0x****>} -> {'a': 'function_****'}``.
Args:
params: Dictionary containing the hyperparameters
Returns:
dictionary with all callables sanitized
"""
def _sanitize_callable(val: Any) -> Any:
# Give them one chance to return a value. Don't go rabbit hole of recursive call
if callable(val):
try:
_val = val()
if callable(_val):
return val.__name__
return _val
# todo: specify the possible exception
except Exception:
return getattr(val, "__name__", None)
return val
return {key: _sanitize_callable(val) for key, val in params.items()} | Sanitize callable params dict, e.g. ``{'a': <function_**** at 0x****>} -> {'a': 'function_****'}``. Args: params: Dictionary containing the hyperparameters Returns: dictionary with all callables sanitized |
155,500 | from argparse import Namespace
from dataclasses import asdict, is_dataclass
from typing import Any, Dict, Mapping, MutableMapping, Optional, Union
import numpy as np
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `_flatten_dict` function. Write a Python function `def _flatten_dict(params: MutableMapping[Any, Any], delimiter: str = "/", parent_key: str = "") -> Dict[str, Any]` to solve the following problem:
Flatten hierarchical dict, e.g. ``{'a': {'b': 'c'}} -> {'a/b': 'c'}``. Args: params: Dictionary containing the hyperparameters delimiter: Delimiter to express the hierarchy. Defaults to ``'/'``. Returns: Flattened dict. Examples: >>> _flatten_dict({'a': {'b': 'c'}}) {'a/b': 'c'} >>> _flatten_dict({'a': {'b': 123}}) {'a/b': 123} >>> _flatten_dict({5: {'a': 123}}) {'5/a': 123}
Here is the function:
def _flatten_dict(params: MutableMapping[Any, Any], delimiter: str = "/", parent_key: str = "") -> Dict[str, Any]:
"""Flatten hierarchical dict, e.g. ``{'a': {'b': 'c'}} -> {'a/b': 'c'}``.
Args:
params: Dictionary containing the hyperparameters
delimiter: Delimiter to express the hierarchy. Defaults to ``'/'``.
Returns:
Flattened dict.
Examples:
>>> _flatten_dict({'a': {'b': 'c'}})
{'a/b': 'c'}
>>> _flatten_dict({'a': {'b': 123}})
{'a/b': 123}
>>> _flatten_dict({5: {'a': 123}})
{'5/a': 123}
"""
result: Dict[str, Any] = {}
for k, v in params.items():
new_key = parent_key + delimiter + str(k) if parent_key else str(k)
if is_dataclass(v):
v = asdict(v)
elif isinstance(v, Namespace):
v = vars(v)
if isinstance(v, MutableMapping):
result = {**result, **_flatten_dict(v, parent_key=new_key, delimiter=delimiter)}
else:
result[new_key] = v
return result | Flatten hierarchical dict, e.g. ``{'a': {'b': 'c'}} -> {'a/b': 'c'}``. Args: params: Dictionary containing the hyperparameters delimiter: Delimiter to express the hierarchy. Defaults to ``'/'``. Returns: Flattened dict. Examples: >>> _flatten_dict({'a': {'b': 'c'}}) {'a/b': 'c'} >>> _flatten_dict({'a': {'b': 123}}) {'a/b': 123} >>> _flatten_dict({5: {'a': 123}}) {'5/a': 123} |
155,501 | from argparse import Namespace
from dataclasses import asdict, is_dataclass
from typing import Any, Dict, Mapping, MutableMapping, Optional, Union
import numpy as np
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `_sanitize_params` function. Write a Python function `def _sanitize_params(params: Dict[str, Any]) -> Dict[str, Any]` to solve the following problem:
Returns params with non-primitives converted to strings for logging. >>> import torch >>> params = {"float": 0.3, ... "int": 1, ... "string": "abc", ... "bool": True, ... "list": [1, 2, 3], ... "namespace": Namespace(foo=3), ... "layer": torch.nn.BatchNorm1d} >>> import pprint >>> pprint.pprint(_sanitize_params(params)) # doctest: +NORMALIZE_WHITESPACE {'bool': True, 'float': 0.3, 'int': 1, 'layer': "<class 'torch.nn.modules.batchnorm.BatchNorm1d'>", 'list': '[1, 2, 3]', 'namespace': 'Namespace(foo=3)', 'string': 'abc'}
Here is the function:
def _sanitize_params(params: Dict[str, Any]) -> Dict[str, Any]:
"""Returns params with non-primitvies converted to strings for logging.
>>> import torch
>>> params = {"float": 0.3,
... "int": 1,
... "string": "abc",
... "bool": True,
... "list": [1, 2, 3],
... "namespace": Namespace(foo=3),
... "layer": torch.nn.BatchNorm1d}
>>> import pprint
>>> pprint.pprint(_sanitize_params(params)) # doctest: +NORMALIZE_WHITESPACE
{'bool': True,
'float': 0.3,
'int': 1,
'layer': "<class 'torch.nn.modules.batchnorm.BatchNorm1d'>",
'list': '[1, 2, 3]',
'namespace': 'Namespace(foo=3)',
'string': 'abc'}
"""
for k in params:
# convert relevant np scalars to python types first (instead of str)
if isinstance(params[k], (np.bool_, np.integer, np.floating)):
params[k] = params[k].item()
elif type(params[k]) not in [bool, int, float, str, Tensor]:
params[k] = str(params[k])
return params | Returns params with non-primitvies converted to strings for logging. >>> import torch >>> params = {"float": 0.3, ... "int": 1, ... "string": "abc", ... "bool": True, ... "list": [1, 2, 3], ... "namespace": Namespace(foo=3), ... "layer": torch.nn.BatchNorm1d} >>> import pprint >>> pprint.pprint(_sanitize_params(params)) # doctest: +NORMALIZE_WHITESPACE {'bool': True, 'float': 0.3, 'int': 1, 'layer': "<class 'torch.nn.modules.batchnorm.BatchNorm1d'>", 'list': '[1, 2, 3]', 'namespace': 'Namespace(foo=3)', 'string': 'abc'} |
155,502 | from argparse import Namespace
from dataclasses import asdict, is_dataclass
from typing import Any, Dict, Mapping, MutableMapping, Optional, Union
import numpy as np
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `_add_prefix` function. Write a Python function `def _add_prefix( metrics: Mapping[str, Union[Tensor, float]], prefix: str, separator: str ) -> Mapping[str, Union[Tensor, float]]` to solve the following problem:
Insert prefix before each key in a dict, separated by the separator. Args: metrics: Dictionary with metric names as keys and measured quantities as values prefix: Prefix to insert before each key separator: Separates prefix and original key name Returns: Dictionary with prefix and separator inserted before each key
Here is the function:
def _add_prefix(
metrics: Mapping[str, Union[Tensor, float]], prefix: str, separator: str
) -> Mapping[str, Union[Tensor, float]]:
"""Insert prefix before each key in a dict, separated by the separator.
Args:
metrics: Dictionary with metric names as keys and measured quantities as values
prefix: Prefix to insert before each key
separator: Separates prefix and original key name
Returns:
Dictionary with prefix and separator inserted before each key
"""
if not prefix:
return metrics
return {f"{prefix}{separator}{k}": v for k, v in metrics.items()} | Insert prefix before each key in a dict, separated by the separator. Args: metrics: Dictionary with metric names as keys and measured quantities as values prefix: Prefix to insert before each key separator: Separates prefix and original key name Returns: Dictionary with prefix and separator inserted before each key |
155,503 | from abc import ABC, abstractmethod
from argparse import Namespace
from functools import wraps
from typing import Any, Callable, Dict, Optional, Union
from torch import Tensor
from torch.nn import Module
from lightning.fabric.utilities.rank_zero import rank_zero_only
# NOTE(review): the indentation of this class body (and likely `@property` /
# `@abstractmethod` decorators) appears to have been stripped during extraction —
# confirm against the upstream Lightning `Logger` base class before executing this text.
class Logger(ABC):
"""Base class for experiment loggers."""
def name(self) -> Optional[str]:
"""Return the experiment name."""
def version(self) -> Optional[Union[int, str]]:
"""Return the experiment version."""
def root_dir(self) -> Optional[str]:
"""Return the root directory where all versions of an experiment get saved, or `None` if the logger does not
save data locally."""
return None
def log_dir(self) -> Optional[str]:
"""Return directory the current version of the experiment gets saved, or `None` if the logger does not save
data locally."""
return None
def group_separator(self) -> str:
"""Return the default separator used by the logger to group the data into subfolders."""
return "/"
def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
"""Records metrics. This method logs metrics as soon as it receives them.
Args:
metrics: Dictionary with metric names as keys and measured quantities as values
step: Step number at which the metrics should be recorded
"""
pass
def log_hyperparams(self, params: Union[Dict[str, Any], Namespace], *args: Any, **kwargs: Any) -> None:
"""Record hyperparameters.
Args:
params: :class:`~argparse.Namespace` or `Dict` containing the hyperparameters
args: Optional positional arguments, depends on the specific logger being used
kwargs: Optional keyword arguments, depends on the specific logger being used
"""
def log_graph(self, model: Module, input_array: Optional[Tensor] = None) -> None:
"""Record model graph.
Args:
model: the model with an implementation of ``forward``.
input_array: input passed to `model.forward`
"""
pass
def save(self) -> None:
"""Save log data."""
def finalize(self, status: str) -> None:
"""Do any processing that is necessary to finalize an experiment.
Args:
status: Status that the experiment finished with (e.g. success, failed, aborted)
"""
# Default behavior: flush any pending log data via `save()`.
self.save()
class _DummyExperiment:
"""Dummy experiment."""
def nop(self, *args: Any, **kw: Any) -> None:
pass
def __getattr__(self, _: Any) -> Callable:
return self.nop
def __getitem__(self, idx: int) -> "_DummyExperiment":
# enables self.logger.experiment[0].add_image(...)
return self
def __setitem__(self, *args: Any, **kwargs: Any) -> None:
pass
# Seed `rank_zero_only.rank` if it was not already set by an earlier import.
# NOTE(review): `_get_rank` is defined elsewhere in this module — presumably it reads
# cluster environment variables; confirm. `or 0` maps a None/0 result to rank 0.
rank_zero_only.rank = getattr(rank_zero_only, "rank", _get_rank() or 0)
The provided code snippet includes necessary dependencies for implementing the `rank_zero_experiment` function. Write a Python function `def rank_zero_experiment(fn: Callable) -> Callable` to solve the following problem:
Returns the real experiment on rank 0 and otherwise the _DummyExperiment.
Here is the function:
def rank_zero_experiment(fn: Callable) -> Callable:
    """Returns the real experiment on rank 0 and otherwise the _DummyExperiment."""

    @wraps(fn)
    def experiment(self: Logger) -> Union[Any, _DummyExperiment]:
        """
        Note:
            ``self`` is a custom logger instance. The loggers typically wrap an ``experiment`` method
            with a ``@rank_zero_experiment`` decorator.

            ``Union[Any, _DummyExperiment]`` is used because the wrapped hooks have several return
            types that are specific to the custom logger. The return type here can be considered as
            ``Union[return type of logger.experiment, _DummyExperiment]``.
        """
        # Non-zero ranks get the inert dummy; only the global-zero rank touches the real experiment.
        return _DummyExperiment() if rank_zero_only.rank > 0 else fn(self)

    return experiment
155,504 | import argparse
import json
import logging
import os
import platform
from contextlib import ExitStack
from itertools import chain
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, ContextManager, Dict, List, Mapping, Optional, Tuple, Union
import torch
from lightning_utilities.core.imports import RequirementCache
from torch.nn import Module
from torch.optim import Optimizer
from typing_extensions import override
from lightning.fabric.accelerators import Accelerator, CUDAAccelerator
from lightning.fabric.plugins.environments.cluster_environment import ClusterEnvironment
from lightning.fabric.plugins.precision import Precision
from lightning.fabric.strategies.ddp import DDPStrategy
from lightning.fabric.strategies.registry import _StrategyRegistry
from lightning.fabric.strategies.strategy import _Sharded
from lightning.fabric.utilities.distributed import log
from lightning.fabric.utilities.load import _move_state_into
from lightning.fabric.utilities.rank_zero import rank_zero_info, rank_zero_warn
from lightning.fabric.utilities.seed import reset_seed
from lightning.fabric.utilities.types import _PATH
def _get_deepspeed_engines_from_state(state: Dict[str, Any]) -> List["DeepSpeedEngine"]:
    """Collect every ``DeepSpeedEngine`` nested inside the modules of the given state dict."""
    from deepspeed import DeepSpeedEngine

    submodules = chain.from_iterable(m.modules() for m in state.values() if isinstance(m, Module))
    return [candidate for candidate in submodules if isinstance(candidate, DeepSpeedEngine)]
155,505 | import argparse
import json
import logging
import os
import platform
from contextlib import ExitStack
from itertools import chain
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, ContextManager, Dict, List, Mapping, Optional, Tuple, Union
import torch
from lightning_utilities.core.imports import RequirementCache
from torch.nn import Module
from torch.optim import Optimizer
from typing_extensions import override
from lightning.fabric.accelerators import Accelerator, CUDAAccelerator
from lightning.fabric.plugins.environments.cluster_environment import ClusterEnvironment
from lightning.fabric.plugins.precision import Precision
from lightning.fabric.strategies.ddp import DDPStrategy
from lightning.fabric.strategies.registry import _StrategyRegistry
from lightning.fabric.strategies.strategy import _Sharded
from lightning.fabric.utilities.distributed import log
from lightning.fabric.utilities.load import _move_state_into
from lightning.fabric.utilities.rank_zero import rank_zero_info, rank_zero_warn
from lightning.fabric.utilities.seed import reset_seed
from lightning.fabric.utilities.types import _PATH
def _validate_state_keys(state: Dict[str, Any]) -> None:
    """Warn if user-provided state keys collide with DeepSpeed's internal engine state keys.

    DeepSpeed merges the client state into its internal engine state when saving, but it does
    not check for colliding keys from the user, so colliding values could be silently
    overwritten. We explicitly check for collisions here.
    """
    deepspeed_internal_keys = {
        "module",
        "buffer_names",
        "optimizer",
        "param_shapes",
        "lr_scheduler",
        "sparse_tensor_module_names",
        "skipped_steps",
        "global_steps",
        "global_samples",
        "dp_world_size",
        "mp_world_size",
        "ds_config",
        "ds_version",
    }
    colliding_keys = deepspeed_internal_keys.intersection(state.keys())
    if colliding_keys:
        rank_zero_warn(
            "Your state has keys that collide with DeepSpeed's internal engine state. This could result in your"
            " values being overwritten by DeepSpeed. Consider changing the name of these keys to something else: "
            # sorted: set iteration order is non-deterministic, keep the warning reproducible
            + ", ".join(sorted(colliding_keys))
        )
155,506 | import argparse
import json
import logging
import os
import platform
from contextlib import ExitStack
from itertools import chain
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, ContextManager, Dict, List, Mapping, Optional, Tuple, Union
import torch
from lightning_utilities.core.imports import RequirementCache
from torch.nn import Module
from torch.optim import Optimizer
from typing_extensions import override
from lightning.fabric.accelerators import Accelerator, CUDAAccelerator
from lightning.fabric.plugins.environments.cluster_environment import ClusterEnvironment
from lightning.fabric.plugins.precision import Precision
from lightning.fabric.strategies.ddp import DDPStrategy
from lightning.fabric.strategies.registry import _StrategyRegistry
from lightning.fabric.strategies.strategy import _Sharded
from lightning.fabric.utilities.distributed import log
from lightning.fabric.utilities.load import _move_state_into
from lightning.fabric.utilities.rank_zero import rank_zero_info, rank_zero_warn
from lightning.fabric.utilities.seed import reset_seed
from lightning.fabric.utilities.types import _PATH
def _validate_device_index_selection(parallel_devices: List[torch.device]) -> None:
selected_device_indices = [device.index for device in parallel_devices]
expected_device_indices = list(range(len(parallel_devices)))
if selected_device_indices != expected_device_indices:
raise RuntimeError(
f"The selected device indices {selected_device_indices!r} don't match the local rank values of processes."
" If you need to select GPUs at a specific index, set the `CUDA_VISIBLE_DEVICES` environment variable"
f" instead. For example: `CUDA_VISIBLE_DEVICES={','.join(str(i) for i in selected_device_indices)}`."
) | null |
155,507 | import argparse
import json
import logging
import os
import platform
from contextlib import ExitStack
from itertools import chain
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, ContextManager, Dict, List, Mapping, Optional, Tuple, Union
import torch
from lightning_utilities.core.imports import RequirementCache
from torch.nn import Module
from torch.optim import Optimizer
from typing_extensions import override
from lightning.fabric.accelerators import Accelerator, CUDAAccelerator
from lightning.fabric.plugins.environments.cluster_environment import ClusterEnvironment
from lightning.fabric.plugins.precision import Precision
from lightning.fabric.strategies.ddp import DDPStrategy
from lightning.fabric.strategies.registry import _StrategyRegistry
from lightning.fabric.strategies.strategy import _Sharded
from lightning.fabric.utilities.distributed import log
from lightning.fabric.utilities.load import _move_state_into
from lightning.fabric.utilities.rank_zero import rank_zero_info, rank_zero_warn
from lightning.fabric.utilities.seed import reset_seed
from lightning.fabric.utilities.types import _PATH
def _is_deepspeed_checkpoint(path: Path) -> bool:
"""Heuristic check whether the path points to a top-level DeepSpeed checkpoint directory."""
return path.is_dir() and (path / "checkpoint").is_dir()
# Type alias for anything accepted as a filesystem path.
_PATH = Union[str, Path]
The provided code snippet includes necessary dependencies for implementing the `_validate_checkpoint_directory` function. Write a Python function `def _validate_checkpoint_directory(path: _PATH) -> None` to solve the following problem:
Validates that the path points to a DeepSpeed checkpoint directory and suggests fixes for user error.
Here is the function:
def _validate_checkpoint_directory(path: _PATH) -> None:
    """Validates that the path points to a DeepSpeed checkpoint directory and suggests fixes for user error."""
    # Example DeepSpeed checkpoint directory:
    #
    # epoch=5-step=10999.ckpt
    # ├── checkpoint
    # │   ├── zero_pp_rank_0_mp_rank_00_model_states.pt
    # │   ├── zero_pp_rank_0_mp_rank_00_optim_states.pt
    # │   ├── zero_pp_rank_1_mp_rank_00_model_states.pt
    # │   └── zero_pp_rank_1_mp_rank_00_optim_states.pt
    # ├── latest
    # └── zero_to_fp32.py
    checkpoint_dir = Path(path)
    if _is_deepspeed_checkpoint(checkpoint_dir):
        return

    default_message = f"The provided path is not a valid DeepSpeed checkpoint: {checkpoint_dir}"
    # Case 1: User may have accidentally passed the subfolder "checkpoint"
    if _is_deepspeed_checkpoint(checkpoint_dir.parent):
        raise FileNotFoundError(
            f"{default_message}. It looks like you passed the path to a subfolder."
            f" Try to load using this parent directory instead: {checkpoint_dir.parent}"
        )
    # Case 2: User may have accidentally passed the path to a file inside the "checkpoint" subfolder
    if checkpoint_dir.is_file() and _is_deepspeed_checkpoint(checkpoint_dir.parent.parent):
        raise FileNotFoundError(
            f"{default_message}. It looks like you passed the path to a file inside a DeepSpeed checkpoint folder."
            f" Try to load using this parent directory instead: {checkpoint_dir.parent.parent}"
        )
    raise FileNotFoundError(default_message)
155,508 | import argparse
import json
import logging
import os
import platform
from contextlib import ExitStack
from itertools import chain
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, ContextManager, Dict, List, Mapping, Optional, Tuple, Union
import torch
from lightning_utilities.core.imports import RequirementCache
from torch.nn import Module
from torch.optim import Optimizer
from typing_extensions import override
from lightning.fabric.accelerators import Accelerator, CUDAAccelerator
from lightning.fabric.plugins.environments.cluster_environment import ClusterEnvironment
from lightning.fabric.plugins.precision import Precision
from lightning.fabric.strategies.ddp import DDPStrategy
from lightning.fabric.strategies.registry import _StrategyRegistry
from lightning.fabric.strategies.strategy import _Sharded
from lightning.fabric.utilities.distributed import log
from lightning.fabric.utilities.load import _move_state_into
from lightning.fabric.utilities.rank_zero import rank_zero_info, rank_zero_warn
from lightning.fabric.utilities.seed import reset_seed
from lightning.fabric.utilities.types import _PATH
def _format_precision_config(
    config: Dict[str, Any],
    precision: str,
    loss_scale: float,
    loss_scale_window: int,
    min_loss_scale: int,
    initial_scale_power: int,
    hysteresis: int,
) -> None:
    """Populate the DeepSpeed ``fp16``/``bf16`` sections of ``config`` in place based on ``precision``.

    Sections that the user already provided in ``config`` are left untouched.
    """
    if precision in ("16-mixed", "16-true") and "fp16" not in config:
        # FP16 is a DeepSpeed standalone AMP implementation
        rank_zero_info("Enabling DeepSpeed FP16. Model parameters and inputs will be cast to `float16`.")
        config["fp16"] = {
            "enabled": True,
            "loss_scale": loss_scale,
            "initial_scale_power": initial_scale_power,
            "loss_scale_window": loss_scale_window,
            "hysteresis": hysteresis,
            "min_loss_scale": min_loss_scale,
        }
    elif precision in ("bf16-mixed", "bf16-true") and "bf16" not in config:
        rank_zero_info("Enabling DeepSpeed BF16. Model parameters and inputs will be cast to `bfloat16`.")
        config["bf16"] = {"enabled": True}
155,509 | import queue
import time
from typing import TYPE_CHECKING, Any, Callable, Optional, Union
import torch.multiprocessing as mp
from typing_extensions import override
from lightning.fabric.accelerators.xla import _XLA_AVAILABLE
from lightning.fabric.strategies.launchers.launcher import _Launcher
from lightning.fabric.strategies.launchers.multiprocessing import _GlobalStateSnapshot
from lightning.fabric.utilities.apply_func import move_data_to_device
def _rank_teardown(rank: int) -> None:
    """Synchronize all XLA processes before exiting; keep rank 0 alive the longest."""
    import torch_xla.core.xla_model as xm

    # Make every process wait for the others before joining:
    # https://github.com/pytorch/xla/issues/1801#issuecomment-602799542
    xm.rendezvous("end-process")
    if rank != 0:
        return
    # Ensure that the rank 0 process is the one exiting last:
    # https://github.com/pytorch/xla/issues/2190#issuecomment-641665358
    time.sleep(1)
155,510 | import itertools
import os
from dataclasses import dataclass
from multiprocessing.queues import SimpleQueue
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Callable, Dict, Literal, Optional
import torch
import torch.backends.cudnn
import torch.multiprocessing as mp
from lightning_utilities import apply_to_collection
from torch.nn import Module
from typing_extensions import override
from lightning.fabric.accelerators.cpu import CPUAccelerator
from lightning.fabric.strategies.launchers.launcher import _Launcher
from lightning.fabric.utilities.apply_func import move_data_to_device
from lightning.fabric.utilities.distributed import _set_num_threads_if_needed
from lightning.fabric.utilities.imports import _IS_INTERACTIVE
from lightning.fabric.utilities.seed import _collect_rng_states, _set_rng_states
# True when running under an interactive interpreter (e.g. REPL/notebook): either the
# `sys.ps1` prompt attribute exists or the interpreter was started with the `-i` flag.
_IS_INTERACTIVE = hasattr(sys, "ps1") or bool(sys.flags.interactive)
The provided code snippet includes necessary dependencies for implementing the `_check_bad_cuda_fork` function. Write a Python function `def _check_bad_cuda_fork() -> None` to solve the following problem:
Checks whether it is safe to fork and initialize CUDA in the new processes, and raises an exception if not. The error message replaces PyTorch's 'Cannot re-initialize CUDA in forked subprocess' with helpful advice for Lightning users.
Here is the function:
def _check_bad_cuda_fork() -> None:
"""Checks whether it is safe to fork and initialize CUDA in the new processes, and raises an exception if not.
The error message replaces PyTorch's 'Cannot re-initialize CUDA in forked subprocess' with helpful advice for
Lightning users.
"""
if not torch.cuda.is_initialized():
return
message = (
"Lightning can't create new processes if CUDA is already initialized. Did you manually call"
" `torch.cuda.*` functions, have moved the model to the device, or allocated memory on the GPU any"
" other way? Please remove any such calls, or change the selected strategy."
)
if _IS_INTERACTIVE:
message += " You will have to restart the Python kernel."
raise RuntimeError(message) | Checks whether it is safe to fork and initialize CUDA in the new processes, and raises an exception if not. The error message replaces PyTorch's 'Cannot re-initialize CUDA in forked subprocess' with helpful advice for Lightning users. |
155,511 | import itertools
import os
from dataclasses import dataclass
from multiprocessing.queues import SimpleQueue
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Callable, Dict, Literal, Optional
import torch
import torch.backends.cudnn
import torch.multiprocessing as mp
from lightning_utilities import apply_to_collection
from torch.nn import Module
from typing_extensions import override
from lightning.fabric.accelerators.cpu import CPUAccelerator
from lightning.fabric.strategies.launchers.launcher import _Launcher
from lightning.fabric.utilities.apply_func import move_data_to_device
from lightning.fabric.utilities.distributed import _set_num_threads_if_needed
from lightning.fabric.utilities.imports import _IS_INTERACTIVE
from lightning.fabric.utilities.seed import _collect_rng_states, _set_rng_states
The provided code snippet includes necessary dependencies for implementing the `_disable_module_memory_sharing` function. Write a Python function `def _disable_module_memory_sharing(data: Any) -> Any` to solve the following problem:
Disables memory sharing on parameters and buffers of `nn.Module`s contained in the given collection. Note: This is only required when running on CPU.
Here is the function:
def _disable_module_memory_sharing(data: Any) -> Any:
    """Disables memory sharing on parameters and buffers of `nn.Module`s contained in the given collection.

    Note: This is only required when running on CPU.
    """
    # PyTorch enables memory sharing automatically on all tensors that are passed through `mp.spawn`.
    # For model weights and buffers, this is undesired and can lead to race conditions between processes.
    # Hence, we copy the tensors in the entire module to ensure it doesn't share memory with other processes.

    @torch.no_grad()
    def _unshare(module: Module) -> Module:
        for param_or_buffer in itertools.chain(module.parameters(), module.buffers()):
            param_or_buffer.data = param_or_buffer.data.clone()
        return module

    return apply_to_collection(data, function=_unshare, dtype=Module)
155,512 | import itertools
import os
from dataclasses import dataclass
from multiprocessing.queues import SimpleQueue
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Callable, Dict, Literal, Optional
import torch
import torch.backends.cudnn
import torch.multiprocessing as mp
from lightning_utilities import apply_to_collection
from torch.nn import Module
from typing_extensions import override
from lightning.fabric.accelerators.cpu import CPUAccelerator
from lightning.fabric.strategies.launchers.launcher import _Launcher
from lightning.fabric.utilities.apply_func import move_data_to_device
from lightning.fabric.utilities.distributed import _set_num_threads_if_needed
from lightning.fabric.utilities.imports import _IS_INTERACTIVE
from lightning.fabric.utilities.seed import _collect_rng_states, _set_rng_states
The provided code snippet includes necessary dependencies for implementing the `_check_missing_main_guard` function. Write a Python function `def _check_missing_main_guard() -> None` to solve the following problem:
Raises an exception if the ``__name__ == "__main__"`` guard is missing.
Here is the function:
def _check_missing_main_guard() -> None:
"""Raises an exception if the ``__name__ == "__main__"`` guard is missing."""
if not getattr(mp.current_process(), "_inheriting", False):
return
message = dedent(
"""
Launching multiple processes with the 'spawn' start method requires that your script guards the main
function with an `if __name__ == \"__main__\"` clause. For example:
def main():
# Put your code here
...
if __name__ == "__main__":
main()
Alternatively, you can run with `strategy="ddp"` to avoid this error.
"""
)
raise RuntimeError(message) | Raises an exception if the ``__name__ == "__main__"`` guard is missing. |
155,513 | import logging
import os
import signal
import subprocess
import sys
import threading
import time
from typing import Any, Callable, List, Optional, Sequence, Tuple
from lightning_utilities.core.imports import RequirementCache
from typing_extensions import override
from lightning.fabric.plugins.environments.cluster_environment import ClusterEnvironment
from lightning.fabric.strategies.launchers.launcher import _Launcher
from lightning.fabric.utilities.distributed import _set_num_threads_if_needed
from lightning.fabric.utilities.rank_zero import rank_prefixed_message
def _basic_subprocess_cmd() -> Sequence[str]:
import __main__ # local import to avoid https://github.com/Lightning-AI/lightning/issues/15218
if __main__.__spec__ is None: # pragma: no-cover
return [sys.executable, os.path.abspath(sys.argv[0])] + sys.argv[1:]
return [sys.executable, "-m", __main__.__spec__.name] + sys.argv[1:] | null |
155,514 | import logging
import os
import signal
import subprocess
import sys
import threading
import time
from typing import Any, Callable, List, Optional, Sequence, Tuple
from lightning_utilities.core.imports import RequirementCache
from typing_extensions import override
from lightning.fabric.plugins.environments.cluster_environment import ClusterEnvironment
from lightning.fabric.strategies.launchers.launcher import _Launcher
from lightning.fabric.utilities.distributed import _set_num_threads_if_needed
from lightning.fabric.utilities.rank_zero import rank_prefixed_message
def _hydra_subprocess_cmd(local_rank: int) -> Tuple[Sequence[str], str]:
    """Build the launch command and working directory for a worker process under a Hydra run.

    Args:
        local_rank: Index of the worker; only used to name the Hydra job of the child process.

    Returns:
        A tuple ``(command, cwd)`` where ``command`` re-launches the current script with Hydra overrides
        appended and ``cwd`` is the directory the parent script was originally started from.

    """
    from hydra.core.hydra_config import HydraConfig
    from hydra.utils import get_original_cwd, to_absolute_path

    import __main__  # local import to avoid https://github.com/Lightning-AI/lightning/issues/15218

    # when user is using hydra find the absolute path
    launcher = (
        [sys.executable, to_absolute_path(sys.argv[0])]
        if __main__.__spec__ is None  # pragma: no-cover
        else [sys.executable, "-m", __main__.__spec__.name]
    )
    command = launcher + sys.argv[1:]

    cwd = get_original_cwd()
    rundir = f'"{HydraConfig.get().run.dir}"'
    # Set output_subdir null since we don't want different subprocesses trying to write to config.yaml
    command += [f"hydra.run.dir={rundir}", f"hydra.job.name=train_ddp_process_{local_rank}", "hydra.output_subdir=null"]
    return command, cwd
155,515 | import logging
import os
import signal
import subprocess
import sys
import threading
import time
from typing import Any, Callable, List, Optional, Sequence, Tuple
from lightning_utilities.core.imports import RequirementCache
from typing_extensions import override
from lightning.fabric.plugins.environments.cluster_environment import ClusterEnvironment
from lightning.fabric.strategies.launchers.launcher import _Launcher
from lightning.fabric.utilities.distributed import _set_num_threads_if_needed
from lightning.fabric.utilities.rank_zero import rank_prefixed_message
class _ChildProcessObserver(threading.Thread):
    """Background thread that polls the spawned worker processes and tears everything down on failure."""

    def __init__(self, main_pid: int, child_processes: List[subprocess.Popen], sleep_period: int = 5) -> None:
        super().__init__(daemon=True, name="child-process-observer")  # thread stops if the main process exits
        self._main_pid = main_pid
        self._child_processes = child_processes
        self._sleep_period = sleep_period
        # Note: SIGTERM is not aggressive enough to terminate processes hanging in collectives
        self._termination_signal = signal.SIGTERM if sys.platform == "win32" else signal.SIGKILL
        self._finished = False

    def run(self) -> None:
        # Keep polling at a fixed period until a pass reports that observation is complete.
        done = False
        while not done:
            time.sleep(self._sleep_period)
            done = self._run()
            self._finished = done

    def _run(self) -> bool:
        """Runs once over all child processes to check whether they are still running."""
        # Refresh the cached return codes first.
        for child in self._child_processes:
            child.poll()

        if all(child.returncode == 0 for child in self._child_processes):
            # Every worker exited successfully: nothing left to observe.
            return True

        for index, child in enumerate(self._child_processes):
            if not child.returncode:
                # Still running (None) or exited cleanly (0).
                continue
            message = rank_prefixed_message(
                f"Child process with PID {child.pid} terminated with code {child.returncode}."
                f" Forcefully terminating all other processes to avoid zombies 🧟",
                rank=(index + 1),
            )
            _logger.info(message)
            self._terminate_all()
            return True

        return False

    def _terminate_all(self) -> None:
        """Terminates the main process and all its children."""
        for child in self._child_processes:
            child.send_signal(self._termination_signal)
        os.kill(self._main_pid, self._termination_signal)
The provided code snippet includes necessary dependencies for implementing the `_launch_process_observer` function. Write a Python function `def _launch_process_observer(child_processes: List[subprocess.Popen]) -> None` to solve the following problem:
Launches a thread that runs along the main process and monitors the health of all processes.
Here is the function:
def _launch_process_observer(child_processes: List[subprocess.Popen]) -> None:
    """Launches a thread that runs along the main process and monitors the health of all processes."""
    observer = _ChildProcessObserver(main_pid=os.getpid(), child_processes=child_processes)
    observer.start()
155,516 | import shutil
from contextlib import ExitStack, nullcontext
from datetime import timedelta
from functools import partial
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Callable,
ContextManager,
Dict,
Generator,
List,
Literal,
Optional,
Set,
Tuple,
Type,
Union,
)
import torch
from lightning_utilities.core.imports import RequirementCache
from lightning_utilities.core.rank_zero import rank_zero_only as utils_rank_zero_only
from torch import Tensor
from torch.nn import Module, Parameter
from torch.optim import Optimizer
from typing_extensions import TypeGuard, override
from lightning.fabric.accelerators import Accelerator
from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment, Precision
from lightning.fabric.plugins.collectives.torch_collective import default_pg_timeout
from lightning.fabric.plugins.precision.fsdp import FSDPPrecision
from lightning.fabric.strategies.launchers.subprocess_script import _SubprocessScriptLauncher
from lightning.fabric.strategies.parallel import ParallelStrategy
from lightning.fabric.strategies.registry import _StrategyRegistry
from lightning.fabric.strategies.strategy import (
TBroadcast,
_apply_filter,
_BackwardSyncControl,
_Sharded,
_validate_keys_for_strict_loading,
)
from lightning.fabric.utilities.distributed import (
ReduceOp,
_distributed_is_initialized,
_get_default_process_group_backend_for_device,
_init_dist_connection,
_sync_ddp_if_available,
)
from lightning.fabric.utilities.distributed import group as _group
from lightning.fabric.utilities.imports import (
_TORCH_GREATER_EQUAL_2_0,
_TORCH_GREATER_EQUAL_2_1,
_TORCH_GREATER_EQUAL_2_2,
_TORCH_GREATER_EQUAL_2_3,
)
from lightning.fabric.utilities.init import _EmptyInit
from lightning.fabric.utilities.load import _METADATA_FILENAME, _lazy_load, _materialize_tensors, _move_state_into
from lightning.fabric.utilities.rank_zero import rank_zero_deprecation, rank_zero_only, rank_zero_warn
from lightning.fabric.utilities.seed import reset_seed
from lightning.fabric.utilities.types import _PATH, _Stateful
def _auto_wrap_policy_kwargs(policy: Optional["_POLICY"], kwargs: Dict) -> Dict:
_TORCH_GREATER_EQUAL_2_1 = compare_version("torch", operator.ge, "2.1.0")
def _activation_checkpointing_kwargs(
activation_checkpointing: Optional[Union[Type[Module], List[Type[Module]]]],
activation_checkpointing_policy: Optional["_POLICY"],
) -> Dict:
if activation_checkpointing is None and activation_checkpointing_policy is None:
return {}
if activation_checkpointing is not None and activation_checkpointing_policy is not None:
raise ValueError(
"You cannot set both `activation_checkpointing` and `activation_checkpointing_policy`. Use the latter."
)
if activation_checkpointing is not None:
if isinstance(activation_checkpointing, list):
classes = tuple(activation_checkpointing)
else:
classes = (activation_checkpointing,)
if _TORCH_GREATER_EQUAL_2_1:
rank_zero_deprecation(
f"`FSDPStrategy(activation_checkpointing={activation_checkpointing})` is deprecated, use "
f"`FSDPStrategy(activation_checkpointing_policy={set(classes)})` instead."
)
return {"check_fn": lambda submodule: isinstance(submodule, classes)}
if isinstance(activation_checkpointing_policy, set):
if _TORCH_GREATER_EQUAL_2_1:
return _auto_wrap_policy_kwargs(activation_checkpointing_policy, {})
return {"check_fn": lambda submodule: isinstance(submodule, tuple(activation_checkpointing_policy))}
if not _TORCH_GREATER_EQUAL_2_1:
raise ValueError("`activation_checkpointing_policy` requires torch >= 2.1.0. HINT: `pip install -U torch`")
return {"auto_wrap_policy": activation_checkpointing_policy} | null |
155,517 | import shutil
from contextlib import ExitStack, nullcontext
from datetime import timedelta
from functools import partial
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Callable,
ContextManager,
Dict,
Generator,
List,
Literal,
Optional,
Set,
Tuple,
Type,
Union,
)
import torch
from lightning_utilities.core.imports import RequirementCache
from lightning_utilities.core.rank_zero import rank_zero_only as utils_rank_zero_only
from torch import Tensor
from torch.nn import Module, Parameter
from torch.optim import Optimizer
from typing_extensions import TypeGuard, override
from lightning.fabric.accelerators import Accelerator
from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment, Precision
from lightning.fabric.plugins.collectives.torch_collective import default_pg_timeout
from lightning.fabric.plugins.precision.fsdp import FSDPPrecision
from lightning.fabric.strategies.launchers.subprocess_script import _SubprocessScriptLauncher
from lightning.fabric.strategies.parallel import ParallelStrategy
from lightning.fabric.strategies.registry import _StrategyRegistry
from lightning.fabric.strategies.strategy import (
TBroadcast,
_apply_filter,
_BackwardSyncControl,
_Sharded,
_validate_keys_for_strict_loading,
)
from lightning.fabric.utilities.distributed import (
ReduceOp,
_distributed_is_initialized,
_get_default_process_group_backend_for_device,
_init_dist_connection,
_sync_ddp_if_available,
)
from lightning.fabric.utilities.distributed import group as _group
from lightning.fabric.utilities.imports import (
_TORCH_GREATER_EQUAL_2_0,
_TORCH_GREATER_EQUAL_2_1,
_TORCH_GREATER_EQUAL_2_2,
_TORCH_GREATER_EQUAL_2_3,
)
from lightning.fabric.utilities.init import _EmptyInit
from lightning.fabric.utilities.load import _METADATA_FILENAME, _lazy_load, _materialize_tensors, _move_state_into
from lightning.fabric.utilities.rank_zero import rank_zero_deprecation, rank_zero_only, rank_zero_warn
from lightning.fabric.utilities.seed import reset_seed
from lightning.fabric.utilities.types import _PATH, _Stateful
_TORCH_GREATER_EQUAL_2_2 = compare_version("torch", operator.ge, "2.2.0")
def _setup_activation_checkpointing(module: Module, activation_checkpointing_kwargs: Dict) -> None:
if not activation_checkpointing_kwargs:
return
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import CheckpointWrapper
if any(isinstance(mod, CheckpointWrapper) for mod in module.modules()):
rank_zero_warn(
"FSDP checkpointing is configured, but the model already contains checkpointed layers."
" Checkpointing will be ignored."
)
return
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
CheckpointImpl,
apply_activation_checkpointing,
checkpoint_wrapper,
)
if not _TORCH_GREATER_EQUAL_2_2:
checkpoint_wrapper = partial(checkpoint_wrapper, checkpoint_impl=CheckpointImpl.NO_REENTRANT)
apply_activation_checkpointing(module, checkpoint_wrapper_fn=checkpoint_wrapper, **activation_checkpointing_kwargs) | null |
155,518 | import shutil
from contextlib import ExitStack, nullcontext
from datetime import timedelta
from functools import partial
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Callable,
ContextManager,
Dict,
Generator,
List,
Literal,
Optional,
Set,
Tuple,
Type,
Union,
)
import torch
from lightning_utilities.core.imports import RequirementCache
from lightning_utilities.core.rank_zero import rank_zero_only as utils_rank_zero_only
from torch import Tensor
from torch.nn import Module, Parameter
from torch.optim import Optimizer
from typing_extensions import TypeGuard, override
from lightning.fabric.accelerators import Accelerator
from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment, Precision
from lightning.fabric.plugins.collectives.torch_collective import default_pg_timeout
from lightning.fabric.plugins.precision.fsdp import FSDPPrecision
from lightning.fabric.strategies.launchers.subprocess_script import _SubprocessScriptLauncher
from lightning.fabric.strategies.parallel import ParallelStrategy
from lightning.fabric.strategies.registry import _StrategyRegistry
from lightning.fabric.strategies.strategy import (
TBroadcast,
_apply_filter,
_BackwardSyncControl,
_Sharded,
_validate_keys_for_strict_loading,
)
from lightning.fabric.utilities.distributed import (
ReduceOp,
_distributed_is_initialized,
_get_default_process_group_backend_for_device,
_init_dist_connection,
_sync_ddp_if_available,
)
from lightning.fabric.utilities.distributed import group as _group
from lightning.fabric.utilities.imports import (
_TORCH_GREATER_EQUAL_2_0,
_TORCH_GREATER_EQUAL_2_1,
_TORCH_GREATER_EQUAL_2_2,
_TORCH_GREATER_EQUAL_2_3,
)
from lightning.fabric.utilities.init import _EmptyInit
from lightning.fabric.utilities.load import _METADATA_FILENAME, _lazy_load, _materialize_tensors, _move_state_into
from lightning.fabric.utilities.rank_zero import rank_zero_deprecation, rank_zero_only, rank_zero_warn
from lightning.fabric.utilities.seed import reset_seed
from lightning.fabric.utilities.types import _PATH, _Stateful
def _init_cpu_offload(cpu_offload: Optional[Union[bool, "CPUOffload"]]) -> "CPUOffload":
from torch.distributed.fsdp import CPUOffload
return cpu_offload if isinstance(cpu_offload, CPUOffload) else CPUOffload(offload_params=bool(cpu_offload)) | null |
155,519 | import shutil
from contextlib import ExitStack, nullcontext
from datetime import timedelta
from functools import partial
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Callable,
ContextManager,
Dict,
Generator,
List,
Literal,
Optional,
Set,
Tuple,
Type,
Union,
)
import torch
from lightning_utilities.core.imports import RequirementCache
from lightning_utilities.core.rank_zero import rank_zero_only as utils_rank_zero_only
from torch import Tensor
from torch.nn import Module, Parameter
from torch.optim import Optimizer
from typing_extensions import TypeGuard, override
from lightning.fabric.accelerators import Accelerator
from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment, Precision
from lightning.fabric.plugins.collectives.torch_collective import default_pg_timeout
from lightning.fabric.plugins.precision.fsdp import FSDPPrecision
from lightning.fabric.strategies.launchers.subprocess_script import _SubprocessScriptLauncher
from lightning.fabric.strategies.parallel import ParallelStrategy
from lightning.fabric.strategies.registry import _StrategyRegistry
from lightning.fabric.strategies.strategy import (
TBroadcast,
_apply_filter,
_BackwardSyncControl,
_Sharded,
_validate_keys_for_strict_loading,
)
from lightning.fabric.utilities.distributed import (
ReduceOp,
_distributed_is_initialized,
_get_default_process_group_backend_for_device,
_init_dist_connection,
_sync_ddp_if_available,
)
from lightning.fabric.utilities.distributed import group as _group
from lightning.fabric.utilities.imports import (
_TORCH_GREATER_EQUAL_2_0,
_TORCH_GREATER_EQUAL_2_1,
_TORCH_GREATER_EQUAL_2_2,
_TORCH_GREATER_EQUAL_2_3,
)
from lightning.fabric.utilities.init import _EmptyInit
from lightning.fabric.utilities.load import _METADATA_FILENAME, _lazy_load, _materialize_tensors, _move_state_into
from lightning.fabric.utilities.rank_zero import rank_zero_deprecation, rank_zero_only, rank_zero_warn
from lightning.fabric.utilities.seed import reset_seed
from lightning.fabric.utilities.types import _PATH, _Stateful
def _init_sharding_strategy(sharding_strategy: "_SHARDING_STRATEGY", kwargs: Dict) -> "ShardingStrategy":
from torch.distributed.fsdp import ShardingStrategy
if kwargs.get("process_group") is not None and kwargs.get("device_mesh") is not None:
raise ValueError(
"The arguments `FSDPStrategy(process_group=..., device_mesh=...)` are mutually exclusive."
"Pass only one of them."
)
strategy = ShardingStrategy[sharding_strategy.upper()] if isinstance(sharding_strategy, str) else sharding_strategy
if (
"HYBRID" in strategy.name
and kwargs.get("auto_wrap_policy") is None
and kwargs.get("process_group") is None
and kwargs.get("device_mesh") is None
):
raise RuntimeError(
"The hybrid sharding strategy requires you to pass at least one of the parameters: `auto_wrap_policy`,"
" `process_group` tuple, or `device_mesh`."
)
return strategy | null |
155,520 | import shutil
from contextlib import ExitStack, nullcontext
from datetime import timedelta
from functools import partial
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Callable,
ContextManager,
Dict,
Generator,
List,
Literal,
Optional,
Set,
Tuple,
Type,
Union,
)
import torch
from lightning_utilities.core.imports import RequirementCache
from lightning_utilities.core.rank_zero import rank_zero_only as utils_rank_zero_only
from torch import Tensor
from torch.nn import Module, Parameter
from torch.optim import Optimizer
from typing_extensions import TypeGuard, override
from lightning.fabric.accelerators import Accelerator
from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment, Precision
from lightning.fabric.plugins.collectives.torch_collective import default_pg_timeout
from lightning.fabric.plugins.precision.fsdp import FSDPPrecision
from lightning.fabric.strategies.launchers.subprocess_script import _SubprocessScriptLauncher
from lightning.fabric.strategies.parallel import ParallelStrategy
from lightning.fabric.strategies.registry import _StrategyRegistry
from lightning.fabric.strategies.strategy import (
TBroadcast,
_apply_filter,
_BackwardSyncControl,
_Sharded,
_validate_keys_for_strict_loading,
)
from lightning.fabric.utilities.distributed import (
ReduceOp,
_distributed_is_initialized,
_get_default_process_group_backend_for_device,
_init_dist_connection,
_sync_ddp_if_available,
)
from lightning.fabric.utilities.distributed import group as _group
from lightning.fabric.utilities.imports import (
_TORCH_GREATER_EQUAL_2_0,
_TORCH_GREATER_EQUAL_2_1,
_TORCH_GREATER_EQUAL_2_2,
_TORCH_GREATER_EQUAL_2_3,
)
from lightning.fabric.utilities.init import _EmptyInit
from lightning.fabric.utilities.load import _METADATA_FILENAME, _lazy_load, _materialize_tensors, _move_state_into
from lightning.fabric.utilities.rank_zero import rank_zero_deprecation, rank_zero_only, rank_zero_warn
from lightning.fabric.utilities.seed import reset_seed
from lightning.fabric.utilities.types import _PATH, _Stateful
def _optimizer_has_flat_params(optimizer: Optimizer) -> bool:
return any(
getattr(param, "_fsdp_flattened", False) for group in optimizer.param_groups for param in group["params"]
) | null |
155,521 | import shutil
from contextlib import ExitStack, nullcontext
from datetime import timedelta
from functools import partial
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Callable,
ContextManager,
Dict,
Generator,
List,
Literal,
Optional,
Set,
Tuple,
Type,
Union,
)
import torch
from lightning_utilities.core.imports import RequirementCache
from lightning_utilities.core.rank_zero import rank_zero_only as utils_rank_zero_only
from torch import Tensor
from torch.nn import Module, Parameter
from torch.optim import Optimizer
from typing_extensions import TypeGuard, override
from lightning.fabric.accelerators import Accelerator
from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment, Precision
from lightning.fabric.plugins.collectives.torch_collective import default_pg_timeout
from lightning.fabric.plugins.precision.fsdp import FSDPPrecision
from lightning.fabric.strategies.launchers.subprocess_script import _SubprocessScriptLauncher
from lightning.fabric.strategies.parallel import ParallelStrategy
from lightning.fabric.strategies.registry import _StrategyRegistry
from lightning.fabric.strategies.strategy import (
TBroadcast,
_apply_filter,
_BackwardSyncControl,
_Sharded,
_validate_keys_for_strict_loading,
)
from lightning.fabric.utilities.distributed import (
ReduceOp,
_distributed_is_initialized,
_get_default_process_group_backend_for_device,
_init_dist_connection,
_sync_ddp_if_available,
)
from lightning.fabric.utilities.distributed import group as _group
from lightning.fabric.utilities.imports import (
_TORCH_GREATER_EQUAL_2_0,
_TORCH_GREATER_EQUAL_2_1,
_TORCH_GREATER_EQUAL_2_2,
_TORCH_GREATER_EQUAL_2_3,
)
from lightning.fabric.utilities.init import _EmptyInit
from lightning.fabric.utilities.load import _METADATA_FILENAME, _lazy_load, _materialize_tensors, _move_state_into
from lightning.fabric.utilities.rank_zero import rank_zero_deprecation, rank_zero_only, rank_zero_warn
from lightning.fabric.utilities.seed import reset_seed
from lightning.fabric.utilities.types import _PATH, _Stateful
def _get_sharded_state_dict_context(module: Module) -> Generator[None, None, None]:
    """Return a context manager that switches ``module`` to FSDP's sharded state-dict format.

    Both the model and optimizer state-dict configs request CPU offloading of the sharded tensors.
    """
    from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.api import ShardedOptimStateDictConfig, ShardedStateDictConfig, StateDictType

    return FSDP.state_dict_type(  # type: ignore[return-value]
        module=module,
        state_dict_type=StateDictType.SHARDED_STATE_DICT,
        state_dict_config=ShardedStateDictConfig(offload_to_cpu=True),
        optim_state_dict_config=ShardedOptimStateDictConfig(offload_to_cpu=True),
    )
155,522 | import shutil
from contextlib import ExitStack, nullcontext
from datetime import timedelta
from functools import partial
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Callable,
ContextManager,
Dict,
Generator,
List,
Literal,
Optional,
Set,
Tuple,
Type,
Union,
)
import torch
from lightning_utilities.core.imports import RequirementCache
from lightning_utilities.core.rank_zero import rank_zero_only as utils_rank_zero_only
from torch import Tensor
from torch.nn import Module, Parameter
from torch.optim import Optimizer
from typing_extensions import TypeGuard, override
from lightning.fabric.accelerators import Accelerator
from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment, Precision
from lightning.fabric.plugins.collectives.torch_collective import default_pg_timeout
from lightning.fabric.plugins.precision.fsdp import FSDPPrecision
from lightning.fabric.strategies.launchers.subprocess_script import _SubprocessScriptLauncher
from lightning.fabric.strategies.parallel import ParallelStrategy
from lightning.fabric.strategies.registry import _StrategyRegistry
from lightning.fabric.strategies.strategy import (
TBroadcast,
_apply_filter,
_BackwardSyncControl,
_Sharded,
_validate_keys_for_strict_loading,
)
from lightning.fabric.utilities.distributed import (
ReduceOp,
_distributed_is_initialized,
_get_default_process_group_backend_for_device,
_init_dist_connection,
_sync_ddp_if_available,
)
from lightning.fabric.utilities.distributed import group as _group
from lightning.fabric.utilities.imports import (
_TORCH_GREATER_EQUAL_2_0,
_TORCH_GREATER_EQUAL_2_1,
_TORCH_GREATER_EQUAL_2_2,
_TORCH_GREATER_EQUAL_2_3,
)
from lightning.fabric.utilities.init import _EmptyInit
from lightning.fabric.utilities.load import _METADATA_FILENAME, _lazy_load, _materialize_tensors, _move_state_into
from lightning.fabric.utilities.rank_zero import rank_zero_deprecation, rank_zero_only, rank_zero_warn
from lightning.fabric.utilities.seed import reset_seed
from lightning.fabric.utilities.types import _PATH, _Stateful
_METADATA_FILENAME = "meta.pt"
The provided code snippet includes necessary dependencies for implementing the `_is_sharded_checkpoint` function. Write a Python function `def _is_sharded_checkpoint(path: Path) -> bool` to solve the following problem:
A heuristic check to determine whether the path points to a directory with checkpoint shards.
Here is the function:
def _is_sharded_checkpoint(path: Path) -> bool:
"""A heuristic check to determine whether the path points to a directory with checkpoint shards."""
return path.is_dir() and (path / _METADATA_FILENAME).is_file() | A heuristic check to determine whether the path points to a directory with checkpoint shards. |
155,523 | import shutil
from contextlib import ExitStack, nullcontext
from datetime import timedelta
from functools import partial
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Callable,
ContextManager,
Dict,
Generator,
List,
Literal,
Optional,
Set,
Tuple,
Type,
Union,
)
import torch
from lightning_utilities.core.imports import RequirementCache
from lightning_utilities.core.rank_zero import rank_zero_only as utils_rank_zero_only
from torch import Tensor
from torch.nn import Module, Parameter
from torch.optim import Optimizer
from typing_extensions import TypeGuard, override
from lightning.fabric.accelerators import Accelerator
from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment, Precision
from lightning.fabric.plugins.collectives.torch_collective import default_pg_timeout
from lightning.fabric.plugins.precision.fsdp import FSDPPrecision
from lightning.fabric.strategies.launchers.subprocess_script import _SubprocessScriptLauncher
from lightning.fabric.strategies.parallel import ParallelStrategy
from lightning.fabric.strategies.registry import _StrategyRegistry
from lightning.fabric.strategies.strategy import (
TBroadcast,
_apply_filter,
_BackwardSyncControl,
_Sharded,
_validate_keys_for_strict_loading,
)
from lightning.fabric.utilities.distributed import (
ReduceOp,
_distributed_is_initialized,
_get_default_process_group_backend_for_device,
_init_dist_connection,
_sync_ddp_if_available,
)
from lightning.fabric.utilities.distributed import group as _group
from lightning.fabric.utilities.imports import (
_TORCH_GREATER_EQUAL_2_0,
_TORCH_GREATER_EQUAL_2_1,
_TORCH_GREATER_EQUAL_2_2,
_TORCH_GREATER_EQUAL_2_3,
)
from lightning.fabric.utilities.init import _EmptyInit
from lightning.fabric.utilities.load import _METADATA_FILENAME, _lazy_load, _materialize_tensors, _move_state_into
from lightning.fabric.utilities.rank_zero import rank_zero_deprecation, rank_zero_only, rank_zero_warn
from lightning.fabric.utilities.seed import reset_seed
from lightning.fabric.utilities.types import _PATH, _Stateful
def _has_fsdp_modules(module: object) -> TypeGuard[Module]:
from torch.distributed.fsdp import FullyShardedDataParallel
return isinstance(module, Module) and any(isinstance(m, FullyShardedDataParallel) for m in module.modules()) | null |
155,524 | import shutil
from contextlib import ExitStack, nullcontext
from datetime import timedelta
from functools import partial
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Callable,
ContextManager,
Dict,
Generator,
List,
Literal,
Optional,
Set,
Tuple,
Type,
Union,
)
import torch
from lightning_utilities.core.imports import RequirementCache
from lightning_utilities.core.rank_zero import rank_zero_only as utils_rank_zero_only
from torch import Tensor
from torch.nn import Module, Parameter
from torch.optim import Optimizer
from typing_extensions import TypeGuard, override
from lightning.fabric.accelerators import Accelerator
from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment, Precision
from lightning.fabric.plugins.collectives.torch_collective import default_pg_timeout
from lightning.fabric.plugins.precision.fsdp import FSDPPrecision
from lightning.fabric.strategies.launchers.subprocess_script import _SubprocessScriptLauncher
from lightning.fabric.strategies.parallel import ParallelStrategy
from lightning.fabric.strategies.registry import _StrategyRegistry
from lightning.fabric.strategies.strategy import (
TBroadcast,
_apply_filter,
_BackwardSyncControl,
_Sharded,
_validate_keys_for_strict_loading,
)
from lightning.fabric.utilities.distributed import (
ReduceOp,
_distributed_is_initialized,
_get_default_process_group_backend_for_device,
_init_dist_connection,
_sync_ddp_if_available,
)
from lightning.fabric.utilities.distributed import group as _group
from lightning.fabric.utilities.imports import (
_TORCH_GREATER_EQUAL_2_0,
_TORCH_GREATER_EQUAL_2_1,
_TORCH_GREATER_EQUAL_2_2,
_TORCH_GREATER_EQUAL_2_3,
)
from lightning.fabric.utilities.init import _EmptyInit
from lightning.fabric.utilities.load import _METADATA_FILENAME, _lazy_load, _materialize_tensors, _move_state_into
from lightning.fabric.utilities.rank_zero import rank_zero_deprecation, rank_zero_only, rank_zero_warn
from lightning.fabric.utilities.seed import reset_seed
from lightning.fabric.utilities.types import _PATH, _Stateful
def _is_full_checkpoint(path: Path) -> bool:
    """Return whether ``path`` points to a single file, i.e. a full (non-sharded) checkpoint."""
    return path.is_file()
def _load_raw_module_state(state_dict: Dict[str, Any], module: Module, world_size: int, strict: bool = True) -> None:
"""Loads the state dict into the module by gathering all weights first and then and writing back to each shard."""
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
if not isinstance(module, FSDP):
module.load_state_dict(state_dict, strict=strict)
else:
with _get_full_state_dict_context(module, world_size=world_size, rank0_only=False):
module.load_state_dict(state_dict, strict=strict)
def _lazy_load(filename: _PATH) -> Any:
    """Deserialize a checkpoint file without materializing its tensors up front.

    The pickle stream is decoded with a lazy unpickler, so tensor data is only read from disk on access.
    """
    if not _TORCH_GREATER_EQUAL_2_0:
        raise NotImplementedError("Lazy-loading is only supported with PyTorch >= 2.0.")
    if not os.path.isfile(filename):
        raise FileNotFoundError(f"Path {str(filename)!r} does not exist or is not a file.")
    reader = torch.PyTorchFileReader(str(filename))
    with BytesIO(reader.get_record("data.pkl")) as pickle_stream:
        unpickler = _LazyLoadingUnpickler(pickle_stream, reader)
        return unpickler.load()
The provided code snippet includes necessary dependencies for implementing the `_load_raw_module_state_from_path` function. Write a Python function `def _load_raw_module_state_from_path(path: Path, module: Module, world_size: int, strict: bool = True) -> None` to solve the following problem:
Loads the state dict from a file path into the FSDP module.
Here is the function:
def _load_raw_module_state_from_path(path: Path, module: Module, world_size: int, strict: bool = True) -> None:
    """Loads the state dict from a file path into the FSDP module."""
    if not _is_full_checkpoint(path):
        raise ValueError(
            "Failed to load checkpoint directly into the model. The given path must be a single file containing the"
            f" full state dict: {path}"
        )
    # Use `lazy_load` instead of `torch.load` here to avoid storing a copy of the full checkpoint per rank
    state_dict = _lazy_load(path)
    _load_raw_module_state(state_dict=state_dict, module=module, world_size=world_size, strict=strict)
155,525 | import shutil
from contextlib import ExitStack, nullcontext
from datetime import timedelta
from functools import partial
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Callable,
ContextManager,
Dict,
Generator,
List,
Literal,
Optional,
Set,
Tuple,
Type,
Union,
)
import torch
from lightning_utilities.core.imports import RequirementCache
from lightning_utilities.core.rank_zero import rank_zero_only as utils_rank_zero_only
from torch import Tensor
from torch.nn import Module, Parameter
from torch.optim import Optimizer
from typing_extensions import TypeGuard, override
from lightning.fabric.accelerators import Accelerator
from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment, Precision
from lightning.fabric.plugins.collectives.torch_collective import default_pg_timeout
from lightning.fabric.plugins.precision.fsdp import FSDPPrecision
from lightning.fabric.strategies.launchers.subprocess_script import _SubprocessScriptLauncher
from lightning.fabric.strategies.parallel import ParallelStrategy
from lightning.fabric.strategies.registry import _StrategyRegistry
from lightning.fabric.strategies.strategy import (
TBroadcast,
_apply_filter,
_BackwardSyncControl,
_Sharded,
_validate_keys_for_strict_loading,
)
from lightning.fabric.utilities.distributed import (
ReduceOp,
_distributed_is_initialized,
_get_default_process_group_backend_for_device,
_init_dist_connection,
_sync_ddp_if_available,
)
from lightning.fabric.utilities.distributed import group as _group
from lightning.fabric.utilities.imports import (
_TORCH_GREATER_EQUAL_2_0,
_TORCH_GREATER_EQUAL_2_1,
_TORCH_GREATER_EQUAL_2_2,
_TORCH_GREATER_EQUAL_2_3,
)
from lightning.fabric.utilities.init import _EmptyInit
from lightning.fabric.utilities.load import _METADATA_FILENAME, _lazy_load, _materialize_tensors, _move_state_into
from lightning.fabric.utilities.rank_zero import rank_zero_deprecation, rank_zero_only, rank_zero_warn
from lightning.fabric.utilities.seed import reset_seed
from lightning.fabric.utilities.types import _PATH, _Stateful
def _has_meta_device_parameters(obj: Union[Module, Optimizer]) -> bool:
if isinstance(obj, Optimizer):
return any(
t.is_meta for param_group in obj.param_groups for t in param_group["params"] if isinstance(t, Parameter)
)
if isinstance(obj, Module):
return any(t.is_meta for t in obj.parameters())
raise TypeError(f"Expected `torch.nn.Module` or `torch.optim.Optimizer`, got: {type(obj).__name__}") | null |
155,526 | import shutil
from contextlib import ExitStack, nullcontext
from datetime import timedelta
from functools import partial
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Callable,
ContextManager,
Dict,
Generator,
List,
Literal,
Optional,
Set,
Tuple,
Type,
Union,
)
import torch
from lightning_utilities.core.imports import RequirementCache
from lightning_utilities.core.rank_zero import rank_zero_only as utils_rank_zero_only
from torch import Tensor
from torch.nn import Module, Parameter
from torch.optim import Optimizer
from typing_extensions import TypeGuard, override
from lightning.fabric.accelerators import Accelerator
from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment, Precision
from lightning.fabric.plugins.collectives.torch_collective import default_pg_timeout
from lightning.fabric.plugins.precision.fsdp import FSDPPrecision
from lightning.fabric.strategies.launchers.subprocess_script import _SubprocessScriptLauncher
from lightning.fabric.strategies.parallel import ParallelStrategy
from lightning.fabric.strategies.registry import _StrategyRegistry
from lightning.fabric.strategies.strategy import (
TBroadcast,
_apply_filter,
_BackwardSyncControl,
_Sharded,
_validate_keys_for_strict_loading,
)
from lightning.fabric.utilities.distributed import (
ReduceOp,
_distributed_is_initialized,
_get_default_process_group_backend_for_device,
_init_dist_connection,
_sync_ddp_if_available,
)
from lightning.fabric.utilities.distributed import group as _group
from lightning.fabric.utilities.imports import (
_TORCH_GREATER_EQUAL_2_0,
_TORCH_GREATER_EQUAL_2_1,
_TORCH_GREATER_EQUAL_2_2,
_TORCH_GREATER_EQUAL_2_3,
)
from lightning.fabric.utilities.init import _EmptyInit
from lightning.fabric.utilities.load import _METADATA_FILENAME, _lazy_load, _materialize_tensors, _move_state_into
from lightning.fabric.utilities.rank_zero import rank_zero_deprecation, rank_zero_only, rank_zero_warn
from lightning.fabric.utilities.seed import reset_seed
from lightning.fabric.utilities.types import _PATH, _Stateful
def _move_torchmetrics_to_device(module: torch.nn.Module, device: torch.device) -> None:
    """Move every ``torchmetrics.Metric`` submodule of ``module`` to ``device``.

    FSDP does not move parameter-less modules (such as metrics) to the device itself,
    see https://github.com/pytorch/pytorch/issues/113113, so it is done explicitly here.
    """
    if not RequirementCache("torchmetrics"):
        # torchmetrics isn't installed: there can be no Metric submodules
        return
    from torchmetrics import Metric

    for submodule in module.modules():
        if isinstance(submodule, Metric):
            submodule.to(device)  # `.to()` moves a module in-place
155,527 | import shutil
from contextlib import ExitStack, nullcontext
from datetime import timedelta
from functools import partial
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Callable,
ContextManager,
Dict,
Generator,
List,
Literal,
Optional,
Set,
Tuple,
Type,
Union,
)
import torch
from lightning_utilities.core.imports import RequirementCache
from lightning_utilities.core.rank_zero import rank_zero_only as utils_rank_zero_only
from torch import Tensor
from torch.nn import Module, Parameter
from torch.optim import Optimizer
from typing_extensions import TypeGuard, override
from lightning.fabric.accelerators import Accelerator
from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment, Precision
from lightning.fabric.plugins.collectives.torch_collective import default_pg_timeout
from lightning.fabric.plugins.precision.fsdp import FSDPPrecision
from lightning.fabric.strategies.launchers.subprocess_script import _SubprocessScriptLauncher
from lightning.fabric.strategies.parallel import ParallelStrategy
from lightning.fabric.strategies.registry import _StrategyRegistry
from lightning.fabric.strategies.strategy import (
TBroadcast,
_apply_filter,
_BackwardSyncControl,
_Sharded,
_validate_keys_for_strict_loading,
)
from lightning.fabric.utilities.distributed import (
ReduceOp,
_distributed_is_initialized,
_get_default_process_group_backend_for_device,
_init_dist_connection,
_sync_ddp_if_available,
)
from lightning.fabric.utilities.distributed import group as _group
from lightning.fabric.utilities.imports import (
_TORCH_GREATER_EQUAL_2_0,
_TORCH_GREATER_EQUAL_2_1,
_TORCH_GREATER_EQUAL_2_2,
_TORCH_GREATER_EQUAL_2_3,
)
from lightning.fabric.utilities.init import _EmptyInit
from lightning.fabric.utilities.load import _METADATA_FILENAME, _lazy_load, _materialize_tensors, _move_state_into
from lightning.fabric.utilities.rank_zero import rank_zero_deprecation, rank_zero_only, rank_zero_warn
from lightning.fabric.utilities.seed import reset_seed
from lightning.fabric.utilities.types import _PATH, _Stateful
# Feature flags for behavior that depends on the installed PyTorch release
# (the 2.3 check uses the base version so pre-release 2.3 builds also qualify).
_TORCH_GREATER_EQUAL_2_2 = compare_version("torch", operator.ge, "2.2.0")
_TORCH_GREATER_EQUAL_2_3 = compare_version("torch", operator.ge, "2.3.0", use_base_version=True)
def _distributed_checkpoint_save(converted_state: Dict[str, Any], path: Path) -> None:
    """Save a distributed (sharded) checkpoint to ``path``, dispatching on the torch version.

    torch >= 2.3 uses the modern ``torch.distributed.checkpoint.save`` entry point; older
    versions fall back to the deprecated writer-based APIs.
    """
    if _TORCH_GREATER_EQUAL_2_3:
        from torch.distributed.checkpoint import save
        # let torch automatically infer the writer to use. This might also support fsspec paths in the future
        # https://github.com/pytorch/pytorch/issues/118036
        save(converted_state, checkpoint_id=path)  # type: ignore[call-arg]
    else:  # deprecated
        from torch.distributed.checkpoint import FileSystemWriter
        if _TORCH_GREATER_EQUAL_2_2:
            from torch.distributed.checkpoint import save
        else:
            # `save_state_dict` was the pre-2.2 name of `save`
            from torch.distributed.checkpoint import save_state_dict as save
        # FSDP's FileSystemWriter streams the tensors to disk to minimize memory peaks
        writer = FileSystemWriter(path=path, single_file_per_rank=True)
        save(converted_state, writer)
155,528 | import shutil
from contextlib import ExitStack, nullcontext
from datetime import timedelta
from functools import partial
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Callable,
ContextManager,
Dict,
Generator,
List,
Literal,
Optional,
Set,
Tuple,
Type,
Union,
)
import torch
from lightning_utilities.core.imports import RequirementCache
from lightning_utilities.core.rank_zero import rank_zero_only as utils_rank_zero_only
from torch import Tensor
from torch.nn import Module, Parameter
from torch.optim import Optimizer
from typing_extensions import TypeGuard, override
from lightning.fabric.accelerators import Accelerator
from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment, Precision
from lightning.fabric.plugins.collectives.torch_collective import default_pg_timeout
from lightning.fabric.plugins.precision.fsdp import FSDPPrecision
from lightning.fabric.strategies.launchers.subprocess_script import _SubprocessScriptLauncher
from lightning.fabric.strategies.parallel import ParallelStrategy
from lightning.fabric.strategies.registry import _StrategyRegistry
from lightning.fabric.strategies.strategy import (
TBroadcast,
_apply_filter,
_BackwardSyncControl,
_Sharded,
_validate_keys_for_strict_loading,
)
from lightning.fabric.utilities.distributed import (
ReduceOp,
_distributed_is_initialized,
_get_default_process_group_backend_for_device,
_init_dist_connection,
_sync_ddp_if_available,
)
from lightning.fabric.utilities.distributed import group as _group
from lightning.fabric.utilities.imports import (
_TORCH_GREATER_EQUAL_2_0,
_TORCH_GREATER_EQUAL_2_1,
_TORCH_GREATER_EQUAL_2_2,
_TORCH_GREATER_EQUAL_2_3,
)
from lightning.fabric.utilities.init import _EmptyInit
from lightning.fabric.utilities.load import _METADATA_FILENAME, _lazy_load, _materialize_tensors, _move_state_into
from lightning.fabric.utilities.rank_zero import rank_zero_deprecation, rank_zero_only, rank_zero_warn
from lightning.fabric.utilities.seed import reset_seed
from lightning.fabric.utilities.types import _PATH, _Stateful
# Feature flags for torch-version-dependent checkpoint APIs
# (the 2.3 check uses the base version so pre-release 2.3 builds also qualify).
_TORCH_GREATER_EQUAL_2_2 = compare_version("torch", operator.ge, "2.2.0")
_TORCH_GREATER_EQUAL_2_3 = compare_version("torch", operator.ge, "2.3.0", use_base_version=True)
def _distributed_checkpoint_load(module_state: Dict[str, Any], path: Path) -> None:
    """Load a distributed (sharded) checkpoint from ``path`` in-place into ``module_state``.

    torch >= 2.3 uses the modern ``torch.distributed.checkpoint.load`` entry point; older
    versions fall back to the deprecated reader-based APIs.
    """
    if _TORCH_GREATER_EQUAL_2_3:
        from torch.distributed.checkpoint import load
        # let torch automatically infer the reader to use. This might also support fsspec paths in the future
        # https://github.com/pytorch/pytorch/issues/118036
        load(module_state, checkpoint_id=path)  # type: ignore[call-arg]
    else:  # deprecated
        from torch.distributed.checkpoint import FileSystemReader
        if _TORCH_GREATER_EQUAL_2_2:
            from torch.distributed.checkpoint import load
        else:
            # `load_state_dict` was the pre-2.2 name of `load`
            from torch.distributed.checkpoint import load_state_dict as load
        reader = FileSystemReader(path=path)
        load(module_state, reader)
155,529 | import io
from contextlib import ExitStack, nullcontext
from functools import partial
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, ContextManager, Dict, List, Literal, Optional, Set, Tuple, Type, Union
import torch
from torch import Tensor
from torch.nn import Module
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from typing_extensions import override
from lightning.fabric.accelerators import Accelerator
from lightning.fabric.accelerators.xla import _XLA_AVAILABLE
from lightning.fabric.plugins import XLAPrecision
from lightning.fabric.plugins.environments import XLAEnvironment
from lightning.fabric.plugins.io.xla import XLACheckpointIO
from lightning.fabric.strategies import ParallelStrategy, _StrategyRegistry
from lightning.fabric.strategies.fsdp import _apply_filter
from lightning.fabric.strategies.launchers.xla import _XLALauncher
from lightning.fabric.strategies.strategy import (
TBroadcast,
_BackwardSyncControl,
_Sharded,
_validate_keys_for_strict_loading,
)
from lightning.fabric.utilities.cloud_io import get_filesystem
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0
from lightning.fabric.utilities.init import _EmptyInit
from lightning.fabric.utilities.rank_zero import rank_zero_only, rank_zero_warn
from lightning.fabric.utilities.types import _PATH, Optimizable, ReduceOp
def _auto_wrap_policy_kwargs(policy: Optional["_POLICY"], kwargs: Dict) -> Dict:
if policy is None:
return kwargs
if isinstance(policy, set):
from torch_xla.distributed.fsdp.wrap import transformer_auto_wrap_policy
# this is not transformer specific despite the name
policy = partial(transformer_auto_wrap_policy, transformer_layer_cls=policy)
kwargs["auto_wrap_policy"] = policy
return kwargs | null |
155,530 | import io
from contextlib import ExitStack, nullcontext
from functools import partial
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, ContextManager, Dict, List, Literal, Optional, Set, Tuple, Type, Union
import torch
from torch import Tensor
from torch.nn import Module
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from typing_extensions import override
from lightning.fabric.accelerators import Accelerator
from lightning.fabric.accelerators.xla import _XLA_AVAILABLE
from lightning.fabric.plugins import XLAPrecision
from lightning.fabric.plugins.environments import XLAEnvironment
from lightning.fabric.plugins.io.xla import XLACheckpointIO
from lightning.fabric.strategies import ParallelStrategy, _StrategyRegistry
from lightning.fabric.strategies.fsdp import _apply_filter
from lightning.fabric.strategies.launchers.xla import _XLALauncher
from lightning.fabric.strategies.strategy import (
TBroadcast,
_BackwardSyncControl,
_Sharded,
_validate_keys_for_strict_loading,
)
from lightning.fabric.utilities.cloud_io import get_filesystem
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0
from lightning.fabric.utilities.init import _EmptyInit
from lightning.fabric.utilities.rank_zero import rank_zero_only, rank_zero_warn
from lightning.fabric.utilities.types import _PATH, Optimizable, ReduceOp
_POLICY_SET = Set[Type[Module]]
def _activation_checkpointing_auto_wrapper(policy: _POLICY_SET, module: Module, *args: Any, **kwargs: Any) -> Module:
    """Wrap ``module`` in XLA-FSDP, first applying activation checkpointing when its class is in ``policy``."""
    from torch_xla.distributed.fsdp import XlaFullyShardedDataParallel as XLAFSDP
    from torch_xla.distributed.fsdp import checkpoint_module

    if isinstance(module, tuple(policy)):
        module = checkpoint_module(module)
    return XLAFSDP(module, *args, **kwargs)
def _activation_checkpointing_kwargs(policy: Optional[_POLICY_SET], kwargs: Dict) -> Dict:
    """Translate an ``activation_checkpointing_policy`` into XLA-FSDP's ``auto_wrapper_callable`` kwarg.

    An empty/absent policy is a no-op; setting both the policy and an explicit
    ``auto_wrapper_callable`` is rejected.
    """
    if not policy:
        return kwargs
    if "auto_wrapper_callable" in kwargs:
        raise ValueError(
            "You cannot set both `auto_wrapper_callable` and `activation_checkpointing_policy`. Choose one"
        )
    if not isinstance(policy, set):
        raise TypeError(
            f"`activation_checkpointing_policy` must be a set, found {policy}. You can try defining and"
            " passing `auto_wrapper_callable` instead."
        )
    # bind the policy so the wrapper only needs the (module, *args, **kwargs) signature
    kwargs["auto_wrapper_callable"] = partial(_activation_checkpointing_auto_wrapper, policy)
    return kwargs
155,531 | import logging
from abc import ABC, abstractmethod
from contextlib import ExitStack
from typing import Any, Callable, ContextManager, Dict, Iterable, List, Optional, Tuple, TypeVar, Union
import torch
from torch import Tensor
from torch.nn import Module
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from lightning.fabric.accelerators import Accelerator
from lightning.fabric.plugins.io.checkpoint_io import CheckpointIO
from lightning.fabric.plugins.io.torch_io import TorchCheckpointIO
from lightning.fabric.plugins.precision import Precision
from lightning.fabric.strategies.launchers.launcher import _Launcher
from lightning.fabric.strategies.registry import _StrategyRegistry
from lightning.fabric.utilities.apply_func import move_data_to_device
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0
from lightning.fabric.utilities.init import _EmptyInit
from lightning.fabric.utilities.types import _PATH, Optimizable, ReduceOp, _Stateful
def _validate_keys_for_strict_loading(
requested_keys: Iterable[str], checkpoint_keys: Iterable[str], strict: bool
) -> None:
invalid_keys = [k for k in requested_keys if k not in checkpoint_keys]
if strict and invalid_keys:
raise KeyError(
f"The requested state contains a key '{invalid_keys[0]}' that does not exist in the loaded checkpoint."
f" To disable strict loading, set `strict=False`."
) | null |
155,532 | import logging
from abc import ABC, abstractmethod
from contextlib import ExitStack
from typing import Any, Callable, ContextManager, Dict, Iterable, List, Optional, Tuple, TypeVar, Union
import torch
from torch import Tensor
from torch.nn import Module
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from lightning.fabric.accelerators import Accelerator
from lightning.fabric.plugins.io.checkpoint_io import CheckpointIO
from lightning.fabric.plugins.io.torch_io import TorchCheckpointIO
from lightning.fabric.plugins.precision import Precision
from lightning.fabric.strategies.launchers.launcher import _Launcher
from lightning.fabric.strategies.registry import _StrategyRegistry
from lightning.fabric.utilities.apply_func import move_data_to_device
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0
from lightning.fabric.utilities.init import _EmptyInit
from lightning.fabric.utilities.types import _PATH, Optimizable, ReduceOp, _Stateful
def _apply_filter(
key: str, filter: Dict[str, Callable[[str, Any], bool]], source_dict: object, target_dict: Dict[str, Any]
) -> None:
# filter out if necessary
if key in filter and isinstance(source_dict, dict):
filter_fn = filter[key]
for k, v in source_dict.items():
if filter_fn(k, v):
# save the state
target_dict.setdefault(key, {})
target_dict[key][k] = v
else:
# save the state
target_dict[key] = source_dict | null |
155,533 | import sys
from typing import Any
import lightning.fabric as fabric
from lightning.fabric.accelerators import XLAAccelerator
from lightning.fabric.plugins.precision import XLAPrecision
from lightning.fabric.strategies import _StrategyRegistry
from lightning.fabric.strategies.single_xla import SingleDeviceXLAStrategy
from lightning.fabric.utilities.rank_zero import rank_zero_deprecation
def _patch_sys_modules() -> None:
self = sys.modules[__name__]
sys.modules["lightning.fabric.strategies.single_tpu"] = self
sys.modules["lightning.fabric.accelerators.tpu"] = self
sys.modules["lightning.fabric.plugins.precision.tpu"] = self
sys.modules["lightning.fabric.plugins.precision.tpu_bf16"] = self
sys.modules["lightning.fabric.plugins.precision.xlabf16"] = self | null |
155,534 | import sys
from typing import Any
import lightning.fabric as fabric
from lightning.fabric.accelerators import XLAAccelerator
from lightning.fabric.plugins.precision import XLAPrecision
from lightning.fabric.strategies import _StrategyRegistry
from lightning.fabric.strategies.single_xla import SingleDeviceXLAStrategy
from lightning.fabric.utilities.rank_zero import rank_zero_deprecation
class SingleTPUStrategy(SingleDeviceXLAStrategy):
    """Legacy class.

    Use :class:`~lightning.fabric.strategies.single_xla.SingleDeviceXLAStrategy` instead.
    """
    # Deprecation shim: warns, then behaves exactly like the replacement class.
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        rank_zero_deprecation("The 'single_tpu' strategy is deprecated. Use 'single_xla' instead.")
        super().__init__(*args, **kwargs)
    # NOTE(review): receiver is `cls` but no `@classmethod` decorator is visible here —
    # presumably lost in transit; confirm against the original source.
    def register_strategies(cls, strategy_registry: _StrategyRegistry) -> None:
        # Register the legacy name only once to keep re-imports idempotent.
        if "single_tpu" not in strategy_registry:
            strategy_registry.register("single_tpu", cls, description="Legacy class. Use `single_xla` instead.")
class TPUAccelerator(XLAAccelerator):
    """Legacy class.

    Use :class:`~lightning.fabric.accelerators.xla.XLAAccelerator` instead.
    """
    # Deprecation shim: warns about the rename, then defers to the replacement class.
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        rank_zero_deprecation(
            "The `TPUAccelerator` class is deprecated. Use `lightning.fabric.accelerators.XLAAccelerator` instead."
        )
        super().__init__(*args, **kwargs)
class TPUPrecision(XLAPrecision):
    """Legacy class.

    Use :class:`~lightning.fabric.plugins.precision.xla.XLAPrecision` instead.
    """
    # Deprecation shim. Note that *args/**kwargs are accepted for signature compatibility
    # but deliberately discarded: this class always pins full 32-bit precision.
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        rank_zero_deprecation(
            "The `TPUPrecision` class is deprecated. Use `lightning.fabric.plugins.precision.XLAPrecision`" " instead."
        )
        super().__init__(precision="32-true")
class XLABf16Precision(XLAPrecision):
    """Legacy class.

    Use :class:`~lightning.fabric.plugins.precision.xla.XLAPrecision` instead.
    """
    # Deprecation shim. *args/**kwargs are accepted for compatibility but discarded:
    # this class always pins bfloat16 precision.
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        rank_zero_deprecation(
            "The `XLABf16Precision` class is deprecated. Use"
            " `lightning.fabric.plugins.precision.XLAPrecision` instead."
        )
        super().__init__(precision="bf16-true")
class TPUBf16Precision(XLABf16Precision):
    """Legacy class.

    Use :class:`~lightning.fabric.plugins.precision.xla.XLAPrecision` instead.
    """
    # Deprecation shim: warns, then delegates to XLABf16Precision (which itself pins bf16).
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        rank_zero_deprecation(
            "The `TPUBf16Precision` class is deprecated. Use"
            " `lightning.fabric.plugins.precision.XLAPrecision` instead."
        )
        super().__init__(*args, **kwargs)
SingleTPUStrategy.register_strategies(fabric.strategies.STRATEGY_REGISTRY)
def _patch_classes() -> None:
    """Re-attach the legacy TPU/XLA class names at their historical module locations."""
    setattr(fabric.strategies, "SingleTPUStrategy", SingleTPUStrategy)
    setattr(fabric.accelerators, "TPUAccelerator", TPUAccelerator)
    # each legacy precision class lives both on the plugins package and its precision subpackage
    for precision_cls in (TPUPrecision, TPUBf16Precision, XLABf16Precision):
        setattr(fabric.plugins, precision_cls.__name__, precision_cls)
        setattr(fabric.plugins.precision, precision_cls.__name__, precision_cls)
155,535 | import os
import socket
from typing_extensions import override
from lightning.fabric.plugins.environments.cluster_environment import ClusterEnvironment
from lightning.fabric.utilities.rank_zero import rank_zero_only
The provided code snippet includes necessary dependencies for implementing the `find_free_network_port` function. Write a Python function `def find_free_network_port() -> int` to solve the following problem:
Finds a free port on localhost. It is useful in single-node training when we don't want to connect to a real main node but have to set the `MASTER_PORT` environment variable.
Here is the function:
def find_free_network_port() -> int:
    """Finds a free port on localhost.

    It is useful in single-node training when we don't want to connect to a real main
    node but still have to set the `MASTER_PORT` environment variable.
    """
    # binding to port 0 lets the OS pick an ephemeral free port; the socket is closed
    # immediately, so the port is only *likely* still free when the caller uses it
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.bind(("", 0))
        return sock.getsockname()[1]
155,536 | import logging
import os
import re
import shutil
import signal
import sys
from typing import Optional
from typing_extensions import override
from lightning.fabric.plugins.environments.cluster_environment import ClusterEnvironment
from lightning.fabric.utilities.imports import _IS_WINDOWS
from lightning.fabric.utilities.rank_zero import rank_zero_warn
from lightning.fabric.utilities.warnings import PossibleUserWarning
def _is_slurm_interactive_mode() -> bool:
    """Return whether the current SLURM job is an interactive session (job name 'bash' or 'interactive')."""
    job_name = SLURMEnvironment.job_name()
    return job_name in ("bash", "interactive")
def _is_srun_used() -> bool:
    """Return whether this process was launched via ``srun`` (SLURM task env set, non-interactive job)."""
    if "SLURM_NTASKS" not in os.environ:
        return False
    return not _is_slurm_interactive_mode()
155,537 | from typing import Any, Mapping, Type, Union
import torch
from torch import Tensor
def _convert_fp_tensor(tensor: Tensor, dst_type: Union[str, torch.dtype]) -> Tensor:
return tensor.to(dst_type) if torch.is_floating_point(tensor) else tensor | null |
155,538 | import logging
from contextlib import ExitStack
from typing import TYPE_CHECKING, Any, ContextManager, Literal, Mapping, Optional, Union
import torch
from lightning_utilities import apply_to_collection
from lightning_utilities.core.imports import RequirementCache
from torch import Tensor
from typing_extensions import override
from lightning.fabric.plugins.precision.precision import Precision
from lightning.fabric.plugins.precision.utils import (
_ClassReplacementContextManager,
_convert_fp_tensor,
_DtypeContextManager,
)
from lightning.fabric.utilities.rank_zero import rank_zero_info, rank_zero_warn
log = logging.getLogger(__name__)
def _convert_layers(module: torch.nn.Module) -> None:
    """Recursively replace ``nn.Linear`` and ``nn.LayerNorm`` submodules with Transformer Engine equivalents.

    Existing weights (and biases) are copied into the replacements, so the swap is a drop-in.
    Linear layers whose in/out features are not divisible by 8/16 are skipped (with a warning)
    because FP8 kernels require those alignments.
    """
    import transformer_engine.pytorch as te
    for name, child in module.named_children():
        if isinstance(child, torch.nn.Linear):
            if child.in_features % 8 != 0 or child.out_features % 16 != 0:
                # https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/examples/fp8_primer.html#FP8-autocasting
                rank_zero_warn(
                    "Support for FP8 in the linear layers with this plugin is currently limited to"
                    " tensors with shapes where the dimensions are divisible by 8 and 16 respectively."
                    f" The layer {name!r} does not fit this criteria. You might want to add padding to your inputs."
                )
                continue
            has_bias = child.bias is not None
            replacement = te.Linear(child.in_features, child.out_features, bias=has_bias)
            # clone so the replacement owns its own storage, detached from the old layer
            replacement.weight.data = child.weight.data.clone()
            if has_bias:
                replacement.bias.data = child.bias.data.clone()
            log.debug(f"Replacing layer {name!r} with Transformer Engine equivalent")
            module.__setattr__(name, replacement)
        elif isinstance(child, torch.nn.LayerNorm):
            replacement = te.LayerNorm(child.normalized_shape[0], eps=child.eps)
            replacement.weight.data = child.weight.data.clone()
            replacement.bias.data = child.bias.data.clone()
            log.debug(f"Replacing layer {name!r} with Transformer Engine equivalent")
            module.__setattr__(name, replacement)
        else:
            # there are other transformer engine layers that we could convert but require fusion. full list at:
            # https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/pytorch.html
            _convert_layers(child)
155,539 | from typing import Any, ContextManager, Dict, Literal, Optional
import torch
from lightning_utilities.core.apply_func import apply_to_collection
from torch import Tensor
from torch.nn import Module
from torch.optim import LBFGS, Optimizer
from typing_extensions import override
from lightning.fabric.accelerators.cuda import _patch_cuda_is_available
from lightning.fabric.plugins.precision.precision import Precision
from lightning.fabric.plugins.precision.utils import _convert_fp_tensor
from lightning.fabric.utilities.types import Optimizable
The provided code snippet includes necessary dependencies for implementing the `_optimizer_handles_unscaling` function. Write a Python function `def _optimizer_handles_unscaling(optimizer: Any) -> bool` to solve the following problem:
Determines whether a PyTorch optimizer handles unscaling gradients in the step method rather than through the :class:`torch.cuda.amp.GradScaler`. Since the current implementation of this function checks a PyTorch internal variable on the optimizer, the return value will only be reliable for built-in PyTorch optimizers.
Here is the function:
def _optimizer_handles_unscaling(optimizer: Any) -> bool:
"""Determines whether a PyTorch optimizer handles unscaling gradients in the step method rather than through the
:class:`torch.cuda.amp.GradScaler`.
Since, the current implementation of this function checks a PyTorch internal variable on the optimizer, the return
value will only be reliable for built-in PyTorch optimizers.
"""
return getattr(optimizer, "_step_supports_amp_scaling", False) | Determines whether a PyTorch optimizer handles unscaling gradients in the step method rather than through the :class:`torch.cuda.amp.GradScaler`. Since, the current implementation of this function checks a PyTorch internal variable on the optimizer, the return value will only be reliable for built-in PyTorch optimizers. |
155,540 | import functools
import logging
import math
import os
import warnings
from contextlib import ExitStack
from functools import partial
from types import ModuleType
from typing import Any, Callable, ContextManager, Literal, Optional, OrderedDict, Set, Tuple, Type, cast
import torch
from lightning_utilities import apply_to_collection
from lightning_utilities.core.imports import RequirementCache
from torch import Tensor
from torch.nn import init
from torch.nn.modules.module import _IncompatibleKeys
from typing_extensions import Self, override
from lightning.fabric.plugins.precision.precision import Precision
from lightning.fabric.plugins.precision.utils import (
_ClassReplacementContextManager,
_convert_fp_tensor,
_DtypeContextManager,
)
from lightning.fabric.utilities.types import _DEVICE
log = logging.getLogger(__name__)
def _replace_param(
    param: torch.nn.Parameter, data: torch.Tensor, quant_state: Optional[Tuple] = None
) -> torch.nn.Parameter:
    """Swap the data of ``param`` for ``data``, re-creating the parameter when it lives on the meta device.

    For bitsandbytes ``Params4bit`` parameters the quantization state is carried over as well.
    Returns the (possibly new) parameter object.
    """
    bnb = _import_bitsandbytes()
    # doing `param.data = weight` raises a RuntimeError if param.data was on meta-device, so
    # we need to re-create the parameters instead of overwriting the data
    if param.device.type == "meta":
        if isinstance(param, bnb.nn.Params4bit):
            return bnb.nn.Params4bit(
                data,
                requires_grad=data.requires_grad,
                quant_state=quant_state,
                compress_statistics=param.compress_statistics,
                quant_type=param.quant_type,
            )
        return torch.nn.Parameter(data, requires_grad=data.requires_grad)
    # non-meta parameters can be updated in place
    param.data = data
    if isinstance(param, bnb.nn.Params4bit):
        param.quant_state = quant_state
    return param
def _convert_layers(module: torch.nn.Module, linear_cls: Type, ignore_modules: Set[str], prefix: str = "") -> None:
    """Recursively replace ``nn.Linear`` submodules with the bitsandbytes ``linear_cls`` equivalent.

    Submodules whose fully-qualified name starts with any entry of ``ignore_modules`` are skipped.
    Weights and biases are copied over from the original layers.
    """
    for name, child in module.named_children():
        fullname = f"{prefix}.{name}" if prefix else name
        if isinstance(child, torch.nn.Linear) and not any(fullname.startswith(s) for s in ignore_modules):
            log.debug(f"Replacing layer {fullname!r} with bitsandbytes equivalent")
            has_bias = child.bias is not None
            # since we are going to copy over the child's data, the device doesn't matter. I chose CPU
            # to avoid spiking CUDA memory even though initialization is slower
            # 4bit layers support quantizing from meta-device params so this is only relevant for 8-bit
            # NOTE(review): `_Linear4bit` is looked up in module globals here — presumably injected by
            # `_import_bitsandbytes()`; confirm it is always defined before this function runs.
            _Linear4bit = globals()["_Linear4bit"]
            device = torch.device("meta" if issubclass(linear_cls, _Linear4bit) else "cpu")
            replacement = linear_cls(
                child.in_features,
                child.out_features,
                bias=has_bias,
                device=device,
            )
            if has_bias:
                replacement.bias = _replace_param(replacement.bias, child.bias.data.clone())
            state = {"quant_state": replacement.weight.quant_state if issubclass(linear_cls, _Linear4bit) else None}
            replacement.weight = _replace_param(replacement.weight, child.weight.data.clone(), **state)
            module.__setattr__(name, replacement)
        else:
            _convert_layers(child, linear_cls, ignore_modules, prefix=fullname)
155,541 | from typing import List, Union
import torch
from typing_extensions import override
from lightning.fabric.accelerators.accelerator import Accelerator
from lightning.fabric.accelerators.registry import _AcceleratorRegistry
The provided code snippet includes necessary dependencies for implementing the `_parse_cpu_cores` function. Write a Python function `def _parse_cpu_cores(cpu_cores: Union[int, str, List[int]]) -> int` to solve the following problem:
Parses the cpu_cores given in the format as accepted by the ``devices`` argument in the :class:`~lightning.pytorch.trainer.trainer.Trainer`. Args: cpu_cores: An int > 0. Returns: An int representing the number of processes Raises: MisconfigurationException: If cpu_cores is not an int > 0
Here is the function:
def _parse_cpu_cores(cpu_cores: Union[int, str, List[int]]) -> int:
"""Parses the cpu_cores given in the format as accepted by the ``devices`` argument in the
:class:`~lightning.pytorch.trainer.trainer.Trainer`.
Args:
cpu_cores: An int > 0.
Returns:
An int representing the number of processes
Raises:
MisconfigurationException:
If cpu_cores is not an int > 0
"""
if isinstance(cpu_cores, str) and cpu_cores.strip().isdigit():
cpu_cores = int(cpu_cores)
if not isinstance(cpu_cores, int) or cpu_cores <= 0:
raise TypeError("`devices` selected with `CPUAccelerator` should be an int > 0.")
return cpu_cores | Parses the cpu_cores given in the format as accepted by the ``devices`` argument in the :class:`~lightning.pytorch.trainer.trainer.Trainer`. Args: cpu_cores: An int > 0. Returns: An int representing the number of processes Raises: MisconfigurationException: If cpu_cores is not an int > 0 |
155,542 | import functools
from typing import Any, List, Union
import torch
from lightning_utilities.core.imports import RequirementCache
from typing_extensions import override
from lightning.fabric.accelerators.accelerator import Accelerator
from lightning.fabric.accelerators.registry import _AcceleratorRegistry
from lightning.fabric.utilities.device_parser import _check_data_type
_XLA_GREATER_EQUAL_2_1 = RequirementCache("torch_xla>=2.1")
def _using_pjrt() -> bool:
    """Return whether the PjRT runtime is in use."""
    # delete me when torch_xla 2.2 is the min supported version (XRT support was dropped there)
    if not _XLA_GREATER_EQUAL_2_1:
        from torch_xla.experimental import pjrt

        return pjrt.using_pjrt()
    from torch_xla import runtime as xr

    return xr.using_pjrt()
155,543 | import functools
from typing import Any, List, Union
import torch
from lightning_utilities.core.imports import RequirementCache
from typing_extensions import override
from lightning.fabric.accelerators.accelerator import Accelerator
from lightning.fabric.accelerators.registry import _AcceleratorRegistry
from lightning.fabric.utilities.device_parser import _check_data_type
def _check_tpu_devices_valid(devices: object) -> None:
    """Validate a TPU ``devices`` selection, raising ``ValueError`` when it is unsupported."""
    count = XLAAccelerator.auto_device_count()
    # A plain int must select either a single core or all cores.
    valid_int = isinstance(devices, int) and devices in {1, count}
    # A one-element list/tuple picks a specific core by index.
    valid_pick = (
        isinstance(devices, (list, tuple)) and len(devices) == 1 and 0 <= devices[0] <= count - 1
    )
    if valid_int or valid_pick:
        return
    raise ValueError(
        f"`devices` can only be 'auto', 1, {count} or [<0-{count - 1}>] for TPUs. Got {devices!r}"
    )
def _parse_tpu_devices_str(devices: str) -> Union[int, List[int]]:
devices = devices.strip()
try:
return int(devices)
except ValueError:
try:
return [int(x.strip()) for x in devices.split(",") if len(x) > 0]
except ValueError:
raise ValueError(f"Could not parse the selected TPU devices: {devices!r}")
def _check_data_type(device_ids: object) -> None:
"""Checks that the device_ids argument is one of the following: int, string, or sequence of integers.
Args:
device_ids: gpus/tpu_cores parameter as passed to the Trainer
Raises:
TypeError:
If ``device_ids`` of GPU/TPUs aren't ``int``, ``str`` or sequence of ``int```
"""
msg = "Device IDs (GPU/TPU) must be an int, a string, a sequence of ints, but you passed"
if device_ids is None:
raise TypeError(f"{msg} None")
if isinstance(device_ids, (MutableSequence, tuple)):
for id_ in device_ids:
id_type = type(id_) # because `isinstance(False, int)` -> True
if id_type is not int:
raise TypeError(f"{msg} a sequence of {type(id_).__name__}.")
elif type(device_ids) not in (int, str):
raise TypeError(f"{msg} {device_ids!r}.")
The provided code snippet includes necessary dependencies for implementing the `_parse_tpu_devices` function. Write a Python function `def _parse_tpu_devices(devices: Union[int, str, List[int]]) -> Union[int, List[int]]` to solve the following problem:
Parses the TPU devices given in the format as accepted by the :class:`~lightning.pytorch.trainer.trainer.Trainer` and :class:`~lightning.fabric.Fabric`. Args: devices: An int of 1 or string '1' indicates that 1 core with multi-processing should be used An int 8 or string '8' indicates that all 8 cores with multi-processing should be used A single element list of int or string can be used to indicate the specific TPU core to use. Returns: A list of tpu cores to be used.
Here is the function:
def _parse_tpu_devices(devices: Union[int, str, List[int]]) -> Union[int, List[int]]:
    """Parses the TPU devices given in the format as accepted by the
    :class:`~lightning.pytorch.trainer.trainer.Trainer` and :class:`~lightning.fabric.Fabric`.

    Args:
        devices: 1 or "1" selects one core with multi-processing; 8 or "8" selects all
            eight cores; a single-element list selects one specific core by index.

    Returns:
        The validated device count, or the list of selected core indices.
    """
    _check_data_type(devices)
    parsed = _parse_tpu_devices_str(devices) if isinstance(devices, str) else devices
    _check_tpu_devices_valid(parsed)
    return parsed
155,544 | from typing import Any, Callable, Dict, List, Optional
from typing_extensions import override
from lightning.fabric.utilities.exceptions import MisconfigurationException
from lightning.fabric.utilities.registry import _register_classes
class _AcceleratorRegistry(dict):
"""This class is a Registry that stores information about the Accelerators.
The Accelerators are mapped to strings. These strings are names that identify
an accelerator, e.g., "gpu". It also returns Optional description and
parameters to initialize the Accelerator, which were defined during the
registration.
The motivation for having a AcceleratorRegistry is to make it convenient
for the Users to try different accelerators by passing mapped aliases
to the accelerator flag to the Trainer.
Example::
class SOTAAccelerator(Accelerator):
def __init__(self, a, b):
...
or
AcceleratorRegistry.register("sota", SOTAAccelerator, description="Custom sota accelerator", a=1, b=True)
"""
def register(
self,
name: str,
accelerator: Optional[Callable] = None,
description: str = "",
override: bool = False,
**init_params: Any,
) -> Callable:
"""Registers a accelerator mapped to a name and with required metadata.
Args:
name : the name that identifies a accelerator, e.g. "gpu"
accelerator : accelerator class
description : accelerator description
override : overrides the registered accelerator, if True
init_params: parameters to initialize the accelerator
"""
if not (name is None or isinstance(name, str)):
raise TypeError(f"`name` must be a str, found {name}")
if name in self and not override:
raise MisconfigurationException(f"'{name}' is already present in the registry. HINT: Use `override=True`.")
data: Dict[str, Any] = {}
data["description"] = description
data["init_params"] = init_params
def do_register(name: str, accelerator: Callable) -> Callable:
data["accelerator"] = accelerator
data["accelerator_name"] = name
self[name] = data
return accelerator
if accelerator is not None:
return do_register(name, accelerator)
return do_register
def get(self, name: str, default: Optional[Any] = None) -> Any:
"""Calls the registered accelerator with the required parameters and returns the accelerator object.
Args:
name (str): the name that identifies a accelerator, e.g. "gpu"
"""
if name in self:
data = self[name]
return data["accelerator"](**data["init_params"])
if default is not None:
return default
err_msg = "'{}' not found in registry. Available names: {}"
available_names = self.available_accelerators()
raise KeyError(err_msg.format(name, available_names))
def remove(self, name: str) -> None:
"""Removes the registered accelerator by name."""
self.pop(name)
def available_accelerators(self) -> List[str]:
"""Returns a list of registered accelerators."""
return list(self.keys())
def __str__(self) -> str:
return "Registered Accelerators: {}".format(", ".join(self.available_accelerators()))
def _register_classes(registry: Any, method: str, module: ModuleType, parent: Type[object]) -> None:
    """Call ``cls.<method>(registry)`` for every class in ``module`` that subclasses ``parent`` and overrides it."""
    for _, candidate in getmembers(module, isclass):
        if not issubclass(candidate, parent):
            continue
        if is_overridden(method, candidate, parent):
            getattr(candidate, method)(registry)
class Accelerator(ABC):
    """The Accelerator base class.

    An Accelerator is meant to deal with one type of hardware.

    .. warning:: Writing your own accelerator is an :ref:`experimental <versioning:Experimental API>` feature.
    """

    # NOTE(review): several methods below have no `self` parameter; at the original
    # definition site they are presumably decorated with @staticmethod (and
    # `register_accelerators` with @classmethod) — confirm against the upstream class.

    def setup_device(self, device: torch.device) -> None:
        """Create and prepare the device for the current process."""

    def teardown(self) -> None:
        """Clean up any state created by the accelerator."""

    def parse_devices(devices: Any) -> Any:
        """Accelerator device parsing logic."""

    def get_parallel_devices(devices: Any) -> Any:
        """Gets parallel devices for the Accelerator."""

    def auto_device_count() -> int:
        """Get the device count when set to auto."""

    def is_available() -> bool:
        """Detect if the hardware is available."""

    def register_accelerators(cls, accelerator_registry: _AcceleratorRegistry) -> None:
        # No-op hook; subclasses register their aliases into the given registry.
        pass
The provided code snippet includes necessary dependencies for implementing the `call_register_accelerators` function. Write a Python function `def call_register_accelerators(registry: _AcceleratorRegistry, base_module: str) -> None` to solve the following problem:
Legacy. Do not use.
Here is the function:
def call_register_accelerators(registry: _AcceleratorRegistry, base_module: str) -> None:  # pragma: no-cover
    """Legacy.

    Do not use.
    """
    import importlib

    from lightning.fabric.accelerators.accelerator import Accelerator

    module = importlib.import_module(base_module)
    _register_classes(registry, "register_accelerators", module, Accelerator)
155,545 | import os
import warnings
from contextlib import contextmanager
from functools import lru_cache
from typing import Generator, List, Optional, Union, cast
import torch
from typing_extensions import override
from lightning.fabric.accelerators.accelerator import Accelerator
from lightning.fabric.accelerators.registry import _AcceleratorRegistry
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0
from lightning.fabric.utilities.rank_zero import rank_zero_info
def _get_all_visible_cuda_devices() -> List[int]:
    """Returns a list of all visible CUDA GPU devices.

    Devices masked by the environment variable ``CUDA_VISIBLE_DEVICES`` won't be returned here. For example, assume you
    have 8 physical GPUs. If ``CUDA_VISIBLE_DEVICES="1,3,6"``, then this function will return the list ``[0, 1, 2]``
    because these are the three visible GPUs after applying the mask ``CUDA_VISIBLE_DEVICES``.
    """
    # Indices are relative to the visible set, i.e. 0..num_cuda_devices()-1.
    return list(range(num_cuda_devices()))
The provided code snippet includes necessary dependencies for implementing the `find_usable_cuda_devices` function. Write a Python function `def find_usable_cuda_devices(num_devices: int = -1) -> List[int]` to solve the following problem:
Returns a list of all available and usable CUDA GPU devices. A GPU is considered usable if we can successfully move a tensor to the device, and this is what this function tests for each GPU on the system until the target number of usable devices is found. A subset of GPUs on the system might be used by other processes, and if the GPU is configured to operate in 'exclusive' mode (configurable by the admin), then only one process is allowed to occupy it. Args: num_devices: The number of devices you want to request. By default, this function will return as many as there are usable CUDA GPU devices available. Warning: If multiple processes call this function at the same time, there can be race conditions in the case where both processes determine that the device is unoccupied, leading into one of them crashing later on.
Here is the function:
def find_usable_cuda_devices(num_devices: int = -1) -> List[int]:
    """Returns a list of all available and usable CUDA GPU devices.

    A GPU counts as usable when a tensor can be moved onto it; each visible GPU is probed
    in turn until the requested number of usable devices has been found. GPUs may be held
    by other processes, and in 'exclusive' mode only one process may occupy a device.

    Args:
        num_devices: How many devices to request. With the default of -1, every usable
            CUDA GPU on the machine is returned.

    Warning:
        If multiple processes call this function at the same time, there can be race conditions in the case where
        both processes determine that the device is unoccupied, leading into one of them crashing later on.
    """
    if num_devices == 0:
        return []
    visible_devices = _get_all_visible_cuda_devices()
    if not visible_devices:
        raise ValueError(
            f"You requested to find {num_devices} devices but there are no visible CUDA devices on this machine."
        )
    if num_devices > len(visible_devices):
        raise ValueError(
            f"You requested to find {num_devices} devices but this machine only has {len(visible_devices)} GPUs."
        )

    usable: List[int] = []
    occupied: List[int] = []
    for idx in visible_devices:
        try:
            # Allocating a tiny tensor probes whether the device can be acquired.
            torch.tensor(0, device=torch.device("cuda", idx))
        except RuntimeError:
            occupied.append(idx)
        else:
            usable.append(idx)
            if len(usable) == num_devices:
                # exit early once the requested number of GPUs is found
                break

    if num_devices != -1 and len(usable) != num_devices:
        raise RuntimeError(
            f"You requested to find {num_devices} devices but only {len(usable)} are currently available."
            f" The devices {occupied} are occupied by other processes and can't be used at the moment."
        )
    return usable
155,546 | import os
import warnings
from contextlib import contextmanager
from functools import lru_cache
from typing import Generator, List, Optional, Union, cast
import torch
from typing_extensions import override
from lightning.fabric.accelerators.accelerator import Accelerator
from lightning.fabric.accelerators.registry import _AcceleratorRegistry
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0
from lightning.fabric.utilities.rank_zero import rank_zero_info
def is_cuda_available() -> bool:
    """Returns a bool indicating if CUDA is currently available.

    Unlike :func:`torch.cuda.is_available`, this function does its best not to create a CUDA context for fork support,
    if the platform allows it.
    """
    # `PYTORCH_NVML_BASED_CUDA_CHECK=1` is set in lightning.fabric.__init__.py, so on
    # torch >= 2.0 the builtin check is already fork-safe.
    if _TORCH_GREATER_EQUAL_2_0:
        return torch.cuda.is_available()
    return num_cuda_devices() > 0
def _device_count_nvml() -> int:
    """Return number of devices as reported by NVML taking CUDA_VISIBLE_DEVICES into account.

    Negative value is returned if NVML discovery or initialization has failed.
    """
    # NOTE(review): _parse_visible_devices / _raw_device_uuid_nvml / _transform_uuid_to_ordinals /
    # _raw_device_count_nvml are presumably the private helpers from torch.cuda — confirm
    # the imports at the top of the file.
    visible_devices = _parse_visible_devices()
    if not visible_devices:
        # The mask explicitly hides every device.
        return 0
    try:
        if isinstance(visible_devices[0], str):
            # Skip MIG parsing
            if visible_devices[0].startswith("MIG-"):
                return -1
            uuids = _raw_device_uuid_nvml()
            if uuids is None:
                return -1
            # Map UUID entries back to device ordinals.
            visible_devices = _transform_uuid_to_ordinals(cast(List[str], visible_devices), uuids)
        else:
            raw_cnt = _raw_device_count_nvml()
            if raw_cnt <= 0:
                return raw_cnt
            # Trim the list up to a maximum available device
            for idx, val in enumerate(visible_devices):
                if cast(int, val) >= raw_cnt:
                    return idx
    except (OSError, AttributeError):
        # NVML probing failed; signal "unknown" to the caller.
        return -1
    return len(visible_devices)
_TORCH_GREATER_EQUAL_2_0 = compare_version("torch", operator.ge, "2.0.0")
The provided code snippet includes necessary dependencies for implementing the `_patch_cuda_is_available` function. Write a Python function `def _patch_cuda_is_available() -> Generator` to solve the following problem:
Context manager that safely patches :func:`torch.cuda.is_available` with its NVML-based version if possible.
Here is the function:
def _patch_cuda_is_available() -> Generator:
    """Context manager that safely patches :func:`torch.cuda.is_available` with its NVML-based version if possible."""
    # NOTE(review): this generator is presumably decorated with @contextmanager at the
    # original definition site (`contextmanager` is imported above) — confirm.
    if hasattr(torch._C, "_cuda_getDeviceCount") and _device_count_nvml() >= 0 and not _TORCH_GREATER_EQUAL_2_0:
        # we can safely patch is_available if both torch has CUDA compiled and the NVML count is succeeding
        # otherwise, patching is_available could lead to attribute errors or infinite recursion
        orig_check = torch.cuda.is_available
        torch.cuda.is_available = is_cuda_available
        try:
            yield
        finally:
            # Always restore the original check, even if the body raised.
            torch.cuda.is_available = orig_check
    else:
        yield
155,547 | import os
import warnings
from contextlib import contextmanager
from functools import lru_cache
from typing import Generator, List, Optional, Union, cast
import torch
from typing_extensions import override
from lightning.fabric.accelerators.accelerator import Accelerator
from lightning.fabric.accelerators.registry import _AcceleratorRegistry
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0
from lightning.fabric.utilities.rank_zero import rank_zero_info
def _is_ampere_or_later(device: Optional[torch.device] = None) -> bool: # Ampere and later leverage tensor cores, where this setting becomes useful
def _check_cuda_matmul_precision(device: torch.device) -> None:
    # Emit a hint when Tensor Cores are available but float32 matmul precision is still
    # at the "highest" (slowest) default.
    if not torch.cuda.is_available() or not _is_ampere_or_later(device):
        return
    # check that the user hasn't changed the precision already, this works for both `allow_tf32 = True` and
    # `set_float32_matmul_precision`
    if torch.get_float32_matmul_precision() == "highest":  # default
        rank_zero_info(
            f"You are using a CUDA device ({torch.cuda.get_device_name(device)!r}) that has Tensor Cores. To properly"
            " utilize them, you should set `torch.set_float32_matmul_precision('medium' | 'high')` which will trade-off"
            " precision for performance. For more details, read https://pytorch.org/docs/stable/generated/"
            "torch.set_float32_matmul_precision.html#torch.set_float32_matmul_precision"
        )
    # note: no need change `torch.backends.cudnn.allow_tf32` as it's enabled by default:
    # https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
155,548 | import os
import warnings
from contextlib import contextmanager
from functools import lru_cache
from typing import Generator, List, Optional, Union, cast
import torch
from typing_extensions import override
from lightning.fabric.accelerators.accelerator import Accelerator
from lightning.fabric.accelerators.registry import _AcceleratorRegistry
from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0
from lightning.fabric.utilities.rank_zero import rank_zero_info
_TORCH_GREATER_EQUAL_2_0 = compare_version("torch", operator.ge, "2.0.0")
def _clear_cuda_memory() -> None:
    """Release cached CUDA memory, clearing cuBLAS workspaces first when supported."""
    # strangely, the attribute can be undefined when torch.compile is used
    # (https://github.com/pytorch/pytorch/issues/95668)
    clear_workspaces = getattr(torch._C, "_cuda_clearCublasWorkspaces", None)
    if _TORCH_GREATER_EQUAL_2_0 and clear_workspaces is not None:
        clear_workspaces()
    torch.cuda.empty_cache()
155,549 | import asyncio
import inspect
import time
from copy import deepcopy
from dataclasses import dataclass
from functools import wraps
from multiprocessing import Queue
from typing import Any, Callable, Dict, List, Optional
from uuid import uuid4
from fastapi import FastAPI, HTTPException, Request, status
from lightning_utilities.core.apply_func import apply_to_collection
from lightning.app.api.request_types import _APIRequest, _CommandRequest, _RequestResponse
from lightning.app.utilities.app_helpers import Logger
def _signature_proxy_function():
pass | null |
155,550 | import asyncio
import inspect
import time
from copy import deepcopy
from dataclasses import dataclass
from functools import wraps
from multiprocessing import Queue
from typing import Any, Callable, Dict, List, Optional
from uuid import uuid4
from fastapi import FastAPI, HTTPException, Request, status
from lightning_utilities.core.apply_func import apply_to_collection
from lightning.app.api.request_types import _APIRequest, _CommandRequest, _RequestResponse
from lightning.app.utilities.app_helpers import Logger
class _FastApiMockRequest:
    """This class is meant to mock FastAPI Request class that isn't pickle-able.

    If a user relies on FastAPI Request annotation, the Lightning framework
    patches the annotation before pickling and replace them right after.

    Finally, the FastAPI request is converted back to the _FastApiMockRequest
    before being delivered to the users.

    Example:

        from lightning.app import LightningFlow
        from fastapi import Request
        from lightning.app.api import Post

        class Flow(LightningFlow):

            def request(self, request: Request) -> OutputRequestModel:
                ...

            def configure_api(self):
                return [Post("/api/v1/request", self.request)]

    """

    # NOTE(review): the annotated defaults suggest this class is a @dataclass at the
    # original definition site, and the zero-argument accessors below are presumably
    # decorated with @property to mirror FastAPI's Request API — confirm upstream.

    # Captured request payload and metadata (None until populated).
    _body: Optional[str] = None
    _json: Optional[str] = None
    _method: Optional[str] = None
    _headers: Optional[Dict] = None

    def receive(self):
        # The raw ASGI receive channel cannot be reproduced on a mock.
        raise NotImplementedError

    def method(self):
        return self._method

    def headers(self):
        return self._headers

    def body(self):
        return self._body

    def json(self):
        return self._json

    def stream(self):
        raise NotImplementedError

    def form(self):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError

    def is_disconnected(self):
        raise NotImplementedError
async def _mock_fastapi_request(request: Request):
    """Snapshot a FastAPI ``Request`` into a pickle-able ``_FastApiMockRequest``."""
    # TODO: Add more requests parameters.
    raw_body = await request.body()
    parsed_json = await request.json()
    return _FastApiMockRequest(
        _body=raw_body,
        _json=parsed_json,
        _headers=request.headers,
        _method=request.method,
    )
155,551 | import typing as t
from lightning.app.utilities.app_helpers import _LightningAppRef, _set_child_name
def _prepare_name(component: "Component") -> str:
return str(component.name.split(".")[-1]) | null |
155,553 | import os
import shutil
import tarfile
import tempfile
from pathlib import Path
from typing import Any, Dict
from urllib.parse import urlparse
import requests
import uvicorn
from fastapi import FastAPI, HTTPException, status
from fastapi.middleware.cors import CORSMiddleware
from lightning_cloud.openapi import Externalv1LightningappInstance
from pydantic import BaseModel
from lightning.app.utilities.app_helpers import Logger
from lightning.app.utilities.component import _set_flow_context
from lightning.app.utilities.enum import AppStage
from lightning.app.utilities.load_app import _load_plugin_from_file
def _run_plugin(run: _Run) -> Dict[str, Any]:
    # Local import — presumably to avoid a circular dependency at module import time; confirm.
    from lightning.app.runners.cloud import _to_clean_dict

    """Create a run with the given name and entrypoint under the cloudspace with the given ID."""
    # Pipeline: download tarball -> extract -> import plugin -> run from the temp dir.
    # Every stage maps its failure onto an HTTP 500 with a stage-specific message.
    with tempfile.TemporaryDirectory() as tmpdir:
        download_path = os.path.join(tmpdir, "source.tar.gz")
        source_path = os.path.join(tmpdir, "source")
        os.makedirs(source_path)

        # Download the tarball
        try:
            logger.info(f"Downloading plugin source: {run.source_code_url}")
            # Sometimes the URL gets encoded, so we parse it here
            source_code_url = urlparse(run.source_code_url).geturl()
            response = requests.get(source_code_url)
            # TODO: Backoff retry a few times in case the URL is flaky
            response.raise_for_status()
            with open(download_path, "wb") as f:
                f.write(response.content)
        except Exception as ex:
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail=f"Error downloading plugin source: {str(ex)}.",
            )

        # Extract
        try:
            logger.info("Extracting plugin source.")
            with tarfile.open(download_path, "r:gz") as tf:
                tf.extractall(source_path)  # noqa: S202
        except Exception as ex:
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail=f"Error extracting plugin source: {str(ex)}.",
            )

        # Import the plugin
        try:
            logger.info(f"Importing plugin: {run.plugin_entrypoint}")
            plugin = _load_plugin_from_file(os.path.join(source_path, run.plugin_entrypoint))
        except Exception as ex:
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"Error loading plugin: {str(ex)}."
            )

        # Allow devs to add files to the app source
        if os.path.isdir(_PLUGIN_INTERNAL_DIR_PATH):
            shutil.copytree(_PLUGIN_INTERNAL_DIR_PATH, source_path, dirs_exist_ok=True)

        # Ensure that apps are dispatched from the temp directory
        cwd = os.getcwd()
        os.chdir(source_path)

        # Setup and run the plugin
        try:
            logger.info(
                "Running plugin. "
                f"project_id: {run.project_id}, cloudspace_id: {run.cloudspace_id}, cluster_id: {run.cluster_id}."
            )
            plugin._setup(
                project_id=run.project_id,
                cloudspace_id=run.cloudspace_id,
                cluster_id=run.cluster_id,
                source_app=run.source_app,
                keep_machines_after_stop=run.keep_machines_after_stop,
            )
            app_instance = plugin.run(**run.plugin_arguments)
            return _to_clean_dict(app_instance, True)
        except Exception as ex:
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"Error running plugin: {str(ex)}."
            )
        finally:
            # Restore the working directory regardless of success or failure.
            os.chdir(cwd)
async def _healthz() -> Dict[str, str]:
"""Health check endpoint."""
return {"status": "ok"}
The provided code snippet includes necessary dependencies for implementing the `_start_plugin_server` function. Write a Python function `def _start_plugin_server(port: int) -> None` to solve the following problem:
Start the plugin server which can be used to dispatch apps or run plugins.
Here is the function:
def _start_plugin_server(port: int) -> None:
    """Start the plugin server which can be used to dispatch apps or run plugins."""
    service = FastAPI()
    # Allow any origin: the server is only bound to localhost below.
    service.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )
    # Route registration: runs are created via POST, liveness via GET.
    service.post("/v1/runs")(_run_plugin)
    service.get("/healthz", status_code=200)(_healthz)
    uvicorn.run(app=service, host="127.0.0.1", port=port, log_level="error")
155,554 | import asyncio
import contextlib
import json
import os
import queue
import socket
import sys
import traceback
from copy import deepcopy
from multiprocessing import Queue
from pathlib import Path
from tempfile import TemporaryDirectory
from threading import Event, Lock, Thread
from time import sleep
from typing import Dict, List, Mapping, Optional, Union
import uvicorn
from deepdiff import DeepDiff, Delta
from fastapi import FastAPI, File, HTTPException, Request, Response, UploadFile, WebSocket, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi.params import Header
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from pydantic import BaseModel
from websockets.exceptions import ConnectionClosed
from lightning.app.api.http_methods import _HttpMethod
from lightning.app.api.request_types import _DeltaRequest
from lightning.app.core.constants import (
ENABLE_PULLING_STATE_ENDPOINT,
ENABLE_PUSHING_STATE_ENDPOINT,
ENABLE_STATE_WEBSOCKET,
ENABLE_UPLOAD_ENDPOINT,
FRONTEND_DIR,
get_cloud_queue_type,
)
from lightning.app.core.flow import LightningFlow
from lightning.app.core.queues import QueuingSystem
from lightning.app.core.work import LightningWork
from lightning.app.storage import Drive
from lightning.app.utilities.app_helpers import InMemoryStateStore, Logger, StateStore
from lightning.app.utilities.app_status import AppStatus
from lightning.app.utilities.cloud import is_running_in_cloud
from lightning.app.utilities.component import _context
from lightning.app.utilities.enum import ComponentContext, OpenAPITags
TEST_SESSION_UUID = "1234"
global_app_state_store = InMemoryStateStore()
global_app_state_store.add(TEST_SESSION_UUID)
lock = Lock()
ENABLE_PULLING_STATE_ENDPOINT = bool(int(os.getenv("ENABLE_PULLING_STATE_ENDPOINT", "1")))
async def get_state(
    response: Response,
    x_lightning_type: Optional[str] = Header(None),
    x_lightning_session_uuid: Optional[str] = Header(None),
    x_lightning_session_id: Optional[str] = Header(None),
) -> Mapping:
    """Return the current app state for the session, guarded by the pull-state feature flag."""
    if x_lightning_session_uuid is None:
        raise Exception("Missing X-Lightning-Session-UUID header")
    if x_lightning_session_id is None:
        raise Exception("Missing X-Lightning-Session-ID header")

    if not ENABLE_PULLING_STATE_ENDPOINT:
        response.status_code = status.HTTP_405_METHOD_NOT_ALLOWED
        return {"status": "failure", "reason": "This endpoint is disabled."}

    with lock:
        # The session is currently pinned to the test session UUID.
        x_lightning_session_uuid = TEST_SESSION_UUID
        served = global_app_state_store.get_app_state(x_lightning_session_uuid)
        global_app_state_store.set_served_state(x_lightning_session_uuid, served)
        return served
155,555 | import asyncio
import contextlib
import json
import os
import queue
import socket
import sys
import traceback
from copy import deepcopy
from multiprocessing import Queue
from pathlib import Path
from tempfile import TemporaryDirectory
from threading import Event, Lock, Thread
from time import sleep
from typing import Dict, List, Mapping, Optional, Union
import uvicorn
from deepdiff import DeepDiff, Delta
from fastapi import FastAPI, File, HTTPException, Request, Response, UploadFile, WebSocket, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi.params import Header
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from pydantic import BaseModel
from websockets.exceptions import ConnectionClosed
from lightning.app.api.http_methods import _HttpMethod
from lightning.app.api.request_types import _DeltaRequest
from lightning.app.core.constants import (
ENABLE_PULLING_STATE_ENDPOINT,
ENABLE_PUSHING_STATE_ENDPOINT,
ENABLE_STATE_WEBSOCKET,
ENABLE_UPLOAD_ENDPOINT,
FRONTEND_DIR,
get_cloud_queue_type,
)
from lightning.app.core.flow import LightningFlow
from lightning.app.core.queues import QueuingSystem
from lightning.app.core.work import LightningWork
from lightning.app.storage import Drive
from lightning.app.utilities.app_helpers import InMemoryStateStore, Logger, StateStore
from lightning.app.utilities.app_status import AppStatus
from lightning.app.utilities.cloud import is_running_in_cloud
from lightning.app.utilities.component import _context
from lightning.app.utilities.enum import ComponentContext, OpenAPITags
TEST_SESSION_UUID = "1234"
global_app_state_store = InMemoryStateStore()
global_app_state_store.add(TEST_SESSION_UUID)
lock = Lock()
def _get_component_by_name(component_name: str, state: dict) -> Union[LightningFlow, LightningWork]:
    """Walk the serialized state tree to the named component and return its layout target."""
    node = state
    # The leading "root" segment refers to the state itself, so skip it.
    for part in component_name.split(".")[1:]:
        try:
            node = node["flows"][part]
        except KeyError:
            node = node["structures"][part]

    layout = node["vars"]["_layout"]
    if isinstance(layout, list):
        assert len(layout) == 1
        return layout[0]["target"]
    return layout["target"]
async def get_layout() -> str:
    """Serialize the app's root layout, resolving ``root.*`` references to their targets."""
    with lock:
        session_uuid = TEST_SESSION_UUID
        state = global_app_state_store.get_app_state(session_uuid)
        global_app_state_store.set_served_state(session_uuid, state)
        # Work on a copy so the stored state is never mutated.
        layout = deepcopy(state["vars"]["_layout"])
        for entry in layout:
            if entry["content"].startswith("root."):
                entry["content"] = _get_component_by_name(entry["content"], state)
        return json.dumps(layout)
155,556 | import asyncio
import contextlib
import json
import os
import queue
import socket
import sys
import traceback
from copy import deepcopy
from multiprocessing import Queue
from pathlib import Path
from tempfile import TemporaryDirectory
from threading import Event, Lock, Thread
from time import sleep
from typing import Dict, List, Mapping, Optional, Union
import uvicorn
from deepdiff import DeepDiff, Delta
from fastapi import FastAPI, File, HTTPException, Request, Response, UploadFile, WebSocket, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi.params import Header
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from pydantic import BaseModel
from websockets.exceptions import ConnectionClosed
from lightning.app.api.http_methods import _HttpMethod
from lightning.app.api.request_types import _DeltaRequest
from lightning.app.core.constants import (
ENABLE_PULLING_STATE_ENDPOINT,
ENABLE_PUSHING_STATE_ENDPOINT,
ENABLE_STATE_WEBSOCKET,
ENABLE_UPLOAD_ENDPOINT,
FRONTEND_DIR,
get_cloud_queue_type,
)
from lightning.app.core.flow import LightningFlow
from lightning.app.core.queues import QueuingSystem
from lightning.app.core.work import LightningWork
from lightning.app.storage import Drive
from lightning.app.utilities.app_helpers import InMemoryStateStore, Logger, StateStore
from lightning.app.utilities.app_status import AppStatus
from lightning.app.utilities.cloud import is_running_in_cloud
from lightning.app.utilities.component import _context
from lightning.app.utilities.enum import ComponentContext, OpenAPITags
app_spec: Optional[List] = None
ENABLE_PULLING_STATE_ENDPOINT = bool(int(os.getenv("ENABLE_PULLING_STATE_ENDPOINT", "1")))
async def get_spec(
    response: Response,
    x_lightning_session_uuid: Optional[str] = Header(None),
    x_lightning_session_id: Optional[str] = Header(None),
) -> Union[List, Dict]:
    """Return the app's API spec, or a failure payload when state pulling is disabled."""
    if x_lightning_session_uuid is None:
        raise Exception("Missing X-Lightning-Session-UUID header")
    if x_lightning_session_id is None:
        raise Exception("Missing X-Lightning-Session-ID header")
    if not ENABLE_PULLING_STATE_ENDPOINT:
        response.status_code = status.HTTP_405_METHOD_NOT_ALLOWED
        return {"status": "failure", "reason": "This endpoint is disabled."}
    global app_spec
    return app_spec if app_spec else []
155,557 | import asyncio
import contextlib
import json
import os
import queue
import socket
import sys
import traceback
from copy import deepcopy
from multiprocessing import Queue
from pathlib import Path
from tempfile import TemporaryDirectory
from threading import Event, Lock, Thread
from time import sleep
from typing import Dict, List, Mapping, Optional, Union
import uvicorn
from deepdiff import DeepDiff, Delta
from fastapi import FastAPI, File, HTTPException, Request, Response, UploadFile, WebSocket, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi.params import Header
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from pydantic import BaseModel
from websockets.exceptions import ConnectionClosed
from lightning.app.api.http_methods import _HttpMethod
from lightning.app.api.request_types import _DeltaRequest
from lightning.app.core.constants import (
ENABLE_PULLING_STATE_ENDPOINT,
ENABLE_PUSHING_STATE_ENDPOINT,
ENABLE_STATE_WEBSOCKET,
ENABLE_UPLOAD_ENDPOINT,
FRONTEND_DIR,
get_cloud_queue_type,
)
from lightning.app.core.flow import LightningFlow
from lightning.app.core.queues import QueuingSystem
from lightning.app.core.work import LightningWork
from lightning.app.storage import Drive
from lightning.app.utilities.app_helpers import InMemoryStateStore, Logger, StateStore
from lightning.app.utilities.app_status import AppStatus
from lightning.app.utilities.cloud import is_running_in_cloud
from lightning.app.utilities.component import _context
from lightning.app.utilities.enum import ComponentContext, OpenAPITags
api_app_delta_queue: Optional[Queue] = None
class _DeltaRequest(_BaseRequest):
    # The DeepDiff delta payload carried by this request.
    delta: Delta

    def to_dict(self):
        """Serialize the wrapped ``deepdiff.Delta`` into a plain dict."""
        return self.delta.to_dict()
ENABLE_PUSHING_STATE_ENDPOINT = ENABLE_PULLING_STATE_ENDPOINT and bool(
int(os.getenv("ENABLE_PUSHING_STATE_ENDPOINT", "1"))
)
The provided code snippet includes necessary dependencies for implementing the `post_delta` function. Write a Python function `async def post_delta( request: Request, response: Response, x_lightning_type: Optional[str] = Header(None), x_lightning_session_uuid: Optional[str] = Header(None), x_lightning_session_id: Optional[str] = Header(None), ) -> Optional[Dict]` to solve the following problem:
This endpoint is used to make an update to the app state using delta diff, mainly used by streamlit to update the state.
Here is the function:
async def post_delta(
    request: Request,
    response: Response,
    x_lightning_type: Optional[str] = Header(None),
    x_lightning_session_uuid: Optional[str] = Header(None),
    x_lightning_session_id: Optional[str] = Header(None),
) -> Optional[Dict]:
    """Apply a client-sent delta diff to the app state (used e.g. by streamlit UIs)."""
    if x_lightning_session_uuid is None:
        raise Exception("Missing X-Lightning-Session-UUID header")
    if x_lightning_session_id is None:
        raise Exception("Missing X-Lightning-Session-ID header")
    if not ENABLE_PUSHING_STATE_ENDPOINT:
        response.status_code = status.HTTP_405_METHOD_NOT_ALLOWED
        return {"status": "failure", "reason": "This endpoint is disabled."}
    payload: Dict = await request.json()
    assert api_app_delta_queue is not None
    api_app_delta_queue.put(_DeltaRequest(delta=Delta(payload["delta"])))
    return None
155,558 | import asyncio
import contextlib
import json
import os
import queue
import socket
import sys
import traceback
from copy import deepcopy
from multiprocessing import Queue
from pathlib import Path
from tempfile import TemporaryDirectory
from threading import Event, Lock, Thread
from time import sleep
from typing import Dict, List, Mapping, Optional, Union
import uvicorn
from deepdiff import DeepDiff, Delta
from fastapi import FastAPI, File, HTTPException, Request, Response, UploadFile, WebSocket, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi.params import Header
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from pydantic import BaseModel
from websockets.exceptions import ConnectionClosed
from lightning.app.api.http_methods import _HttpMethod
from lightning.app.api.request_types import _DeltaRequest
from lightning.app.core.constants import (
ENABLE_PULLING_STATE_ENDPOINT,
ENABLE_PUSHING_STATE_ENDPOINT,
ENABLE_STATE_WEBSOCKET,
ENABLE_UPLOAD_ENDPOINT,
FRONTEND_DIR,
get_cloud_queue_type,
)
from lightning.app.core.flow import LightningFlow
from lightning.app.core.queues import QueuingSystem
from lightning.app.core.work import LightningWork
from lightning.app.storage import Drive
from lightning.app.utilities.app_helpers import InMemoryStateStore, Logger, StateStore
from lightning.app.utilities.app_status import AppStatus
from lightning.app.utilities.cloud import is_running_in_cloud
from lightning.app.utilities.component import _context
from lightning.app.utilities.enum import ComponentContext, OpenAPITags
TEST_SESSION_UUID = "1234"
api_app_delta_queue: Optional[Queue] = None
global_app_state_store = InMemoryStateStore()
global_app_state_store.add(TEST_SESSION_UUID)
class _DeltaRequest(_BaseRequest):
def to_dict(self):
ENABLE_PUSHING_STATE_ENDPOINT = ENABLE_PULLING_STATE_ENDPOINT and bool(
int(os.getenv("ENABLE_PUSHING_STATE_ENDPOINT", "1"))
)
async def post_state(
    request: Request,
    response: Response,
    x_lightning_type: Optional[str] = Header(None),
    x_lightning_session_uuid: Optional[str] = Header(None),
    x_lightning_session_id: Optional[str] = Header(None),
) -> Optional[Dict]:
    """Replace the served app state (or just its ``stage``) and queue the resulting delta."""
    if x_lightning_session_uuid is None:
        raise Exception("Missing X-Lightning-Session-UUID header")
    if x_lightning_session_id is None:
        raise Exception("Missing X-Lightning-Session-ID header")
    # The diff must be computed against the state the UI actually saw last
    # (the "served" state), not against the app's most recent internal state.
    body: Dict = await request.json()
    x_lightning_session_uuid = TEST_SESSION_UUID
    if not ENABLE_PUSHING_STATE_ENDPOINT:
        response.status_code = status.HTTP_405_METHOD_NOT_ALLOWED
        return {"status": "failure", "reason": "This endpoint is disabled."}
    last_state = global_app_state_store.get_served_state(x_lightning_session_uuid)
    if "stage" in body:
        # Only the stage changed: derive the new state from the last served one.
        state = deepcopy(last_state)
        state["app_state"]["stage"] = body["stage"]
    else:
        state = body["state"]
    deep_diff = DeepDiff(last_state, state, verbose_level=2)
    assert api_app_delta_queue is not None
    api_app_delta_queue.put(_DeltaRequest(delta=Delta(deep_diff)))
    return None
155,559 | import asyncio
import contextlib
import json
import os
import queue
import socket
import sys
import traceback
from copy import deepcopy
from multiprocessing import Queue
from pathlib import Path
from tempfile import TemporaryDirectory
from threading import Event, Lock, Thread
from time import sleep
from typing import Dict, List, Mapping, Optional, Union
import uvicorn
from deepdiff import DeepDiff, Delta
from fastapi import FastAPI, File, HTTPException, Request, Response, UploadFile, WebSocket, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi.params import Header
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from pydantic import BaseModel
from websockets.exceptions import ConnectionClosed
from lightning.app.api.http_methods import _HttpMethod
from lightning.app.api.request_types import _DeltaRequest
from lightning.app.core.constants import (
ENABLE_PULLING_STATE_ENDPOINT,
ENABLE_PUSHING_STATE_ENDPOINT,
ENABLE_STATE_WEBSOCKET,
ENABLE_UPLOAD_ENDPOINT,
FRONTEND_DIR,
get_cloud_queue_type,
)
from lightning.app.core.flow import LightningFlow
from lightning.app.core.queues import QueuingSystem
from lightning.app.core.work import LightningWork
from lightning.app.storage import Drive
from lightning.app.utilities.app_helpers import InMemoryStateStore, Logger, StateStore
from lightning.app.utilities.app_status import AppStatus
from lightning.app.utilities.cloud import is_running_in_cloud
from lightning.app.utilities.component import _context
from lightning.app.utilities.enum import ComponentContext, OpenAPITags
ENABLE_UPLOAD_ENDPOINT = bool(int(os.getenv("ENABLE_UPLOAD_ENDPOINT", "1")))
def _context(ctx: str) -> Generator[None, None, None]:
class ComponentContext(enum.Enum):
async def upload_file(response: Response, filename: str, uploaded_file: UploadFile = File(...)) -> Union[str, dict]:
    """Stream an uploaded file into the shared ``lit://uploaded_files`` Drive.

    Returns a success message string, or a failure payload (with HTTP 405)
    when the upload endpoint is disabled.
    """
    if not ENABLE_UPLOAD_ENDPOINT:
        response.status_code = status.HTTP_405_METHOD_NOT_ALLOWED
        return {"status": "failure", "reason": "This endpoint is disabled."}
    with TemporaryDirectory() as tmp:
        drive = Drive(
            "lit://uploaded_files",
            component_name="file_server",
            allow_duplicates=True,
            root_folder=tmp,
        )
        tmp_file = os.path.join(tmp, filename)
        with open(tmp_file, "wb") as f:
            done = False
            while not done:
                # Note: The 8192 number doesn't have a strong reason.
                content = await uploaded_file.read(8192)
                f.write(content)
                done = content == b""
        with _context(str(ComponentContext.WORK)):
            drive.put(filename)
    # Bug fix: the message previously contained the literal text "(unknown)"
    # instead of interpolating the uploaded file's name.
    return f"Successfully uploaded '{filename}' to the Drive"
155,560 | import asyncio
import contextlib
import json
import os
import queue
import socket
import sys
import traceback
from copy import deepcopy
from multiprocessing import Queue
from pathlib import Path
from tempfile import TemporaryDirectory
from threading import Event, Lock, Thread
from time import sleep
from typing import Dict, List, Mapping, Optional, Union
import uvicorn
from deepdiff import DeepDiff, Delta
from fastapi import FastAPI, File, HTTPException, Request, Response, UploadFile, WebSocket, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi.params import Header
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from pydantic import BaseModel
from websockets.exceptions import ConnectionClosed
from lightning.app.api.http_methods import _HttpMethod
from lightning.app.api.request_types import _DeltaRequest
from lightning.app.core.constants import (
ENABLE_PULLING_STATE_ENDPOINT,
ENABLE_PUSHING_STATE_ENDPOINT,
ENABLE_STATE_WEBSOCKET,
ENABLE_UPLOAD_ENDPOINT,
FRONTEND_DIR,
get_cloud_queue_type,
)
from lightning.app.core.flow import LightningFlow
from lightning.app.core.queues import QueuingSystem
from lightning.app.core.work import LightningWork
from lightning.app.storage import Drive
from lightning.app.utilities.app_helpers import InMemoryStateStore, Logger, StateStore
from lightning.app.utilities.app_status import AppStatus
from lightning.app.utilities.cloud import is_running_in_cloud
from lightning.app.utilities.component import _context
from lightning.app.utilities.enum import ComponentContext, OpenAPITags
app_status: Optional[AppStatus] = None
class AppStatus(BaseModel):
    """The ``AppStatus`` captures the current status of the app and its components."""

    # ``True`` when the app UI is ready to be viewed
    is_ui_ready: bool

    # The statuses of ``LightningWork`` objects currently associated with this app
    work_statuses: Dict[str, WorkStatus]
The provided code snippet includes necessary dependencies for implementing the `get_status` function. Write a Python function `async def get_status() -> AppStatus` to solve the following problem:
Get the current status of the app and works.
Here is the function:
async def get_status() -> AppStatus:
    """Return the most recently reported status of the app and its works."""
    if app_status is None:
        # Nothing has been reported yet -> signal "temporarily unavailable".
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail="App status hasn't been reported yet.",
        )
    return app_status
155,561 | import asyncio
import contextlib
import json
import os
import queue
import socket
import sys
import traceback
from copy import deepcopy
from multiprocessing import Queue
from pathlib import Path
from tempfile import TemporaryDirectory
from threading import Event, Lock, Thread
from time import sleep
from typing import Dict, List, Mapping, Optional, Union
import uvicorn
from deepdiff import DeepDiff, Delta
from fastapi import FastAPI, File, HTTPException, Request, Response, UploadFile, WebSocket, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi.params import Header
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from pydantic import BaseModel
from websockets.exceptions import ConnectionClosed
from lightning.app.api.http_methods import _HttpMethod
from lightning.app.api.request_types import _DeltaRequest
from lightning.app.core.constants import (
ENABLE_PULLING_STATE_ENDPOINT,
ENABLE_PUSHING_STATE_ENDPOINT,
ENABLE_STATE_WEBSOCKET,
ENABLE_UPLOAD_ENDPOINT,
FRONTEND_DIR,
get_cloud_queue_type,
)
from lightning.app.core.flow import LightningFlow
from lightning.app.core.queues import QueuingSystem
from lightning.app.core.work import LightningWork
from lightning.app.storage import Drive
from lightning.app.utilities.app_helpers import InMemoryStateStore, Logger, StateStore
from lightning.app.utilities.app_status import AppStatus
from lightning.app.utilities.cloud import is_running_in_cloud
from lightning.app.utilities.component import _context
from lightning.app.utilities.enum import ComponentContext, OpenAPITags
app_annotations: Optional[List] = None
The provided code snippet includes necessary dependencies for implementing the `get_annotations` function. Write a Python function `async def get_annotations() -> Union[List, Dict]` to solve the following problem:
Get the annotations associated with this app.
Here is the function:
async def get_annotations() -> Union[List, Dict]:
    """Return the annotations associated with this app (empty list when none are set)."""
    return app_annotations if app_annotations else []
155,562 | import asyncio
import contextlib
import json
import os
import queue
import socket
import sys
import traceback
from copy import deepcopy
from multiprocessing import Queue
from pathlib import Path
from tempfile import TemporaryDirectory
from threading import Event, Lock, Thread
from time import sleep
from typing import Dict, List, Mapping, Optional, Union
import uvicorn
from deepdiff import DeepDiff, Delta
from fastapi import FastAPI, File, HTTPException, Request, Response, UploadFile, WebSocket, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi.params import Header
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from pydantic import BaseModel
from websockets.exceptions import ConnectionClosed
from lightning.app.api.http_methods import _HttpMethod
from lightning.app.api.request_types import _DeltaRequest
from lightning.app.core.constants import (
ENABLE_PULLING_STATE_ENDPOINT,
ENABLE_PUSHING_STATE_ENDPOINT,
ENABLE_STATE_WEBSOCKET,
ENABLE_UPLOAD_ENDPOINT,
FRONTEND_DIR,
get_cloud_queue_type,
)
from lightning.app.core.flow import LightningFlow
from lightning.app.core.queues import QueuingSystem
from lightning.app.core.work import LightningWork
from lightning.app.storage import Drive
from lightning.app.utilities.app_helpers import InMemoryStateStore, Logger, StateStore
from lightning.app.utilities.app_status import AppStatus
from lightning.app.utilities.cloud import is_running_in_cloud
from lightning.app.utilities.component import _context
from lightning.app.utilities.enum import ComponentContext, OpenAPITags
TEST_SESSION_UUID = "1234"
global_app_state_store = InMemoryStateStore()
global_app_state_store.add(TEST_SESSION_UUID)
def get_cloud_queue_type() -> Optional[str]:
value = os.getenv("LIGHTNING_CLOUD_QUEUE_TYPE", None)
if value is None and enable_interruptible_works():
value = "http"
return value
class QueuingSystem(Enum):
    """Enumerates the available queue transports and builds the app's named queues.

    Every ``get_*_queue`` helper below only derives a conventional queue name
    (optionally prefixed with ``queue_id``) and delegates to :meth:`get_queue`.
    """

    MULTIPROCESS = "multiprocess"
    REDIS = "redis"
    HTTP = "http"

    def get_queue(self, queue_name: str) -> "BaseQueue":
        # Select the concrete queue implementation for this transport.
        if self == QueuingSystem.MULTIPROCESS:
            return MultiProcessQueue(queue_name, default_timeout=STATE_UPDATE_TIMEOUT)
        if self == QueuingSystem.REDIS:
            return RedisQueue(queue_name, default_timeout=REDIS_QUEUES_READ_DEFAULT_TIMEOUT)
        # HTTP queues are rate limited to avoid hammering the queue server.
        return RateLimitedQueue(
            HTTPQueue(queue_name, default_timeout=STATE_UPDATE_TIMEOUT), HTTP_QUEUE_REQUESTS_PER_SECOND
        )

    def get_api_response_queue(self, queue_id: Optional[str] = None) -> "BaseQueue":
        # Queue carrying responses produced by the app's HTTP API handlers.
        queue_name = f"{queue_id}_{API_RESPONSE_QUEUE_CONSTANT}" if queue_id else API_RESPONSE_QUEUE_CONSTANT
        return self.get_queue(queue_name)

    def get_readiness_queue(self, queue_id: Optional[str] = None) -> "BaseQueue":
        # Queue used to signal that components are ready.
        queue_name = f"{queue_id}_{READINESS_QUEUE_CONSTANT}" if queue_id else READINESS_QUEUE_CONSTANT
        return self.get_queue(queue_name)

    def get_delta_queue(self, queue_id: Optional[str] = None) -> "BaseQueue":
        # Queue carrying state-delta payloads.
        queue_name = f"{queue_id}_{DELTA_QUEUE_CONSTANT}" if queue_id else DELTA_QUEUE_CONSTANT
        return self.get_queue(queue_name)

    def get_error_queue(self, queue_id: Optional[str] = None) -> "BaseQueue":
        # Queue carrying errors raised by components.
        queue_name = f"{queue_id}_{ERROR_QUEUE_CONSTANT}" if queue_id else ERROR_QUEUE_CONSTANT
        return self.get_queue(queue_name)

    def get_has_server_started_queue(self, queue_id: Optional[str] = None) -> "BaseQueue":
        # Queue used to signal that the API server has started.
        queue_name = f"{queue_id}_{HAS_SERVER_STARTED_CONSTANT}" if queue_id else HAS_SERVER_STARTED_CONSTANT
        return self.get_queue(queue_name)

    def get_caller_queue(self, work_name: str, queue_id: Optional[str] = None) -> "BaseQueue":
        # Per-work queue through which the flow invokes a work's run method.
        queue_name = (
            f"{queue_id}_{CALLER_QUEUE_CONSTANT}_{work_name}" if queue_id else f"{CALLER_QUEUE_CONSTANT}_{work_name}"
        )
        return self.get_queue(queue_name)

    def get_api_state_publish_queue(self, queue_id: Optional[str] = None) -> "BaseQueue":
        # Queue through which the app publishes state snapshots to the API layer.
        queue_name = f"{queue_id}_{API_STATE_PUBLISH_QUEUE_CONSTANT}" if queue_id else API_STATE_PUBLISH_QUEUE_CONSTANT
        return self.get_queue(queue_name)

    # TODO: This is hack, so we can remove this queue entirely when fully optimized.
    def get_api_delta_queue(self, queue_id: Optional[str] = None) -> "BaseQueue":
        # NOTE: shares the same name constant as `get_delta_queue`, so both
        # helpers resolve to the same underlying queue.
        queue_name = f"{queue_id}_{DELTA_QUEUE_CONSTANT}" if queue_id else DELTA_QUEUE_CONSTANT
        return self.get_queue(queue_name)

    def get_orchestrator_request_queue(self, work_name: str, queue_id: Optional[str] = None) -> "BaseQueue":
        # Per-work queue for orchestrator -> work requests.
        queue_name = (
            f"{queue_id}_{ORCHESTRATOR_REQUEST_CONSTANT}_{work_name}"
            if queue_id
            else f"{ORCHESTRATOR_REQUEST_CONSTANT}_{work_name}"
        )
        return self.get_queue(queue_name)

    def get_orchestrator_response_queue(self, work_name: str, queue_id: Optional[str] = None) -> "BaseQueue":
        # Per-work queue for work -> orchestrator responses.
        queue_name = (
            f"{queue_id}_{ORCHESTRATOR_RESPONSE_CONSTANT}_{work_name}"
            if queue_id
            else f"{ORCHESTRATOR_RESPONSE_CONSTANT}_{work_name}"
        )
        return self.get_queue(queue_name)

    def get_orchestrator_copy_request_queue(self, work_name: str, queue_id: Optional[str] = None) -> "BaseQueue":
        # Per-work queue for storage copy requests.
        queue_name = (
            f"{queue_id}_{ORCHESTRATOR_COPY_REQUEST_CONSTANT}_{work_name}"
            if queue_id
            else f"{ORCHESTRATOR_COPY_REQUEST_CONSTANT}_{work_name}"
        )
        return self.get_queue(queue_name)

    def get_orchestrator_copy_response_queue(self, work_name: str, queue_id: Optional[str] = None) -> "BaseQueue":
        # Per-work queue for storage copy responses.
        queue_name = (
            f"{queue_id}_{ORCHESTRATOR_COPY_RESPONSE_CONSTANT}_{work_name}"
            if queue_id
            else f"{ORCHESTRATOR_COPY_RESPONSE_CONSTANT}_{work_name}"
        )
        return self.get_queue(queue_name)

    def get_work_queue(self, work_name: str, queue_id: Optional[str] = None) -> "BaseQueue":
        # Per-work general-purpose queue.
        queue_name = (
            f"{queue_id}_{WORK_QUEUE_CONSTANT}_{work_name}" if queue_id else f"{WORK_QUEUE_CONSTANT}_{work_name}"
        )
        return self.get_queue(queue_name)

    def get_flow_to_work_delta_queue(self, work_name: str, queue_id: Optional[str] = None) -> "BaseQueue":
        # Per-work queue carrying deltas pushed from the flow down to a work.
        queue_name = (
            f"{queue_id}_{FLOW_TO_WORKS_DELTA_QUEUE_CONSTANT}_{work_name}"
            if queue_id
            else f"{FLOW_TO_WORKS_DELTA_QUEUE_CONSTANT}_{work_name}"
        )
        return self.get_queue(queue_name)
def is_running_in_cloud() -> bool:
    """Return ``True`` if the Lightning App is running in the cloud."""
    cloud_flag = int(os.environ.get("LAI_RUNNING_IN_CLOUD", "0"))
    return bool(cloud_flag) or "LIGHTNING_APP_STATE_URL" in os.environ
The provided code snippet includes necessary dependencies for implementing the `healthz` function. Write a Python function `async def healthz(response: Response) -> dict` to solve the following problem:
Health check endpoint used in the cloud FastAPI servers to check the status periodically.
Here is the function:
async def healthz(response: Response) -> dict:
    """Health probe used by the cloud FastAPI servers to check the app periodically."""
    # Queue connectivity is only checked when actually running in the cloud.
    if is_running_in_cloud():
        health_queue = QueuingSystem(get_cloud_queue_type()).get_queue(queue_name="healthz")
        # Only the Redis backend reports liveness; for the HTTP queue it doesn't
        # make sense to have every single app probe the queue server's status.
        if not health_queue.is_running:
            response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
            return {"status": "failure", "reason": "Redis is not available"}
    session_id = TEST_SESSION_UUID
    state = global_app_state_store.get_app_state(session_id)
    global_app_state_store.set_served_state(session_id, state)
    if not state:
        response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
        return {"status": "failure", "reason": f"State is empty {state}"}
    return {"status": "ok"}
155,563 | import asyncio
import contextlib
import json
import os
import queue
import socket
import sys
import traceback
from copy import deepcopy
from multiprocessing import Queue
from pathlib import Path
from tempfile import TemporaryDirectory
from threading import Event, Lock, Thread
from time import sleep
from typing import Dict, List, Mapping, Optional, Union
import uvicorn
from deepdiff import DeepDiff, Delta
from fastapi import FastAPI, File, HTTPException, Request, Response, UploadFile, WebSocket, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi.params import Header
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from pydantic import BaseModel
from websockets.exceptions import ConnectionClosed
from lightning.app.api.http_methods import _HttpMethod
from lightning.app.api.request_types import _DeltaRequest
from lightning.app.core.constants import (
ENABLE_PULLING_STATE_ENDPOINT,
ENABLE_PUSHING_STATE_ENDPOINT,
ENABLE_STATE_WEBSOCKET,
ENABLE_UPLOAD_ENDPOINT,
FRONTEND_DIR,
get_cloud_queue_type,
)
from lightning.app.core.flow import LightningFlow
from lightning.app.core.queues import QueuingSystem
from lightning.app.core.work import LightningWork
from lightning.app.storage import Drive
from lightning.app.utilities.app_helpers import InMemoryStateStore, Logger, StateStore
from lightning.app.utilities.app_status import AppStatus
from lightning.app.utilities.cloud import is_running_in_cloud
from lightning.app.utilities.component import _context
from lightning.app.utilities.enum import ComponentContext, OpenAPITags
global_app_state_store = InMemoryStateStore()
global_app_state_store.add(TEST_SESSION_UUID)
logger = Logger(__name__)
ENABLE_STATE_WEBSOCKET = bool(int(os.getenv("ENABLE_STATE_WEBSOCKET", "1")))
async def websocket_endpoint(websocket: WebSocket) -> None:
    """Push a notification over the websocket whenever the app-state counter changes."""
    await websocket.accept()
    if not ENABLE_STATE_WEBSOCKET:
        await websocket.close()
        return
    try:
        last_seen = global_app_state_store.counter
        while True:
            current = global_app_state_store.counter
            if current != last_seen:
                # Only the counter value is sent; clients re-fetch the state themselves.
                await websocket.send_text(f"{current}")
                last_seen = current
                logger.debug("Updated websocket.")
            await asyncio.sleep(0.01)
    except ConnectionClosed:
        logger.debug("Websocket connection closed")
        await websocket.close()
155,564 | import base64
import multiprocessing
import pickle
import queue
import time
import warnings
from abc import ABC, abstractmethod
from enum import Enum
from pathlib import Path
from typing import Any, List, Optional, Tuple
from urllib.parse import urljoin
import backoff
import requests
from requests.exceptions import ConnectionError, ConnectTimeout, ReadTimeout
from lightning.app.core.constants import (
BATCH_DELTA_COUNT,
HTTP_QUEUE_REFRESH_INTERVAL,
HTTP_QUEUE_REQUESTS_PER_SECOND,
HTTP_QUEUE_TOKEN,
HTTP_QUEUE_URL,
LIGHTNING_DIR,
QUEUE_DEBUG_ENABLED,
REDIS_HOST,
REDIS_PASSWORD,
REDIS_PORT,
REDIS_QUEUES_READ_DEFAULT_TIMEOUT,
STATE_UPDATE_TIMEOUT,
WARNING_QUEUE_SIZE,
)
from lightning.app.utilities.app_helpers import Logger
from lightning.app.utilities.imports import _is_redis_available, requires
from lightning.app.utilities.network import HTTPClient
logger = Logger(__name__)
QUEUE_DEBUG_ENABLED = bool(int(os.getenv("LIGHTNING_QUEUE_DEBUG_ENABLED", "0")))
LIGHTNING_DIR = os.getenv("LIGHTNING_DIR", str(Path.home() / ".lightning"))
def debug_log_callback(message: str, *args: Any, **kwargs: Any) -> None:
    """Forward queue-debug messages to the logger when debugging is switched on.

    Debugging is on when the env flag was set at import time or when the marker
    file ``$LIGHTNING_DIR/QUEUE_DEBUG_ENABLED`` exists (checked on every call).
    """
    marker_file = Path(LIGHTNING_DIR) / "QUEUE_DEBUG_ENABLED"
    if QUEUE_DEBUG_ENABLED or marker_file.exists():
        logger.info(message, *args, **kwargs)
155,565 | import os
from pathlib import Path
from typing import Optional
import lightning_cloud.env
def enable_multiple_works_in_default_container() -> bool:
    """Whether several works may share the default container (env-controlled, off by default)."""
    flag = os.getenv("ENABLE_MULTIPLE_WORKS_IN_DEFAULT_CONTAINER", "0")
    return bool(int(flag))
155,566 | import os
from pathlib import Path
from typing import Optional
import lightning_cloud.env
def get_cluster_driver() -> Optional[str]:
    """Name of the cluster driver; currently always the "direct" driver."""
    driver = "direct"
    return driver
155,567 | import math
import os
import subprocess
import tarfile
from dataclasses import dataclass
from typing import Optional, Tuple
import click
MAX_SPLIT_COUNT = 999
The provided code snippet includes necessary dependencies for implementing the `_get_split_size` function. Write a Python function `def _get_split_size( total_size: int, minimum_split_size: int = 1024 * 1000 * 20, max_split_count: int = MAX_SPLIT_COUNT ) -> int` to solve the following problem:
Calculate the split size we should use to split the multipart upload of an object to a bucket. We are limited to 1000 max parts as the way we are using ListMultipartUploads. More info https://github.com/gridai/grid/pull/5267 https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpu-process https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html https://github.com/psf/requests/issues/2717#issuecomment-724725392 Python or requests has a limit of 2**31 bytes for a single file upload. Parameters ---------- minimum_split_size: int The minimum split size to use max_split_count: int The maximum split count total_size: int Total size of the file to split Returns ------- int Split size
Here is the function:
def _get_split_size(
    total_size: int, minimum_split_size: int = 1024 * 1000 * 20, max_split_count: int = MAX_SPLIT_COUNT
) -> int:
    """Choose the part size for a multipart upload of an object to a bucket.

    We are limited to ``max_split_count`` parts by the way ListMultipartUploads
    is used (https://github.com/gridai/grid/pull/5267,
    https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html),
    and each part must stay below 2**31 bytes because of a Python/requests
    single-upload limit
    (https://github.com/psf/requests/issues/2717#issuecomment-724725392).

    Parameters
    ----------
    total_size: int
        Total size of the file to split.
    minimum_split_size: int
        The smallest split size to consider.
    max_split_count: int
        The largest number of parts allowed.

    Returns
    -------
    int
        Split size in bytes.
    """
    per_part_limit = 1 << 31  # max bytes per single part (requests/urllib limitation)
    max_size = max_split_count * per_part_limit
    if total_size > max_size:
        raise click.ClickException(
            f"The size of the datastore to be uploaded is bigger than our {max_size / (1 << 40):.2f} TBytes limit"
        )
    # Start from the minimum split size and only grow it when the resulting
    # part count would exceed the allowed maximum.
    if math.ceil(total_size / minimum_split_size) > max_split_count:
        return math.ceil(total_size / max_split_count)
    return minimum_split_size
155,568 | import math
import os
import subprocess
import tarfile
from dataclasses import dataclass
from typing import Optional, Tuple
import click
def _get_dir_size_and_count(source_dir: str, prefix: Optional[str] = None) -> Tuple[int, int]:
"""Get size and file count of a directory.
Parameters
----------
source_dir: str
Directory path
Returns
-------
Tuple[int, int]
Size in megabytes and file count
"""
size = 0
count = 0
for root, _, files in os.walk(source_dir, topdown=True):
for f in files:
if prefix and not f.startswith(prefix):
continue
full_path = os.path.join(root, f)
size += os.path.getsize(full_path)
count += 1
return (size, count)
class _TarResults:
    """This class holds the results of running tar_path.

    Attributes
    ----------
    before_size: int
        The total size of the original directory files in bytes
    after_size: int
        The total size of the compressed and tarred split files in bytes
    """

    # NOTE(review): the module imports `dataclass` and these are bare annotated
    # fields -- upstream presumably declares this class with @dataclass and the
    # decorator was lost in extraction; confirm before relying on a generated
    # __init__.
    before_size: int
    after_size: int
def _tar_path_python(source_path: str, target_file: str, compression: bool = False) -> None:
"""Create tar from directory using `python`
Parameters
----------
source_path: str
Source directory or file
target_file
Target tar file
compression: bool, default False
Enable compression, which is disabled by default.
"""
file_mode = "w:gz" if compression else "w:"
with tarfile.open(target_file, file_mode) as tar:
if os.path.isdir(source_path):
tar.add(str(source_path), arcname=".")
elif os.path.isfile(source_path):
file_info = tarfile.TarInfo(os.path.basename(str(source_path)))
with open(source_path) as fo:
tar.addfile(file_info, fo)
def _tar_path_subprocess(source_path: str, target_file: str, compression: bool = False) -> None:
"""Create tar from directory using `tar`
Parameters
----------
source_path: str
Source directory or file
target_file
Target tar file
compression: bool, default False
Enable compression, which is disabled by default.
"""
# Only add compression when users explicitly request it.
# We do this because it takes too long to compress
# large datastores.
tar_flags = "-cvf"
if compression:
tar_flags = "-zcvf"
if os.path.isdir(source_path):
command = f"tar -C {source_path} {tar_flags} {target_file} ./"
else:
abs_path = os.path.abspath(source_path)
parent_dir = os.path.dirname(abs_path)
base_name = os.path.basename(abs_path)
command = f"tar -C {parent_dir} {tar_flags} {target_file} {base_name}"
subprocess.check_call(
command,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
shell=True,
env={"GZIP": "-9", "COPYFILE_DISABLE": "1"},
)
The provided code snippet includes necessary dependencies for implementing the `_tar_path` function. Write a Python function `def _tar_path(source_path: str, target_file: str, compression: bool = False) -> _TarResults` to solve the following problem:
Create tar from directory using `tar` Parameters ---------- source_path: str Source directory or file target_file Target tar file compression: bool, default False Enable compression, which is disabled by default. Returns ------- TarResults Results that holds file counts and sizes
Here is the function:
def _tar_path(source_path: str, target_file: str, compression: bool = False) -> _TarResults:
    """Create a tar archive, preferring the system ``tar`` binary with a Python fallback.

    Parameters
    ----------
    source_path: str
        Source directory or file.
    target_file: str
        Target tar file path.
    compression: bool, default False
        Enable compression, which is disabled by default.

    Returns
    -------
    _TarResults
        Byte sizes of the input before tarring and of the resulting archive.
    """
    if os.path.isdir(source_path):
        before_size = _get_dir_size_and_count(source_path)[0]
    else:
        before_size = os.path.getsize(source_path)

    try:
        # The system `tar` binary is typically faster for large trees.
        _tar_path_subprocess(source_path, target_file, compression)
    except subprocess.CalledProcessError:
        # Fall back to the pure-Python implementation when `tar` fails.
        _tar_path_python(source_path, target_file, compression)

    after_size = os.stat(target_file).st_size
    return _TarResults(before_size=before_size, after_size=after_size)
155,569 | import hashlib
from typing import List
The provided code snippet includes necessary dependencies for implementing the `_get_hash` function. Write a Python function `def _get_hash(files: List[str], algorithm: str = "blake2", chunk_num_blocks: int = 128) -> str` to solve the following problem:
Hashes the contents of a list of files. Parameters ---------- files: List[str] List of files. algorithm: str, default "blake2" Algorithm to hash contents. "blake2" is set by default because it is faster than "md5". [1] chunk_num_blocks: int, default 128 Block size to use when iterating over file chunks. References ---------- [1] https://crypto.stackexchange.com/questions/70101/blake2-vs-md5-for-checksum-file-integrity [2] https://stackoverflow.com/questions/1131220/get-md5-hash-of-big-files-in-python
Here is the function:
def _get_hash(files: List[str], algorithm: str = "blake2", chunk_num_blocks: int = 128) -> str:
"""Hashes the contents of a list of files.
Parameters
----------
files: List[Path]
List of files.
algorithm: str, default "blake2"
Algorithm to hash contents. "blake2" is set by default because it
is faster than "md5". [1]
chunk_num_blocks: int, default 128
Block size to user when iterating over file chunks.
References
----------
[1] https://crypto.stackexchange.com/questions/70101/blake2-vs-md5-for-checksum-file-integrity
[2] https://stackoverflow.com/questions/1131220/get-md5-hash-of-big-files-in-python
"""
# validate input
if algorithm == "blake2":
h = hashlib.blake2b(digest_size=20)
elif algorithm == "md5":
h = hashlib.md5()
else:
raise ValueError(f"Algorithm {algorithm} not supported")
# calculate hash for all files
for file in files:
with open(file, "rb") as f:
for chunk in iter(lambda: f.read(chunk_num_blocks * h.block_size), b""):
h.update(chunk)
return h.hexdigest() | Hashes the contents of a list of files. Parameters ---------- files: List[Path] List of files. algorithm: str, default "blake2" Algorithm to hash contents. "blake2" is set by default because it is faster than "md5". [1] chunk_num_blocks: int, default 128 Block size to user when iterating over file chunks. References ---------- [1] https://crypto.stackexchange.com/questions/70101/blake2-vs-md5-for-checksum-file-integrity [2] https://stackoverflow.com/questions/1131220/get-md5-hash-of-big-files-in-python |
155,570 | import fnmatch
import os
from functools import partial
from pathlib import Path
from shutil import Error, copy2, copystat
from typing import Callable, List, Optional, Set, Tuple, Union
from lightning.app.core.constants import DOT_IGNORE_FILENAME
from lightning.app.utilities.app_helpers import Logger
_IGNORE_FUNCTION = Callable[[Path, List[Path]], List[Path]]
def _filter_ignored(src: Path, patterns: Set[str], current_dir: Path, entries: List[Path]) -> List[Path]:
relative_dir = current_dir.relative_to(src)
names = [str(relative_dir / entry.name) for entry in entries]
ignored_names = set()
for pattern in patterns:
ignored_names.update(fnmatch.filter(names, pattern))
return [entry for entry in entries if str(relative_dir / entry.name) not in ignored_names]
def _read_lightningignore(path: Path) -> Set[str]:
    """Reads ignore file and filter and empty lines. This will also remove patterns that start with a `/`. That's done
    to allow `glob` to simulate the behavior done by `git` where it interprets that as a root path.

    Parameters
    ----------
    path: Path
        Path to .lightningignore file or equivalent.

    Returns
    -------
    Set[str]
        Set of unique lines.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the previous `path.open().readlines()` leaked the handle.
    with path.open() as ignore_file:
        raw_lines = ignore_file.readlines()
    return _parse_lightningignore(raw_lines)
def _ignore_filename_spell_check(src: Path):
    """Warn when a direct child of ``src`` looks like a misspelled ignore file.

    Checks file names against common misspellings of the canonical
    ``.lightningignore`` name, with and without the leading dot.
    """
    dotted_mistakes = [
        ".gridignore",
        ".lightingignore",
        ".lightinginore",
        ".lightninginore",
        ".lightninignore",
        ".lightinignore",
    ]
    # accept each misspelling both with and without the leading dot
    possible_spelling_mistakes = dotted_mistakes + [name.lstrip(".") for name in dotted_mistakes]
    for path in src.iterdir():
        if path.is_file() and path.name in possible_spelling_mistakes:
            logger.warn(
                f"Lightning uses `{DOT_IGNORE_FILENAME}` as the ignore file but found {path.name} at "
                f"{path.parent} instead. If this was a mistake, please rename the file."
            )
DOT_IGNORE_FILENAME = ".lightningignore"
The provided code snippet includes necessary dependencies for implementing the `_copytree` function. Write a Python function `def _copytree( src: Union[Path, str], dst: Union[Path, str], ignore_functions: Optional[List[_IGNORE_FUNCTION]] = None, dirs_exist_ok=False, dry_run=False, ) -> List[str]` to solve the following problem:
Vendor in from `shutil.copytree` to support ignoring files recursively based on `.lightningignore`, like `git` does with `.gitignore`. Also removed a few checks from the original copytree related to symlink checks. Differences between original and this function are. 1. It supports a list of ignore function instead of a single one in the original. We can use this for filtering out files based on nested .lightningignore files 2. It supports a dry run. When enabled, this function will not copy anything but just recursively find the source files which are not-ignored and return them. It is useful while calculating the hash or checking the size of files 3. This function returns a list of copied files unlike the original which was returning the destination directory Recursively copy a directory tree and return the destination directory. Parameters ---------- src: Source directory path to copy from dst: Destination directory path to copy to ignore_functions: List of functions that will be used to filter out files and directories. This isn't required to be passed when calling from outside but will be autopopulated by the recursive calls in this function itself (Original copytree doesn't have this argument) dirs_exist_ok: If true, the destination directory will be created if it doesn't exist. dry_run: If true, this function will not copy anything (this is not present in the original copytree) If exception(s) occur, an Error is raised with a list of reasons.
Here is the function:
def _copytree(
    src: Union[Path, str],
    dst: Union[Path, str],
    ignore_functions: Optional[List[_IGNORE_FUNCTION]] = None,
    dirs_exist_ok=False,
    dry_run=False,
) -> List[str]:
    """Vendor in from `shutil.copytree` to support ignoring files recursively based on `.lightningignore`, like `git`
    does with `.gitignore`. Also removed a few checks from the original copytree related to symlink checks. Differences
    between original and this function are.

    1. It supports a list of ignore function instead of a single one in the
       original. We can use this for filtering out files based on nested
       .lightningignore files
    2. It supports a dry run. When enabled, this function will not copy anything but just recursively
       find the source files which are not-ignored and return them. It is useful while calculating
       the hash or checking the size of files
    3. This function returns a list of copied files unlike the original which was returning the
       destination directory

    Recursively copy a directory tree and return the list of copied files.

    Parameters
    ----------
    src:
        Source directory path to copy from
    dst:
        Destination directory path to copy to
    ignore_functions:
        List of functions that will be used to filter out files
        and directories. This isn't required to be passed when calling from outside but will be
        autopopulated by the recursive calls in this function itself (Original copytree doesn't have this argument)
    dirs_exist_ok:
        If true, the destination directory will be created if it doesn't exist.
    dry_run:
        If true, this function will not copy anything (this is not present in the original copytree)

    If exception(s) occur, an Error is raised with a list of reasons.
    """
    files_copied = []

    if ignore_functions is None:
        ignore_functions = []

    # Convert to Path *before* the spell check: _ignore_filename_spell_check
    # iterates `src.iterdir()`, which a plain `str` argument does not provide.
    src = Path(src)
    dst = Path(dst)

    _ignore_filename_spell_check(src)

    ignore_filepath = src / DOT_IGNORE_FILENAME
    if ignore_filepath.is_file():
        patterns = _read_lightningignore(ignore_filepath)
        ignore_fn = partial(_filter_ignored, src, patterns)
        # creating new list so we won't modify the original
        ignore_functions = [*ignore_functions, ignore_fn]

    if not dry_run:
        os.makedirs(dst, exist_ok=dirs_exist_ok)

    errors = []

    entries = list(src.iterdir())
    for fn in ignore_functions:
        # ignore function return only the entries that are not ignored
        entries = fn(src, entries)

    for srcentry in entries:
        dstpath = dst / srcentry.name
        try:
            if srcentry.is_dir():
                _files = _copytree(
                    src=srcentry,
                    dst=dstpath,
                    ignore_functions=ignore_functions,
                    dirs_exist_ok=dirs_exist_ok,
                    dry_run=dry_run,
                )
                files_copied.extend(_files)
            else:
                files_copied.append(str(srcentry))
                if not dry_run:
                    # Will raise a SpecialFileError for unsupported file types
                    copy2(srcentry, dstpath)
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except Error as err:
            errors.extend(err.args[0])
        except OSError as why:
            errors.append((srcentry, dstpath, str(why)))

    try:
        if not dry_run:
            copystat(src, dst)
    except OSError as why:
        # Copying file access times may fail on Windows
        if getattr(why, "winerror", None) is None:
            errors.append((src, dst, str(why)))

    if errors:
        raise Error(errors)

    return files_copied
155,571 | import contextlib
import pickle
import sys
import types
import typing
from copy import deepcopy
from pathlib import Path
from lightning.app.core.work import LightningWork
from lightning.app.utilities.app_helpers import _LightningAppRef
def get_picklable_work(work: LightningWork) -> LightningWork:
    """Return a deep copy of ``work`` that can be pickled from within the work process.

    Pickling a LightningWork instance fails if done from the work process
    itself. This function is safe to call from the work process within both
    MultiprocessRuntime and Cloud.

    Note: This function modifies the module information of the work object.
    Specifically, it injects the relative module path into the ``__module__``
    attribute of the work object. If the object is not importable from the CWD,
    then the pickle load will fail.

    Example:
        for a directory structure like below, where the work class is defined
        in app.py and app.py is the entrypoint for the app, it will inject
        ``foo.bar.app`` into the ``__module__`` attribute::

            └── foo
                ├── __init__.py
                └── bar
                    └── app.py

    Raises:
        RuntimeError: If no LightningApp is currently running.
        ValueError: If ``work`` is not registered in the app references, or
            its defining module file cannot be identified.
    """
    # If the work object is not taken from the app ref, there is a thread-lock
    # reference somewhere that prevents it from being pickled. Investigate it
    # later. We shouldn't be fetching the work object from the app ref. TODO @sherin
    app_ref = _LightningAppRef.get_current()
    if app_ref is None:
        raise RuntimeError("Cannot pickle LightningWork outside of a LightningApp")
    for w in app_ref.works:
        if work.name == w.name:
            # deep-copying the work object to avoid modifying the original work object
            with _trimmed_work(w, to_trim=NON_PICKLABLE_WORK_ATTRIBUTES):
                copied_work = deepcopy(w)
            break
    else:
        raise ValueError(f"Work with name {work.name} not found in the app references")
    # if work is defined in __main__ or __mp_main__ (the entrypoint file for the
    # `lightning run app` command), pickling/unpickling will fail, hence we need
    # to patch the module information
    if "_main__" in copied_work.__class__.__module__:
        work_class_module = sys.modules[copied_work.__class__.__module__]
        work_class_file = work_class_module.__file__
        if not work_class_file:
            raise ValueError(
                f"Cannot pickle work class {copied_work.__class__.__name__} because we "
                f"couldn't identify the module file"
            )
        # derive the importable dotted name, e.g. "foo/bar/app.py" -> "foo.bar.app"
        relative_path = Path(work_class_module.__file__).relative_to(Path.cwd())  # type: ignore
        expected_module_name = relative_path.as_posix().replace(".py", "").replace("/", ".")
        # TODO @sherin: also check if the module is importable from the CWD
        # Register a synthetic module under the importable name so that pickle
        # can resolve the work class on load.
        fake_module = types.ModuleType(expected_module_name)
        fake_module.__dict__.update(work_class_module.__dict__)
        fake_module.__dict__["__name__"] = expected_module_name
        sys.modules[expected_module_name] = fake_module
        # Repoint every top-level object defined in the entrypoint module to the
        # synthetic module so their pickled references resolve as well.
        for k, v in fake_module.__dict__.items():
            if not k.startswith("__") and hasattr(v, "__module__") and "_main__" in v.__module__:
                v.__module__ = expected_module_name
    return copied_work
class LightningWork:
_INTERNAL_STATE_VARS = (
# Internal protected variables that are still part of the state (even though they are prefixed with "_")
"_paths",
"_host",
"_port",
"_url",
"_restarting",
"_internal_ip",
"_public_ip",
)
_run_executor_cls: Type[WorkRunExecutor] = WorkRunExecutor
# TODO: Move to spawn for all Operating System.
_start_method = "spawn" if sys.platform in ("darwin", "win32") else "fork"
def __init__(
self,
parallel: bool = False,
cache_calls: bool = True,
raise_exception: bool = True,
host: str = "127.0.0.1",
port: Optional[int] = None,
local_build_config: Optional[BuildConfig] = None,
cloud_build_config: Optional[BuildConfig] = None,
cloud_compute: Optional[CloudCompute] = None,
run_once: Optional[bool] = None, # TODO: Remove run_once
start_with_flow: bool = True,
):
"""LightningWork, or Work in short, is a building block for long-running jobs.
The LightningApp runs its :class:`~lightning.app.core.flow.LightningFlow` component
within an infinite loop and track the ``LightningWork`` status update.
Use LightningWork for third-party services or for launching heavy jobs such as
downloading data, training or serving a model.
Each LightningWork is running in its own independent process. Works are self-isolated from the rest,
e.g any state changes happening within the work will be reflected within the flow but not the other way around.
Arguments:
parallel: Whether to run in parallel mode or not. When False, the flow waits for the work to finish.
cache_calls: Whether the ``run`` method should cache its input arguments and not run again when provided
with the same arguments in subsequent calls.
raise_exception: Whether to re-raise an exception in the flow when raised from within the work run method.
host: Bind socket to this host
port: Bind socket to this port. Be default, this is None and should be called within your run method.
local_build_config: The local BuildConfig isn't used until Lightning supports DockerRuntime.
cloud_build_config: The cloud BuildConfig enables user to easily configure machine before running this work.
run_once: Deprecated in favor of cache_calls. This will be removed soon.
start_with_flow: Whether the work should be started at the same time as the root flow. Only applies to works
defined in ``__init__``.
**Learn More About Lightning Work Inner Workings**
.. raw:: html
<div class="display-card-container">
<div class="row">
.. displayitem::
:header: The Lightning Work inner workings.
:description: Learn more Lightning Work.
:col_css: col-md-4
:button_link: ../../core_api/lightning_work/index.html
:height: 180
:tag: Basic
.. raw:: html
</div>
</div>
<br />
"""
from lightning.app.runners.backends.backend import Backend
if run_once is not None:
warnings.warn(
"The `run_once` argument to LightningWork is deprecated in favor of `cache_calls` and will be removed"
" in the next version. Use `cache_calls` instead."
)
self._cache_calls = run_once if run_once is not None else cache_calls
self._state = {
"_host",
"_port",
"_url",
"_future_url",
"_internal_ip",
"_public_ip",
"_restarting",
"_cloud_compute",
"_display_name",
}
self._parallel: bool = parallel
self._host: str = host
self._port: Optional[int] = port
self._url: str = ""
self._future_url: str = "" # The cache URL is meant to defer resolving the url values.
self._internal_ip: str = ""
self._public_ip: str = ""
# setattr_replacement is used by the multiprocessing runtime to send the latest changes to the main coordinator
self._setattr_replacement: Optional[Callable[[str, Any], None]] = None
self._name: str = ""
self._display_name: str = ""
# The ``self._calls`` is used to track whether the run
# method with a given set of input arguments has already been called.
# Example of its usage:
# {
# 'latest_call_hash': '167fe2e',
# '167fe2e': {
# 'statuses': [
# {'stage': 'pending', 'timestamp': 1659433519.851271},
# {'stage': 'running', 'timestamp': 1659433519.956482},
# {'stage': 'stopped', 'timestamp': 1659433520.055768}]}
# ]
# },
# ...
# }
self._calls: dict = {CacheCallsKeys.LATEST_CALL_HASH: None}
self._changes: dict = {}
self._raise_exception = raise_exception
self._paths: dict = {}
self._request_queue: Optional[BaseQueue] = None
self._response_queue: Optional[BaseQueue] = None
self._restarting: bool = False
self._start_with_flow = start_with_flow
self._local_build_config = local_build_config or BuildConfig()
self._cloud_build_config = cloud_build_config or BuildConfig()
self._cloud_compute = cloud_compute or CloudCompute()
# tuple instead of a list so that it cannot be modified without using the setter
self._lightningignore: Tuple[str, ...] = ()
self._backend: Optional[Backend] = None
self._check_run_is_implemented()
self._on_init_end()
def url(self) -> str:
"""Returns the current url of the work."""
return self._url
def url(self, url: str) -> None:
self._url = url
def host(self) -> str:
"""Returns the current host of the work."""
return self._host
def port(self) -> int:
if self._port is None:
self._port = find_free_network_port()
return self._port
def internal_ip(self) -> str:
"""The internal ip address of this LightningWork, reachable by other Work locally and in the cloud.
By default, this attribute returns the empty string and the ip address will only be returned once the work runs.
Locally, the address is 127.0.0.1 and in the cloud it will be determined by the cluster.
"""
return self._internal_ip
def public_ip(self) -> str:
"""The public ip address of this LightningWork, reachable from the internet.
By default, this attribute returns the empty string and the ip address will only be returned once the work runs.
Locally, this address is undefined (empty string) and in the cloud it will be determined by the cluster.
"""
return self._public_ip
def _on_init_end(self) -> None:
self._local_build_config.on_work_init(self)
self._cloud_build_config.on_work_init(self, self._cloud_compute)
def _is_state_attribute(name: str) -> bool:
"""Every public attribute is part of the state by default and all protected (prefixed by '_') or private
(prefixed by '__') attributes are not.
Exceptions are listed in the `_INTERNAL_STATE_VARS` class variable.
"""
return name in LightningWork._INTERNAL_STATE_VARS or not name.startswith("_")
def name(self) -> str:
"""Returns the name of the LightningWork."""
return self._name
def display_name(self) -> str:
"""Returns the display name of the LightningWork in the cloud.
The display name needs to set before the run method of the work is called.
"""
return self._display_name
def display_name(self, display_name: str) -> None:
"""Sets the display name of the LightningWork in the cloud."""
if not self.has_started:
self._display_name = display_name
elif self._display_name != display_name:
raise RuntimeError("The display name can be set only before the work has started.")
def cache_calls(self) -> bool:
"""Returns whether the ``run`` method should cache its input arguments and not run again when provided with the
same arguments in subsequent calls."""
return self._cache_calls
def parallel(self) -> bool:
"""Whether to run in parallel mode or not.
When parallel is False, the flow waits for the work to finish.
"""
return self._parallel
def local_build_config(self) -> BuildConfig:
return self._local_build_config
def local_build_config(self, build_config: BuildConfig) -> None:
self._local_build_config = build_config
self._local_build_config.on_work_init(self)
def cloud_build_config(self) -> BuildConfig:
"""Returns the cloud build config used to prepare the selected cloud hardware."""
return self._cloud_build_config
def cloud_build_config(self, build_config: BuildConfig) -> None:
self._cloud_build_config = build_config
self._cloud_build_config.on_work_init(self, cloud_compute=self._cloud_compute)
def cloud_compute(self) -> CloudCompute:
return self._cloud_compute
def cloud_compute(self, cloud_compute: CloudCompute) -> None:
"""Returns the cloud compute used to select the cloud hardware."""
# A new ID
current_id = self._cloud_compute.id
new_id = cloud_compute.id
if current_id != new_id:
compute_store: _CloudComputeStore = _CLOUD_COMPUTE_STORE[current_id]
compute_store.remove(self.name)
self._cloud_compute = cloud_compute
def lightningignore(self) -> Tuple[str, ...]:
"""Programmatic equivalent of the ``.lightningignore`` file."""
return self._lightningignore
def lightningignore(self, lightningignore: Tuple[str, ...]) -> None:
if self._backend is not None:
raise RuntimeError(
f"Your app has been already dispatched, so modifying the `{self.name}.lightningignore` does not have an"
" effect"
)
self._lightningignore = lightningignore
def status(self) -> WorkStatus:
"""Return the current status of the work.
All statuses are stored in the state.
"""
call_hash = self._calls[CacheCallsKeys.LATEST_CALL_HASH]
if call_hash in self._calls:
statuses = self._calls[call_hash]["statuses"]
# deltas aren't necessarily coming in the expected order.
statuses = sorted(statuses, key=lambda x: x["timestamp"])
latest_status = statuses[-1]
if latest_status.get("reason") == WorkFailureReasons.TIMEOUT:
return self._aggregate_status_timeout(statuses)
return WorkStatus(**latest_status)
return WorkStatus(stage=WorkStageStatus.NOT_STARTED, timestamp=time.time())
def statuses(self) -> List[WorkStatus]:
"""Return all the status of the work."""
call_hash = self._calls[CacheCallsKeys.LATEST_CALL_HASH]
if call_hash in self._calls:
statuses = self._calls[call_hash]["statuses"]
# deltas aren't necessarily coming in the expected order.
statuses = sorted(statuses, key=lambda x: x["timestamp"])
return [WorkStatus(**status) for status in statuses]
return []
def has_started(self) -> bool:
"""Return whether the work has started."""
return self.status.stage != WorkStageStatus.NOT_STARTED
def has_stopped(self) -> bool:
"""Return whether the work has stopped."""
return self.status.stage == WorkStageStatus.STOPPED
def has_succeeded(self) -> bool:
"""Return whether the work has succeeded."""
return self.status.stage == WorkStageStatus.SUCCEEDED
def has_failed(self) -> bool:
"""Return whether the work has failed."""
return self.status.stage == WorkStageStatus.FAILED
def has_timeout(self) -> bool:
"""Return whether the work has time-out."""
return self.has_failed and self.status.reason == WorkFailureReasons.TIMEOUT
def is_running(self) -> bool:
"""Return whether the work is running."""
return self.status.stage == WorkStageStatus.RUNNING
def is_pending(self) -> bool:
"""Return whether the work is pending."""
return self.status.stage == WorkStageStatus.PENDING
def num_timeouts(self) -> int:
"""Return the number of timeout status since the lastest succeeded run."""
status = self.status
if status.reason == WorkFailureReasons.TIMEOUT:
return status.count
return 0
def num_successes(self) -> int:
"""Returns the number of successful runs."""
# FIXME: Resolve this within single process runtime.
run_keys = [key for key in self._calls if key.startswith("run:")]
if not run_keys:
return 0
has_succeeded_counter = 0
for run_key in run_keys:
c = len([s for s in self._calls[run_key]["statuses"] if s["stage"] == WorkStageStatus.SUCCEEDED])
has_succeeded_counter += c
return has_succeeded_counter
def _get_property_if_exists(self, name: str) -> Union[property, None]:
attr = getattr(self.__class__, name, None)
return attr if isinstance(attr, property) else None
def __setattr__(self, name: str, value: Any) -> None:
property_object = self._get_property_if_exists(name)
if property_object is not None and property_object.fset is not None:
property_object.fset(self, value)
else:
setattr_fn = getattr(self, "_setattr_replacement", None) or self._default_setattr
setattr_fn(name, value)
def _default_setattr(self, name: str, value: Any) -> None:
from lightning.app.core.flow import LightningFlow
# Allow the run method to be patched with ProxyWorkRun (done by certain Runtime implementations).
allowed_to_set_run = name == "run" and (
isinstance(value, ProxyWorkRun)
or (unwrap(value) == unwrap(self.run))
or (isinstance(value, partial) and value.func.__name__ == "_dynamic_run_wrapper")
)
is_proxy_setattr = isinstance(value, LightningWorkSetAttrProxy)
is_init_context = _is_init_context(self)
if (
not is_init_context
and name not in self._state
and name not in self._paths
and self._is_state_attribute(name)
and not allowed_to_set_run
):
raise AttributeError(f"Cannot set attributes that were not defined in __init__: {name}.")
if isinstance(value, str) and value.startswith("lit://"):
value = Path(value)
if self._is_state_attribute(name):
if isinstance(value, (LightningFlow, LightningWork)):
raise LightningWorkException(
"A ``LightningWork`` isn't allowed to take any children "
f"such as ``LightningWork`` or ``LightningFlow``. Found {value}."
)
if isinstance(value, Path):
value._attach_work(work=self)
value._attach_queues(self._request_queue, self._response_queue) # type: ignore[arg-type]
value._name = name
# In the init context, the full name of the Flow and Work is not known, i.e., we can't serialize
# the path without losing the information of origin and consumer. Hence, we delay the serialization
# of the path object until the app is instantiated.
if not is_init_context:
self._paths[name] = value.to_dict()
self._state.add(name)
elif isinstance(value, Payload):
if is_init_context:
raise AttributeError("The Payload object should be set only within the run method of the work.")
value._attach_work(work=self)
value._name = name
self._state.add(name)
elif isinstance(value, Drive):
value = deepcopy(value)
value.component_name = self.name
self._state.add(name)
elif allowed_to_set_run or is_proxy_setattr:
# enable overriding the run method (dispatcher)
pass
elif _is_json_serializable(value):
self._state.add(name)
else:
raise AttributeError(
f"Only JSON-serializable attributes are currently supported"
f" (str, int, float, bool, tuple, list, dict etc.) to be part of {self} state. "
f"Found the attribute {name} with {value} instead. \n"
"HINT: Private attributes defined as follows `self._x = y` won't be shared between components "
"and therefore don't need to be JSON-serializable. If you need to include non-JSON serializable "
"objects in the state, you can use the `lightning.app.storage.Payload` API."
)
super().__setattr__(name, value)
def __getattribute__(self, name: str) -> Any:
try:
attr = object.__getattribute__(self, name)
except AttributeError as ex:
if str(ex).endswith("'_state'"):
raise AttributeError(f"Did you forget to call super().__init__() in {self}")
raise ex
if isinstance(attr, ProxyWorkRun):
return attr
if callable(attr) and getattr(attr, "__name__", "") == "run" and getattr(self, "_cache_calls", False):
# disable while building the class.
return self._wrap_run_for_caching(attr)
return attr
def __getattr__(self, item: str) -> Any:
if item in self.__dict__.get("_paths", {}) and not _is_init_context(self):
path = Path.from_dict(self._paths[item])
path._attach_work(work=self)
path._attach_queues(self._request_queue, self._response_queue) # type: ignore[arg-type]
return path
return self.__getattribute__(item)
def _call_hash(self, fn: Callable, args: Any, kwargs: Any) -> str:
hash_args = args[1:] if len(args) > 0 and args[0] == self else args
call_obj = {"args": hash_args, "kwargs": kwargs}
# Note: Generate a hash as 167fe2e.
# Seven was selected after checking upon Github default SHA length
# and to minimize hidden state size.
return str(DeepHash(call_obj)[call_obj])[:7]
def _wrap_run_for_caching(self, fn: Callable) -> Callable:
def new_fn(*args: Any, **kwargs: Any) -> Any:
call_hash = self._call_hash(fn, args, kwargs)
entered = call_hash in self._calls
returned = entered and "ret" in self._calls[call_hash]
if returned:
entry = self._calls[call_hash]
return entry["ret"]
self._calls[call_hash] = {}
result = fn(*args, **kwargs)
self._calls[call_hash] = {"ret": result}
return result
return new_fn
def changes(self) -> dict:
return self._changes.copy()
def state(self) -> dict:
"""Returns the current state of this LightningWork."""
return {
"vars": _sanitize_state({el: getattr(self, el) for el in self._state}),
# this may have the challenge that ret cannot be pickled, we'll need to handle this
"calls": self._calls.copy(),
"changes": {},
}
def state_vars(self) -> dict:
    """Return only the sanitized ``vars`` portion of the state."""
    collected = {name: getattr(self, name) for name in self._state}
    return {"vars": _sanitize_state(collected)}
def state_with_changes(self) -> dict:
    """Like :meth:`state`, but with the accumulated changes included."""
    tracked = _sanitize_state({name: getattr(self, name) for name in self._state})
    # this may have the challenge that ret cannot be pickled, we'll need to handle this
    return {"vars": tracked, "calls": self._calls.copy(), "changes": self.changes}
def set_state(self, provided_state: dict) -> None:
    """Restore the work's attributes, changes and call cache from ``provided_state``."""
    for name, value in provided_state["vars"].items():
        # Dict payloads may encode a Drive or a CloudCompute; try to rebuild
        # them in that order (each helper returns the dict untouched otherwise).
        if isinstance(value, Dict):
            value = _maybe_create_drive(self.name, value)
        if isinstance(value, Dict):
            value = _maybe_create_cloud_compute(value)
        setattr(self, name, value)
    self._changes = provided_state["changes"]
    # Note, this is handled by the flow only.
    if _is_flow_context():
        self._cleanup_calls(provided_state["calls"])
    self._calls = provided_state["calls"]
def _cleanup_calls(calls: Dict[str, Any]) -> None:
    """Prune each call's status history in-place.

    For every in-progress call hash: if the newest status is SUCCEEDED, keep
    only that status (with an integer timestamp); otherwise keep the
    chronologically ordered statuses with duplicates removed.
    """
    # 1: Collect all the in_progress call hashes.
    # BUG FIX: the original used ``k not in (CacheCallsKeys.LATEST_CALL_HASH)``
    # — without a trailing comma the parentheses are a no-op, making this a
    # substring test against a plain string. Compare for equality instead.
    in_progress_call_hash = [k for k in list(calls) if k != CacheCallsKeys.LATEST_CALL_HASH]

    for call_hash in in_progress_call_hash:
        if "statuses" not in calls[call_hash]:
            continue

        # 2: Filter the statuses by timestamp
        statuses = sorted(calls[call_hash]["statuses"], key=lambda x: x["timestamp"])

        # If the latest status is succeeded, then drop everything before.
        if statuses[-1]["stage"] == WorkStageStatus.SUCCEEDED:
            status = statuses[-1]
            status["timestamp"] = int(status["timestamp"])
            calls[call_hash]["statuses"] = [status]
        else:
            # TODO: Some status are being duplicated,
            # this seems related to the StateObserver.
            final_statuses = []
            for status in statuses:
                if status not in final_statuses:
                    final_statuses.append(status)
            calls[call_hash]["statuses"] = final_statuses
def start(self) -> None:
    """Starts LightingWork component via CloudCompute."""
    current_stage = self.status.stage
    if current_stage == WorkStageStatus.STOPPED:
        raise Exception("A work can be started only once for now.")
    # Kick off ``run`` with a phony input so the machine spins up and exits.
    self.run(Action(method="start"))
def run(self, *args: Any, **kwargs: Any) -> None:
    """Override to add your own logic.

    This is the work's entry point; the default implementation does nothing.

    Raises:
        LightningPlatformException: If resource exceeds platform quotas or other constraints.
    """
def on_exception(self, exception: BaseException) -> None:
    """Override to customize how to handle exception in the run method."""
    if not self._raise_exception:
        return
    raise exception
def _aggregate_status_timeout(self, statuses: List[Dict]) -> WorkStatus:
    """Method used to return the first request and the total count of timeout after the latest succeeded status."""
    # Locate the most recent SUCCEEDED status and only consider what follows it.
    last_success = -1
    for idx, entry in enumerate(statuses):
        if entry["stage"] == WorkStageStatus.SUCCEEDED:
            last_success = idx
    if last_success >= 0:
        statuses = statuses[last_success + 1 :]

    timeouts = [entry for entry in statuses if entry.get("reason") == WorkFailureReasons.TIMEOUT]

    # The first status after a success is expected to be PENDING.
    assert statuses[0]["stage"] == WorkStageStatus.PENDING
    merged = {**timeouts[-1], "timestamp": statuses[0]["timestamp"]}
    return WorkStatus(**merged, count=len(timeouts))
def on_exit(self) -> None:
    """Override this hook to add your logic when the work is exiting.

    Note: This hook is not guaranteed to be called when running in the cloud.
    """
    pass
def stop(self) -> None:
    """Stops LightingWork component and shuts down hardware provisioned via CloudCompute.

    This can only be called from a ``LightningFlow``.
    """
    # Without a backend attached, this work is not managed by a flow.
    if not self._backend:
        raise RuntimeError(f"Only the `LightningFlow` can request this work ({self.name!r}) to stop.")
    # Stopping twice is a no-op.
    if self.status.stage == WorkStageStatus.STOPPED:
        return
    current_hash = self._calls[CacheCallsKeys.LATEST_CALL_HASH]
    pending_stop = make_status(WorkStageStatus.STOPPED, reason=WorkStopReasons.PENDING)
    self._calls[current_hash]["statuses"].append(pending_stop)
    app = _LightningAppRef().get_current()
    self._backend.stop_work(app, self)  # type: ignore[arg-type]
def delete(self) -> None:
    """Delete LightingWork component and shuts down hardware provisioned via CloudCompute.

    Locally, the work.delete() behaves as work.stop().
    """
    if not self._backend:
        raise Exception(
            "Can't delete the work, it looks like it isn't attached to a LightningFlow. "
            "Make sure to assign the Work to a flow instance."
        )
    current_app = _LightningAppRef().get_current()
    self._backend.delete_work(current_app, self)
def _check_run_is_implemented(self) -> None:
    """Raise ``TypeError`` if the subclass did not override ``run``."""
    if is_overridden("run", instance=self, parent=LightningWork):
        return
    raise TypeError(
        f"The work `{self.__class__.__name__}` is missing the `run()` method. This is required. Implement it"
        " first and then call it in your Flow."
    )
def _register_cloud_compute(self) -> None:
    """Record this work's name against its CloudCompute in the global store."""
    compute_id = self.cloud_compute.id
    assert compute_id
    store = _CLOUD_COMPUTE_STORE.get(compute_id)
    if store is None:
        store = _CloudComputeStore(id=compute_id, component_names=[])
        _CLOUD_COMPUTE_STORE[compute_id] = store
    store.add_component_name(self.name)
def apply_flow_delta(self, delta: Delta) -> None:
    """Override to customize how the flow should update the work state."""
    # TODO: Add support for thread safe locking over JSON Serializable objects.
    delta_dict = delta.to_dict()
    if any(k not in ["values_changed", "type_changed"] for k in delta_dict):
        raise Exception(
            "A forbidden operation to update the work from the flow was detected."
            f" Found {delta.to_dict()}, only `values_changed` and `type_changes` are currently allowed."
        )

    updated_vars = self.state["vars"] + delta
    for name, value in updated_vars.items():
        # Honor property setters so their validation/side effects still run.
        prop = self._get_property_if_exists(name)
        if prop is not None and prop.fset is not None:
            prop.fset(self, value)
        else:
            self._default_setattr(name, value)
def configure_layout(self) -> Union[None, str, "Frontend"]:
    """Configure the UI of this LightningWork.

    You can either

    1. Return a single :class:`~lightning.app.frontend.frontend.Frontend` object to serve a user interface
       for this Work.
    2. Return a string containing a URL to act as the user interface for this Work.
    3. Return ``None`` to indicate that this Work doesn't currently have a user interface.

    **Example:** Serve a static directory (with at least a file index.html inside).

    .. code-block:: python

        from lightning.app.frontend import StaticWebFrontend


        class Work(LightningWork):
            def configure_layout(self):
                return StaticWebFrontend("path/to/folder/to/serve")

    **Example:** Arrange the UI of my children in tabs (default UI by Lightning).

    .. code-block:: python

        class Work(LightningWork):
            def configure_layout(self):
                return [
                    dict(name="First Tab", content=self.child0),
                    dict(name="Second Tab", content=self.child1),
                    dict(name="Lightning", content="https://lightning.ai"),
                ]

    If you don't implement ``configure_layout``, Lightning will use ``self.url``.

    Note:
        This hook gets called at the time of app creation and then again as part of the loop. If desired, a
        returned URL can depend on the state. This is not the case if the work returns a
        :class:`~lightning.app.frontend.frontend.Frontend`. These need to be provided at the time of app creation
        in order for the runtime to start the server.
    """
def dump(work: LightningWork, f: typing.BinaryIO) -> None:
    """Pickle ``work`` to the binary stream ``f`` after converting it to a picklable form."""
    serializable = get_picklable_work(work)
    pickle.dump(serializable, f)
155,572 | import contextlib
import pickle
import sys
import types
import typing
from copy import deepcopy
from pathlib import Path
from lightning.app.core.work import LightningWork
from lightning.app.utilities.app_helpers import _LightningAppRef
def load(f: typing.BinaryIO) -> typing.Any:
    """Unpickle a work object from the binary stream ``f``.

    The current working directory is temporarily prepended to ``sys.path`` so
    that the pickled work's module can be resolved during unpickling.

    Args:
        f: A binary stream positioned at the pickled payload.

    Returns:
        The unpickled object.
    """
    # inject current working directory to sys.path
    sys.path.insert(1, str(Path.cwd()))
    try:
        return pickle.load(f)
    finally:
        # Always remove the injected entry — the original implementation
        # leaked it onto sys.path whenever unpickling raised.
        sys.path.pop(1)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.