Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation_cuda_dispatch.h +25 -0
- mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/_foobar.h +39 -0
- mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_cuda_dispatch.h +28 -0
- mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/chain_matmul_native.h +22 -0
- mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_cpu_dispatch.h +26 -0
- mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/resolve_conj.h +30 -0
- mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_cuda_dispatch.h +25 -0
- mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/triangular_solve_meta_dispatch.h +25 -0
- moondream/lib/python3.10/site-packages/torch/distributed/_tools/memory_tracker.py +299 -0
- moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_fsdp_extensions.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_limiter_utils.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_shard_utils.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/api.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/sharded_grad_scaler.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/distributed/fsdp/_optim_utils.py +2086 -0
- moondream/lib/python3.10/site-packages/torch/distributed/fsdp/_wrap_utils.py +262 -0
- moondream/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/_utils.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/input_reshard.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/masked/__pycache__/__init__.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/__init__.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/binary.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/passthrough.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/reductions.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/passthrough.py +43 -0
- moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/reductions.py +173 -0
- moondream/lib/python3.10/site-packages/torch/utils/__pycache__/throughput_benchmark.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/utils/__pycache__/weak.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/reference.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/singleton_int.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/utils/_sympy/singleton_int.py +94 -0
- moondream/lib/python3.10/site-packages/torch/utils/_sympy/solve.py +175 -0
- moondream/lib/python3.10/site-packages/torch/utils/_sympy/value_ranges.py +782 -0
- moondream/lib/python3.10/site-packages/torch/utils/data/__init__.py +76 -0
- moondream/lib/python3.10/site-packages/torch/utils/data/__pycache__/backward_compatibility.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/utils/data/__pycache__/dataloader.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/__init__.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/collate.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/pin_memory.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/signal_handling.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/worker.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/utils/data/_utils/signal_handling.py +72 -0
- moondream/lib/python3.10/site-packages/torch/utils/data/_utils/worker.py +329 -0
- moondream/lib/python3.10/site-packages/torch/utils/data/backward_compatibility.py +5 -0
- moondream/lib/python3.10/site-packages/torch/utils/data/dataloader.py +1479 -0
- moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/__init__.py +3 -0
- moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/__init__.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/_decorator.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/datapipe.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/_hook_iterator.py +248 -0
- moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/_typing.py +430 -0
mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _addmm_activation(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false);
|
| 21 |
+
TORCH_API at::Tensor & _addmm_activation_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false);
|
| 22 |
+
TORCH_API at::Tensor & _addmm_activation_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, at::Tensor & out);
|
| 23 |
+
|
| 24 |
+
} // namespace cuda
|
| 25 |
+
} // namespace at
|
mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/_foobar.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_foobar_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor
|
| 26 |
+
inline at::Tensor _foobar(const at::Tensor & self, bool arg1=true, bool arg2=true, bool arg3=true) {
|
| 27 |
+
return at::_ops::_foobar::call(self, arg1, arg2, arg3);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!)
|
| 31 |
+
inline at::Tensor & _foobar_out(at::Tensor & out, const at::Tensor & self, bool arg1=true, bool arg2=true, bool arg3=true) {
|
| 32 |
+
return at::_ops::_foobar_out::call(self, arg1, arg2, arg3, out);
|
| 33 |
+
}
|
| 34 |
+
// aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!)
|
| 35 |
+
inline at::Tensor & _foobar_outf(const at::Tensor & self, bool arg1, bool arg2, bool arg3, at::Tensor & out) {
|
| 36 |
+
return at::_ops::_foobar_out::call(self, arg1, arg2, arg3, out);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _upsample_nearest_exact1d(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales=::std::nullopt);
|
| 21 |
+
TORCH_API at::Tensor _upsample_nearest_exact1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales=::std::nullopt);
|
| 22 |
+
TORCH_API at::Tensor & _upsample_nearest_exact1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales=::std::nullopt);
|
| 23 |
+
TORCH_API at::Tensor & _upsample_nearest_exact1d_outf(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales, at::Tensor & out);
|
| 24 |
+
TORCH_API at::Tensor & _upsample_nearest_exact1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales=::std::nullopt);
|
| 25 |
+
TORCH_API at::Tensor & _upsample_nearest_exact1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales, at::Tensor & out);
|
| 26 |
+
|
| 27 |
+
} // namespace cuda
|
| 28 |
+
} // namespace at
|
mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/chain_matmul_native.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor chain_matmul(at::TensorList matrices);
|
| 20 |
+
TORCH_API at::Tensor & chain_matmul_out(at::TensorList matrices, at::Tensor & out);
|
| 21 |
+
} // namespace native
|
| 22 |
+
} // namespace at
|
mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor lcm(const at::Tensor & self, const at::Tensor & other);
|
| 21 |
+
TORCH_API at::Tensor & lcm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
|
| 22 |
+
TORCH_API at::Tensor & lcm_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & lcm_(at::Tensor & self, const at::Tensor & other);
|
| 24 |
+
|
| 25 |
+
} // namespace cpu
|
| 26 |
+
} // namespace at
|
mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/resolve_conj.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/resolve_conj_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::resolve_conj(Tensor(a) self) -> Tensor(a)
|
| 26 |
+
inline at::Tensor resolve_conj(const at::Tensor & self) {
|
| 27 |
+
return at::_ops::resolve_conj::call(self);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
}
|
mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor special_chebyshev_polynomial_t(const at::Tensor & x, const at::Tensor & n);
|
| 21 |
+
TORCH_API at::Tensor & special_chebyshev_polynomial_t_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n);
|
| 22 |
+
TORCH_API at::Tensor & special_chebyshev_polynomial_t_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
|
| 23 |
+
|
| 24 |
+
} // namespace cuda
|
| 25 |
+
} // namespace at
|
mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/triangular_solve_meta_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor> triangular_solve(const at::Tensor & self, const at::Tensor & A, bool upper=true, bool transpose=false, bool unitriangular=false);
|
| 21 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_out(at::Tensor & X, at::Tensor & M, const at::Tensor & self, const at::Tensor & A, bool upper=true, bool transpose=false, bool unitriangular=false);
|
| 22 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_outf(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular, at::Tensor & X, at::Tensor & M);
|
| 23 |
+
|
| 24 |
+
} // namespace meta
|
| 25 |
+
} // namespace at
|
moondream/lib/python3.10/site-packages/torch/distributed/_tools/memory_tracker.py
ADDED
|
@@ -0,0 +1,299 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import defaultdict
|
| 2 |
+
|
| 3 |
+
from itertools import chain
|
| 4 |
+
|
| 5 |
+
import pickle
|
| 6 |
+
|
| 7 |
+
from typing import (
|
| 8 |
+
Any,
|
| 9 |
+
Callable,
|
| 10 |
+
Dict,
|
| 11 |
+
List,
|
| 12 |
+
no_type_check,
|
| 13 |
+
Sequence,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
import torch
|
| 17 |
+
import torch.nn as nn
|
| 18 |
+
from torch.utils.hooks import RemovableHandle
|
| 19 |
+
from torch.utils._python_dispatch import TorchDispatchMode
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
BYTES_PER_MB = 1024 * 1024.0
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class MemoryProfileDispatchMode(TorchDispatchMode):
|
| 26 |
+
"""Run in ``TorchDispatchMode`` to get memory stats at operator level."""
|
| 27 |
+
|
| 28 |
+
def __init__(self, memory_tracker) -> None:
|
| 29 |
+
self.memory_tracker = memory_tracker
|
| 30 |
+
|
| 31 |
+
def __torch_dispatch__(self, func, types, args=..., kwargs=None):
|
| 32 |
+
rs = func(*args, **kwargs)
|
| 33 |
+
if func == torch.ops.aten.detach.default:
|
| 34 |
+
return rs
|
| 35 |
+
func_name: str = (
|
| 36 |
+
self.memory_tracker._cur_module_name
|
| 37 |
+
+ "."
|
| 38 |
+
+ func.__name__
|
| 39 |
+
+ "_"
|
| 40 |
+
+ str(self.memory_tracker._operator_names[func.__name__])
|
| 41 |
+
)
|
| 42 |
+
self.memory_tracker._operator_names[func.__name__] = (
|
| 43 |
+
self.memory_tracker._operator_names[func.__name__] + 1
|
| 44 |
+
)
|
| 45 |
+
self.memory_tracker._record_memory_stats(func_name)
|
| 46 |
+
|
| 47 |
+
return rs
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
class MemoryTracker:
|
| 51 |
+
"""
|
| 52 |
+
Collect and plot the memory stats at operator level.
|
| 53 |
+
|
| 54 |
+
Includes ``memories_allocated``, ``memories_active`` and ``memories_reserved``.
|
| 55 |
+
It also prints a summary for the top 20 operators that generate the most memories.
|
| 56 |
+
|
| 57 |
+
Example usage:
|
| 58 |
+
|
| 59 |
+
>>> # xdoctest: +SKIP(failing)
|
| 60 |
+
>>> net.cuda()
|
| 61 |
+
>>> input = input.cuda()
|
| 62 |
+
|
| 63 |
+
>>> mem_tracker = MemoryTracker()
|
| 64 |
+
>>> mem_tracker.start_monitor(net)
|
| 65 |
+
|
| 66 |
+
>>> net.zero_grad(True)
|
| 67 |
+
>>> loss = net(input)
|
| 68 |
+
>>> if isinstance(loss, dict):
|
| 69 |
+
>>> loss = loss['out']
|
| 70 |
+
>>> loss.sum().backward()
|
| 71 |
+
>>> net.zero_grad(set_to_none=True)
|
| 72 |
+
|
| 73 |
+
>>> mem_tracker.stop()
|
| 74 |
+
>>> mem_tracker.summary()
|
| 75 |
+
>>> mem_tracker.show_traces()
|
| 76 |
+
"""
|
| 77 |
+
|
| 78 |
+
def __init__(self) -> None:
|
| 79 |
+
torch._C._log_api_usage_once("torch.distributed.memory_tracker")
|
| 80 |
+
self._hooks: List[RemovableHandle] = []
|
| 81 |
+
self._operator_names: Dict[str, int] = defaultdict(int)
|
| 82 |
+
self.memories_allocated: Dict[int, Dict[str, float]] = defaultdict()
|
| 83 |
+
self.memories_active: Dict[int, Dict[str, float]] = defaultdict()
|
| 84 |
+
self.memories_reserved: Dict[int, Dict[str, float]] = defaultdict()
|
| 85 |
+
self._markers: Dict[str, int] = defaultdict(int)
|
| 86 |
+
self._cur_module_name: str = ""
|
| 87 |
+
self._op_index: int = 0
|
| 88 |
+
self._num_cuda_retries: int = 0
|
| 89 |
+
|
| 90 |
+
@no_type_check
|
| 91 |
+
def start_monitor(self, root_module: nn.Module) -> None:
|
| 92 |
+
"""
|
| 93 |
+
Register module hooks and entering ``MemoryProfileDispatchMode``.
|
| 94 |
+
|
| 95 |
+
This enables operator level memory stats can be tracked during module runtime.
|
| 96 |
+
"""
|
| 97 |
+
self._clear_state()
|
| 98 |
+
root_module.__setattr__("_memory_tracker_is_root", True)
|
| 99 |
+
for name, m in root_module.named_modules():
|
| 100 |
+
if m is not root_module:
|
| 101 |
+
m.__setattr__("_memory_tracker_is_root", False)
|
| 102 |
+
# fused_proxy_group does not support hooks
|
| 103 |
+
if ".fused_proxy_grouped_embedding_bag" in name:
|
| 104 |
+
continue
|
| 105 |
+
# hook ordering with other hooks added by users is not managed, so
|
| 106 |
+
# the memory stats tracked here may not completely accurate.
|
| 107 |
+
h1 = m.register_forward_pre_hook(self._create_pre_forward_hook(name))
|
| 108 |
+
h2 = m.register_forward_hook(self._create_post_forward_hook(name))
|
| 109 |
+
# it does not work well with jagged tensor somehow, the root cause is not
|
| 110 |
+
# clear and remove it for now as it does not really capture important info.
|
| 111 |
+
# h3 = m.register_backward_hook(self._create_backward_hook(name))
|
| 112 |
+
self._hooks.extend([h1, h2])
|
| 113 |
+
torch.cuda.empty_cache()
|
| 114 |
+
assert getattr(self, "profile_mode", None) is None
|
| 115 |
+
self.profile_mode = MemoryProfileDispatchMode(self)
|
| 116 |
+
self.profile_mode.__enter__()
|
| 117 |
+
|
| 118 |
+
@no_type_check
|
| 119 |
+
def stop(self) -> None:
|
| 120 |
+
"""
|
| 121 |
+
Remove module hooks and exit ``MemoryProfileDispatchMode`` to stop tracking memory stats at operator level.
|
| 122 |
+
|
| 123 |
+
Get some aggregated stats when the memory_tracker() is enabled, like cuda ``num_alloc_retries``.
|
| 124 |
+
"""
|
| 125 |
+
self._num_cuda_retries = torch.cuda.memory_stats().get("num_alloc_retries", 0)
|
| 126 |
+
|
| 127 |
+
for h in self._hooks:
|
| 128 |
+
h.remove()
|
| 129 |
+
self._hooks.clear()
|
| 130 |
+
assert getattr(self, "profile_mode", None) is not None
|
| 131 |
+
self.profile_mode.__exit__(None, None, None)
|
| 132 |
+
self.profile_mode = None
|
| 133 |
+
|
| 134 |
+
@no_type_check
|
| 135 |
+
def summary(self, top: int = 20) -> None:
|
| 136 |
+
"""
|
| 137 |
+
Print out the top operators that generate the most memories.
|
| 138 |
+
|
| 139 |
+
The number of the top operators can be configured.
|
| 140 |
+
"""
|
| 141 |
+
op_diff: Dict[str, float] = defaultdict(float)
|
| 142 |
+
op_name, previous_allocated_memory = self.memories_allocated[0]
|
| 143 |
+
for i in range(1, self._op_index):
|
| 144 |
+
op_name, current_allocated_memory = self.memories_allocated[i]
|
| 145 |
+
op_diff[op_name] = current_allocated_memory - previous_allocated_memory
|
| 146 |
+
previous_allocated_memory = current_allocated_memory
|
| 147 |
+
|
| 148 |
+
print("------------------------------------------------")
|
| 149 |
+
print(f"The number of cuda retries are: {self._num_cuda_retries}")
|
| 150 |
+
print(f"Top {top} ops that generates memory are:")
|
| 151 |
+
for k, v in sorted(op_diff.items(), key=lambda item: item[1], reverse=True)[
|
| 152 |
+
:top
|
| 153 |
+
]:
|
| 154 |
+
print(f"{k}: {v}MB")
|
| 155 |
+
print("------------------------------------------------")
|
| 156 |
+
|
| 157 |
+
@no_type_check
|
| 158 |
+
def show_traces(self, path: str = "") -> None:
|
| 159 |
+
import matplotlib.pyplot as plt
|
| 160 |
+
|
| 161 |
+
def _plot_figure(x, y_values, labels):
|
| 162 |
+
min_val = min(list(chain(*y_values))) * 0.999
|
| 163 |
+
max_val = max(list(chain(*y_values))) * 1.001
|
| 164 |
+
plt.figure()
|
| 165 |
+
for y, label in zip(y_values, labels):
|
| 166 |
+
plt.plot(x, y, label=label)
|
| 167 |
+
plt.xlabel("# Operator Calls")
|
| 168 |
+
plt.ylabel("Memory (MB)")
|
| 169 |
+
plt.legend()
|
| 170 |
+
for marker_name, marker in self._markers.items():
|
| 171 |
+
if marker_name == "fw_bw_boundary":
|
| 172 |
+
plt.plot(
|
| 173 |
+
[marker, marker],
|
| 174 |
+
[min_val, max_val],
|
| 175 |
+
"r",
|
| 176 |
+
lw=2,
|
| 177 |
+
label=marker_name,
|
| 178 |
+
)
|
| 179 |
+
else:
|
| 180 |
+
plt.plot(
|
| 181 |
+
[marker, marker],
|
| 182 |
+
[min_val, max_val],
|
| 183 |
+
"k-",
|
| 184 |
+
lw=2,
|
| 185 |
+
label=marker_name,
|
| 186 |
+
)
|
| 187 |
+
|
| 188 |
+
if path != "":
|
| 189 |
+
self.load(path)
|
| 190 |
+
|
| 191 |
+
y_1 = [gb for (name, gb) in self.memories_allocated.values()]
|
| 192 |
+
y_2 = [gb for (name, gb) in self.memories_active.values()]
|
| 193 |
+
y_3 = [gb for (name, gb) in self.memories_reserved.values()]
|
| 194 |
+
x = list(range(len(y_1)))
|
| 195 |
+
# Split figures when there is big difference between
|
| 196 |
+
# "reserved_memory" and "allocated_memory" or "active_memory".
|
| 197 |
+
_plot_figure(
|
| 198 |
+
x,
|
| 199 |
+
[list(y_1), list(y_2), list(y_3)],
|
| 200 |
+
["allocated_memory", "active_memory", "reserved_memory"],
|
| 201 |
+
)
|
| 202 |
+
_plot_figure(x, [list(y_1)], ["allocated_memory"])
|
| 203 |
+
_plot_figure(x, [list(y_2)], ["active_memory"])
|
| 204 |
+
_plot_figure(x, [list(y_3)], ["reserved_memory"])
|
| 205 |
+
|
| 206 |
+
def save_stats(self, path: str) -> None:
|
| 207 |
+
"""Save the stats using pickle during runtime if users want to plot the traces in other places like notebook."""
|
| 208 |
+
stats = {
|
| 209 |
+
"memories_allocated": self.memories_allocated,
|
| 210 |
+
"memories_active": self.memories_active,
|
| 211 |
+
"memories_reserved": self.memories_reserved,
|
| 212 |
+
"markers": self._markers,
|
| 213 |
+
"num_alloc_retries": self._num_cuda_retries,
|
| 214 |
+
}
|
| 215 |
+
|
| 216 |
+
with open(path, "wb") as f:
|
| 217 |
+
pickle.dump(stats, f, pickle.HIGHEST_PROTOCOL)
|
| 218 |
+
|
| 219 |
+
def load(self, path: str) -> None:
|
| 220 |
+
"""Load the pickled memory stats to plot the traces or print the summary."""
|
| 221 |
+
with open(path, "rb") as f:
|
| 222 |
+
stats = pickle.load(f)
|
| 223 |
+
|
| 224 |
+
self.memories_allocated = stats["memories_allocated"]
|
| 225 |
+
self.memories_active = stats["memories_active"]
|
| 226 |
+
self.memories_reserved = stats["memories_reserved"]
|
| 227 |
+
self._markers = stats["markers"]
|
| 228 |
+
self._num_cuda_retries = stats["num_alloc_retries"]
|
| 229 |
+
|
| 230 |
+
def _create_pre_forward_hook(self, name: str) -> Callable:
|
| 231 |
+
"""Prefix operator name with current module and 'forward', and insert 'fw_start' marker at forward pass start."""
|
| 232 |
+
def _pre_forward_hook(module: nn.Module, inputs: Any) -> None:
|
| 233 |
+
self._cur_module_name = f"{name}.forward"
|
| 234 |
+
if (
|
| 235 |
+
hasattr(module, "_memory_tracker_is_root")
|
| 236 |
+
and module._memory_tracker_is_root
|
| 237 |
+
):
|
| 238 |
+
self._add_marker("fw_start")
|
| 239 |
+
|
| 240 |
+
return _pre_forward_hook
|
| 241 |
+
|
| 242 |
+
def _create_post_forward_hook(self, name: str) -> Callable:
|
| 243 |
+
"""Insert the marker 'fw_bw_boundary' at the boundary of forward and backward pass."""
|
| 244 |
+
|
| 245 |
+
def _post_forward_hook(
|
| 246 |
+
module: nn.Module,
|
| 247 |
+
inputs: Sequence[torch.Tensor],
|
| 248 |
+
outputs: Sequence[torch.Tensor],
|
| 249 |
+
) -> None:
|
| 250 |
+
if (
|
| 251 |
+
hasattr(module, "_memory_tracker_is_root")
|
| 252 |
+
and module._memory_tracker_is_root
|
| 253 |
+
):
|
| 254 |
+
self._add_marker("fw_bw_boundary")
|
| 255 |
+
|
| 256 |
+
return _post_forward_hook
|
| 257 |
+
|
| 258 |
+
def _create_backward_hook(self, name: str) -> Callable:
|
| 259 |
+
"""Insert the current module name with backward prefix for the operator name."""
|
| 260 |
+
|
| 261 |
+
def _backward_hook(
|
| 262 |
+
module: nn.Module, grad_input: torch.Tensor, grad_output: torch.Tensor
|
| 263 |
+
) -> None:
|
| 264 |
+
self._cur_module_name = f"{name}.backward"
|
| 265 |
+
|
| 266 |
+
return _backward_hook
|
| 267 |
+
|
| 268 |
+
@no_type_check
|
| 269 |
+
def _record_memory_stats(self, fn_name: str) -> None:
|
| 270 |
+
"""
|
| 271 |
+
Record current memory allocated, current memory active and current memory reserved.
|
| 272 |
+
|
| 273 |
+
The memory stats dict is indexed with ``self._op_index``.
|
| 274 |
+
"""
|
| 275 |
+
memory_allocated: float = torch.cuda.memory_allocated() / BYTES_PER_MB
|
| 276 |
+
memory_reserved: float = torch.cuda.memory_reserved() / BYTES_PER_MB
|
| 277 |
+
memory_active: float = (
|
| 278 |
+
torch.cuda.memory_stats().get("active_bytes.all.current", 0) / BYTES_PER_MB
|
| 279 |
+
)
|
| 280 |
+
self.memories_allocated[self._op_index] = (fn_name, memory_allocated)
|
| 281 |
+
self.memories_reserved[self._op_index] = (fn_name, memory_reserved)
|
| 282 |
+
self.memories_active[self._op_index] = (fn_name, memory_active)
|
| 283 |
+
self._op_index += 1
|
| 284 |
+
|
| 285 |
+
def _add_marker(self, marker_name: str) -> None:
|
| 286 |
+
"""Set the marker's x-axis value."""
|
| 287 |
+
marker_val = len(self.memories_allocated.values())
|
| 288 |
+
self._markers[marker_name] = marker_val
|
| 289 |
+
|
| 290 |
+
def _clear_state(self) -> None:
|
| 291 |
+
"""Clear states when start_monitor() is called."""
|
| 292 |
+
self._operator_names.clear()
|
| 293 |
+
self.memories_allocated.clear()
|
| 294 |
+
self.memories_active.clear()
|
| 295 |
+
self.memories_reserved.clear()
|
| 296 |
+
self._markers.clear()
|
| 297 |
+
self._cur_module_name = ""
|
| 298 |
+
self._op_index = 0
|
| 299 |
+
self._num_cuda_retries = 0
|
moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_fsdp_extensions.cpython-310.pyc
ADDED
|
Binary file (5 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_limiter_utils.cpython-310.pyc
ADDED
|
Binary file (1.59 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_shard_utils.cpython-310.pyc
ADDED
|
Binary file (4.32 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/api.cpython-310.pyc
ADDED
|
Binary file (19.8 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/sharded_grad_scaler.cpython-310.pyc
ADDED
|
Binary file (12.3 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/distributed/fsdp/_optim_utils.py
ADDED
|
@@ -0,0 +1,2086 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
import functools
|
| 3 |
+
import logging
|
| 4 |
+
import warnings
|
| 5 |
+
from contextlib import ExitStack
|
| 6 |
+
from dataclasses import dataclass, field
|
| 7 |
+
from typing import (
|
| 8 |
+
Any,
|
| 9 |
+
cast,
|
| 10 |
+
Dict,
|
| 11 |
+
Iterable,
|
| 12 |
+
Iterator,
|
| 13 |
+
List,
|
| 14 |
+
NamedTuple,
|
| 15 |
+
no_type_check,
|
| 16 |
+
Optional,
|
| 17 |
+
Sequence,
|
| 18 |
+
Set,
|
| 19 |
+
Tuple,
|
| 20 |
+
Union,
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
import torch
|
| 24 |
+
import torch.distributed as dist
|
| 25 |
+
import torch.distributed.fsdp._traversal_utils as traversal_utils
|
| 26 |
+
import torch.nn as nn
|
| 27 |
+
from torch.distributed._shard.sharded_tensor import ShardedTensor
|
| 28 |
+
from torch.distributed._state_dict_utils import _gather_state_dict
|
| 29 |
+
from torch.distributed._tensor import DTensor, Replicate
|
| 30 |
+
from torch.distributed.distributed_c10d import _get_pg_default_device
|
| 31 |
+
from torch.distributed.fsdp._common_utils import (
|
| 32 |
+
_apply_to_modules,
|
| 33 |
+
_FSDPState,
|
| 34 |
+
_get_module_fsdp_state_if_fully_sharded_module,
|
| 35 |
+
_get_param_to_fqns,
|
| 36 |
+
_module_handle,
|
| 37 |
+
_named_parameters_with_duplicates,
|
| 38 |
+
clean_tensor_name,
|
| 39 |
+
)
|
| 40 |
+
from torch.distributed.fsdp._debug_utils import SimpleProfiler
|
| 41 |
+
from torch.distributed.fsdp._flat_param import FlatParameter, FlatParamHandle
|
| 42 |
+
from torch.distributed.fsdp._fsdp_extensions import (
|
| 43 |
+
_ext_chunk_dtensor,
|
| 44 |
+
_ext_chunk_tensor,
|
| 45 |
+
)
|
| 46 |
+
from torch.distributed.fsdp._runtime_utils import (
|
| 47 |
+
_lazy_init,
|
| 48 |
+
_reset_flat_param_grad_info_if_needed,
|
| 49 |
+
)
|
| 50 |
+
from torch.distributed.fsdp.api import (
|
| 51 |
+
ShardingStrategy,
|
| 52 |
+
StateDictSettings,
|
| 53 |
+
StateDictType,
|
| 54 |
+
)
|
| 55 |
+
from torch.utils._pytree import tree_map_only
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
logger = logging.getLogger(__name__)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
@dataclass
|
| 62 |
+
class FSDPParamInfo:
|
| 63 |
+
state: _FSDPState
|
| 64 |
+
handle: FlatParamHandle
|
| 65 |
+
param_indices: Dict[str, int]
|
| 66 |
+
param_requires_grad: List[bool]
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def sorted_items(dictionary: Dict[str, Any]) -> Iterator[Tuple[str, Any]]:
|
| 70 |
+
keys = sorted(dictionary.keys())
|
| 71 |
+
for k in keys:
|
| 72 |
+
yield k, dictionary[k]
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
@dataclass
|
| 76 |
+
class _ConsolidatedOptimState:
|
| 77 |
+
"""
|
| 78 |
+
This holds the consolidated optimizer state on the target rank. Positive-
|
| 79 |
+
dimension tensor state is communicated across ranks, while zero-dimension
|
| 80 |
+
tensor state and non-tensor state is taken directly from the target rank.
|
| 81 |
+
|
| 82 |
+
PyTorch version 1.12 moved to using zero-dimension tensors for scalar
|
| 83 |
+
values, but user implemented optimizers may still use float (i.e. a
|
| 84 |
+
non-tensor). Thus, we support both and handle them identically.
|
| 85 |
+
|
| 86 |
+
Attributes:
|
| 87 |
+
tensor_state (Dict[str, torch.Tensor]): Mapping from positive-dimension
|
| 88 |
+
tensor state name to the unsharded flat tensor representing the
|
| 89 |
+
state.
|
| 90 |
+
zero_dim_tensor_state (Dict[str, torch.Tensor]): Mapping from zero-
|
| 91 |
+
dimension tensor state name to its value.
|
| 92 |
+
non_tensor_state (Dict[str, Any]): Mapping from non-tensor state
|
| 93 |
+
name to its value.
|
| 94 |
+
"""
|
| 95 |
+
|
| 96 |
+
tensor_state: Dict[str, torch.Tensor] = field(default_factory=dict)
|
| 97 |
+
zero_dim_tensor_state: Dict[str, torch.Tensor] = field(default_factory=dict)
|
| 98 |
+
non_tensor_state: Dict[str, Any] = field(default_factory=dict)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
class _PosDimTensorInfo(NamedTuple):
|
| 102 |
+
"""
|
| 103 |
+
Meatadata for positive-dimension tensors used internally for
|
| 104 |
+
:meth:`scatter_full_optim_state_dict`.
|
| 105 |
+
|
| 106 |
+
Attributes:
|
| 107 |
+
shape (torch.Size): Sharded tensor shape (which is equal to the
|
| 108 |
+
unsharded tensor shape if the tensor is optimizer state for a
|
| 109 |
+
non-FSDP parameter and is hence not sharded).
|
| 110 |
+
dtype (torch.dtype): Data type of the tensor.
|
| 111 |
+
"""
|
| 112 |
+
|
| 113 |
+
shape: torch.Size
|
| 114 |
+
dtype: torch.dtype
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
class _OptimStateKey(NamedTuple):
|
| 118 |
+
"""
|
| 119 |
+
This represents an optimizer state key that may be used commonly across
|
| 120 |
+
ranks. It is based on the unflattened parameter names rather than parameter
|
| 121 |
+
IDs to make it independent of each rank's own optimizer construction.
|
| 122 |
+
"""
|
| 123 |
+
|
| 124 |
+
unflat_param_names: Tuple[str, ...]
|
| 125 |
+
is_fsdp_managed: bool
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def _unflatten_optim_state(
|
| 129 |
+
fsdp_param_info: FSDPParamInfo,
|
| 130 |
+
flat_param_state: Dict[str, Any],
|
| 131 |
+
to_save: bool,
|
| 132 |
+
shard_state: bool,
|
| 133 |
+
cpu_offload: bool,
|
| 134 |
+
) -> List[Dict[str, Any]]:
|
| 135 |
+
"""
|
| 136 |
+
Unflattens the optimizer state, consisting of the "state" part and the
|
| 137 |
+
"param_groups" part. Unflattening the "state" part involves consolidating
|
| 138 |
+
the state on the target rank and remapping from flattened to unflattened
|
| 139 |
+
parameter IDs, and the "param_groups" part only involves remapping from
|
| 140 |
+
flattened to unflattened parameter IDs.
|
| 141 |
+
|
| 142 |
+
Args:
|
| 143 |
+
fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a
|
| 144 |
+
mapping from FQN to original parameter index.
|
| 145 |
+
flat_param_state (Dict[str, Any]): Entry for the flat parameter in the
|
| 146 |
+
"state" part of the optimizer state dict.
|
| 147 |
+
to_save (bool): Whether to save the state on this rank.
|
| 148 |
+
|
| 149 |
+
Returns:
|
| 150 |
+
List[Dict[str, Any]]: A :class:`list` holding the entries in the
|
| 151 |
+
"state" part of the optimizer state dict corresponding to the
|
| 152 |
+
unflattened parameters comprising the flat parameter if on the target
|
| 153 |
+
rank or an empty :class:`list` otherwise. The final optimizer state
|
| 154 |
+
dict will need to map these entries using the proper unflattened
|
| 155 |
+
parameter IDs.
|
| 156 |
+
"""
|
| 157 |
+
assert (
|
| 158 |
+
not shard_state or to_save
|
| 159 |
+
), "If ``shard_state`` is True, ``to_save`` has to be True."
|
| 160 |
+
consolidated_state = _communicate_optim_state(
|
| 161 |
+
fsdp_param_info,
|
| 162 |
+
flat_param_state,
|
| 163 |
+
)
|
| 164 |
+
if to_save:
|
| 165 |
+
unflat_param_state = _unflatten_communicated_optim_state(
|
| 166 |
+
fsdp_param_info,
|
| 167 |
+
consolidated_state,
|
| 168 |
+
shard_state,
|
| 169 |
+
)
|
| 170 |
+
for optim_state in unflat_param_state:
|
| 171 |
+
# We can't use .items() below cuz we'd run into a concurrent modification error
|
| 172 |
+
if cpu_offload:
|
| 173 |
+
for key in list(optim_state.keys()):
|
| 174 |
+
state = optim_state[key]
|
| 175 |
+
if not isinstance(state, torch.Tensor):
|
| 176 |
+
continue
|
| 177 |
+
optim_state[key] = state.cpu()
|
| 178 |
+
return unflat_param_state
|
| 179 |
+
else:
|
| 180 |
+
return []
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
def _is_zero_dim_tensor(x: Any) -> bool:
|
| 184 |
+
return torch.is_tensor(x) and x.dim() == 0
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
def _communicate_optim_state(
|
| 188 |
+
fsdp_param_info: FSDPParamInfo,
|
| 189 |
+
flat_param_state: Dict[str, Any],
|
| 190 |
+
) -> _ConsolidatedOptimState:
|
| 191 |
+
"""
|
| 192 |
+
Communicates the optimizer state for a flat parameter across ranks. All
|
| 193 |
+
ranks will hold the entire non-sharded optimizer state on GPU.
|
| 194 |
+
|
| 195 |
+
If ``N`` is the number of tensor optimizer states in the optimizer state
|
| 196 |
+
dict, then the communication complexity is 0 if ``N = 0`` and ``N + 1``
|
| 197 |
+
otherwise (where the plus 1 comes from all-gathering the padding per rank).
|
| 198 |
+
|
| 199 |
+
Args:
|
| 200 |
+
fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a
|
| 201 |
+
mapping from FQN to original parameter index.
|
| 202 |
+
flat_param_state (Dict[str, Any]): The entry in the "state" part of the
|
| 203 |
+
optimizer state dict corresponding to the flat parameter.
|
| 204 |
+
|
| 205 |
+
Returns:
|
| 206 |
+
ConsolidatedOptimState: Consolidated optimizer state for the target
|
| 207 |
+
flat parameter.
|
| 208 |
+
"""
|
| 209 |
+
fsdp_state = fsdp_param_info.state
|
| 210 |
+
flat_param = fsdp_param_info.handle.flat_param
|
| 211 |
+
state = _ConsolidatedOptimState()
|
| 212 |
+
tensor_state, zero_dim_tensor_state, non_tensor_state = (
|
| 213 |
+
state.tensor_state,
|
| 214 |
+
state.zero_dim_tensor_state,
|
| 215 |
+
state.non_tensor_state,
|
| 216 |
+
)
|
| 217 |
+
|
| 218 |
+
for state_name, value in sorted_items(flat_param_state):
|
| 219 |
+
# Positive-dimension tensor state: communicate across ranks
|
| 220 |
+
if torch.is_tensor(value) and value.dim() > 0:
|
| 221 |
+
# If the parameter is not sharded, then neither is the
|
| 222 |
+
# positive-dimension tensor state, so no need to communicate it --
|
| 223 |
+
# we take the target rank's value
|
| 224 |
+
if (
|
| 225 |
+
fsdp_state.world_size == 1
|
| 226 |
+
or fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD
|
| 227 |
+
):
|
| 228 |
+
tensor_state[state_name] = value
|
| 229 |
+
continue
|
| 230 |
+
assert (
|
| 231 |
+
fsdp_state.compute_device is not None
|
| 232 |
+
), "compute_device has not been initialized"
|
| 233 |
+
if value.device.type != fsdp_state.compute_device.type:
|
| 234 |
+
value = value.to(fsdp_state.compute_device)
|
| 235 |
+
# Assume that positive-dimension tensor optimizer state
|
| 236 |
+
# has the same shape as the sharded flat parameter
|
| 237 |
+
buffer_size = flat_param._full_param_padded.size() # type: ignore[attr-defined]
|
| 238 |
+
tensor_buffer = value.new_zeros(*buffer_size)
|
| 239 |
+
dist.all_gather_into_tensor(
|
| 240 |
+
tensor_buffer, value, group=fsdp_state.process_group
|
| 241 |
+
)
|
| 242 |
+
fsdp_state._device_handle.synchronize()
|
| 243 |
+
unpadded_numel = cast(
|
| 244 |
+
nn.Parameter, flat_param._unpadded_unsharded_size
|
| 245 |
+
).numel()
|
| 246 |
+
tensor_state[state_name] = tensor_buffer[:unpadded_numel]
|
| 247 |
+
# Zero-dimension tensor state and non-tensor state: take this rank's
|
| 248 |
+
# value directly
|
| 249 |
+
else:
|
| 250 |
+
if _is_zero_dim_tensor(value):
|
| 251 |
+
zero_dim_tensor_state[state_name] = value.detach().clone()
|
| 252 |
+
else:
|
| 253 |
+
non_tensor_state[state_name] = value
|
| 254 |
+
return state
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
def _unflatten_communicated_optim_state(
|
| 258 |
+
fsdp_param_info: FSDPParamInfo,
|
| 259 |
+
state: _ConsolidatedOptimState,
|
| 260 |
+
shard_state: bool,
|
| 261 |
+
) -> List[Dict[str, Any]]:
|
| 262 |
+
"""
|
| 263 |
+
Unflattens the communicated optimizer state (given by ``tensor_state``,
|
| 264 |
+
``non_tensor_state``, and ``zero_dim_tensor_state``) for a single flat
|
| 265 |
+
parameter. This should only be called on the target rank.
|
| 266 |
+
|
| 267 |
+
Args:
|
| 268 |
+
fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a
|
| 269 |
+
mapping from FQN to original parameter index.
|
| 270 |
+
state (_ConsolidatedOptimState): Consolidated optimizer state.
|
| 271 |
+
|
| 272 |
+
Returns:
|
| 273 |
+
List[Dict[str, Any]]: A :class:`list` holding the entries in the
|
| 274 |
+
"state" part of the optimizer state dict corresponding to the
|
| 275 |
+
unflattened parameters comprising the flat parameter. The final
|
| 276 |
+
optimizer state dict will need to map these entries using the proper
|
| 277 |
+
unflattened parameter IDs.
|
| 278 |
+
"""
|
| 279 |
+
fsdp_state = fsdp_param_info.state
|
| 280 |
+
handle = fsdp_param_info.handle
|
| 281 |
+
flat_param = handle.flat_param
|
| 282 |
+
unflat_param_state: List[Dict[str, Any]] = []
|
| 283 |
+
flat_param_views: Dict[str, Iterator] = {}
|
| 284 |
+
num_unflat_params = flat_param._num_params
|
| 285 |
+
tensor_state, zero_dim_tensor_state, non_tensor_state = (
|
| 286 |
+
state.tensor_state,
|
| 287 |
+
state.zero_dim_tensor_state,
|
| 288 |
+
state.non_tensor_state,
|
| 289 |
+
)
|
| 290 |
+
|
| 291 |
+
for _ in range(num_unflat_params):
|
| 292 |
+
unflat_state_param = {}
|
| 293 |
+
# Add positive-dimension tensor state: unflatten with views
|
| 294 |
+
for state_name, flat_tensor in sorted_items(tensor_state):
|
| 295 |
+
views_generated = state_name in flat_param_views
|
| 296 |
+
if not views_generated:
|
| 297 |
+
views = handle._get_unflat_views(flat_tensor)
|
| 298 |
+
flat_param_views[state_name] = views
|
| 299 |
+
else:
|
| 300 |
+
views = flat_param_views[state_name]
|
| 301 |
+
optim_state: Union[torch.Tensor, ShardedTensor, DTensor] = next(views)
|
| 302 |
+
if shard_state:
|
| 303 |
+
osd_config = fsdp_state._optim_state_dict_config
|
| 304 |
+
if getattr(osd_config, "_use_dtensor", False):
|
| 305 |
+
assert fsdp_state._device_mesh is not None
|
| 306 |
+
optim_state = _ext_chunk_dtensor(
|
| 307 |
+
optim_state,
|
| 308 |
+
fsdp_state.rank,
|
| 309 |
+
fsdp_state._device_mesh,
|
| 310 |
+
fsdp_state._fsdp_extension,
|
| 311 |
+
)
|
| 312 |
+
else:
|
| 313 |
+
assert fsdp_state.process_group is not None
|
| 314 |
+
optim_state = _ext_chunk_tensor(
|
| 315 |
+
optim_state,
|
| 316 |
+
fsdp_state.rank,
|
| 317 |
+
fsdp_state.world_size,
|
| 318 |
+
fsdp_state._device_handle.device_count(),
|
| 319 |
+
fsdp_state.process_group,
|
| 320 |
+
fsdp_state._fsdp_extension,
|
| 321 |
+
)
|
| 322 |
+
unflat_state_param[state_name] = optim_state
|
| 323 |
+
|
| 324 |
+
# Add zero-dimension tensor state: take the target rank's value
|
| 325 |
+
for state_name, zero_dim_tensor in sorted_items(zero_dim_tensor_state):
|
| 326 |
+
unflat_state_param[state_name] = zero_dim_tensor
|
| 327 |
+
# Add non-tensor state: take the target rank's value
|
| 328 |
+
for state_name, non_tensor in sorted_items(non_tensor_state):
|
| 329 |
+
unflat_state_param[state_name] = non_tensor
|
| 330 |
+
unflat_param_state.append(unflat_state_param)
|
| 331 |
+
return unflat_param_state
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
def _broadcast_processed_state(
|
| 335 |
+
fsdp_state: _FSDPState,
|
| 336 |
+
optim_state: Dict[str, Any],
|
| 337 |
+
group: Optional[dist.ProcessGroup],
|
| 338 |
+
) -> Dict[str, Any]:
|
| 339 |
+
objects: List[Any] = [None]
|
| 340 |
+
if fsdp_state.rank == 0:
|
| 341 |
+
objects[0] = tree_map_only(
|
| 342 |
+
torch.Tensor,
|
| 343 |
+
lambda v: v.cpu() if v.dim() == 0 else _PosDimTensorInfo(v.shape, v.dtype), # type: ignore[union-attr]
|
| 344 |
+
optim_state,
|
| 345 |
+
)
|
| 346 |
+
dist.broadcast_object_list(objects, src=0, group=group)
|
| 347 |
+
if fsdp_state.rank == 0:
|
| 348 |
+
return optim_state
|
| 349 |
+
else:
|
| 350 |
+
return objects[0]
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
def _broadcast_state(
|
| 354 |
+
fsdp_state: _FSDPState, state: Any, group: Optional[dist.ProcessGroup]
|
| 355 |
+
) -> Any:
|
| 356 |
+
if fsdp_state.rank == 0:
|
| 357 |
+
if not isinstance(state, torch.Tensor) or state.dim() == 0:
|
| 358 |
+
return state
|
| 359 |
+
tensor = state.to(fsdp_state.compute_device)
|
| 360 |
+
else:
|
| 361 |
+
if isinstance(state, torch.Tensor):
|
| 362 |
+
assert state.dim() == 0, (
|
| 363 |
+
"For non-zero ranks, a tensor state should have zero dimension, "
|
| 364 |
+
"but got the state with shape {state.shape()}."
|
| 365 |
+
)
|
| 366 |
+
return state
|
| 367 |
+
elif not isinstance(state, _PosDimTensorInfo):
|
| 368 |
+
return state
|
| 369 |
+
tensor = torch.zeros(
|
| 370 |
+
state.shape, dtype=state.dtype, device=fsdp_state.compute_device
|
| 371 |
+
)
|
| 372 |
+
dist.broadcast(tensor, src=0, group=group)
|
| 373 |
+
return tensor
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
def _shard_orig_param_state(
|
| 377 |
+
fsdp_param_info: FSDPParamInfo,
|
| 378 |
+
fqn: str,
|
| 379 |
+
optim_state: Dict[str, Any],
|
| 380 |
+
) -> Dict[str, Any]:
|
| 381 |
+
"""
|
| 382 |
+
Shard the optimizer state for the original parameter with the name ``fqn``.
|
| 383 |
+
This API should only be used when ``use_orig_params`` is True.
|
| 384 |
+
"""
|
| 385 |
+
if not optim_state:
|
| 386 |
+
return {}
|
| 387 |
+
fsdp_state = fsdp_param_info.state
|
| 388 |
+
flat_param = fsdp_param_info.handle.flat_param
|
| 389 |
+
param_idx = fsdp_param_info.param_indices[fqn]
|
| 390 |
+
shard_param_info = flat_param._shard_param_infos[param_idx] # type: ignore[attr-defined]
|
| 391 |
+
optim_state = _gather_state_dict(
|
| 392 |
+
optim_state, pg=fsdp_state.process_group, device=fsdp_state.compute_device
|
| 393 |
+
)
|
| 394 |
+
if not shard_param_info.in_shard:
|
| 395 |
+
return {}
|
| 396 |
+
# Flatten and shard the state.
|
| 397 |
+
new_optim_state: Dict[str, Any] = {}
|
| 398 |
+
intra_param_start_idx = shard_param_info.intra_param_start_idx
|
| 399 |
+
intra_param_end_idx = shard_param_info.intra_param_end_idx
|
| 400 |
+
for state_name, value in optim_state.items():
|
| 401 |
+
if (
|
| 402 |
+
torch.is_tensor(value)
|
| 403 |
+
and value.dim() > 0
|
| 404 |
+
and fsdp_state.sharding_strategy != ShardingStrategy.NO_SHARD
|
| 405 |
+
):
|
| 406 |
+
value = value.flatten()[intra_param_start_idx : intra_param_end_idx + 1].clone() # type: ignore[operator]
|
| 407 |
+
new_optim_state[state_name] = value
|
| 408 |
+
return new_optim_state
|
| 409 |
+
|
| 410 |
+
|
| 411 |
+
def _flatten_optim_state_dict(
    optim_state_dict: Dict[str, Any],
    model: nn.Module,
    use_orig_params: bool = False,
    optim: Optional[torch.optim.Optimizer] = None,
    rank0_only: bool = False,
    group: Optional[dist.ProcessGroup] = None,
) -> Dict[str, Any]:
    """
    Flattens the full optimizer state dict, still keying by unflattened parameter
    names.

    If ``use_orig_params`` is True, each rank will have all FSDP-managed
    parameters but some of these parameters may be empty due to the sharding.
    For a regular optim.Optimizer, states for those empty parameters will
    not be initialized. So, when aggregating the FQNs across ranks, no assert
    will be raised on a rank even if it does not have all the states -- it is
    valid and FSDP knows how to aggregate them. However, FSDP has to ignore
    handling those parameters that are not managed by FSDP and do not exist on
    the local rank -- they are managed by other parallelism and FSDP does not
    know how to handle/aggregate them.

    Note that ``_flatten_tensor_optim_state`` does not need ``optim`` to
    flatten/shard the state. However, NamedOptimizer and KeyedOptimizer require
    all the states even if the corresponding parameters are empty. To this end,
    ``optim`` will be used to get the initial state of the empty parameters.
    ``optim`` should only be non-None if the ``optim`` is KeyedOptimizer or
    NamedOptimizer.

    Returns:
        Dict[str, Any]: The flattened optimizer state dict.
    """
    SimpleProfiler.reset()

    unflat_osd = optim_state_dict
    if "state" not in unflat_osd and not rank0_only:
        # NOTE: Fixed a missing space between the concatenated message parts
        # (previously rendered as '..."state"to be a valid...').
        raise ValueError(
            '`optim_state_dict` must have the keys "state" '
            "to be a valid optimizer state dict"
        )
    param_to_fqns = _get_param_to_fqns(model)
    fqn_to_fsdp_param_info = _get_fqn_to_fsdp_param_info(model)
    fsdp_state = next(iter(fqn_to_fsdp_param_info.values())).state

    # Broadcast unflat_osd without non-scalar tensor if rank0_only is True.
    if rank0_only:
        unflat_osd = _broadcast_processed_state(fsdp_state, unflat_osd, group=group)

    # Construct the "state" part
    flat_osd_state: Dict[Union[_OptimStateKey, str], Any] = {}
    unflat_osd_state = unflat_osd["state"]
    all_state_keys = set(unflat_osd_state.keys())

    for param, fqns in param_to_fqns.items():
        fqn = fqns[0]
        if fqn not in unflat_osd_state:
            continue
        # Keys consumed here are removed so that only user-defined
        # (non-parameter) state remains afterwards.
        all_state_keys.difference_update(fqns)

        if rank0_only:
            # Materialize the non-scalar tensor state on non-zero ranks.
            for fqn in fqns:
                if not unflat_osd_state[fqn]:
                    continue
                for state_name in unflat_osd_state[fqn].keys():
                    unflat_osd_state[fqn][state_name] = _broadcast_state(
                        fsdp_state, unflat_osd_state[fqn][state_name], group=group
                    )
        # Re-bind ``fqn`` since the loop above clobbers it.
        fqn = fqns[0]
        if fqn in fqn_to_fsdp_param_info:
            fsdp_param_info = fqn_to_fsdp_param_info[fqn]
            if use_orig_params:
                with SimpleProfiler.profile(SimpleProfiler.Type.RESHARDING):
                    flat_state = _shard_orig_param_state(
                        fsdp_param_info,
                        fqn,
                        unflat_osd_state[fqn],
                    )
            else:
                flat_state = _flatten_optim_state(
                    fsdp_param_info,
                    unflat_osd_state,
                    fqns,
                )
            key = _OptimStateKey(tuple(fqns), True)
            # Only include non-empty states since as expected by
            # `torch.optim.Optimizer` s unless the optimizer is KeyedOptimizer
            # or NamedOptimizer.
            if flat_state:
                flat_osd_state[key] = flat_state
            elif use_orig_params:
                assert (
                    len(fqns) == 1
                ), f"use_orig_params is True but there are multiple FQNs, {fqns}."
                if optim is not None:  # NamedOptimizer or KeyedOptimizer case.
                    state = optim.state.get(param, None)  # type: ignore[call-overload]
                    if state is not None:
                        flat_osd_state[key] = copy.deepcopy(state)
                    else:
                        warnings.warn(
                            f"optim_state[{key}] is not on rank{fsdp_state.rank}."
                        )

            else:
                raise RuntimeError(
                    f"The state of {key} is empty. This should happen when "
                    "use_orig_params=True."
                )
        else:  # do not flatten non-FSDP parameters' states
            assert len(fqns) == 1
            key = _OptimStateKey(tuple(fqns), False)
            flat_osd_state[key] = copy.copy(unflat_osd_state[fqn])

        if rank0_only:
            for fqn in fqns:
                if not unflat_osd_state[fqn]:
                    continue
                for state_name, param_state in list(unflat_osd_state[fqn].items()):
                    if fsdp_state.rank > 0:
                        # Dereference the tensor so that PyTorch can collect the memory.
                        del unflat_osd_state[fqn][state_name]
                    else:
                        # Move the tensor in the original osd back to CPU to make the
                        # original osd unaffected.
                        unflat_osd_state[fqn][state_name] = unflat_osd_state[fqn][
                            state_name
                        ].cpu()

    # Handle user-defined state, states that are not associated with parameters.
    for key in all_state_keys:
        user_state = unflat_osd_state[key]
        if isinstance(user_state, torch.Tensor) and rank0_only and use_orig_params:
            user_state = _broadcast_state(fsdp_state, user_state, group=group)
        flat_osd_state[key] = copy.copy(user_state)

    SimpleProfiler.dump_and_reset("FSDP _flatten_optim_state_dict() profiling: ")
    # Construct the "param_groups" part -- copy as is since it will be
    # rekeyed later according to the target rank's optimizer
    # Only copy param_groups if it exists in unflat_osd
    if "param_groups" in unflat_osd:
        flat_osd_param_groups = copy.deepcopy(unflat_osd["param_groups"])
        return {"state": flat_osd_state, "param_groups": flat_osd_param_groups}
    else:
        return {"state": flat_osd_state}
|
| 554 |
+
|
| 555 |
+
|
| 556 |
+
def _flatten_optim_state(
    fsdp_param_info: FSDPParamInfo,
    unflat_osd_state: Dict[str, Dict[str, Any]],
    unflat_param_names: List[str],
) -> Dict[str, Any]:
    """
    Flattens the optimizer state in ``full_optim_state_dict`` for a single
    flat parameter in ``fsdp_param_info`` corresponding to the unflattened
    parameter names in ``unflat_param_names``.

    Args:
        fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a
            mapping from FQN to original parameter index.
        unflat_osd_state (Dict[str, Dict[str, Any]]): The "state" part of the
            optimizer state dict corresponding to the unflattened parameters.
        unflat_param_names (List[str]): A :class:`list` of unflattened
            parameter names corresponding to the flat parameter ``flat_param``.

    Returns:
        Dict[str, Any]: A :class:`dict` mapping state names to their values for
        a particular flat parameter. The sharded optimizer state dict's "state"
        part will map a key to this returned value.

    Raises:
        ValueError: If the unflattened parameters have differing optimizer
            state names, or mixed value types for the same state name.
    """
    fsdp_state = fsdp_param_info.state
    handle = fsdp_param_info.handle
    flat_param = handle.flat_param
    num_unflat_params = len(unflat_param_names)
    assert num_unflat_params > 0, (
        "Expects at least one unflattened parameter corresponding to the "
        "flat parameter"
    )
    unflat_param_shapes = flat_param._shapes
    num_unflat_param_shapes = len(unflat_param_shapes)
    assert (
        num_unflat_params == num_unflat_param_shapes
    ), f"Expects {num_unflat_params} shapes but got {num_unflat_param_shapes}"

    # Check if these unflattened parameters have any optimizer state
    has_state = [
        bool(unflat_param_name in unflat_osd_state)
        for unflat_param_name in unflat_param_names
    ]
    # If none of the unflattened parameters comprising this flat parameter have
    # any state, then we do not want an entry in the optimizer state dict
    if not any(has_state):
        return {}  # no need to flatten any state
    # There may still be some unflattened parameters with state and some
    # without
    unflat_param_states = [
        _gather_state_dict(
            unflat_osd_state[unflat_param_name],
            pg=fsdp_state.process_group,
            device=fsdp_state.compute_device,
        )
        if unflat_param_name in unflat_osd_state
        else None
        for unflat_param_name in unflat_param_names
    ]
    # Check that the unflattened parameters have the same state names
    state_names = None
    for unflat_param_state in unflat_param_states:
        if unflat_param_state is None:
            continue
        if state_names is None:
            state_names = set(unflat_param_state.keys())
        else:
            if state_names != set(unflat_param_state.keys()):
                raise ValueError(
                    "Differing optimizer state names for the unflattened "
                    f"parameters: {unflat_param_names}"
                )
    # Guaranteed non-None by the `any(has_state)` early return above.
    assert state_names is not None

    # Flatten the state
    flat_state: Dict[str, Any] = {}
    for state_name in state_names:
        state_values = [
            unflat_param_state[state_name] if unflat_param_state is not None else None
            for unflat_param_state in unflat_param_states
        ]
        non_none_state_values = [v for v in state_values if v is not None]
        # If all ranks have None, this is a None value
        if not non_none_state_values:
            flat_state[state_name] = None
            continue
        # Classify the non-None values: they must all be positive-dimension
        # tensors, all zero-dimension tensors, or all non-tensors.
        are_pos_dim_tensors = are_zero_dim_tensors = are_non_tensors = True
        for v in non_none_state_values:
            are_pos_dim_tensors &= torch.is_tensor(v) and v.dim() > 0
            are_zero_dim_tensors &= _is_zero_dim_tensor(v)
            are_non_tensors &= not torch.is_tensor(v)
        types = {type(v) for v in non_none_state_values}
        if len(types) != 1 or not (
            are_pos_dim_tensors or are_zero_dim_tensors or are_non_tensors
        ):
            raise ValueError(
                f"Differing optimizer state types for state {state_name}, "
                f"values {non_none_state_values}, and unflattened parameter "
                f"names {unflat_param_names}"
            )
        if are_pos_dim_tensors:
            flat_tensor = _flatten_tensor_optim_state(
                state_name,
                state_values,
                unflat_param_names,
                unflat_param_shapes,
                handle,
            )
            # Shard the flattened tensor immediately to minimize max memory
            # usage
            if (
                fsdp_state.world_size != 1
                and fsdp_state.sharding_strategy != ShardingStrategy.NO_SHARD
            ):
                sharded_flat_tensor, _ = FlatParamHandle._get_shard(
                    flat_tensor,
                    fsdp_state.rank,
                    fsdp_state.world_size,
                )
            else:
                sharded_flat_tensor = flat_tensor
            flat_state[state_name] = sharded_flat_tensor
        elif are_zero_dim_tensors:
            flat_state[state_name] = _flatten_zero_dim_tensor_optim_state(
                state_name,
                state_values,
                unflat_param_names,
            )
        else:
            assert are_non_tensors
            flat_state[state_name] = _flatten_non_tensor_optim_state(
                state_name,
                state_values,
                unflat_param_names,
            )

    return flat_state
|
| 692 |
+
|
| 693 |
+
|
| 694 |
+
def _flatten_tensor_optim_state(
    state_name: str,
    pos_dim_tensors: List[torch.Tensor],
    unflat_param_names: List[str],
    unflat_param_shapes: Sequence[torch.Size],
    handle: FlatParamHandle,
) -> torch.Tensor:
    """
    Flattens the positive-dimension tensor optimizer state given by the values
    ``tensors`` for the state ``state_name`` for a single flat parameter
    from ``handle`` corresponding to the unflattened parameter names
    ``unflat_param_names`` and unflatted parameter shapes
    ``unflat_param_shapes``. This flattens each unflattened parameter's tensor
    state into one tensor.

    NOTE: We use zero tensors for any unflattened parameters without state
    since some value is required to fill those entries. This assumes that the
    zero tensor is mathematically equivalent to having no state, which is true
    for Adam's "exp_avg" and "exp_avg_sq" but may not be true for all
    optimizers.

    Args:
        state_name (str): Optimizer state name.
        pos_dim_tensors (List[torch.Tensor]): Positive-dimension tensor
            optimizer state values for the unflattened parameters corresponding
            to the single flat parameter.
        unflat_param_names (List[str]): A :class:`list` of unflattened
            parameter names corresponding to the single flat parameter.
        unflat_param_shapes (List[torch.Size]): Unflattened parameter shapes
            corresponding to the single flat parameter.
        handle (FlatParamHandle): The flat parameter's handle.

    Returns:
        torch.Tensor: A flat tensor containing the optimizer state
        corresponding to ``state_name`` constructed by concatenating the
        unflattened parameter tensor states in ``pos_dim_tensors`` (using zero
        tensors for any unflattened parameters without the state).

    Raises:
        ValueError: If the present tensors have differing dtypes, or a
            tensor's shape does not match its parameter's shape.
    """
    flat_param = handle.flat_param
    non_none_tensors = [t for t in pos_dim_tensors if t is not None]
    # Check that all are tensors with the same dtype
    dtypes = {t.dtype for t in non_none_tensors}
    if len(dtypes) != 1:
        raise ValueError(
            "All unflattened parameters comprising a single flat "
            "parameter must have positive-dimension tensor state with the "
            f"same dtype but got dtypes {dtypes} for state {state_name} and "
            f"unflattened parameter names {unflat_param_names}"
        )
    dtype = next(iter(dtypes))
    # Check that each tensor state matches its parameter's shape
    for tensor, shape in zip(pos_dim_tensors, unflat_param_shapes):
        if tensor is None and len(shape) == 0:
            raise ValueError("Flattening a zero-dimension parameter is not supported")
        elif tensor is not None and tensor.shape != shape:
            raise ValueError(
                "Tensor optimizer state does not have same shape as its "
                f"parameter: {tensor.shape} {shape}"
            )
    # Flatten the tensor states: we do not need to add any right-hand-side
    # padding since the flat optimizer state tensor is sharded via
    # `_get_shard()`, which pads the shard as needed (just like for the flat
    # parameter)
    cpu_device = torch.device("cpu")
    # Missing (`None`) states are stood in for by zero tensors of the
    # parameter's shape -- see the NOTE in the docstring.
    tensors_to_flatten = [
        torch.flatten(state_value.to(cpu_device))
        if state_value is not None
        else torch.flatten(
            torch.zeros(
                size=shape,
                dtype=dtype,
                device=cpu_device,
            )
        )
        for state_value, shape in zip(pos_dim_tensors, unflat_param_shapes)
    ]
    flat_tensor = handle.flatten_tensors(tensors_to_flatten, handle._aligned_numel)
    flat_param_shape = flat_param._unpadded_unsharded_size  # type: ignore[attr-defined]
    # The flattened state must exactly mirror the (unpadded, unsharded) flat
    # parameter so that sharding one mirrors sharding the other.
    assert flat_tensor.shape == flat_param_shape, (
        f"tensor optim state: {flat_tensor.shape} "
        f"flat parameter: {flat_param_shape}"
    )
    return flat_tensor
|
| 777 |
+
|
| 778 |
+
|
| 779 |
+
def _flatten_zero_dim_tensor_optim_state(
|
| 780 |
+
state_name: str,
|
| 781 |
+
zero_dim_tensors: List[torch.Tensor],
|
| 782 |
+
unflat_param_names: List[str],
|
| 783 |
+
) -> torch.Tensor:
|
| 784 |
+
"""
|
| 785 |
+
Flattens the zero-dimension tensor optimizer state given by the values
|
| 786 |
+
``zero_dim_tensors`` for the state ``state_name`` for a single flat
|
| 787 |
+
parameter corresponding to the unflattened parameter names
|
| 788 |
+
``unflat_param_names`` by enforcing that all tensors are the same and using
|
| 789 |
+
that common value.
|
| 790 |
+
|
| 791 |
+
NOTE: The requirement that the tensors are the same across all unflattened
|
| 792 |
+
parameters comprising the flat parameter is needed to maintain the
|
| 793 |
+
invariant that FSDP performs the same computation as its non-sharded
|
| 794 |
+
equivalent. This means that none of the unflattened parameters can be
|
| 795 |
+
missing this state since imposing a value may differ from having no value.
|
| 796 |
+
For example, for Adam's "step", no value means maximum bias correction,
|
| 797 |
+
while having some positive value means less bias correction.
|
| 798 |
+
|
| 799 |
+
Args:
|
| 800 |
+
state_name (str): Optimizer state name.
|
| 801 |
+
zero_dim_tensors (List[torch.Tensor]): Zero-dimension optimizer state
|
| 802 |
+
for the unflattened parameters corresponding to the single
|
| 803 |
+
flat parameter.
|
| 804 |
+
unflat_param_names (List[str]): A :class:`list` of unflattened
|
| 805 |
+
parameter names corresponding to the single flat parameter.
|
| 806 |
+
|
| 807 |
+
Returns:
|
| 808 |
+
torch.Tensor: A zero-dimensional tensor giving the value of the state
|
| 809 |
+
``state_name`` for all unflattened parameters corresponding to the
|
| 810 |
+
names ``unflat_param_names``.
|
| 811 |
+
"""
|
| 812 |
+
non_none_tensors = [t for t in zero_dim_tensors if t is not None]
|
| 813 |
+
# Enforce that all have the same value and dtype
|
| 814 |
+
values_set = {t.item() if t is not None else None for t in zero_dim_tensors}
|
| 815 |
+
dtypes = {t.dtype if t is not None else None for t in zero_dim_tensors}
|
| 816 |
+
if (
|
| 817 |
+
len(non_none_tensors) != len(zero_dim_tensors)
|
| 818 |
+
or len(values_set) != 1
|
| 819 |
+
or len(dtypes) != 1
|
| 820 |
+
):
|
| 821 |
+
raise ValueError(
|
| 822 |
+
"All unflattened parameters comprising a single flat "
|
| 823 |
+
"parameter must have scalar state with the same value and dtype "
|
| 824 |
+
f"but got values {values_set} and dtypes {dtypes} for state "
|
| 825 |
+
f"{state_name} and unflattened parameter names "
|
| 826 |
+
f"{unflat_param_names}"
|
| 827 |
+
)
|
| 828 |
+
value = next(iter(values_set))
|
| 829 |
+
dtype = next(iter(dtypes))
|
| 830 |
+
return torch.tensor(value, dtype=dtype, device=torch.device("cpu"))
|
| 831 |
+
|
| 832 |
+
|
| 833 |
+
def _flatten_non_tensor_optim_state(
|
| 834 |
+
state_name: str,
|
| 835 |
+
non_tensors: List[Any],
|
| 836 |
+
unflat_param_names: List[str],
|
| 837 |
+
) -> Any:
|
| 838 |
+
"""
|
| 839 |
+
Flattens the non-tensor optimizer state given by the values ``non_tensors``
|
| 840 |
+
for the state ``state_name`` for a single flat parameter corresponding
|
| 841 |
+
to the unflattened parameter names ``unflat_param_names`` by enforcing that
|
| 842 |
+
all values are the same and using that common value.
|
| 843 |
+
|
| 844 |
+
See the note in :func:`_flatten_zero_dim_tensor_optim_state`.
|
| 845 |
+
|
| 846 |
+
Args:
|
| 847 |
+
state_name (str): Optimizer state name.
|
| 848 |
+
non_tensors (List[Any]): Non-tensor optimizer state for the unflattened
|
| 849 |
+
parameters corresponding to the single flat parameter.
|
| 850 |
+
unflat_param_names (List[str]): A :class:`list` of unflattened
|
| 851 |
+
parameter names corresponding to the single flat parameter.
|
| 852 |
+
|
| 853 |
+
Returns:
|
| 854 |
+
Any: A non-tensor giving the value of the state ``state_name`` for all
|
| 855 |
+
unflattened parameters corresponding to the names
|
| 856 |
+
``unflat_param_names``.
|
| 857 |
+
"""
|
| 858 |
+
non_none_non_tensors = [nt for nt in non_tensors if nt is not None]
|
| 859 |
+
# Enforce that all have the same value (same type already checked)
|
| 860 |
+
non_tensor_set = set(non_tensors)
|
| 861 |
+
if len(non_none_non_tensors) != len(non_tensors) or len(non_tensor_set) != 1:
|
| 862 |
+
raise ValueError(
|
| 863 |
+
"All unflattened parameters comprising a single flat "
|
| 864 |
+
"parameter must have scalar state with the same value and dtype "
|
| 865 |
+
f"but got values {non_tensor_set} for state {state_name} and "
|
| 866 |
+
f"unflattened parameter names {unflat_param_names}"
|
| 867 |
+
)
|
| 868 |
+
non_tensor = next(iter(non_tensor_set))
|
| 869 |
+
return non_tensor
|
| 870 |
+
|
| 871 |
+
|
| 872 |
+
def _rekey_sharded_optim_state_dict(
    sharded_osd: Dict[str, Any],
    model: nn.Module,
    optim: torch.optim.Optimizer,
    optim_input: Optional[
        Union[
            List[Dict[str, Any]],
            Iterable[nn.Parameter],
        ]
    ],
    using_optim_input: bool,
    is_named_optimizer: bool = False,
) -> Dict[str, Any]:
    """
    Rekeys the optimizer state dict from unflattened parameter names to flat
    parameter IDs according to the calling rank's ``optim``, which may be
    different across ranks. In particular, the unflattened parameter names are
    represented as :class:`_OptimStateKey` s.

    Args:
        sharded_osd (Dict[str, Any]): Optimizer state dict whose "state" part
            is keyed by :class:`_OptimStateKey` (plain string keys are treated
            as user-defined state and passed through unchanged).
        model (nn.Module): Model owning the referenced parameters.
        optim (torch.optim.Optimizer): This rank's optimizer, which defines
            the target parameter keys (integer IDs, or FQNs for
            NamedOptimizer/KeyedOptimizer).
        optim_input: Legacy optimizer input (parameter list or parameter
            groups); only consulted when ``using_optim_input`` is True.
        using_optim_input (bool): Whether to derive keys from ``optim_input``
            (backward-compatible path) instead of from ``optim``.
        is_named_optimizer (bool): Whether ``optim`` keys parameters by FQN.

    Returns:
        Dict[str, Any]: The state dict with "state" rekeyed to this rank's
        parameter keys; "param_groups" is included only if present in the
        input.
    """
    param_to_fqns = _get_param_to_fqns(model)
    flat_param_to_fqn = _get_flat_param_to_fqn(model)
    param_to_param_key: Dict[nn.Parameter, Union[int, str]] = cast(
        Dict[nn.Parameter, Union[int, str]],
        (
            _get_param_to_param_id_from_optim_input(model, optim_input)
            if using_optim_input
            else _get_param_to_param_key(
                optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn
            )
        ),
    )
    # All parameter keys in `param_to_param_key` should be in
    # `param_to_fqns` -- strict inequality follows when not all parameters are
    # passed to the optimizer
    assert len(param_to_param_key) <= len(param_to_fqns)

    unflat_param_names_to_flat_param_key: Dict[
        Tuple[str, ...], Union[int, str]
    ] = {}  # for "state"
    unflat_param_name_to_flat_param_key: Dict[
        str, Union[int, str]
    ] = {}  # for "param_groups"
    for param, unflat_param_names in param_to_fqns.items():
        if param not in param_to_param_key:
            # This parameter was not passed to the optimizer
            continue
        flat_param_key = param_to_param_key[param]
        unflat_param_names_to_flat_param_key[tuple(unflat_param_names)] = flat_param_key
        for unflat_param_name in unflat_param_names:
            unflat_param_name_to_flat_param_key[unflat_param_name] = flat_param_key

    sharded_osd_state = sharded_osd["state"]
    rekeyed_osd_state: Dict[Union[str, int], Any] = {}
    for key, param_state in sharded_osd_state.items():
        if isinstance(key, str):
            # User-defined state not tied to a parameter keeps its key.
            rekeyed_osd_state[key] = param_state
            continue
        # Fall back to the original FQN tuple if the parameter is not in this
        # rank's optimizer.
        flat_param_key = unflat_param_names_to_flat_param_key.get(
            key.unflat_param_names, key.unflat_param_names
        )
        rekeyed_osd_state[flat_param_key] = param_state

    # Only process param_groups if it exists in sharded_osd
    if "param_groups" in sharded_osd:
        rekeyed_osd_param_groups: List[Dict[str, Any]] = []
        for unflat_param_group in sharded_osd["param_groups"]:
            flat_param_group = copy.deepcopy(unflat_param_group)
            flat_param_keys = sorted(
                {
                    unflat_param_name_to_flat_param_key[unflat_param_name]
                    for unflat_param_name in unflat_param_group["params"]
                }
            )
            flat_param_group["params"] = flat_param_keys
            rekeyed_osd_param_groups.append(flat_param_group)
        return {"state": rekeyed_osd_state, "param_groups": rekeyed_osd_param_groups}
    else:
        return {"state": rekeyed_osd_state}
|
| 950 |
+
|
| 951 |
+
|
| 952 |
+
def _get_param_id_to_param_from_optim_input(
|
| 953 |
+
model: nn.Module,
|
| 954 |
+
optim_input: Optional[
|
| 955 |
+
Union[
|
| 956 |
+
List[Dict[str, Any]],
|
| 957 |
+
Iterable[nn.Parameter],
|
| 958 |
+
]
|
| 959 |
+
] = None,
|
| 960 |
+
) -> Dict[int, nn.Parameter]:
|
| 961 |
+
"""
|
| 962 |
+
Constructs a mapping from parameter IDs to parameters. This may be used
|
| 963 |
+
both for models with ``FlatParameter`` s and without.
|
| 964 |
+
|
| 965 |
+
NOTE: This method is only preserved for backward compatibility. The method
|
| 966 |
+
:meth:`_get_param_key_to_param` is the preferred code path that does not
|
| 967 |
+
rely on ``optim_input``.
|
| 968 |
+
|
| 969 |
+
NOTE: We critically assume that, whether the optimizer input is a list of
|
| 970 |
+
parameters or a list of parameter groups, :class:`torch.optim.Optimizer`
|
| 971 |
+
enumerates the parameter IDs in order. In other words, for a parameter list
|
| 972 |
+
input, the parameter IDs should be in that list order, and for a parameter
|
| 973 |
+
groups input, the parameter IDs should be in order within each parameter
|
| 974 |
+
group and in order across parameter groups.
|
| 975 |
+
|
| 976 |
+
Args:
|
| 977 |
+
model (nn.Module): Model whose parameters are passed into the
|
| 978 |
+
optimizer.
|
| 979 |
+
optim_input (Optional[Union[List[Dict[str, Any]],
|
| 980 |
+
Iterable[nn.Parameter]]]): Input passed into the optimizer
|
| 981 |
+
representing either a :class:`list` of parameter groups or an
|
| 982 |
+
iterable of parameters; if ``None``, then this method assumes the
|
| 983 |
+
input was ``model.parameters()``. (Default: ``None``)
|
| 984 |
+
|
| 985 |
+
Returns:
|
| 986 |
+
List[nn.Parameter]: Mapping from parameter IDs to parameters,
|
| 987 |
+
where the parameter ID is implicitly the index in the :class:`list`.
|
| 988 |
+
"""
|
| 989 |
+
# Assume the standard case of passing `model.parameters()` to the optimizer
|
| 990 |
+
# if `optim_input` is not specified
|
| 991 |
+
if optim_input is None:
|
| 992 |
+
return dict(enumerate(model.parameters()))
|
| 993 |
+
try:
|
| 994 |
+
params = cast(List[nn.Parameter], list(optim_input))
|
| 995 |
+
except TypeError as e:
|
| 996 |
+
raise TypeError(
|
| 997 |
+
"Optimizer input should be an iterable of Tensors or dicts, "
|
| 998 |
+
f"but got {optim_input}"
|
| 999 |
+
) from e
|
| 1000 |
+
if len(params) == 0:
|
| 1001 |
+
raise ValueError("Optimizer input should not be empty")
|
| 1002 |
+
|
| 1003 |
+
# Check if the optimizer input represents tensors or parameter groups
|
| 1004 |
+
all_tensors = True
|
| 1005 |
+
all_dicts = True
|
| 1006 |
+
for param in params:
|
| 1007 |
+
all_tensors &= isinstance(param, torch.Tensor)
|
| 1008 |
+
all_dicts &= isinstance(param, dict)
|
| 1009 |
+
if not all_tensors and not all_dicts:
|
| 1010 |
+
raise TypeError("Optimizer input should be an iterable of Tensors or dicts")
|
| 1011 |
+
if all_tensors:
|
| 1012 |
+
return dict(enumerate(params))
|
| 1013 |
+
assert all_dicts
|
| 1014 |
+
param_id_to_param: List[nn.Parameter] = []
|
| 1015 |
+
for param_group in params:
|
| 1016 |
+
has_params_key = "params" in param_group # type: ignore[operator]
|
| 1017 |
+
assert has_params_key, (
|
| 1018 |
+
'A parameter group should map "params" to a list of the '
|
| 1019 |
+
"parameters in the group"
|
| 1020 |
+
)
|
| 1021 |
+
# Implicitly map `flat_param_id` (current length of the list) to
|
| 1022 |
+
# `param`
|
| 1023 |
+
param_id_to_param.extend(param_group["params"]) # type: ignore[index]
|
| 1024 |
+
return dict(enumerate(param_id_to_param))
|
| 1025 |
+
|
| 1026 |
+
|
| 1027 |
+
def _get_flat_param_to_fqn(model: torch.nn.Module) -> Dict[FlatParameter, str]:
    """
    Constructs a mapping from ``FlatParameter`` to a cleaned (devoid of prefixes
    from wrappers) fully qualified name (FQN). Note that this FQN is "non-canonical"
    because ``FlatParameter`` s do not come from the original module but are
    registered only after FSDP has been applied. This function returns the FSDP-given
    name for the ``FlatParameter`` (usually module._flat_param) as opposed to the
    canonical FQNs returned for ``FlatParameter`` s in ``_common_utils._get_param_to_fqns(...)``).

    Consequently, this function will only return a non-empty mapping if FSDP was
    applied with ``use_orig_params=False`` as, otherwise, the original parameters
    are used within the module and there would be no ``FlatParameter`` s in the module.

    Args:
        model (torch.nn.Module): Root module to traverse.

    Returns:
        Dict[FlatParameter, str]: Mapping from each ``FlatParameter`` found in
        the module tree to its cleaned FQN; empty if there are none.
    """

    def module_fn(module, prefix, tree_level, flat_param_to_fqn):
        # Only inspect this module's direct parameters; recursion over
        # submodules is driven by `_apply_to_modules`, which supplies `prefix`.
        for param_name, param in _named_parameters_with_duplicates(
            module, recurse=False
        ):
            if not isinstance(param, FlatParameter):
                continue
            # Strip wrapper prefixes (e.g. FSDP/checkpoint wrappers) from the
            # composed name.
            fqn = clean_tensor_name(prefix + param_name)
            flat_param_to_fqn[param] = fqn

    def return_fn(flat_param_to_fqn):
        # The accumulator dict is the final result.
        return flat_param_to_fqn

    flat_param_to_fqn_ret: Dict[FlatParameter, str] = {}
    return _apply_to_modules(
        model,
        module_fn,
        return_fn,
        [fqn for fqn, _ in _named_parameters_with_duplicates(model)],
        flat_param_to_fqn_ret,
    )
|
| 1062 |
+
|
| 1063 |
+
|
| 1064 |
+
def _get_param_key_to_param(
|
| 1065 |
+
optim: torch.optim.Optimizer,
|
| 1066 |
+
model: Optional[nn.Module] = None,
|
| 1067 |
+
is_named_optimizer: bool = False,
|
| 1068 |
+
param_to_fqns: Optional[Dict[nn.Parameter, List[str]]] = None,
|
| 1069 |
+
flat_param_to_fqn: Optional[Dict[FlatParameter, str]] = None,
|
| 1070 |
+
) -> Dict[Union[int, str], nn.Parameter]:
|
| 1071 |
+
"""
|
| 1072 |
+
Constructs a mapping from parameter keys to parameters. For the regular
|
| 1073 |
+
optimizers, the keys are parameter IDs. For NamedOptimizer, the keys
|
| 1074 |
+
are FQNs. This API may be used both for models with ``FlatParameter`` s and
|
| 1075 |
+
without.
|
| 1076 |
+
"""
|
| 1077 |
+
clean_fqn_to_curr_fqn: Dict[str, str] = {}
|
| 1078 |
+
if is_named_optimizer:
|
| 1079 |
+
assert (
|
| 1080 |
+
param_to_fqns is not None and flat_param_to_fqn is not None
|
| 1081 |
+
), "The optimizer is a NamedOptimizer, `param_to_fqns` must not be None."
|
| 1082 |
+
assert model is not None
|
| 1083 |
+
for key, _ in _named_parameters_with_duplicates(model):
|
| 1084 |
+
clean_fqn_to_curr_fqn[clean_tensor_name(key)] = key
|
| 1085 |
+
|
| 1086 |
+
param_key_to_param: Dict[Union[str, int], nn.Parameter] = {}
|
| 1087 |
+
pid = 0
|
| 1088 |
+
for param_group in optim.param_groups:
|
| 1089 |
+
if is_named_optimizer:
|
| 1090 |
+
for param in param_group["params"]:
|
| 1091 |
+
assert flat_param_to_fqn is not None
|
| 1092 |
+
if param in flat_param_to_fqn:
|
| 1093 |
+
# FlatParameter case
|
| 1094 |
+
key = flat_param_to_fqn[param]
|
| 1095 |
+
else:
|
| 1096 |
+
assert param_to_fqns is not None
|
| 1097 |
+
# use_orig_params case
|
| 1098 |
+
assert len(param_to_fqns[param]) == 1
|
| 1099 |
+
key = param_to_fqns[param][0]
|
| 1100 |
+
try:
|
| 1101 |
+
key = clean_fqn_to_curr_fqn[key]
|
| 1102 |
+
except KeyError as e:
|
| 1103 |
+
raise KeyError(
|
| 1104 |
+
f"Can't find {key} from {list(clean_fqn_to_curr_fqn.keys())}."
|
| 1105 |
+
) from e
|
| 1106 |
+
param_key_to_param[key] = param
|
| 1107 |
+
else:
|
| 1108 |
+
for param in param_group["params"]:
|
| 1109 |
+
param_key_to_param[pid] = param
|
| 1110 |
+
pid += 1
|
| 1111 |
+
|
| 1112 |
+
return param_key_to_param
|
| 1113 |
+
|
| 1114 |
+
|
| 1115 |
+
def _get_param_to_param_key(
    optim: torch.optim.Optimizer,
    model: Optional[nn.Module] = None,
    is_named_optimizer: bool = False,
    param_to_fqns: Optional[Dict[nn.Parameter, List[str]]] = None,
    flat_param_to_fqn: Optional[Dict[FlatParameter, str]] = None,
) -> Dict[nn.Parameter, Union[int, str]]:
    """
    Constructs the inverse mapping of :func:`_get_param_key_to_param`. This API
    only supports the case where `optim` is a regular optimizer, not NamedOptimizer.
    So the parameter keys will be parameter ids.
    """
    # Build the forward (key -> parameter) mapping, then invert it. Insertion
    # order is preserved, so duplicate parameters resolve identically.
    key_to_param = _get_param_key_to_param(
        optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn
    )
    return dict(zip(key_to_param.values(), key_to_param.keys()))
|
| 1132 |
+
|
| 1133 |
+
def _get_param_to_param_id_from_optim_input(
    model: nn.Module,
    optim_input: Optional[
        Union[
            List[Dict[str, Any]],
            Iterable[nn.Parameter],
        ]
    ] = None,
) -> Dict[nn.Parameter, int]:
    """Constructs the inverse mapping of :func:`_get_param_id_to_param_from_optim_input`."""
    # Invert the ID -> parameter mapping produced by the forward helper;
    # iteration order of `.values()`/`.keys()` is aligned, so this is an
    # exact inversion.
    id_to_param = _get_param_id_to_param_from_optim_input(model, optim_input)
    return dict(zip(id_to_param.values(), id_to_param.keys()))
|
| 1146 |
+
|
| 1147 |
+
def _check_missing_keys_on_rank(
    r0_optim_state_keys: List[_OptimStateKey],
    optim_state_key_to_param_key: Dict[_OptimStateKey, Union[str, int]],
    param_key_to_param: Dict[Union[str, int], nn.Parameter],
    group: Optional[dist.ProcessGroup],
) -> None:
    """
    Verifies that this rank's optimizer has (at least) every optimizer state
    key that rank 0's optimizer has, raising ``RuntimeError`` on all ranks
    otherwise with the offending ranks and parameter names.

    This is a collective call: every rank in ``group`` must enter it.
    """
    # Ensure that all ranks have at least the optimizer states needed by
    # rank 0's optimizer
    missing_keys: List[_OptimStateKey] = []
    for r0_optim_state_key in r0_optim_state_keys:
        if r0_optim_state_key not in optim_state_key_to_param_key:
            # A parameter from rank 0's optimizer does not exist for this
            # rank's optimizer
            missing_keys.append(r0_optim_state_key)
            continue
        param_key = optim_state_key_to_param_key[r0_optim_state_key]
        if isinstance(param_key, int):
            # Integer keys are positional parameter IDs, so they must be a
            # valid index into `param_key_to_param`.
            assert param_key >= 0 and param_key < len(
                param_key_to_param
            ), "Check the `param_key_to_param` construction"
    # We cannot use FSDPState.compute_device as this API is a global view.
    device = _get_pg_default_device(group)
    num_missing = torch.tensor([len(missing_keys)], dtype=torch.int32, device=device)
    # Sum per-rank missing counts so every rank agrees on whether any rank
    # is missing states (and hence whether to raise).
    dist.all_reduce(num_missing, group=group)
    if num_missing.item() > 0:
        obj_list = [None for _ in range(dist.get_world_size(group))]
        # Gather every rank's missing keys so the error can name them all.
        dist.all_gather_object(obj_list, missing_keys, group=group)
        error_msg = (
            "FSDP currently requires each rank to have at least the "
            "optimizer states needed by rank 0's optimizer but some ranks "
            "are missing some of those states"
        )
        for rank, keys in enumerate(obj_list):
            keys = cast(List[_OptimStateKey], keys)
            if len(keys) > 0:
                error_msg += (
                    f"\nRank {rank} is missing states for the parameters: "
                    f"{[key.unflat_param_names for key in keys]}"
                )
        raise RuntimeError(error_msg)
|
| 1188 |
+
|
| 1189 |
+
def _map_param_key_to_optim_keys(
    optim_state_dict: Dict[str, Any],
    group: Optional[dist.ProcessGroup],
    param_key_to_param: Dict[Union[int, str], nn.Parameter],
    param_to_fqns: Dict[nn.Parameter, List[str]],
    fqn_to_fsdp_param_info: Dict[str, FSDPParamInfo],
    merge_keys: bool = False,
) -> Tuple[List[_OptimStateKey], Dict[_OptimStateKey, Union[int, str]]]:
    """
    Construct the local mapping between the ``_OptimStateKey`` and parameter keys
    and all the ``_OptimStateKey`` across ranks. If ``merge_keys`` is False, rank0
    must contain all the ``_OptimStateKey``, an exception will be raised otherwise.
    Note that ``merge_keys`` should equal to ``use_orig_params``.
    """
    rank = dist.get_rank(group)
    optim_state_key_to_param_key: Dict[_OptimStateKey, Union[int, str]] = {}  # local
    all_optim_state_keys: List[_OptimStateKey] = []

    for param_key, param in param_key_to_param.items():
        # Do not include parameters without state to avoid empty mappings
        # just like in normal `torch.optim.Optimizer.state_dict()`
        if param_key not in optim_state_dict["state"]:
            continue
        fqns = param_to_fqns[param]
        is_fsdp_managed = isinstance(param, FlatParameter)
        if is_fsdp_managed:
            # A FlatParameter's first FQN must be registered with an FSDP
            # instance; the assert payload aids debugging on failure.
            assert fqns[0] in fqn_to_fsdp_param_info, (
                fqns[0],
                list(fqn_to_fsdp_param_info.keys()),
            )
        # Re-derive from the FQN lookup so original (non-flat) parameters
        # managed by FSDP (use_orig_params=True) are also marked as managed.
        is_fsdp_managed = fqns[0] in fqn_to_fsdp_param_info
        optim_state_key = _OptimStateKey(
            unflat_param_names=tuple(fqns),
            is_fsdp_managed=is_fsdp_managed,
        )
        # With merge_keys, every rank contributes its keys; otherwise only
        # rank 0's keys form the global list (broadcast below).
        if rank == 0 or merge_keys:
            all_optim_state_keys.append(optim_state_key)
        optim_state_key_to_param_key[optim_state_key] = param_key

    if merge_keys:
        # Union of every rank's keys, deduplicated and sorted so all ranks
        # agree on a deterministic global order.
        all_keys: List[List[_OptimStateKey]] = [
            [] for _ in range(dist.get_world_size(group))
        ]
        dist.all_gather_object(all_keys, all_optim_state_keys, group=group)
        merge_all_optim_state_keys = [
            key for local_keys in all_keys for key in local_keys
        ]
        all_optim_state_keys = sorted(set(merge_all_optim_state_keys))
    else:
        # Rank 0 is the source of truth; broadcast its keys and verify no
        # rank is missing any of them.
        key_obj_list: List[Optional[List[_OptimStateKey]]] = (
            [all_optim_state_keys] if rank == 0 else [None]
        )
        dist.broadcast_object_list(key_obj_list, src=0, group=group)
        assert key_obj_list[0] is not None
        all_optim_state_keys = key_obj_list[0]
        _check_missing_keys_on_rank(
            all_optim_state_keys,
            optim_state_key_to_param_key,
            param_key_to_param,
            group,
        )

    return all_optim_state_keys, optim_state_key_to_param_key
|
| 1253 |
+
|
| 1254 |
+
def _unflatten_param_groups(
|
| 1255 |
+
state_dict: Dict[str, Any],
|
| 1256 |
+
param_key_to_param: Dict[Union[int, str], nn.Parameter],
|
| 1257 |
+
param_to_fqns: Dict[nn.Parameter, List[str]],
|
| 1258 |
+
) -> List[Dict[str, Any]]:
|
| 1259 |
+
param_groups: List[Dict[str, Any]] = []
|
| 1260 |
+
for flat_param_group in state_dict["param_groups"]:
|
| 1261 |
+
unflat_param_group = copy.deepcopy(flat_param_group)
|
| 1262 |
+
param_group_params = [
|
| 1263 |
+
param_key_to_param[flat_param_key]
|
| 1264 |
+
for flat_param_key in flat_param_group["params"]
|
| 1265 |
+
]
|
| 1266 |
+
nested_unflat_param_names = [
|
| 1267 |
+
param_to_fqns[param] for param in param_group_params
|
| 1268 |
+
]
|
| 1269 |
+
unflat_param_group["params"] = [
|
| 1270 |
+
unflat_param_name
|
| 1271 |
+
for unflat_param_names in nested_unflat_param_names
|
| 1272 |
+
for unflat_param_name in unflat_param_names
|
| 1273 |
+
] # flatten the list of lists
|
| 1274 |
+
param_groups.append(unflat_param_group)
|
| 1275 |
+
return param_groups
|
| 1276 |
+
|
| 1277 |
+
|
| 1278 |
+
def _is_named_optimizer(optim_state_dict: Dict[str, Any]) -> bool:
|
| 1279 |
+
"""
|
| 1280 |
+
Returns whether the state_dict is from a NamedOptimizer.
|
| 1281 |
+
This function checks that the keys in the state_dict['state'] are strings
|
| 1282 |
+
(which usually are FQNs) versus integers (which usually refer to param_ids
|
| 1283 |
+
from a vanilla torch.optim.Optimizer).
|
| 1284 |
+
"""
|
| 1285 |
+
state = optim_state_dict.get("state", None)
|
| 1286 |
+
if not state:
|
| 1287 |
+
# If we cannot find a state, assume it is not NamedOptimizer as
|
| 1288 |
+
# NamedOptimizer has eager initialization.
|
| 1289 |
+
return False
|
| 1290 |
+
try:
|
| 1291 |
+
key = next(iter(state.keys()))
|
| 1292 |
+
except Exception as e:
|
| 1293 |
+
raise Exception(optim_state_dict) from e
|
| 1294 |
+
return isinstance(key, str)
|
| 1295 |
+
|
| 1296 |
+
|
| 1297 |
+
@dataclass
class StateInfo:
    """
    Per-parameter optimizer-state metadata exchanged across ranks via
    ``all_gather_object`` so that no non-scalar GPU tensors are sent.
    """

    # The key of these dictionaries are the state name, e.g., `exp_avg`.
    # Shape/dtype metadata for positive-dimension (non-scalar) tensor states.
    tensors: Dict[str, _PosDimTensorInfo]
    # Zero-dim tensor states (e.g. `step`), moved to CPU before gathering.
    scalar_tensors: Dict[str, torch.Tensor]
    # Non-tensor state values, sent by value.
    non_tensors: Dict[str, Any]
|
| 1304 |
+
|
| 1305 |
+
def _allgather_state_info(
    fsdp_state: _FSDPState,
    input_states: Dict[str, Any],
) -> List[Dict[str, StateInfo]]:
    """
    Given the ``input_states``, allgather StateInfo for each state. The function
    uses all_gather_object to gather StateInfo so no GPU tensors are sent.

    Returns one ``{fqn: StateInfo}`` dict per rank, indexed by rank.
    """

    processed_state_dict: Dict[str, StateInfo] = {}
    gathered_state_info: List[Dict[str, StateInfo]] = [
        {} for _ in range(fsdp_state.world_size)
    ]

    for fqn, optim_state in input_states.items():
        # Allgather the scalar tensor state, non-tensor states and tensors metadata.
        processed_state = StateInfo({}, {}, {})
        # Iterate in sorted order so all ranks build StateInfo identically.
        for state_name, value in sorted_items(optim_state):
            if torch.is_tensor(value):
                if value.dim() == 0:
                    # Ensure that `step` is on CPU.
                    processed_state.scalar_tensors[state_name] = value.cpu()
                else:
                    # Non-scalar tensors: send only shape/dtype metadata; the
                    # actual data is gathered later via tensor collectives.
                    processed_state.tensors[state_name] = _PosDimTensorInfo(
                        value.shape, value.dtype
                    )
            else:
                processed_state.non_tensors[state_name] = value
        processed_state_dict[fqn] = processed_state
    dist.all_gather_object(
        gathered_state_info,
        processed_state_dict,
        group=fsdp_state.process_group,
    )
    return gathered_state_info
|
| 1341 |
+
|
| 1342 |
+
def _convert_all_state_info(
    fsdp_param_info: FSDPParamInfo,
    gathered_state_info: List[Dict[str, StateInfo]],
    input_states: Dict[str, Any],
    output_states: Dict[str, Dict[str, Any]],
) -> Tuple[Optional[torch.dtype], Dict[str, List[Optional[torch.Tensor]]]]:
    """
    Given the ``gathered_state_info`` and ``input_states``, the API converted
    the StateInfo into the original state if the state is not a non-scalar
    tensor. For a multi-dimensional tensor, the local state will be stored in
    ``state_buffer`` in a correct order for later allgather purpose.

    Scalar-tensor and non-tensor states are written directly into
    ``output_states`` (mutated in place); the returned ``state_buffers`` maps
    each state name to per-parameter local tensors (or None where this rank
    holds no data). The returned dtype is the common dtype of all non-scalar
    tensor states.
    """

    state_buffers: Dict[str, List[Optional[torch.Tensor]]] = {}

    for fqn, gathered_state in output_states.items():
        # This rank's view of every rank's StateInfo for this parameter.
        state_info = [s[fqn] for s in gathered_state_info]
        all_tensor_states = sorted(
            {n for state in state_info for n in state.tensors.keys()}
        )
        empty_ranks: Set[int] = set()
        dtype: Optional[torch.dtype] = None
        # First check all the non-scalar states and get the information of
        # states on each rank.
        for state_name in all_tensor_states:
            numels = []
            _empty_ranks: Set[int] = set()
            for rank, object_state in enumerate(state_info):
                numels.append(0)
                info = object_state.tensors.get(state_name, None)
                if info is not None:
                    numels[-1] = info.shape.numel()
                    # All non-scalar tensor states must share one dtype.
                    if not dtype:
                        dtype = info.dtype
                    else:
                        assert dtype == info.dtype
                if numels[-1] == 0:
                    _empty_ranks.add(rank)

            # The set of ranks with no data must be consistent across all
            # state names of this parameter.
            assert not empty_ranks or empty_ranks == _empty_ranks
            empty_ranks = _empty_ranks
            if state_name not in state_buffers:
                state_buffers[state_name] = [
                    None for _ in fsdp_param_info.param_indices
                ]
            local_state = input_states[fqn].get(state_name, None)
            # N.B. We need to move the state to compute_device. The reason is
            # not yet clear and we need to figure out why the state may be on a
            # different device.
            if local_state is not None:
                local_state = local_state.to(fsdp_param_info.state.compute_device)
            state_buffers[state_name][fsdp_param_info.param_indices[fqn]] = local_state

        # Restoring the scalar and non-tensor states. If the corresponding
        # non-scalar states do not exist on the rank, we also skip the scalar
        # non-tensor states on that rank.
        for rank, object_state in enumerate(state_info):
            if rank in empty_ranks:
                continue
            for name, non_tensor_value in object_state.non_tensors.items():
                curr_non_tensor_value = gathered_state.get(name, None)
                # Every contributing rank must agree on non-tensor values.
                assert (
                    curr_non_tensor_value is None
                    or curr_non_tensor_value == non_tensor_value
                ), (
                    f"Rank {rank} has different values for {name}: {non_tensor_value}."
                    + f" Other ranks: {curr_non_tensor_value}"
                )
                gathered_state[name] = non_tensor_value

            for name, scalar_tensor_value in object_state.scalar_tensors.items():
                curr_scalar_tensor_value = gathered_state.get(name, None)
                # Every contributing rank must agree on scalar tensor values.
                assert curr_scalar_tensor_value is None or torch.equal(
                    scalar_tensor_value, curr_scalar_tensor_value
                ), (
                    f"Rank {rank} has different values for {name}: {scalar_tensor_value}."
                    + f" Other ranks: {curr_scalar_tensor_value}"
                )
                gathered_state[name] = scalar_tensor_value

    # `dtype` is bound inside the loop; if `output_states` is empty this
    # would raise — upstream suppresses the checker instead.
    return dtype, state_buffers  # type: ignore[possibly-undefined]
|
| 1424 |
+
|
| 1425 |
+
def _unflatten_orig_param_states(
    fsdp_param_info: FSDPParamInfo,
    output_states: Dict[str, Dict[str, Any]],
    state_name: str,
    shard_state: bool,
    to_save: bool,
    cpu_offload: bool,
) -> None:
    """
    Given a output state dict, ``output_states``, which the keys are FQNs to the
    original parameters (not FlatParameters nor parmeter ID), and the values
    are gathered states, unflatten the states to the original dimensions.

    This function performs the unflattening process in-place.
    """
    if not to_save:
        return
    flat_param = fsdp_param_info.handle.flat_param
    fsdp_state = fsdp_param_info.state
    for fqn, gathered_state in output_states.items():
        value = gathered_state[state_name]
        param_idx = fsdp_param_info.param_indices[fqn]

        # TODO: This solution is not general and only apply to PTD TP solution.
        if isinstance(value, DTensor):
            placement = value.placements[0]
            # If gathered state is a DTensor and its TP placement is not Replicate(), we need to
            # gather the tensor on its TP dimension before chunking them into DTensor again.
            if placement != Replicate():
                placement_dim = placement.dim  # type: ignore[attr-defined]
                # NOTE(review): `value_local` is assigned but never used; the
                # redistribute result appears dropped — confirm upstream intent.
                value_local = value.redistribute(placements=(Replicate(),))
                reshape_size = list(flat_param._shapes[param_idx])
                # Scale the sharded dim by the mesh size to get the full shape.
                reshape_size[placement_dim] *= value.device_mesh.size(0)
                reshape_size = torch.Size(reshape_size)
                value = value.reshape(reshape_size)
            # If gathered state is a replicate DTensor, we directly reshape it.
            else:
                value = value.reshape(flat_param._shapes[param_idx])
        else:
            # If gathered state is a tensor, we directly reshape it into unflatten state.
            value = value.reshape(flat_param._shapes[param_idx])

        if shard_state:
            osd_config = fsdp_state._optim_state_dict_config
            if getattr(osd_config, "_use_dtensor", False):
                # Re-chunk the full state as a DTensor over the device mesh.
                assert fsdp_state._device_mesh is not None
                value = _ext_chunk_dtensor(
                    value,
                    fsdp_state.rank,
                    fsdp_state._device_mesh,
                    fsdp_state._fsdp_extension,
                )
            else:
                # Re-chunk the full state as a ShardedTensor over the group.
                assert fsdp_state.process_group is not None
                value = _ext_chunk_tensor(
                    value,
                    fsdp_state.rank,
                    fsdp_state.world_size,
                    fsdp_state._device_handle.device_count(),
                    fsdp_state.process_group,
                    fsdp_state._fsdp_extension,
                )
        elif not cpu_offload:
            # Detach from the gathered buffer so the saved state owns its
            # storage.
            with SimpleProfiler.profile("clone"):
                value = value.detach().clone()

        if cpu_offload:
            with SimpleProfiler.profile(SimpleProfiler.Type.D2H):
                value = value.cpu()
        gathered_state[state_name] = value
|
| 1496 |
+
|
| 1497 |
+
def _allgather_orig_param_states(
    fsdp_param_info: FSDPParamInfo,
    gathered_state_info: List[Dict[str, StateInfo]],
    input_states: Dict[str, Any],
    shard_state: bool,
    to_save: bool,
    cpu_offload: bool,
) -> Dict[str, Dict[str, Any]]:
    """
    Given the ``gathered_state_info`` and ``input_states``, the API allgathers
    all tensor states and restore non-tensor states from ``gathered_state_info``.

    This is a collective call (one ``all_gather_into_tensor`` per state name);
    every rank in the FSDP process group must enter it.
    """
    fsdp_state = fsdp_param_info.state
    if fsdp_state.rank == 0 and dist.get_debug_level() == dist.DebugLevel.DETAIL:
        logger.warning(
            "CUDA Memory Summary before calling to _allgather_orig_param_states %s",
            torch.cuda.memory_summary(),
        )

    output_states: Dict[str, Dict[str, Any]] = {fqn: {} for fqn in input_states.keys()}

    # Fills scalar/non-tensor states into `output_states` and collects local
    # non-scalar tensors per state name.
    dtype, state_buffers = _convert_all_state_info(
        fsdp_param_info, gathered_state_info, input_states, output_states
    )

    if len(state_buffers) == 0:
        # No non-scalar tensor states anywhere; nothing to allgather.
        return output_states

    has_state_params: List[bool] = [
        True if fqn in output_states else False
        for fqn, idx in fsdp_param_info.param_indices.items()
    ]

    # Loop through the ``state_buffers`` and construct the flattened, concatenated,
    # sharded states. The size of the constructed state will be the same size as
    # flat_param (also sharded).
    # Then we perform an allgather_into_tensor to get the full flat_param state.
    # The full flat_param state is the result of concatenation of multiple states
    # the order of of flat_param._fqns.
    # The final step is to split the flat_param state into original param states
    # and return the result.
    flat_param = fsdp_param_info.handle.flat_param
    empty_func = functools.partial(
        torch.empty, dtype=dtype, device=fsdp_state.compute_device
    )
    gathered_tensor = empty_func(flat_param._padded_unsharded_size)
    # Synchronize can be slow but this will be easier for us to debug.
    torch.cuda.synchronize()
    for state_name, buffers in state_buffers.items():
        local_buffers: List[torch.Tensor] = []
        # [begin, end] is this rank's element range within the padded
        # unsharded flat parameter.
        begin = fsdp_state.rank * flat_param._sharded_size.numel()
        # End is inclusive.
        end = begin + flat_param._sharded_size.numel() - 1
        # param_idx corresponds to the parameter index in the FlatParameter.
        mem_offset, param_idx = 0, 0
        for numel, is_padding in zip(
            flat_param._numels_with_padding, flat_param._is_padding_mask
        ):
            frozen_and_no_state = not is_padding and (
                not fsdp_param_info.param_requires_grad[param_idx]
                and not has_state_params[param_idx]
            )

            if is_padding or frozen_and_no_state:
                # This memory range is a padding or the param is frozen and does
                # not require gradient. For the later case, we treat it as a
                # padding and add empty values to the local_buffers.

                padding_begin, padding_end = mem_offset, mem_offset + numel - 1
                if padding_begin <= begin <= padding_end:
                    # The range is an align padding before the first parameter in
                    # the shard. The shard includes parts of this align padding.
                    padding_len = (
                        padding_end - begin + 1
                        if end >= padding_end
                        else end - begin + 1
                    )
                elif padding_begin <= end <= padding_end:
                    # The range is an align padding after the last parameter in
                    # the shard. The shard includes parts of this align padding.
                    padding_len = (
                        end - padding_begin + 1
                        if begin <= padding_begin
                        else end - begin + 1
                    )
                elif begin < padding_begin <= padding_end < end:
                    # The range is an align padding that is completely in the
                    # shard.
                    padding_len = numel
                else:
                    padding_len = 0
                if padding_len:
                    # Contents are irrelevant; only the length matters for
                    # alignment of the concatenated shard.
                    local_buffers.append(empty_func(padding_len))

            if not is_padding:
                # This memory range is a parameter in FlatParameter. So there
                # should be an corresponding state in the optimizer unless the
                # parameter is frozen, which we treat it as a padding above.

                # We need to check if this rank owns the buffer. If this is None:
                # 1.) the rank does not own any part of the original parameter.
                #     As a result, there is no corresponding optimizer state on
                #     the rank as well.
                # 2.) the parameter is frozen AND no optimizer state for the
                #     parameter. If a parameter is frozen, there can still be
                #     optimizer state if the parameter is not frozen in the
                #     previous steps.
                if buffers[param_idx] is not None:
                    local_buffers.append(cast(torch.Tensor, buffers[param_idx]))
                param_idx += 1

            mem_offset += numel

        shard_numel_padded = flat_param._sharded_size.numel() - (
            sum(t.numel() for t in local_buffers)
        )

        # Cross-check the reconstruction against FlatParameter's own metadata.
        assert flat_param._shard_numel_padded == shard_numel_padded, (
            "Manually calculated _sharded_numel_padded is incorrect. "
            f"_shard_numel_padded={flat_param._shard_numel_padded}, "
            f"shard_numel_padded={shard_numel_padded}, "
            f"_sharded_size.numel={flat_param._sharded_size.numel()}, "
            f"_numels_with_padding={flat_param._numels_with_padding}, "
            f"begin={begin}, end={end},"
        )
        if shard_numel_padded > 0:
            # Add right-handed padding.
            local_buffers.append(empty_func(shard_numel_padded))
        local_shard = torch.cat(local_buffers)
        assert local_shard.numel() * fsdp_state.world_size == gathered_tensor.numel(), (
            "The size of local shard times the world size should equal to the "
            "gathered tensor size. The inconsistency may be from a bug of "
            "FlatParameter's metadata or the reconstruction logic in optimizer "
            "state dict."
        )
        torch.cuda.synchronize()
        with SimpleProfiler.profile(SimpleProfiler.Type.ALLGATHER):
            dist.all_gather_into_tensor(
                gathered_tensor, local_shard, group=fsdp_state.process_group
            )
        # Synchronize can be slow but this will be easier for us to debug.
        torch.cuda.synchronize()

        # Drop trailing padding, then split the full flat state back into
        # per-parameter views in FlatParameter order.
        unpadded_tensor = gathered_tensor[: flat_param._unpadded_unsharded_size.numel()]
        flat_param_handle = fsdp_param_info.handle
        orig_states = flat_param_handle._get_unflat_views_aligned(unpadded_tensor)
        assert len(orig_states) == len(fsdp_param_info.param_indices), (
            "The number of parameters from FlatParameter is not consistent to "
            "the number of states used by optimizer state dict reconstruction "
            "logic."
        )
        for fqn, idx in fsdp_param_info.param_indices.items():
            if fsdp_param_info.param_requires_grad[idx] or fqn in output_states:
                output_states[fqn][state_name] = orig_states[idx]

        # Reshape/re-shard (and optionally CPU-offload) the gathered state
        # for this state name, in place.
        _unflatten_orig_param_states(
            fsdp_param_info,
            output_states,
            state_name,
            shard_state,
            to_save,
            cpu_offload,
        )

    # Release the large gather buffer promptly.
    del gathered_tensor
    return output_states
|
| 1664 |
+
|
| 1665 |
+
def _gather_all_orig_param_state(
    fsdp_param_info: FSDPParamInfo,
    input_states: Dict[str, Any],
    shard_state: bool,
    to_save: bool,
    cpu_offload: bool,
) -> Dict[str, Any]:
    """
    Given a optimizer state dict, ``input_states``, which the keys are FQNs to the
    original parameters (not FlatParameters nor parmeter ID), gather all the
    states and unflatten them to the original dimensions. Note that all the
    params referred by the ``input_states`` must be managed by FSDP.
    """
    fsdp_state = fsdp_param_info.state
    if (
        fsdp_state.world_size == 1
        or fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD
    ):
        # Nothing is sharded; the local states already are the full states.
        return input_states if to_save else {}

    with SimpleProfiler.profile(SimpleProfiler.Type.RESHARDING):
        # Exchange state layouts (shapes/dtypes/scalars) first, then gather
        # the actual tensor data.
        with SimpleProfiler.profile(SimpleProfiler.Type.ALLGATHER_OBJ):
            gathered_state_info = _allgather_state_info(fsdp_state, input_states)
        output_states = _allgather_orig_param_states(
            fsdp_param_info,
            gathered_state_info,
            input_states,
            shard_state,
            to_save,
            cpu_offload,
        )
    if to_save:
        # Every trainable parameter managed by this FSDP instance must have
        # produced an output state; frozen parameters are allowed to be absent.
        for key, idx in fsdp_param_info.param_indices.items():
            if key in output_states:
                continue
            if not fsdp_param_info.param_requires_grad[idx]:
                continue

            raise RuntimeError(
                f"{key} is not in the output state. "
                "The FSDPParamInfo has the param keys "
                f"{sorted(fsdp_param_info.param_indices.keys())} while "
                "the output_states has the param keys "
                f"{sorted(output_states.keys())}."
            )
        return output_states
    else:
        return {}
|
| 1714 |
+
|
| 1715 |
+
def _convert_state_with_orig_params(
    all_optim_state_keys: List[_OptimStateKey],
    optim_state_key_to_param_key: Dict[_OptimStateKey, Union[int, str]],
    fqn_to_fsdp_param_info: Dict[str, FSDPParamInfo],
    optim_state_dict: Dict[Union[str, int], Any],
    to_save: bool,
    shard_state: bool,
    cpu_offload: bool = True,
) -> Dict[str, Any]:
    """
    Convert an optimizer state dict to one keyed by original-parameter FQNs
    for the ``use_orig_params=True`` case.

    FSDP-managed entries are first grouped per ``FSDPParamInfo`` and then
    gathered in one batch via ``_gather_all_orig_param_state``; non-FSDP
    entries are shallow-copied through (optionally offloaded to CPU).
    Returns the converted ``"state"`` mapping (empty on ranks that do not
    save when ``to_save`` is False for non-managed keys).
    """
    fsdp_osd_state: Dict[str, Any] = {}
    # This variable is used to deduplicate the FSDPParamInfo as one FSDPParamInfo
    # usually corresponds to multiple parameters. We could not use FSDPParamInfo
    # as the key because FSDPParamInfo is not hashable. As a result, we fall back
    # to `id(FSDPParamInfo)`, whose type is an integer.
    all_states: Dict[int, Dict[str, Any]] = {}
    # Iterate in rank 0's flat parameter ID order to ensure aligned all-gathers
    # across ranks
    for optim_state_key in all_optim_state_keys:
        param_key: Union[str, int, None] = optim_state_key_to_param_key.get(
            optim_state_key, None
        )

        # No local state and not FSDP-managed: nothing to contribute.
        if param_key is None and not optim_state_key.is_fsdp_managed:
            continue

        if optim_state_key.is_fsdp_managed:
            fqn = optim_state_key.unflat_param_names[0]
            fsdp_param_info = fqn_to_fsdp_param_info.get(fqn, None)
            if fsdp_param_info is None:
                # This can happen if not all FSDP instances have all the
                # parameters. This can happen with FSDP + some MPMD style
                # parallelism.

                # TODO: it is unclear if we need to do the same check with
                # non-FSDP managed keys.
                continue
            # An empty dict is a valid placeholder: this rank may hold no
            # state for a parameter that is sharded onto other ranks.
            state = {} if param_key is None else optim_state_dict[param_key]
            if id(fsdp_param_info) not in all_states:
                all_states[id(fsdp_param_info)] = {}
            all_states[id(fsdp_param_info)][fqn] = state

        elif to_save:
            assert len(optim_state_key.unflat_param_names) == 1
            unflat_param_name = optim_state_key.unflat_param_names[0]
            with SimpleProfiler.profile("none_fsdp_managed_copy"):
                param_key = cast(Union[str, int], param_key)
                fsdp_osd_state[unflat_param_name] = copy.copy(
                    optim_state_dict[param_key]
                )
                if cpu_offload:
                    for state_name, value in sorted_items(
                        fsdp_osd_state[unflat_param_name]
                    ):
                        if not torch.is_tensor(value):
                            continue
                        fsdp_osd_state[unflat_param_name][state_name] = value.cpu()

    # Instead of gathering the state of each parameter individually, we perform
    # the gathering all at once to speed up the process.
    for _all_states in all_states.values():
        fqn = next(iter(_all_states.keys()))
        fsdp_param_info = fqn_to_fsdp_param_info[fqn]
        assert len(fsdp_param_info.param_requires_grad) > 0, (
            "With use_orig_params, FSDPParamInfo should have requires_grad "
            "information. However, the length is zero."
        )
        # Sanity check: every trainable parameter managed by this handle must
        # have an entry; frozen parameters legitimately have no optim state.
        for key, idx in fsdp_param_info.param_indices.items():
            if key in _all_states:
                continue
            if not fsdp_param_info.param_requires_grad[idx]:
                continue
            raise RuntimeError(
                f"{key} is not in the optimizer state. "
                "The FSDPParamInfo has the param keys "
                f"{sorted(fsdp_param_info.param_indices.keys())} while "
                "the optimizer has the param keys "
                f"{sorted(_all_states.keys())}."
            )
        fsdp_osd_state.update(
            _gather_all_orig_param_state(
                fsdp_param_info,
                _all_states,
                shard_state,
                to_save,
                cpu_offload,
            )
        )

    return fsdp_osd_state
|
| 1804 |
+
|
| 1805 |
+
|
| 1806 |
+
def _convert_state_with_flat_params(
    all_optim_state_keys: List[_OptimStateKey],
    optim_state_key_to_param_key: Dict[_OptimStateKey, Union[int, str]],
    fqn_to_fsdp_param_info: Dict[str, FSDPParamInfo],
    optim_state_dict: Dict[Union[str, int], Any],
    to_save: bool,
    shard_state: bool,
    cpu_offload: bool = True,
) -> Dict[str, Any]:
    """
    Convert an optimizer state dict to one keyed by original-parameter FQNs
    for the ``use_orig_params=False`` (flat-parameter) case.

    Each FSDP-managed flat parameter's state is unflattened via
    ``_unflatten_optim_state`` and fanned out to its constituent unflat
    parameter names; non-FSDP entries are shallow-copied through (optionally
    offloaded to CPU). Returns the converted ``"state"`` mapping.
    """
    fsdp_osd_state: Dict[str, Any] = {}
    # Iterate in rank 0's flat parameter ID order to ensure aligned all-gathers
    # across ranks
    for optim_state_key in all_optim_state_keys:
        param_key: Union[str, int, None] = optim_state_key_to_param_key.get(
            optim_state_key, None
        )

        assert param_key is not None, (
            "If use_orig_params is False, we must be able to find the "
            f"corresponding param id. {optim_state_key} {param_key}"
        )

        if optim_state_key.is_fsdp_managed:
            # If there are multiple unflat_param_names (not use_orig_params),
            # they share the same FSDPParamInfo. So the first unflat_param_name
            # is sufficient to fetch the FSDPParamInfo.
            fqn = optim_state_key.unflat_param_names[0]
            fsdp_param_info = fqn_to_fsdp_param_info[fqn]
            unflat_state = _unflatten_optim_state(
                fsdp_param_info,
                optim_state_dict[param_key],
                to_save,
                shard_state,
                cpu_offload,
            )
            if to_save:
                # One unflattened state per original parameter in the flat
                # parameter, in the same order as the names.
                assert len(unflat_state) == len(optim_state_key.unflat_param_names)
                for unflat_param_name, unflat_param_state in zip(
                    optim_state_key.unflat_param_names,
                    unflat_state,
                ):
                    fsdp_osd_state[unflat_param_name] = unflat_param_state
        elif to_save:
            assert len(optim_state_key.unflat_param_names) == 1
            unflat_param_name = optim_state_key.unflat_param_names[0]
            fsdp_osd_state[unflat_param_name] = copy.copy(optim_state_dict[param_key])
            if cpu_offload:
                for state_name, value in sorted_items(
                    fsdp_osd_state[unflat_param_name]
                ):
                    if not torch.is_tensor(value):
                        continue
                    fsdp_osd_state[unflat_param_name][state_name] = value.cpu()

    return fsdp_osd_state
|
| 1861 |
+
|
| 1862 |
+
|
| 1863 |
+
@torch.no_grad()
def _optim_state_dict(
    model: nn.Module,
    optim: torch.optim.Optimizer,
    optim_state_dict: Dict[str, Any],
    optim_input: Optional[
        Union[
            List[Dict[str, Any]],
            Iterable[nn.Parameter],
        ]
    ],
    rank0_only: bool,
    shard_state: bool,
    group: Optional[dist.ProcessGroup],
    using_optim_input: bool,
    use_orig_params: bool = False,
    cpu_offload: bool = True,
) -> Dict[str, Any]:
    """
    Consolidates the optimizer state and returns it as a :class:`dict`
    following the convention of :meth:`torch.optim.Optimizer.state_dict`,
    i.e. with keys ``"state"`` and ``"param_groups"``.
    The flat parameters in ``FSDP`` modules contained in ``model`` are mapped
    back to their unflattened parameters.

    Parameter keys are not well-defined. For a regular optimizer, the optimizer
    state_dict contains a mapping from parameter IDs to parameter states.
    Parameter IDs are the order of parameters in ``optim.param_groups()`` across
    all the groups. This API also allows user to pass ``optim_input`` for the
    mapping between parameters and parameter IDs. Using ``optim_input`` is being
    deprecated.

    If the optimizer is a ``NamedOptimizer``, the optimizer state_dict does not
    contain parameter IDs mapping but a mapping from parameter FQNs to parameter
    states. This API finds the mapping from FQNs to parameters if the optimizer
    is a ``NamedOptimizer``.

    If ``use_orig_params`` is True, each rank will have all FSDP-managed
    parameters but some of these parameters may be empty due to the sharding.
    For a regular optim.Optimizer, states for those empty parameters will
    not be initialized. So, when aggregating the FQNs across ranks, no assert
    will be raised on a rank even if it does not have all the states -- it is
    valid and FSDP knows how to aggregate them. However, FSDP has to ignore
    handling those parameters that are not managed by FSDP and do not exist on
    the local rank -- those are managed by other parallelisms and FSDP does not
    know how to handle/aggregate them.

    Args:
        model (nn.Module): Root module (which may or may not be a
            :class:`FullyShardedDataParallel` instance) whose parameters
            were passed into the optimizer ``optim``.
        optim (torch.optim.Optimizer): Optimizer for ``model`` 's
            parameters.
        rank0_only (bool): If ``True``, saves the populated :class:`dict`
            only on rank 0; if ``False``, saves it on all ranks. (Default:
            ``True``)
        shard_state (bool): If ``True``, shard and distribute all
            non-zero-dimension states.

    Returns:
        Dict[str, Any]: A :class:`dict` containing the optimizer state for
        ``model`` 's original unflattened parameters and including keys
        "state" and "param_groups" following the convention of
        :meth:`torch.optim.Optimizer.state_dict`. If ``rank0_only=False``,
        then nonzero ranks return an empty :class:`dict`.
    """
    SimpleProfiler.reset()
    cm = ExitStack()
    cm.enter_context(SimpleProfiler.profile(SimpleProfiler.Type.ALL))
    _reset_flat_param_grad_info_if_needed(traversal_utils._get_fsdp_handles(model))
    # Sharded saves require every rank to participate; otherwise only rank 0
    # materializes output when `rank0_only` is set.
    to_save = not rank0_only or dist.get_rank(group) == 0 or shard_state

    with SimpleProfiler.profile("preprocessing"):
        param_to_fqns = _get_param_to_fqns(model)
        flat_param_to_fqn = _get_flat_param_to_fqn(model)
        is_named_optimizer = _is_named_optimizer(optim_state_dict)

        # Map optimizer param keys (IDs or FQNs) to the actual parameters;
        # `optim_input` is the deprecated path.
        param_key_to_param = cast(
            Dict[Union[int, str], nn.Parameter],
            (
                _get_param_id_to_param_from_optim_input(model, optim_input)
                if using_optim_input
                else _get_param_key_to_param(
                    optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn
                )
            ),
        )
        fqn_to_fsdp_param_info = _get_fqn_to_fsdp_param_info(model)

    with SimpleProfiler.profile("preprocessing_with_comm"):
        (
            all_optim_state_keys,
            optim_state_key_to_param_key,
        ) = _map_param_key_to_optim_keys(
            optim_state_dict,
            group,
            param_key_to_param,
            param_to_fqns,
            fqn_to_fsdp_param_info,
            merge_keys=use_orig_params,
        )

    with SimpleProfiler.profile("state_converting"):
        convert_fn = (
            _convert_state_with_orig_params
            if use_orig_params
            else _convert_state_with_flat_params
        )
        fsdp_osd_state = convert_fn(
            all_optim_state_keys,
            optim_state_key_to_param_key,
            fqn_to_fsdp_param_info,
            optim_state_dict["state"],
            to_save,
            shard_state,
            cpu_offload,
        )

    # At this point, communication is complete and ranks can return early if nothing
    # will be saved on that rank.
    if not to_save:
        return {}

    fsdp_osd: Dict[str, Any] = {"state": fsdp_osd_state}

    flat_param_fqns = set(flat_param_to_fqn.values())
    for key, value in optim_state_dict["state"].items():
        if key in fsdp_osd_state:
            continue
        if key in flat_param_fqns:
            continue
        if key in param_key_to_param:
            continue
        # This key is not recognized by FSDP. It may be a user-defined state
        # or some parameters state that FSDP is unable to map from
        # ``optim.param_groups``.
        warnings.warn(
            f"Found a optim state, {key}, that FSDP cannot process. FSDP "
            "will directly copy everything to the returned state_dict. In "
            "most cases, this is a user-defined state that is not "
            "associated with any particular parameter. Another possible "
            "case is this state is managed by TorchRec. Otherwise, there may "
            " be a mismatched assumption of optim_state_dict of this mode."
        )
        fsdp_osd_state[key] = value

    if "param_groups" in optim_state_dict:
        fsdp_osd["param_groups"] = _unflatten_param_groups(
            optim_state_dict, param_key_to_param, param_to_fqns
        )

    cm.close()
    SimpleProfiler.dump_and_reset("FSDP _optim_state_dict() profiling: ")

    return fsdp_osd
|
| 2018 |
+
|
| 2019 |
+
|
| 2020 |
+
def _get_fqn_to_fsdp_param_info(model: nn.Module) -> Dict[str, FSDPParamInfo]:
    """
    Construct the mapping from a param's fqn to its corresponding ``FSDPParamInfo``
    if the param is managed by FSDP. Shared parameters, or original parameters that
    are shared across multiple nn.Modules, are required to belong to one and only
    one FSDP instance and thus correspond to one ``FlatParameter``. Within the one
    ``FlatParameter``, ``FlatParameter._fqns`` only stores the first FQN of a shared
    parameter. Thus, the keys in the mapping are guaranteed to map to unique parameters.
    """

    def module_fn(module, prefix, tree_level, fqn_to_param_info):
        # Only FSDP-wrapped modules with an initialized handle contribute.
        fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module)
        if fsdp_state is None:
            return
        _lazy_init(fsdp_state, module)
        handle = _module_handle(fsdp_state, module)
        if not handle:
            return
        flat_param = handle.flat_param
        # One shared FSDPParamInfo per handle; its dict/list are filled below.
        fsdp_param_info = FSDPParamInfo(fsdp_state, handle, {}, [])
        # NOTE: `idx` indexes into the data structures *without* padding
        # elements
        for idx, local_fqn in enumerate(flat_param._fqns):
            fqn = clean_tensor_name(prefix + local_fqn)
            if fqn in fqn_to_param_info:
                # A duplicate FQN must come from the same flat parameter.
                assert fqn_to_param_info[fqn].handle.flat_param is flat_param, fqn
            fqn_to_param_info[fqn] = fsdp_param_info
            fsdp_param_info.param_indices[fqn] = idx
            # `_params` is only populated with use_orig_params=True.
            if flat_param._params is not None:
                fsdp_param_info.param_requires_grad.append(
                    flat_param._params[idx].requires_grad
                )

    def return_fn(fqn_to_param_info):
        return fqn_to_param_info

    fqn_to_param_info: Dict[str, FSDPParamInfo] = {}
    # FlatParameter._fqns stores the local fqn, starting from the root of the
    # FSDP. Using _apply_to_modules() with model (may not be the FSDP root
    # module) allows us to construct the global fqn.
    return _apply_to_modules(
        model,
        module_fn,
        return_fn,
        [fqn for fqn, _ in _named_parameters_with_duplicates(model)],
        fqn_to_param_info,
    )
|
| 2067 |
+
|
| 2068 |
+
|
| 2069 |
+
@no_type_check
def _set_optim_use_dtensor(
    fsdp_state: _FSDPState,
    state_dict_settings: StateDictSettings,
) -> None:
    """
    Enable DTensor-based sharded optimizer state dicts when FSDP was
    initialized with a device mesh.

    If ``fsdp_state`` carries a ``_device_mesh``, sets
    ``optim_state_dict_config._use_dtensor`` on ``state_dict_settings``.

    Raises:
        RuntimeError: If the configured state-dict type is
            ``LOCAL_STATE_DICT``, which is incompatible with DeviceMesh.
    """
    # If device_mesh is passed in when initializing FSDP, we automatically turn
    # the _use_dtensor flag to True for ShardedOptimStateDictConfig() since
    # state_dict_type has to be set to SHARDED_STATE_DICT.
    if getattr(fsdp_state, "_device_mesh", None):
        state_dict_type = state_dict_settings.state_dict_type
        if state_dict_type == StateDictType.LOCAL_STATE_DICT:
            # Bug fix: the message was previously passed as three separate
            # arguments, making the exception render as a tuple of strings
            # instead of one readable sentence.
            raise RuntimeError(
                "Found state_dict_type LOCAL_STATE_DICT. "
                "DeviceMesh is not compatible with LOCAL_STATE_DICT. "
                "Please set state_dict_type to SHARDED_STATE_DICT to get DTensor state_dict."
            )
        else:
            state_dict_settings.optim_state_dict_config._use_dtensor = True
|
moondream/lib/python3.10/site-packages/torch/distributed/fsdp/_wrap_utils.py
ADDED
|
@@ -0,0 +1,262 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
import functools
|
| 3 |
+
import inspect
|
| 4 |
+
import warnings
|
| 5 |
+
from functools import partial
|
| 6 |
+
from typing import Any, Callable, Dict, List, Set, Tuple, Type, Union
|
| 7 |
+
|
| 8 |
+
import torch.nn as nn
|
| 9 |
+
from torch.distributed.fsdp._common_utils import (
|
| 10 |
+
_get_module_fsdp_state,
|
| 11 |
+
_override_module_mixed_precision,
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
from torch.distributed.fsdp.wrap import (
|
| 15 |
+
_construct_wrap_fn,
|
| 16 |
+
_or_policy,
|
| 17 |
+
_Policy,
|
| 18 |
+
_post_order_apply,
|
| 19 |
+
_recursive_wrap,
|
| 20 |
+
_run_mixed_precision_override_policy,
|
| 21 |
+
_wrap_module_cls_individually,
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _auto_wrap(
    root_module: nn.Module,
    policy: Union[Callable, _Policy],
    ignored_modules: Set[nn.Module],
    ignored_params: Set[nn.Parameter],
    root_kwargs: Dict[str, Any],
    fsdp_fn: Callable,  # e.g. `FullyShardedDataParallel` or `fully_shard`
):
    """
    Auto wraps modules in ``root_module`` 's tree according to ``policy``
    following a post-order traversal.

    Precondition: ``root_kwargs`` should contain all arguments except
    ``module``. This function accepts the kwargs dict directly since it gets
    forwarded into the post-order traversal function.

    Two code paths exist: ``_Policy`` objects compute a full
    module->kwargs plan up front and apply it post-order, while plain
    callables go through the legacy ``_recursive_wrap`` path.
    """
    mixed_precision = root_kwargs["mixed_precision"]
    # Class-based `fsdp_fn` (wrapper style) uses the `auto_wrap_policy` kwarg
    # name; the composable API uses `policy`.
    is_wrapper = inspect.isclass(fsdp_fn)
    # TODO: We may relax this no-nested-wrapping constraint to support manual
    # wrapping followed by auto wrapping.
    _check_nested_wrapping(root_module)

    if isinstance(policy, _Policy):
        # Clear the policy kwarg so wrapped submodules do not re-run it.
        root_kwargs["auto_wrap_policy" if is_wrapper else "policy"] = None
        target_module_to_kwargs = policy._run_policy(
            root_module, ignored_modules, root_kwargs
        )
        if mixed_precision is not None:
            # Force-wrap ignored module classes with mixed precision disabled.
            target_module_to_kwargs = _run_mixed_precision_override_policy(
                root_module,
                mixed_precision._module_classes_to_ignore,
                ignored_modules,
                root_kwargs,
                target_module_to_kwargs,
            )
            overridden_module_classes = _override_module_mixed_precision(
                root_module, mixed_precision._module_classes_to_ignore
            )
            _warn_on_overridden_mixed_precision(overridden_module_classes)
        use_orig_params = root_kwargs.get("use_orig_params", False)
        _validate_frozen_params(
            root_module,
            set(target_module_to_kwargs.keys()),
            ignored_params,
            use_orig_params,
        )
        wrap_fn = _construct_wrap_fn(root_module, target_module_to_kwargs, fsdp_fn)
        _post_order_apply(root_module, wrap_fn)
        return

    # Legacy path for plain-callable policies.
    recursive_wrap_kwargs = {
        "module": root_module,
        "auto_wrap_policy": policy,
        "wrapper_cls": fsdp_fn,
        "ignored_modules": ignored_modules,
        "ignored_params": ignored_params,
        "only_wrap_children": True,
    }
    if mixed_precision is not None:
        # Wrap modules of the ignored types separately and register forward
        # hooks to cast to fp32 and back to the original dtype, respectively
        overridden_module_classes = _override_module_mixed_precision(
            root_module, mixed_precision._module_classes_to_ignore
        )
        policy = functools.partial(
            _or_policy,
            policies=[
                policy,
                partial(
                    _wrap_module_cls_individually,
                    module_classes=mixed_precision._module_classes_to_ignore,
                ),
            ],
        )
        recursive_wrap_kwargs["auto_wrap_policy"] = policy
        _warn_on_overridden_mixed_precision(overridden_module_classes)
    _recursive_wrap(**recursive_wrap_kwargs, **root_kwargs)  # type: ignore[arg-type]
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def _check_nested_wrapping(root_module: nn.Module):
    """Raise ``ValueError`` if any module in ``root_module``'s tree already has FSDP applied."""
    already_wrapped = (
        name
        for name, submodule in root_module.named_modules()
        if _get_module_fsdp_state(submodule) is not None
    )
    for module_name in already_wrapped:
        raise ValueError(
            "FSDP auto wrapping requires modules to not already have "
            f"FSDP applied but found {module_name} in\n{root_module}"
        )
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def _warn_on_overridden_mixed_precision(
|
| 114 |
+
overridden_module_classes: Set[Type[nn.Module]],
|
| 115 |
+
):
|
| 116 |
+
if len(overridden_module_classes) == 0:
|
| 117 |
+
return
|
| 118 |
+
warnings.warn(
|
| 119 |
+
"Both mixed precision and an auto_wrap_policy were specified to FSDP, "
|
| 120 |
+
f"where the wrapped module has submodules of type:\n{overridden_module_classes}\n"
|
| 121 |
+
"These modules will be wrapped as separate FSDP instacnes with mixed "
|
| 122 |
+
"precision disabled."
|
| 123 |
+
)
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def _validate_frozen_params(
    root_module: nn.Module,
    modules_to_wrap: Set[nn.Module],
    ignored_params: Set[nn.Parameter],
    use_orig_params: bool,
):
    """
    This checks that, given ``modules_to_wrap``, each module would manage
    parameters that are uniformly frozen or non-frozen. This uniformity
    requirement is strict for ``use_orig_params=False`` (hard error) and highly
    recommended for ``use_orig_params=True`` (user warning).
    """
    # Post-order traversal lets each wrapped module claim its managed params
    # before its ancestors (via the shared `visited_modules` set).
    post_order_named_modules = _get_post_order_named_modules(root_module)
    visited_modules: Set[nn.Module] = set()
    for module_name, module in post_order_named_modules:
        if module in modules_to_wrap:
            param_to_fqn = _get_managed_param_to_fqn(
                module, ignored_params, visited_modules, module_name
            )
            frozen_param_fqns: List[str] = []
            frozen_param_numel = 0
            nonfrozen_param_fqns: List[str] = []
            nonfrozen_param_numel = 0
            # Partition managed params into frozen/non-frozen, tracking numel
            # for the memory note in the warning below.
            for param, fqn in param_to_fqn.items():
                if param.requires_grad:
                    nonfrozen_param_fqns.append(fqn)
                    nonfrozen_param_numel += param.numel()
                else:
                    frozen_param_fqns.append(fqn)
                    frozen_param_numel += param.numel()
            if len(frozen_param_fqns) > 0 and len(nonfrozen_param_fqns) > 0:
                msg = f"{module_name} has both parameters with requires_grad=True and False."
                if use_orig_params:
                    total_param_numel = frozen_param_numel + nonfrozen_param_numel
                    msg += (
                        " We do not recommend wrapping such modules since "
                        "the gradient memory usage will be higher than expected "
                        f"({total_param_numel} numel instead of {nonfrozen_param_numel} numel "
                        "before sharding via reduce-scatter). "
                    )
                else:
                    msg += " FSDP does not support wrapping such modules when use_orig_params=False. "
                msg += "If possible, wrap the frozen parameters with FSDP separately.\n"
                msg += (
                    f"The following parameters have requires_grad=True:\n{nonfrozen_param_fqns}\n"
                    f"The following parameters have requires_grad=False:\n{frozen_param_fqns}"
                )
                # Soft warning with use_orig_params=True; hard error otherwise.
                if use_orig_params:
                    warnings.warn(msg)
                else:
                    raise ValueError(msg)
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def _get_post_order_named_modules(
|
| 180 |
+
root_module: nn.Module,
|
| 181 |
+
) -> List[Tuple[str, nn.Module]]:
|
| 182 |
+
"""
|
| 183 |
+
This returns the named modules following a post-order traversal, which is a
|
| 184 |
+
valid reverse topological sort. We achieve this using the reverse of a
|
| 185 |
+
stack-based DFS order instead of reversing ``root_module.named_modules()``
|
| 186 |
+
since the former gives the modules in registration order at each level in
|
| 187 |
+
the module tree (as opposed to the reverse), which allows us to error/warn
|
| 188 |
+
on the first registered module that violates the condition.
|
| 189 |
+
|
| 190 |
+
For example, consider the following module structure:
|
| 191 |
+
M(
|
| 192 |
+
S1(),
|
| 193 |
+
S2(
|
| 194 |
+
SS1(),
|
| 195 |
+
SS2(),
|
| 196 |
+
),
|
| 197 |
+
S3(),
|
| 198 |
+
)
|
| 199 |
+
The reverse DFS order is [S1, SS1, SS2, S2, S3, M], while the reverse
|
| 200 |
+
``named_modules()`` order is [S3, SS2, SS1, S2, S1, M].
|
| 201 |
+
"""
|
| 202 |
+
visited_modules = {root_module}
|
| 203 |
+
stack = [("", root_module)]
|
| 204 |
+
# Append and reverse at the end for linear-time algorithm
|
| 205 |
+
reverse_post_order_named_modules: List[Tuple[str, nn.Module]] = []
|
| 206 |
+
while stack:
|
| 207 |
+
module_name, module = stack.pop()
|
| 208 |
+
reverse_post_order_named_modules.append((module_name, module))
|
| 209 |
+
for child_module_name, child_module in module.named_children():
|
| 210 |
+
if child_module is None: # only for overrides of `named_children()`
|
| 211 |
+
continue
|
| 212 |
+
if child_module not in visited_modules:
|
| 213 |
+
visited_modules.add(child_module)
|
| 214 |
+
if module_name != "":
|
| 215 |
+
child_module_name = module_name + "." + child_module_name
|
| 216 |
+
stack.append((child_module_name, child_module))
|
| 217 |
+
post_order_named_modules = list(reversed(reverse_post_order_named_modules))
|
| 218 |
+
return post_order_named_modules
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
def _get_managed_param_to_fqn(
|
| 222 |
+
module_to_wrap: nn.Module,
|
| 223 |
+
ignored_params: Set[nn.Parameter],
|
| 224 |
+
visited_modules: Set[nn.Module],
|
| 225 |
+
root_prefix: str,
|
| 226 |
+
) -> Dict[nn.Parameter, str]:
|
| 227 |
+
"""
|
| 228 |
+
This returns a dict that maps managed parameter to its FQN for the given
|
| 229 |
+
``module_to_wrap``. The dict's keys are exactly the parameters that would
|
| 230 |
+
be managed by the module, where this is achieved by calling this function
|
| 231 |
+
on the modules to wrap in reverse topological order, destructively updating
|
| 232 |
+
``visited_modules``, and not traversing into those modules. The FQNs are
|
| 233 |
+
prefixed from the root (via ``root_prefix``) to be more informative.
|
| 234 |
+
|
| 235 |
+
NOTE: This function is meant to be called pre-wrapping and iteratively in
|
| 236 |
+
reverse topological order to cover the full module tree. This differs from
|
| 237 |
+
the ``_get_param_to_fqn()`` function meant to be called post-wrapping and
|
| 238 |
+
on the full module tree in one shot. Given those differences, we do not try
|
| 239 |
+
to unify the two.
|
| 240 |
+
"""
|
| 241 |
+
param_to_fqn: Dict[nn.Parameter, str] = {}
|
| 242 |
+
# Run BFS (or any tree traversal works)
|
| 243 |
+
queue = collections.deque([(module_to_wrap, root_prefix)])
|
| 244 |
+
visited_modules.add(module_to_wrap)
|
| 245 |
+
while queue:
|
| 246 |
+
module, prefix = queue.popleft()
|
| 247 |
+
for param_name, param in module.named_parameters(recurse=False):
|
| 248 |
+
if param not in ignored_params:
|
| 249 |
+
fqn = param_name if prefix == "" else prefix + "." + param_name
|
| 250 |
+
param_to_fqn[param] = fqn
|
| 251 |
+
for child_module_name, child_module in module.named_children():
|
| 252 |
+
if child_module is None: # only for overrides of `named_children()`
|
| 253 |
+
continue
|
| 254 |
+
if child_module not in visited_modules:
|
| 255 |
+
visited_modules.add(child_module)
|
| 256 |
+
child_prefix = (
|
| 257 |
+
child_module_name
|
| 258 |
+
if prefix == ""
|
| 259 |
+
else prefix + "." + child_module_name
|
| 260 |
+
)
|
| 261 |
+
queue.append((child_module, child_prefix))
|
| 262 |
+
return param_to_fqn
|
moondream/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/_utils.cpython-310.pyc
ADDED
|
Binary file (2.31 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/input_reshard.cpython-310.pyc
ADDED
|
Binary file (3.53 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/masked/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (895 Bytes). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (566 Bytes). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/binary.cpython-310.pyc
ADDED
|
Binary file (4.43 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/passthrough.cpython-310.pyc
ADDED
|
Binary file (1.49 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/reductions.cpython-310.pyc
ADDED
|
Binary file (5.12 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/passthrough.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates
|
| 2 |
+
"""
|
| 3 |
+
These are functions that should simply be applied to both mask and data.
|
| 4 |
+
Take select or stack as an example. This operation can be applied to
|
| 5 |
+
both the mask and data of a MaskedTensor and the result wrapped into
|
| 6 |
+
a new MaskedTensor as a result.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
|
| 11 |
+
from .core import _map_mt_args_kwargs, _wrap_result
|
| 12 |
+
|
| 13 |
+
__all__ = [] # type: ignore[var-annotated]
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
# Ops that are "passthrough" for MaskedTensor: applying the op independently
# to the data tensor and to the mask tensor, then re-wrapping the two results,
# yields the correct MaskedTensor output (see _apply_pass_through_fn).
PASSTHROUGH_FNS = [
    torch.ops.aten.select,
    torch.ops.aten.transpose,
    torch.ops.aten.split,
    torch.ops.aten.t,
    torch.ops.aten.slice,
    torch.ops.aten.slice_backward,
    torch.ops.aten.select_backward,
    torch.ops.aten.index,
    torch.ops.aten.expand,
    torch.ops.aten.view,
    torch.ops.aten._unsafe_view,
    torch.ops.aten._reshape_alias,
    torch.ops.aten.cat,
    torch.ops.aten.unsqueeze,
]
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def _is_pass_through_fn(fn):
    """Return True when ``fn`` is one of the registered passthrough ops."""
    return any(fn == candidate for candidate in PASSTHROUGH_FNS)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def _apply_pass_through_fn(fn, *args, **kwargs):
    """Apply ``fn`` separately to the data and the mask of every MaskedTensor
    argument, then wrap the two results back into a single MaskedTensor."""
    data_call = _map_mt_args_kwargs(args, kwargs, lambda x: x.get_data())
    mask_call = _map_mt_args_kwargs(args, kwargs, lambda x: x.get_mask())
    out_data = fn(*data_call[0], **data_call[1])
    out_mask = fn(*mask_call[0], **mask_call[1])
    return _wrap_result(out_data, out_mask)
|
moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/reductions.py
ADDED
|
@@ -0,0 +1,173 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates
|
| 2 |
+
|
| 3 |
+
import warnings
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
from .core import is_masked_tensor
|
| 8 |
+
from .creation import as_masked_tensor, masked_tensor
|
| 9 |
+
|
| 10 |
+
__all__ = [] # type: ignore[var-annotated]
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def _masked_all_all(data, mask=None):
    """Full ``all`` reduction; masked-out entries count as True."""
    # Filling invalid positions with True means they can never flip the
    # result to False.
    filled = data if mask is None else data.masked_fill(~mask, True)
    return filled.all()
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def _masked_all_dim(data, dim, keepdim=False, mask=None):
    """``all`` reduction along ``dim``; masked-out entries count as True."""
    filled = data if mask is None else data.masked_fill(~mask, True)
    return torch.all(filled, dim=dim, keepdim=keepdim)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _masked_all(*args, **kwargs):
    """Dispatch between the full and per-dim variants of masked ``all``.

    ``_masked_all(data, mask=m)`` is the full reduction; every other call
    shape is forwarded to the per-dim variant.
    """
    # Fix: require the lone kwarg to actually be "mask". Previously any
    # single kwarg (e.g. ``_masked_all(data, dim=0)``) took this branch and
    # raised a KeyError on ``kwargs["mask"]`` instead of dispatching to the
    # per-dim variant.
    if len(args) == 1 and set(kwargs) == {"mask"}:
        return _masked_all_all(args[0], mask=kwargs["mask"])
    return _masked_all_dim(*args, **kwargs)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def _multidim_any(mask, dim, keepdim):
    """Reduce ``mask`` with ``torch.any`` over one or several dims.

    Dims are reduced one at a time in descending order so that, with
    ``keepdim=False``, reducing a higher dim first does not shift the
    positions of the dims still to be reduced.

    Fix: negative dims are normalized before sorting. Previously a mix such
    as ``dim=[0, -1]`` sorted to ``[0, -1]`` and, once dim 0 was reduced
    without keepdim, the remaining reduction could target the wrong axis.
    """
    if isinstance(dim, int):
        dim = [dim]
    # Normalize negatives so the descending sort orders real axes correctly.
    dims = [d if d >= 0 else d + mask.ndim for d in dim]
    for d in sorted(dims, reverse=True):
        mask = torch.any(mask, dim=d, keepdim=keepdim)
    return mask
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def _get_masked_fn(fn):
    """Map a reduction name to its implementation.

    ``"all"`` is special-cased (torch.masked has no ``all``); every other
    name is looked up on ``torch.masked``.
    """
    return _masked_all if fn == "all" else getattr(torch.masked, fn)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def _torch_reduce_all(fn):
    """Build a full (no-dim) MaskedTensor reduction for the reduction named
    ``fn``. The returned closure reduces all elements and wraps the result
    with a scalar output mask ``torch.any(mask)`` (valid iff any input
    element was valid)."""

    def reduce_all(self):
        masked_fn = _get_masked_fn(fn)
        data = self.get_data()
        # For sparse inputs the mask tensor is itself sparse; its values()
        # line up with data.values().
        mask = self.get_mask().values() if self.is_sparse else self.get_mask()
        if fn == "all":
            result_data = masked_fn(data, mask=mask)

        # torch.argmin/torch.argmax need to return the index of the element
        # corresponding to the min/max, but this operation isn't supported
        # correctly for sparse layouts. Therefore, this implementation
        # reconstructs the flat index from the COO indices and the strides.
        elif fn in {"argmin", "argmax"} and self.is_sparse_coo():
            # Position of the winning element within the values() tensor.
            sparse_idx = masked_fn(data.values(), mask=mask).to(dtype=torch.int)
            indices = (
                data.to_sparse_coo().indices()
                if not self.is_sparse_coo()
                else data.indices()
            )
            # sparse_idx is a 0-dim tensor; tuple indexing goes through its
            # __index__, picking the winning column of the indices matrix.
            idx = indices.unbind(1)[sparse_idx]
            # stride[d] = numel / cumprod(sizes)[d], i.e. the number of
            # elements spanned by one step along dim d.
            # NOTE(review): this is float division, so result_data is a
            # float flat index — presumably consumed as such; confirm.
            stride = data.size().numel() / torch.tensor(
                data.size(), device=data.device
            ).cumprod(0)
            result_data = torch.sum(idx * stride)

        # we simply pass in the values for sparse COO/CSR tensors
        elif self.is_sparse:
            result_data = masked_fn(masked_tensor(data.values(), mask))

        else:
            result_data = masked_fn(self, mask=mask)

        return as_masked_tensor(result_data, torch.any(mask))

    return reduce_all
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def _torch_reduce_dim(fn):
    """Build a dim-wise MaskedTensor reduction for the reduction named ``fn``."""

    def reduce_dim(self, dim, keepdim=False, dtype=None):
        """Reduce along ``dim``; the output mask is any() of the input mask
        over the same dims."""
        if self.is_sparse:
            warnings.warn(
                f"The sparse version of {fn} is not implemented in reductions.\n"
                "If you would like this operator to be supported, please file an issue for a feature request at "
                "https://github.com/pytorch/maskedtensor/issues with a minimal reproducible code snippet.\n"
                "In the case that the semantics for the operator are not trivial, it would be appreciated "
                "to also include a proposal for the semantics."
            )
            return NotImplemented
        if not is_masked_tensor(self):
            raise TypeError("Input to reduce_dim must be a MaskedTensor")

        masked_fn = _get_masked_fn(fn)
        mask = self.get_mask()
        # "all" operates on the raw data (torch.masked has no `all`); every
        # other reduction accepts the MaskedTensor itself.
        if fn == "all":
            reduced = masked_fn(self.get_data(), dim=dim, keepdim=keepdim, mask=mask)
        else:
            reduced = masked_fn(self, dim=dim, keepdim=keepdim, dtype=dtype, mask=mask)
        return as_masked_tensor(reduced, _multidim_any(mask, dim, keepdim))

    return reduce_dim
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def _torch_reduce(fn):
    """Return a wrapper dispatching between full and dim-wise reduction."""

    def reduce_fn(*args, **kwargs):
        # A single positional arg and no kwargs means "reduce everything".
        is_full_reduction = len(args) == 1 and not kwargs
        if is_full_reduction:
            return _torch_reduce_all(fn)(args[0])
        return _torch_reduce_dim(fn)(*args, **kwargs)

    return reduce_fn
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def _reduce_dim_args(input, dim, keepdim=False, dtype=None):
    """Normalize positional/keyword reduction arguments to a fixed 4-tuple."""
    return (input, dim, keepdim, dtype)
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def _torch_grad_reduce(fn):
    """Like ``_torch_reduce`` but flattens kwargs into positional args.

    # TODO: autograd.Function doesn't support kwarg
    """

    def grad_reduce(*args, **kwargs):
        if len(args) == 1 and not kwargs:
            return _torch_reduce_all(fn)(args[0])
        tensor_arg, dim, keepdim, dtype = _reduce_dim_args(*args, **kwargs)
        return _torch_reduce_dim(fn)(tensor_arg, dim, keepdim, dtype)

    return grad_reduce
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
# Reductions for which MaskedTensor dispatch is provided.
REDUCE_NAMES = [
    "sum",
    "mean",
    "amin",
    "amax",
    "argmin",
    "argmax",
    "prod",
    "all",
    "norm",
    "var",
    "std",
]

# aten-level ops dispatch through the plain reduction wrapper.
NATIVE_REDUCE_MAP = {
    getattr(torch.ops.aten, name): _torch_reduce(name) for name in REDUCE_NAMES
}
# torch.* and Tensor.* entry points go through the grad-aware wrapper, which
# flattens kwargs into positional args (autograd.Function can't take kwargs).
TORCH_REDUCE_MAP = {
    getattr(torch, name): _torch_grad_reduce(name) for name in REDUCE_NAMES
}
TENSOR_REDUCE_MAP = {
    getattr(torch.Tensor, name): _torch_grad_reduce(name) for name in REDUCE_NAMES
}

# Convenience key lists used by _is_reduction/_apply_reduction callers.
NATIVE_REDUCE_FNS = list(NATIVE_REDUCE_MAP.keys())
TORCH_REDUCE_FNS = list(TORCH_REDUCE_MAP.keys())
TENSOR_REDUCE_FNS = list(TENSOR_REDUCE_MAP.keys())
|
| 161 |
+
|
| 162 |
+
def _is_reduction(fn):
    """True when ``fn`` is handled by any of the reduction dispatch tables."""
    return any(
        fn in table
        for table in (NATIVE_REDUCE_MAP, TORCH_REDUCE_MAP, TENSOR_REDUCE_MAP)
    )
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
def _apply_reduction(fn, *args, **kwargs):
    """Run ``fn`` through the first dispatch table that knows it; return
    NotImplemented when no table does."""
    for table in (NATIVE_REDUCE_MAP, TORCH_REDUCE_MAP, TENSOR_REDUCE_MAP):
        handler = table.get(fn)
        if handler is not None:
            return handler(*args, **kwargs)
    return NotImplemented
|
moondream/lib/python3.10/site-packages/torch/utils/__pycache__/throughput_benchmark.cpython-310.pyc
ADDED
|
Binary file (6.93 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/utils/__pycache__/weak.cpython-310.pyc
ADDED
|
Binary file (9.12 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/reference.cpython-310.pyc
ADDED
|
Binary file (6.57 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/singleton_int.cpython-310.pyc
ADDED
|
Binary file (2.81 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/utils/_sympy/singleton_int.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sympy
|
| 2 |
+
from sympy.multipledispatch import dispatch
|
| 3 |
+
|
| 4 |
+
__all__ = ["SingletonInt"]
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class SingletonInt(sympy.AtomicExpr):
    # A symbolic atomic integer identified by ``_val`` and scaled by an
    # integer ``_coeff``. The semantics should match NestedIntSymNodeImpl
    # (see comment on __init__ below).
    # This is probably not super important unless we are in multiple dispatch
    # situations with other more exotic Expr types.
    _op_priority = 99999

    def __new__(cls, *args, coeff=None, **kwargs):
        # ``coeff`` is intercepted here so it is not forwarded to
        # AtomicExpr.__new__; it is consumed by __init__ instead.
        instance = super().__new__(cls, *args, **kwargs)
        return instance

    # The semantics of this class should match that of NestedIntSymNodeImpl in
    # c10/core/NestedIntSymNodeImpl.h
    def __init__(self, val, *, coeff=1):
        # val: identity of the singleton; coeff: integer multiplier.
        self._val = val
        self._coeff = coeff
        super().__init__()

    # See NOTE [ Inequalities with nested int ]
    def _eval_Eq(self, other):
        """Equal iff ``other`` is a SingletonInt with the same val and coeff;
        comparison against anything else evaluates to sympy.false."""
        if (
            isinstance(other, SingletonInt)
            and other._val == self._val
            and self._coeff == other._coeff
        ):
            return sympy.true
        else:
            return sympy.false

    # This is necessary so that calling expr.free_symbols on exprs that contain
    # this Singleton does not error
    @property
    def free_symbols(self):
        return set()

    def __mul__(self, other):
        # Multiplying by a plain number scales the coefficient only;
        # two SingletonInts cannot be multiplied together.
        if isinstance(other, SingletonInt):
            raise ValueError(
                "SingletonInt cannot be multiplied by another SingletonInt"
            )
        return SingletonInt(self._val, coeff=self._coeff * other)

    def __rmul__(self, other):
        # Mirror of __mul__ for ``number * singleton``.
        if isinstance(other, SingletonInt):
            raise ValueError(
                "SingletonInt cannot be multiplied by another SingletonInt"
            )
        return SingletonInt(self._val, coeff=self._coeff * other)

    # Make sure we promptly raise an error instead of falling back to building
    # an expression tree. There are probably more ops, how can we be exhaustive?
    def __add__(self, other):
        raise NotImplementedError("NYI")

    def __sub__(self, other):
        raise NotImplementedError("NYI")

    def __truediv__(self, other):
        raise NotImplementedError("NYI")

    def __floordiv__(self, other):
        raise NotImplementedError("NYI")

    def __mod__(self, other):
        raise NotImplementedError("NYI")
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
# See NOTE [ Inequalities with nested int ]
# The branches below effectively treat a SingletonInt as an unknown value
# that is at least 2: any constant comparison that stays decidable under
# that assumption is answered, everything else raises.
@dispatch(sympy.Integer, SingletonInt)
def _eval_is_ge(a, b):
    # int >= singleton: decidable only when the int is < 2 (then False,
    # since the singleton is at least 2).
    if a < 2:
        return sympy.false
    raise ValueError("Symbolic SingletonInt: Relation is indeterminate")


@dispatch(SingletonInt, sympy.Integer)  # type: ignore[no-redef]
def _eval_is_ge(a, b):  # noqa: F811
    # singleton >= int: decidable only when the int is <= 2 (then True).
    if b <= 2:
        return sympy.true
    raise ValueError("Symbolic SingletonInt: Relation is indeterminate")


@dispatch(SingletonInt, SingletonInt)  # type: ignore[no-redef]
def _eval_is_ge(a, b):  # noqa: F811
    # Same singleton identity: the coefficients decide; different
    # identities are incomparable.
    if a._val == b._val:
        if a._coeff >= b._coeff:
            return sympy.true
        else:
            return sympy.false
    raise ValueError("Symbolic SingletonInt: Relation is indeterminate")
|
moondream/lib/python3.10/site-packages/torch/utils/_sympy/solve.py
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
|
| 3 |
+
from typing import Dict, Optional, Tuple, Type
|
| 4 |
+
|
| 5 |
+
import sympy
|
| 6 |
+
|
| 7 |
+
from torch.utils._sympy.functions import FloorDiv
|
| 8 |
+
|
| 9 |
+
log = logging.getLogger(__name__)

# Maps each relational operator to the operator obtained by swapping its
# operands: a < b  <=>  b > a. Eq/Ne are their own mirrors.
_MIRROR_REL_OP: Dict[Type[sympy.Basic], Type[sympy.Rel]] = {
    sympy.Eq: sympy.Eq,
    sympy.Ne: sympy.Ne,
    sympy.Ge: sympy.Le,
    sympy.Gt: sympy.Lt,
    sympy.Le: sympy.Ge,
    sympy.Lt: sympy.Gt,
}

# Strict/non-strict order relations; these need mirroring when both sides
# are divided by a negative factor (see _try_isolate_lhs).
INEQUALITY_TYPES = (sympy.Gt, sympy.Ge, sympy.Lt, sympy.Le)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def mirror_rel_op(type: Type) -> Optional[Type[sympy.Rel]]:
    """Return the relational operator with swapped operand order, or None
    for unsupported operator classes."""
    return _MIRROR_REL_OP.get(type)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# Tries to simplify 'expr', so as to leave only 'thing' in the left-hand side.
|
| 28 |
+
#
|
| 29 |
+
# Returns a tuple of:
|
| 30 |
+
# 1. The simplified expression
|
| 31 |
+
# 2. The expression on the right-hand side
|
| 32 |
+
#
|
| 33 |
+
# Returns 'None' if it can't reach a state where the only thing in the left
|
| 34 |
+
# hand side is 'thing'.
|
| 35 |
+
#
|
| 36 |
+
# 'trials': number of times 'try_solve' will try to isolate 'thing' to the
|
| 37 |
+
# left-hand side.
|
| 38 |
+
#
|
| 39 |
+
# 'floordiv_inequality': flag to enable conversion of 'FloorDiv' into
|
| 40 |
+
# inequalities.
|
| 41 |
+
def try_solve(
    expr: sympy.Basic,
    thing: sympy.Basic,
    trials: int = 5,
    floordiv_inequality: bool = True,
) -> Optional[Tuple[sympy.Rel, sympy.Basic]]:
    """Try to isolate ``thing`` on the left-hand side of relational ``expr``.

    Returns ``(rel, rhs)`` where ``rel`` is the rewritten relation with
    ``thing`` alone on the left and ``rhs`` is its right-hand side, or
    ``None`` when isolation does not succeed within ``trials`` passes of
    ``_try_isolate_lhs``. ``floordiv_inequality`` enables rewriting FloorDiv
    relations into equivalent inequalities (see ``_try_isolate_lhs``).
    """
    mirror = mirror_rel_op(type(expr))

    # Ignore unsupported expressions:
    #   - Those that are not relational operations
    #   - Those that don't have a mirror (just avoiding unexpected classes)
    if not isinstance(expr, sympy.Rel) or mirror is None:
        log.debug("expression with unsupported type: %s", type(expr))
        return None

    lhs_has_thing = expr.lhs.has(thing)
    rhs_has_thing = expr.rhs.has(thing)

    # Give up when 'thing' appears on both sides of the relational expression.
    # That is because, as is, we assume the thing we are trying to isolate is
    # only on the right-hand side.
    if lhs_has_thing and rhs_has_thing:
        log.debug("thing (%s) found in both sides of expression: %s", thing, expr)
        return None

    # Try considering both LHS and RHS by mirroring the original expression:
    # a < b ==> b > a
    expressions = []

    # Add each version of 'expr' if 'thing' is in its left-hand side.
    if lhs_has_thing:
        expressions.append(expr)
    if rhs_has_thing:
        expressions.append(mirror(expr.rhs, expr.lhs))

    for e in expressions:
        if e is None:
            continue

        assert isinstance(e, sympy.Rel)

        # Repeatedly peel terms off the left-hand side; each pass may expose
        # further simplification opportunities for the next.
        for _ in range(trials):
            trial = _try_isolate_lhs(e, thing, floordiv_inequality=floordiv_inequality)
            # Stop if there was no change in this trial.
            if trial == e:
                break
            e = trial  # type: ignore[assignment]

        # Return if we were able to isolate 'thing' on the left-hand side.
        if isinstance(e, sympy.Rel) and e.lhs == thing:
            return e, e.rhs

    return None
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def _try_isolate_lhs(
    expr: sympy.Basic, thing: sympy.Basic, floordiv_inequality: bool
) -> sympy.Basic:
    """One rewriting pass toward isolating ``thing`` on the left-hand side.

    Applies, in order: (1) move additive terms not containing ``thing`` to
    the right; (2) divide both sides by multiplicative factors not containing
    ``thing`` (mirroring inequalities for negative factors); (3) rewrite
    FloorDiv relations into equivalent inequalities. Returns ``expr``
    unchanged when no rule applies.
    """
    e = expr
    op = type(expr)

    if isinstance(e, sympy.Rel):
        # Move any constants in the left-hand side to the right-hand side.
        lhs_not_thing = (
            sum([a for a in e.lhs.args if not a.has(thing)])
            if isinstance(e.lhs, sympy.Add)
            else 0
        )
        e = op(expr.lhs - lhs_not_thing, expr.rhs - lhs_not_thing)  # type: ignore[attr-defined]

    # Divide both sides by the factors that don't contain thing.
    if isinstance(e, sympy.Rel) and isinstance(e.lhs, sympy.Mul):
        lhs, rhs = e.args
        other = sympy.Mul(*[a for a in lhs.args if not a.has(thing)])

        # If we can't tell whether 'other' is negative or positive, we do nothing.
        # That is because we don't know whether we have to mirror the operation
        # or not (is_negative is None means "sign unknown" in sympy).
        if not (isinstance(e, INEQUALITY_TYPES) and other.is_negative is None):
            # Divide both sides by 'other'.
            lhs = lhs / other
            rhs = rhs / other

            # If 'e' is an inequality and 'other' is negative, we have to
            # mirror the expression.
            if isinstance(e, INEQUALITY_TYPES) and other.is_negative:
                op = mirror_rel_op(op)  # type: ignore[assignment]

            assert op is not None
            e = op(lhs, rhs)

    ################################################################################
    # left-hand side is FloorDiv
    ################################################################################
    #
    # Given the expression: a // b op c
    # where 'op' is a relational operation, these rules only work if:
    #   - b > 0
    #   - c is an integer
    if (
        floordiv_inequality
        and isinstance(e, sympy.Rel)
        and isinstance(e.lhs, FloorDiv)
        and e.lhs.divisor.is_positive
        and e.rhs.is_integer
    ):
        # a // b == expr
        # => a >= (b * expr) and a < (b * (expr + 1))
        if isinstance(expr, sympy.Eq):
            numerator, denominator = e.lhs.args
            return sympy.And(
                sympy.Ge(numerator, (e.rhs * denominator)),  # type: ignore[arg-type]
                sympy.Lt(numerator, ((e.rhs + 1) * denominator)),  # type: ignore[arg-type]
            )
        # a // b != expr
        # => a < (b * expr) or a >= (b * (expr + 1))
        if isinstance(expr, sympy.Ne):
            numerator, denominator = e.lhs.args
            return sympy.Or(
                sympy.Lt(numerator, (e.rhs * denominator)),  # type: ignore[arg-type]
                sympy.Ge(numerator, ((e.rhs + 1) * denominator)),  # type: ignore[arg-type]
            )
        # The transformations below only work if b is positive.
        # Note: we only have this information for constants.
        # a // b > expr  => a >= b * (expr + 1)
        # a // b >= expr => a >= b * expr
        if isinstance(expr, (sympy.Gt, sympy.Ge)):
            quotient = e.rhs if isinstance(expr, sympy.Ge) else (e.rhs + 1)  # type: ignore[arg-type]
            return sympy.Ge(e.lhs.args[0], (quotient * e.lhs.args[1]))  # type: ignore[arg-type]
        # a // b < expr  => a < b * expr
        # a // b <= expr => a < b * (expr + 1)
        if isinstance(expr, (sympy.Lt, sympy.Le)):
            quotient = e.rhs if isinstance(expr, sympy.Lt) else (e.rhs + 1)  # type: ignore[arg-type]
            return sympy.Lt(e.lhs.args[0], (quotient * e.lhs.args[1]))  # type: ignore[arg-type]

    return e
|
moondream/lib/python3.10/site-packages/torch/utils/_sympy/value_ranges.py
ADDED
|
@@ -0,0 +1,782 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import dataclasses
|
| 4 |
+
import itertools
|
| 5 |
+
import sympy
|
| 6 |
+
from sympy.logic.boolalg import BooleanAtom, Boolean as SympyBoolean
|
| 7 |
+
import operator
|
| 8 |
+
import math
|
| 9 |
+
import logging
|
| 10 |
+
import torch
|
| 11 |
+
from typing import Dict, Optional, SupportsFloat, TypeVar, Generic, Union, overload, Callable, TYPE_CHECKING
|
| 12 |
+
from typing_extensions import TypeGuard
|
| 13 |
+
|
| 14 |
+
from torch._prims_common import dtype_to_type
|
| 15 |
+
from .interp import sympy_interp
|
| 16 |
+
from .functions import Round, RoundDecimal
|
| 17 |
+
|
| 18 |
+
log = logging.getLogger(__name__)

__all__ = ["ValueRanges", "ValueRangeAnalysis", "bound_sympy"]

# A range is either numeric (sympy.Expr bounds) or boolean (SympyBoolean
# bounds), never mixed; the constrained TypeVar encodes that split.
_T = TypeVar('_T', sympy.Expr, SympyBoolean)
|
| 23 |
+
|
| 24 |
+
class ValueRangeError(RuntimeError):
    """Raised when a ValueRanges would be constructed with lower > upper."""
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# Like sympify, but supports less stuff, and also ensures that direct
|
| 29 |
+
# sympy expressions don't have free variables
|
| 30 |
+
def simple_sympify(e):
    """Convert a Python bool/int/float, or an already-constant sympy value,
    to a sympy atom. Unlike sympify, rejects anything with free variables."""
    # NOTE: bool is checked before int because bool is a subclass of int.
    if isinstance(e, bool):
        return sympy.true if e else sympy.false
    if isinstance(e, int):
        return sympy.Integer(e)
    if isinstance(e, float):
        # infinity is special; we use it to bracket integers as well
        if math.isinf(e):
            return sympy.oo if e > 0 else -sympy.oo
        return sympy.Float(e)
    if isinstance(e, sympy.Expr):
        assert e.is_number, e
        # NaNs can occur when doing things like 0 * sympy.oo, but it is better
        # if the operator notices this and takes care of it, because sometimes
        # the NaN is inappropriate (for example, for ints, the [-oo, oo] range
        # should go to zero when multiplied with [0, 0])
        assert e != sympy.nan
        return e
    if isinstance(e, BooleanAtom):
        return e
    raise AssertionError(f"not simple sympy type {type(e)}: {e}")
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
# Sympy atomics only. Unlike <=, it also works on Sympy bools.
|
| 55 |
+
def sympy_generic_le(lower, upper):
    """``<=`` that also works on sympy booleans (ordering False <= True)."""
    if isinstance(lower, sympy.Expr):
        assert isinstance(upper, sympy.Expr)
        return lower <= upper
    # Boolean case: the only violation of <= is lower=True, upper=False,
    # i.e. the comparison holds unless lower is true while upper is false.
    assert isinstance(lower, SympyBoolean) and isinstance(upper, SympyBoolean)
    return (not lower) or bool(upper)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def vr_is_bool(vr: ValueRanges[_T]) -> TypeGuard[ValueRanges[SympyBoolean]]:
    """TypeGuard narrowing ``vr`` to a boolean-valued range."""
    return vr.is_bool
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def vr_is_expr(vr: ValueRanges[_T]) -> TypeGuard[ValueRanges[sympy.Expr]]:
    """TypeGuard narrowing ``vr`` to a numeric (sympy.Expr) range."""
    return not vr.is_bool
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
# Input types accepted for range bounds, split by numeric vs boolean domain.
ExprIn = Union[int, float, sympy.Expr]
BoolIn = Union[bool, SympyBoolean]
AllIn = Union[ExprIn, BoolIn]
# Unary and binary bound-transformers over each domain.
ExprFn = Callable[[sympy.Expr], sympy.Expr]
ExprFn2 = Callable[[sympy.Expr, sympy.Expr], sympy.Expr]
BoolFn = Callable[[SympyBoolean], SympyBoolean]
BoolFn2 = Callable[[SympyBoolean, SympyBoolean], SympyBoolean]
AllFn = Union[ExprFn, BoolFn]
AllFn2 = Union[ExprFn2, BoolFn2]
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
@dataclasses.dataclass(frozen=True)
|
| 85 |
+
class ValueRanges(Generic[_T]):
|
| 86 |
+
if TYPE_CHECKING:
|
| 87 |
+
# ruff doesn't understand circular references but mypy does
|
| 88 |
+
ExprVR = ValueRanges[sympy.Expr] # noqa: F821
|
| 89 |
+
BoolVR = ValueRanges[SympyBoolean] # noqa: F821
|
| 90 |
+
AllVR = Union[ExprVR, BoolVR]
|
| 91 |
+
|
| 92 |
+
# Although the type signature here suggests you can pass any
|
| 93 |
+
# sympy expression, in practice the analysis here only works
|
| 94 |
+
# with constant sympy expressions
|
| 95 |
+
lower: _T
|
| 96 |
+
upper: _T
|
| 97 |
+
is_bool: bool
|
| 98 |
+
|
| 99 |
+
@overload
|
| 100 |
+
def __init__(self: ValueRanges[sympy.Expr], lower: ExprIn, upper: ExprIn) -> None:
|
| 101 |
+
...
|
| 102 |
+
|
| 103 |
+
@overload
|
| 104 |
+
def __init__(self: ValueRanges[SympyBoolean], lower: BoolIn, upper: BoolIn) -> None:
|
| 105 |
+
...
|
| 106 |
+
|
| 107 |
+
def __init__(self, lower: AllIn, upper: AllIn) -> None:
|
| 108 |
+
lower = simple_sympify(lower)
|
| 109 |
+
upper = simple_sympify(upper)
|
| 110 |
+
# TODO: when the bounds have free variables, this may be
|
| 111 |
+
# nontrivial to actually verify
|
| 112 |
+
if not sympy_generic_le(lower, upper):
|
| 113 |
+
raise ValueRangeError(f"Invalid ranges [{lower}:{upper}]")
|
| 114 |
+
# Because this is a frozen class
|
| 115 |
+
object.__setattr__(self, "lower", lower)
|
| 116 |
+
object.__setattr__(self, "upper", upper)
|
| 117 |
+
object.__setattr__(self, "is_bool", isinstance(lower, SympyBoolean))
|
| 118 |
+
assert isinstance(upper, SympyBoolean) == self.is_bool
|
| 119 |
+
|
| 120 |
+
def boolify(self) -> ValueRanges[SympyBoolean]:
|
| 121 |
+
if vr_is_bool(self):
|
| 122 |
+
return self
|
| 123 |
+
elif self == ValueRanges.unknown():
|
| 124 |
+
return ValueRanges.unknown_bool()
|
| 125 |
+
else:
|
| 126 |
+
raise AssertionError(f"not bool like {self}")
|
| 127 |
+
|
| 128 |
+
def __contains__(self, x: AllIn) -> bool:
|
| 129 |
+
x = simple_sympify(x)
|
| 130 |
+
return sympy_generic_le(self.lower, x) and sympy_generic_le(x, self.upper)
|
| 131 |
+
|
| 132 |
+
def issubset(self, other):
|
| 133 |
+
return sympy_generic_le(other.lower, self.lower) and sympy_generic_le(self.upper, other.upper)
|
| 134 |
+
|
| 135 |
+
def tighten(self, other) -> ValueRanges:
|
| 136 |
+
"""Given two ValueRanges, returns their intersection"""
|
| 137 |
+
return self & other
|
| 138 |
+
|
| 139 |
+
# Intersection
|
| 140 |
+
@overload
|
| 141 |
+
def __and__(self: ValueRanges[sympy.Expr], other: ValueRanges[sympy.Expr]) -> ValueRanges[sympy.Expr]:
|
| 142 |
+
...
|
| 143 |
+
|
| 144 |
+
@overload
|
| 145 |
+
def __and__(self: ValueRanges[SympyBoolean], other: ValueRanges[SympyBoolean]) -> ValueRanges[SympyBoolean]:
|
| 146 |
+
...
|
| 147 |
+
|
| 148 |
+
def __and__(self: AllVR, other: AllVR) -> AllVR:
|
| 149 |
+
if other == ValueRanges.unknown():
|
| 150 |
+
return self
|
| 151 |
+
if self == ValueRanges.unknown():
|
| 152 |
+
return other
|
| 153 |
+
assert self.is_bool == other.is_bool, (self, other)
|
| 154 |
+
if self.is_bool:
|
| 155 |
+
return ValueRanges(sympy.Or(self.lower, other.lower), sympy.And(self.upper, other.upper))
|
| 156 |
+
else:
|
| 157 |
+
return ValueRanges(sympy.Max(self.lower, other.lower), sympy.Min(self.upper, other.upper))
|
| 158 |
+
|
| 159 |
+
# Union
|
| 160 |
+
@overload
|
| 161 |
+
def __or__(self: ValueRanges[sympy.Expr], other: ValueRanges[sympy.Expr]) -> ValueRanges[sympy.Expr]:
|
| 162 |
+
...
|
| 163 |
+
|
| 164 |
+
@overload
|
| 165 |
+
def __or__(self: ValueRanges[SympyBoolean], other: ValueRanges[SympyBoolean]) -> ValueRanges[SympyBoolean]:
|
| 166 |
+
...
|
| 167 |
+
|
| 168 |
+
def __or__(self: AllVR, other: AllVR) -> AllVR:
|
| 169 |
+
if ValueRanges.unknown() in (self, other):
|
| 170 |
+
return ValueRanges.unknown()
|
| 171 |
+
assert self.is_bool == other.is_bool, (self, other)
|
| 172 |
+
if self.is_bool:
|
| 173 |
+
return ValueRanges(sympy.And(self.lower, other.lower), sympy.Or(self.upper, other.upper))
|
| 174 |
+
else:
|
| 175 |
+
return ValueRanges(sympy.Min(self.lower, other.lower), sympy.Max(self.upper, other.upper))
|
| 176 |
+
|
| 177 |
+
def is_singleton(self) -> bool:
|
| 178 |
+
return self.lower == self.upper
|
| 179 |
+
|
| 180 |
+
# TODO: this doesn't work with bools but arguably it should
|
| 181 |
+
@staticmethod
|
| 182 |
+
def unknown() -> ValueRanges[sympy.Expr]:
|
| 183 |
+
return ValueRanges(-sympy.oo, sympy.oo)
|
| 184 |
+
|
| 185 |
+
@staticmethod
|
| 186 |
+
def unknown_bool() -> ValueRanges[SympyBoolean]:
|
| 187 |
+
return ValueRanges(sympy.false, sympy.true)
|
| 188 |
+
|
| 189 |
+
@overload
|
| 190 |
+
@staticmethod
|
| 191 |
+
# work around the fact that bool and int overlap
|
| 192 |
+
def wrap(arg: Union[ExprIn, ExprVR]) -> ExprVR: # type: ignore[overload-overlap]
|
| 193 |
+
...
|
| 194 |
+
|
| 195 |
+
@overload
|
| 196 |
+
@staticmethod
|
| 197 |
+
def wrap(arg: Union[BoolIn, BoolVR]) -> BoolVR:
|
| 198 |
+
...
|
| 199 |
+
|
| 200 |
+
@staticmethod
|
| 201 |
+
def wrap(arg: Union[AllIn, AllVR]) -> AllVR:
|
| 202 |
+
if isinstance(arg, ValueRanges):
|
| 203 |
+
return arg
|
| 204 |
+
# arg is either ExprIn or BoolIn, but we don't know it here
|
| 205 |
+
return ValueRanges(arg, arg) # type: ignore[arg-type]
|
| 206 |
+
|
| 207 |
+
@staticmethod
|
| 208 |
+
def increasing_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR:
|
| 209 |
+
"""Increasing: x <= y => f(x) <= f(y)."""
|
| 210 |
+
x = ValueRanges.wrap(x)
|
| 211 |
+
return ValueRanges(fn(x.lower), fn(x.upper))
|
| 212 |
+
|
| 213 |
+
@overload
|
| 214 |
+
@staticmethod
|
| 215 |
+
def decreasing_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR:
|
| 216 |
+
...
|
| 217 |
+
|
| 218 |
+
@overload
|
| 219 |
+
@staticmethod
|
| 220 |
+
def decreasing_map(x: Union[BoolIn, BoolVR], fn: BoolFn) -> BoolVR:
|
| 221 |
+
...
|
| 222 |
+
|
| 223 |
+
@staticmethod
|
| 224 |
+
def decreasing_map(x: Union[AllIn, AllVR], fn: AllFn) -> AllVR:
|
| 225 |
+
"""Decreasing: x <= y => f(x) >= f(y)."""
|
| 226 |
+
x = ValueRanges.wrap(x)
|
| 227 |
+
# consistently either Expr or Bool, but we don't know it here
|
| 228 |
+
return ValueRanges(fn(x.upper), fn(x.lower)) # type: ignore[arg-type]
|
| 229 |
+
|
| 230 |
+
@staticmethod
|
| 231 |
+
def monotone_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR:
|
| 232 |
+
"""It's increasing or decreasing."""
|
| 233 |
+
x = ValueRanges.wrap(x)
|
| 234 |
+
l = fn(x.lower)
|
| 235 |
+
u = fn(x.upper)
|
| 236 |
+
return ValueRanges(min(l, u), max(l, u))
|
| 237 |
+
|
| 238 |
+
@staticmethod
|
| 239 |
+
def convex_min_zero_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR:
|
| 240 |
+
"""Fn is convex and has a minimum at 0."""
|
| 241 |
+
x = ValueRanges.wrap(x)
|
| 242 |
+
if 0 in x:
|
| 243 |
+
return ValueRanges(0, max(fn(x.lower), fn(x.upper)))
|
| 244 |
+
else:
|
| 245 |
+
return ValueRanges.monotone_map(x, fn)
|
| 246 |
+
|
| 247 |
+
@overload
|
| 248 |
+
@staticmethod
|
| 249 |
+
def coordinatewise_increasing_map(x: Union[ExprIn, ExprVR], y: Union[ExprIn, ExprVR], fn: ExprFn2) -> ExprVR:
|
| 250 |
+
...
|
| 251 |
+
|
| 252 |
+
@overload
|
| 253 |
+
@staticmethod
|
| 254 |
+
def coordinatewise_increasing_map(x: Union[BoolIn, BoolVR], y: Union[BoolIn, BoolVR], fn: BoolFn2) -> BoolVR:
|
| 255 |
+
...
|
| 256 |
+
|
| 257 |
+
@staticmethod
|
| 258 |
+
def coordinatewise_increasing_map(x: Union[AllIn, AllVR], y: Union[AllIn, AllVR], fn: AllFn2) -> AllVR:
|
| 259 |
+
"""
|
| 260 |
+
It's increasing on each coordinate.
|
| 261 |
+
|
| 262 |
+
Mathematically:
|
| 263 |
+
For every 1 <= i <= n and x_i <= y_i we have that
|
| 264 |
+
f(x1, .., xn) <= f(x1, , yi, ..., xn)
|
| 265 |
+
"""
|
| 266 |
+
x, y = ValueRanges.wrap(x), ValueRanges.wrap(y)
|
| 267 |
+
return ValueRanges(
|
| 268 |
+
fn(x.lower, y.lower), # type: ignore[arg-type]
|
| 269 |
+
fn(x.upper, y.upper), # type: ignore[arg-type]
|
| 270 |
+
)
|
| 271 |
+
|
| 272 |
+
@classmethod
|
| 273 |
+
def coordinatewise_monotone_map(cls, x, y, fn):
|
| 274 |
+
"""It's increasing or decreasing on each coordinate."""
|
| 275 |
+
x, y = cls.wrap(x), cls.wrap(y)
|
| 276 |
+
products = [
|
| 277 |
+
fn(a, b)
|
| 278 |
+
for a, b in itertools.product([x.lower, x.upper], [y.lower, y.upper])
|
| 279 |
+
]
|
| 280 |
+
return ValueRanges(min(products), max(products))
|
| 281 |
+
|
| 282 |
+
class SymPyValueRangeAnalysis:
|
| 283 |
+
"""
|
| 284 |
+
It gives bounds on a SymPy operator given bounds on its arguments
|
| 285 |
+
See the function `bound_sympy` for a function that applies this logic to a full SymPy expression
|
| 286 |
+
"""
|
| 287 |
+
|
| 288 |
+
@staticmethod
|
| 289 |
+
def constant(value, dtype):
|
| 290 |
+
# NB: value is NOT a sympy expression, it's a constant!
|
| 291 |
+
is_python = isinstance(value, (int, float, bool))
|
| 292 |
+
assert is_python or isinstance(value, (BooleanAtom, sympy.Integer, sympy.Number))
|
| 293 |
+
|
| 294 |
+
# using nan makes subsequent computation throw, and for the purposes of optimization
|
| 295 |
+
# returning -math.inf - math.inf is equivalent to giving up
|
| 296 |
+
if isinstance(value, SupportsFloat) and math.isnan(value):
|
| 297 |
+
return ValueRanges.unknown()
|
| 298 |
+
|
| 299 |
+
if is_python:
|
| 300 |
+
type_ = dtype_to_type(dtype)
|
| 301 |
+
value = type_(value)
|
| 302 |
+
else:
|
| 303 |
+
# We do a type check on a best-effort basis
|
| 304 |
+
# We don't want to force a cast to sympy.Float if the value is Rational to avoid losing precision
|
| 305 |
+
if dtype == torch.bool:
|
| 306 |
+
assert isinstance(value, BooleanAtom)
|
| 307 |
+
elif dtype.is_floating_point:
|
| 308 |
+
assert not value.is_finite or value.is_real
|
| 309 |
+
else:
|
| 310 |
+
# dtype is intXX
|
| 311 |
+
assert value.is_integer
|
| 312 |
+
|
| 313 |
+
return ValueRanges.wrap(value)
|
| 314 |
+
|
| 315 |
+
@staticmethod
|
| 316 |
+
def not_(a):
|
| 317 |
+
a = ValueRanges.wrap(a)
|
| 318 |
+
a = a.boolify()
|
| 319 |
+
assert a.is_bool
|
| 320 |
+
return ValueRanges.decreasing_map(a, sympy.Not)
|
| 321 |
+
|
| 322 |
+
@staticmethod
|
| 323 |
+
def or_(a, b):
|
| 324 |
+
return ValueRanges.coordinatewise_increasing_map(a, b, sympy.Or)
|
| 325 |
+
|
| 326 |
+
@staticmethod
|
| 327 |
+
def and_(a, b):
|
| 328 |
+
return ValueRanges.coordinatewise_increasing_map(a, b, sympy.And)
|
| 329 |
+
|
| 330 |
+
@staticmethod
|
| 331 |
+
def eq(a, b):
|
| 332 |
+
a = ValueRanges.wrap(a)
|
| 333 |
+
b = ValueRanges.wrap(b)
|
| 334 |
+
if a.is_singleton() and b.is_singleton() and a.lower == b.lower:
|
| 335 |
+
return ValueRanges.wrap(sympy.true)
|
| 336 |
+
elif a.lower > b.upper or b.lower > a.upper: # ranges disjoint
|
| 337 |
+
return ValueRanges.wrap(sympy.false)
|
| 338 |
+
return ValueRanges(sympy.false, sympy.true)
|
| 339 |
+
|
| 340 |
+
@classmethod
|
| 341 |
+
def ne(cls, a, b):
|
| 342 |
+
return cls.not_(cls.eq(a, b))
|
| 343 |
+
|
| 344 |
+
@classmethod
|
| 345 |
+
def lt(cls, a, b):
|
| 346 |
+
a = ValueRanges.wrap(a)
|
| 347 |
+
b = ValueRanges.wrap(b)
|
| 348 |
+
assert a.is_bool == b.is_bool
|
| 349 |
+
if a.is_bool:
|
| 350 |
+
return cls.and_(cls.not_(a), b)
|
| 351 |
+
else:
|
| 352 |
+
if a.upper < b.lower:
|
| 353 |
+
return ValueRanges.wrap(sympy.true)
|
| 354 |
+
elif a.lower >= b.upper:
|
| 355 |
+
return ValueRanges.wrap(sympy.false)
|
| 356 |
+
return ValueRanges(sympy.false, sympy.true)
|
| 357 |
+
|
| 358 |
+
@classmethod
|
| 359 |
+
def gt(cls, a, b):
|
| 360 |
+
return cls.lt(b, a)
|
| 361 |
+
|
| 362 |
+
@classmethod
|
| 363 |
+
def le(cls, a, b):
|
| 364 |
+
return cls.not_(cls.gt(a, b))
|
| 365 |
+
|
| 366 |
+
@classmethod
|
| 367 |
+
def ge(cls, a, b):
|
| 368 |
+
return cls.not_(cls.lt(a, b))
|
| 369 |
+
|
| 370 |
+
@staticmethod
|
| 371 |
+
def add(a, b):
|
| 372 |
+
return ValueRanges.coordinatewise_increasing_map(a, b, operator.add)
|
| 373 |
+
|
| 374 |
+
@classmethod
|
| 375 |
+
def mul(cls, a, b):
|
| 376 |
+
a = ValueRanges.wrap(a)
|
| 377 |
+
b = ValueRanges.wrap(b)
|
| 378 |
+
|
| 379 |
+
assert a.is_bool == b.is_bool
|
| 380 |
+
if a.is_bool:
|
| 381 |
+
return cls.and_(a, b)
|
| 382 |
+
|
| 383 |
+
def safe_mul(a, b):
|
| 384 |
+
# Make unknown() * wrap(0) == wrap(0)
|
| 385 |
+
if a == 0:
|
| 386 |
+
return a
|
| 387 |
+
elif b == 0:
|
| 388 |
+
return b
|
| 389 |
+
else:
|
| 390 |
+
return a * b
|
| 391 |
+
|
| 392 |
+
return ValueRanges.coordinatewise_monotone_map(a, b, safe_mul)
|
| 393 |
+
|
| 394 |
+
@classmethod
|
| 395 |
+
def div(cls, a, b):
|
| 396 |
+
return cls.truediv(a, b)
|
| 397 |
+
|
| 398 |
+
@staticmethod
|
| 399 |
+
def truediv(a, b):
|
| 400 |
+
a = ValueRanges.wrap(a)
|
| 401 |
+
b = ValueRanges.wrap(b)
|
| 402 |
+
if 0 in b or ((-sympy.oo in a or sympy.oo in a) and (-sympy.oo in b or sympy.oo in b)):
|
| 403 |
+
return ValueRanges.unknown()
|
| 404 |
+
else:
|
| 405 |
+
return ValueRanges.coordinatewise_monotone_map(a, b, operator.truediv)
|
| 406 |
+
|
| 407 |
+
@staticmethod
|
| 408 |
+
def floordiv(a, b):
|
| 409 |
+
a = ValueRanges.wrap(a)
|
| 410 |
+
b = ValueRanges.wrap(b)
|
| 411 |
+
if 0 in b or ((-sympy.oo in a or sympy.oo in a) and (-sympy.oo in b or sympy.oo in b)):
|
| 412 |
+
return ValueRanges.unknown()
|
| 413 |
+
else:
|
| 414 |
+
return ValueRanges.coordinatewise_monotone_map(a, b, operator.floordiv)
|
| 415 |
+
|
| 416 |
+
@staticmethod
|
| 417 |
+
def mod(x, y):
|
| 418 |
+
x = ValueRanges.wrap(x)
|
| 419 |
+
y = ValueRanges.wrap(y)
|
| 420 |
+
if x.is_singleton() and y.is_singleton() and y.lower != 0:
|
| 421 |
+
return ValueRanges.wrap(x.lower % y.lower)
|
| 422 |
+
if y.lower <= 0:
|
| 423 |
+
return ValueRanges.unknown()
|
| 424 |
+
return ValueRanges(0, y.upper)
|
| 425 |
+
|
| 426 |
+
@classmethod
|
| 427 |
+
def modular_indexing(cls, a, b, c):
|
| 428 |
+
return cls.mod(cls.floordiv(a, b), c)
|
| 429 |
+
|
| 430 |
+
@classmethod
|
| 431 |
+
def is_non_overlapping_and_dense_indicator(cls, *args):
|
| 432 |
+
return ValueRanges.unknown()
|
| 433 |
+
|
| 434 |
+
@classmethod
|
| 435 |
+
def pow(cls, a, b):
|
| 436 |
+
def is_integer(val):
|
| 437 |
+
return isinstance(val, int) or (
|
| 438 |
+
hasattr(val, "is_integer") and val.is_integer
|
| 439 |
+
)
|
| 440 |
+
|
| 441 |
+
a = ValueRanges.wrap(a)
|
| 442 |
+
b = ValueRanges.wrap(b)
|
| 443 |
+
# Not implemented yet. It's a bit tricky
|
| 444 |
+
# If you want to implement it, compute the partial derivatives of a ** b
|
| 445 |
+
# and check the ranges where the function is increasing / decreasing
|
| 446 |
+
# Another non-tight way of doing this is defaulting to doing noting that for a > 0, a ** b == exp(b * log(a))
|
| 447 |
+
# If this second option is implemented, by carefult about the types and possible infinities here and there.
|
| 448 |
+
if not b.is_singleton():
|
| 449 |
+
return ValueRanges.unknown()
|
| 450 |
+
|
| 451 |
+
b = b.lower
|
| 452 |
+
if a.is_singleton():
|
| 453 |
+
a = a.lower
|
| 454 |
+
r = a ** b
|
| 455 |
+
if not r.is_finite:
|
| 456 |
+
return ValueRanges.unknown()
|
| 457 |
+
return ValueRanges.wrap(r)
|
| 458 |
+
|
| 459 |
+
if b == 0:
|
| 460 |
+
if not a.lower.is_finite:
|
| 461 |
+
return ValueRanges.unknown()
|
| 462 |
+
type_ = sympy.Float if a.lower.is_real else sympy.Integer
|
| 463 |
+
return ValueRanges.wrap(type_(1))
|
| 464 |
+
|
| 465 |
+
if b < 0:
|
| 466 |
+
a = cls.reciprocal(a)
|
| 467 |
+
b = -b
|
| 468 |
+
|
| 469 |
+
if a == ValueRanges.unknown():
|
| 470 |
+
return ValueRanges.unknown()
|
| 471 |
+
|
| 472 |
+
# Here b > 0
|
| 473 |
+
if not is_integer(b):
|
| 474 |
+
# If the base is positive, then we're good, otherwise nothing's defined
|
| 475 |
+
if a.lower >= 0:
|
| 476 |
+
return ValueRanges.increasing_map(a, lambda x: x ** b)
|
| 477 |
+
else:
|
| 478 |
+
return ValueRanges.unknown()
|
| 479 |
+
else:
|
| 480 |
+
# b > 0 integer
|
| 481 |
+
if b % 2 == 0:
|
| 482 |
+
# x^n where n is even
|
| 483 |
+
return ValueRanges.convex_min_zero_map(a, lambda x: x ** b)
|
| 484 |
+
else:
|
| 485 |
+
# x^n where n is odd
|
| 486 |
+
return ValueRanges.increasing_map(a, lambda x: x ** b)
|
| 487 |
+
|
| 488 |
+
@staticmethod
|
| 489 |
+
def reciprocal(x):
|
| 490 |
+
""" Needed as it's used in pow, but it won't appear on a SymPy expression """
|
| 491 |
+
x = ValueRanges.wrap(x)
|
| 492 |
+
if 0 in x:
|
| 493 |
+
return ValueRanges.unknown()
|
| 494 |
+
else:
|
| 495 |
+
return ValueRanges.decreasing_map(x, lambda y: 1 / y)
|
| 496 |
+
|
| 497 |
+
@staticmethod
|
| 498 |
+
def abs(x):
|
| 499 |
+
return ValueRanges.convex_min_zero_map(x, abs)
|
| 500 |
+
|
| 501 |
+
@staticmethod
|
| 502 |
+
def exp(x):
|
| 503 |
+
return ValueRanges.increasing_map(x, sympy.functions.elementary.exponential.exp)
|
| 504 |
+
|
| 505 |
+
@staticmethod
|
| 506 |
+
def log(x):
|
| 507 |
+
x = ValueRanges.wrap(x)
|
| 508 |
+
if x.lower <= 0:
|
| 509 |
+
return ValueRanges.unknown()
|
| 510 |
+
return ValueRanges.increasing_map(x, sympy.log)
|
| 511 |
+
|
| 512 |
+
@classmethod
|
| 513 |
+
def minimum(cls, a, b):
|
| 514 |
+
return cls.min_or_max(a, b, sympy.Min)
|
| 515 |
+
|
| 516 |
+
@classmethod
|
| 517 |
+
def maximum(cls, a, b):
|
| 518 |
+
return cls.min_or_max(a, b, sympy.Max)
|
| 519 |
+
|
| 520 |
+
@staticmethod
|
| 521 |
+
def min_or_max(a, b, fn):
|
| 522 |
+
a = ValueRanges.wrap(a)
|
| 523 |
+
b = ValueRanges.wrap(b)
|
| 524 |
+
|
| 525 |
+
# Performs upcasting first
|
| 526 |
+
def fn_(x: sympy.Expr, y: sympy.Expr) -> sympy.Expr:
|
| 527 |
+
# Poorman's version of upcasting in Sympy
|
| 528 |
+
# Inf is not a float...
|
| 529 |
+
if x.is_Integer and y.is_Integer:
|
| 530 |
+
result_type = sympy.Integer
|
| 531 |
+
elif x.is_rational and y.is_rational:
|
| 532 |
+
result_type = sympy.Rational
|
| 533 |
+
else:
|
| 534 |
+
assert x.is_real or not x.is_finite or y.is_real or not y.is_finite
|
| 535 |
+
result_type = sympy.Float
|
| 536 |
+
return fn(result_type(x), result_type(y))
|
| 537 |
+
|
| 538 |
+
return ValueRanges.coordinatewise_increasing_map(a, b, fn_)
|
| 539 |
+
|
| 540 |
+
@classmethod
|
| 541 |
+
def floor(cls, x):
|
| 542 |
+
return ValueRanges.increasing_map(x, sympy.functions.elementary.integers.floor)
|
| 543 |
+
|
| 544 |
+
@classmethod
|
| 545 |
+
def ceil(cls, x):
|
| 546 |
+
return ValueRanges.increasing_map(x, sympy.functions.elementary.integers.ceiling)
|
| 547 |
+
|
| 548 |
+
@classmethod
|
| 549 |
+
def round(cls, number, ndigits=None):
|
| 550 |
+
if ndigits is None:
|
| 551 |
+
fn = Round
|
| 552 |
+
else:
|
| 553 |
+
assert ndigits.is_singleton()
|
| 554 |
+
ndigits = ndigits.lower
|
| 555 |
+
# We can't use functools.partial here since sympy doesn't support keyword arguments, but we have to bind
|
| 556 |
+
# the second parameter.
|
| 557 |
+
fn = lambda number: RoundDecimal(number, ndigits) # type: ignore[misc, assignment] # noqa: E731
|
| 558 |
+
|
| 559 |
+
return ValueRanges.increasing_map(number, fn)
|
| 560 |
+
|
| 561 |
+
# It's used in some models on symints
|
| 562 |
+
@staticmethod
|
| 563 |
+
def sqrt(x):
|
| 564 |
+
x = ValueRanges.wrap(x)
|
| 565 |
+
if x.lower < 0:
|
| 566 |
+
return ValueRanges.unknown()
|
| 567 |
+
return ValueRanges.increasing_map(x, sympy.sqrt)
|
| 568 |
+
|
| 569 |
+
@staticmethod
|
| 570 |
+
def where(a, b, c):
|
| 571 |
+
b = ValueRanges.wrap(b)
|
| 572 |
+
c = ValueRanges.wrap(c)
|
| 573 |
+
a = a.boolify()
|
| 574 |
+
assert b.is_bool == c.is_bool
|
| 575 |
+
if b.is_bool:
|
| 576 |
+
return ValueRanges(sympy.And(b.lower, c.lower), sympy.Or(b.upper, c.upper))
|
| 577 |
+
else:
|
| 578 |
+
return ValueRanges(sympy.Min(b.lower, c.lower), sympy.Max(b.upper, c.upper))
|
| 579 |
+
|
| 580 |
+
# expr_cond_pair is used to represent a single (expr, condition) pair in piecewise.
|
| 581 |
+
# We just return the value range of the expression and its corresponding condition as a tuple
|
| 582 |
+
# and defer the analysis to piecewise
|
| 583 |
+
@staticmethod
|
| 584 |
+
def expr_cond_pair(a, b):
|
| 585 |
+
b = b.boolify()
|
| 586 |
+
return (a, b)
|
| 587 |
+
|
| 588 |
+
# piecewise function can be used to convert a SymBool to SymInt:
|
| 589 |
+
# int_expr = Piecewise((1, bool_expr), (0, True)), it evalutes to 1 when sym_bool is True and 0 otherwise.
|
| 590 |
+
#
|
| 591 |
+
# ranges is a sequence of (expr_range, condition_range) pairs. The range pair is constructed in expr_cond_pair.
|
| 592 |
+
# The ValueRange of Piecewise is just the union of all expr ranges whose condition expr can be True.
|
| 593 |
+
@staticmethod
|
| 594 |
+
def piecewise(*ranges):
|
| 595 |
+
init_range = None
|
| 596 |
+
for expr_range, cond_range in ranges:
|
| 597 |
+
if sympy.true in cond_range:
|
| 598 |
+
if init_range is None:
|
| 599 |
+
init_range = expr_range
|
| 600 |
+
else:
|
| 601 |
+
init_range = init_range | expr_range
|
| 602 |
+
return init_range
|
| 603 |
+
|
| 604 |
+
@staticmethod
|
| 605 |
+
def cos(x):
|
| 606 |
+
# TODO: We should tighten value ranges
|
| 607 |
+
# If input range span is pi + 2*pi*k, then output range is (-1, 1)
|
| 608 |
+
# otherwise the minimum of the value of the function on the extremes
|
| 609 |
+
return ValueRanges(-1.0, 1.0)
|
| 610 |
+
|
| 611 |
+
@staticmethod
|
| 612 |
+
def cosh(x):
|
| 613 |
+
x = ValueRanges.wrap(x)
|
| 614 |
+
if x.lower > 0:
|
| 615 |
+
return ValueRanges.increasing_map(x, sympy.cosh)
|
| 616 |
+
elif x.upper < 0:
|
| 617 |
+
return ValueRanges.decreasing_map(x, sympy.cosh)
|
| 618 |
+
return ValueRanges(0.0, sympy.oo)
|
| 619 |
+
|
| 620 |
+
@staticmethod
|
| 621 |
+
def sin(x):
|
| 622 |
+
# TODO: We should tighten value ranges
|
| 623 |
+
# See details on cos
|
| 624 |
+
return ValueRanges(-1.0, 1.0)
|
| 625 |
+
|
| 626 |
+
@staticmethod
|
| 627 |
+
def sinh(x):
|
| 628 |
+
return ValueRanges.increasing_map(x, sympy.sinh)
|
| 629 |
+
|
| 630 |
+
@staticmethod
|
| 631 |
+
def tan(x):
|
| 632 |
+
return ValueRanges(-sympy.oo, sympy.oo)
|
| 633 |
+
|
| 634 |
+
@staticmethod
|
| 635 |
+
def tanh(x):
|
| 636 |
+
return ValueRanges.increasing_map(x, sympy.tanh)
|
| 637 |
+
|
| 638 |
+
@staticmethod
|
| 639 |
+
def asin(x):
|
| 640 |
+
x = ValueRanges.wrap(x)
|
| 641 |
+
if -1 <= x.lower and x.upper <= 1:
|
| 642 |
+
return ValueRanges.increasing_map(x, sympy.asin)
|
| 643 |
+
return ValueRanges.unknown()
|
| 644 |
+
|
| 645 |
+
@staticmethod
|
| 646 |
+
def acos(x):
|
| 647 |
+
x = ValueRanges.wrap(x)
|
| 648 |
+
if -1 <= x.lower and x.upper <= 1:
|
| 649 |
+
return ValueRanges.decreasing_map(x, sympy.acos)
|
| 650 |
+
return ValueRanges.unknown()
|
| 651 |
+
|
| 652 |
+
@staticmethod
|
| 653 |
+
def atan(x):
|
| 654 |
+
return ValueRanges.increasing_map(x, sympy.atan)
|
| 655 |
+
|
| 656 |
+
|
| 657 |
+
class ValueRangeAnalysis(SymPyValueRangeAnalysis):
|
| 658 |
+
def __init__(self):
|
| 659 |
+
self.name = "ValueRangeAnalysis"
|
| 660 |
+
boolean_operators = (
|
| 661 |
+
"xor",
|
| 662 |
+
"logical_and",
|
| 663 |
+
"logical_or",
|
| 664 |
+
"logical_not",
|
| 665 |
+
)
|
| 666 |
+
for op in boolean_operators:
|
| 667 |
+
setattr(self, op, self.bool_handler)
|
| 668 |
+
|
| 669 |
+
@staticmethod
|
| 670 |
+
def bool_handler(*args, **kwargs):
|
| 671 |
+
# just assuming bools can have both values
|
| 672 |
+
return ValueRanges(sympy.false, sympy.true) # type: ignore[arg-type]
|
| 673 |
+
|
| 674 |
+
@staticmethod
|
| 675 |
+
def default_handler(*args, **kwargs):
|
| 676 |
+
# many ops are unlikely to show up in optimizable indexing compute,
|
| 677 |
+
# so we dont have full coverage
|
| 678 |
+
return ValueRanges.unknown()
|
| 679 |
+
|
| 680 |
+
def load(self, name: str, index: sympy.Expr):
|
| 681 |
+
return ValueRanges.unknown()
|
| 682 |
+
|
| 683 |
+
def store(self, name, index, value, mode=None):
|
| 684 |
+
return
|
| 685 |
+
|
| 686 |
+
def reduction(self, name, dtype, src_dtype, reduction_type, index, value):
|
| 687 |
+
return ValueRanges.unknown()
|
| 688 |
+
|
| 689 |
+
def index_expr(self, index, dtype):
|
| 690 |
+
assert isinstance(index, ValueRanges)
|
| 691 |
+
return index
|
| 692 |
+
|
| 693 |
+
@staticmethod
|
| 694 |
+
def to_dtype(x, dtype: torch.dtype, src_dtype: Optional[torch.dtype] = None):
|
| 695 |
+
x = ValueRanges.wrap(x)
|
| 696 |
+
|
| 697 |
+
if dtype == torch.bool:
|
| 698 |
+
if x.is_singleton():
|
| 699 |
+
return ValueRanges.wrap(x.lower != 0)
|
| 700 |
+
elif 0 not in x:
|
| 701 |
+
return ValueRanges.wrap(sympy.true)
|
| 702 |
+
else:
|
| 703 |
+
return ValueRanges(sympy.false, sympy.true)
|
| 704 |
+
|
| 705 |
+
def cast(x, dtype):
|
| 706 |
+
# dtype is int or float
|
| 707 |
+
if dtype.is_floating_point:
|
| 708 |
+
return sympy.Float(x)
|
| 709 |
+
else:
|
| 710 |
+
try:
|
| 711 |
+
return sympy.Integer(x)
|
| 712 |
+
except TypeError:
|
| 713 |
+
# inf cannot be cast to Integer
|
| 714 |
+
return x
|
| 715 |
+
|
| 716 |
+
if x.is_bool:
|
| 717 |
+
if x.is_singleton():
|
| 718 |
+
val = 1 if x.lower else 0
|
| 719 |
+
return ValueRanges.wrap(cast(val, dtype))
|
| 720 |
+
else:
|
| 721 |
+
return ValueRanges(cast(0, dtype), cast(1, dtype))
|
| 722 |
+
else:
|
| 723 |
+
# int to float or float to int
|
| 724 |
+
return ValueRanges(cast(x.lower, dtype), cast(x.upper, dtype))
|
| 725 |
+
|
| 726 |
+
@staticmethod
|
| 727 |
+
def square(x):
|
| 728 |
+
return ValueRanges.convex_min_zero_map(x, lambda y: y * y)
|
| 729 |
+
|
| 730 |
+
@staticmethod
|
| 731 |
+
def neg(x):
|
| 732 |
+
return ValueRanges.decreasing_map(x, operator.neg)
|
| 733 |
+
|
| 734 |
+
@classmethod
|
| 735 |
+
def truncdiv(cls, a, b):
|
| 736 |
+
x = cls.truediv(a, b)
|
| 737 |
+
if x == ValueRanges.unknown():
|
| 738 |
+
return x
|
| 739 |
+
|
| 740 |
+
def trunc(x):
|
| 741 |
+
return sympy.Integer(x) if x.is_finite else x
|
| 742 |
+
|
| 743 |
+
return ValueRanges.increasing_map(x, trunc)
|
| 744 |
+
|
| 745 |
+
@classmethod
|
| 746 |
+
def sub(cls, a, b):
|
| 747 |
+
return cls.add(a, cls.neg(b))
|
| 748 |
+
|
| 749 |
+
def __getattr__(self, name):
|
| 750 |
+
log.debug("unhandled ValueRange op %s", name)
|
| 751 |
+
return self.default_handler
|
| 752 |
+
|
| 753 |
+
|
| 754 |
+
def bound_sympy(expr: sympy.Expr, ranges: Optional[Dict[sympy.Symbol, ValueRanges]] = None) -> ValueRanges:
|
| 755 |
+
if isinstance(expr, sympy.Number):
|
| 756 |
+
return ValueRanges.wrap(expr)
|
| 757 |
+
|
| 758 |
+
ranges = ranges or {}
|
| 759 |
+
|
| 760 |
+
# If there's a tracing context, augment available constrained ranges.
|
| 761 |
+
context = torch._guards.TracingContext.try_get()
|
| 762 |
+
if context and context.fake_mode.shape_env:
|
| 763 |
+
ranges = {**context.fake_mode.shape_env.var_to_range, **ranges}
|
| 764 |
+
|
| 765 |
+
unbounded_vars = expr.free_symbols - ranges.keys()
|
| 766 |
+
if unbounded_vars:
|
| 767 |
+
# Give some bounds to the free variables via their SymPy assumptions
|
| 768 |
+
# TODO A better way of doing this would be to assign them a range upon creation, as
|
| 769 |
+
# size variables can come with a lower bound of 2, as we specialise on 0 and 1
|
| 770 |
+
unbounded_ranges: Dict[sympy.Symbol, ValueRanges] = {}
|
| 771 |
+
for s in unbounded_vars:
|
| 772 |
+
assert s.is_integer # type: ignore[attr-defined]
|
| 773 |
+
if s.is_positive: # type: ignore[attr-defined]
|
| 774 |
+
lower = 1
|
| 775 |
+
elif s.is_nonnegative: # type: ignore[attr-defined]
|
| 776 |
+
lower = 0
|
| 777 |
+
else:
|
| 778 |
+
lower = -math.inf # type: ignore[assignment]
|
| 779 |
+
unbounded_ranges[s] = ValueRanges(lower, math.inf) # type: ignore[index]
|
| 780 |
+
ranges = {**ranges, **unbounded_ranges}
|
| 781 |
+
|
| 782 |
+
return sympy_interp(SymPyValueRangeAnalysis, ranges, expr)
|
moondream/lib/python3.10/site-packages/torch/utils/data/__init__.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# TODO(VitalyFedyunin): Rearranging this imports leads to crash,
|
| 2 |
+
# need to cleanup dependencies and fix it
|
| 3 |
+
from torch.utils.data.sampler import (
|
| 4 |
+
BatchSampler,
|
| 5 |
+
RandomSampler,
|
| 6 |
+
Sampler,
|
| 7 |
+
SequentialSampler,
|
| 8 |
+
SubsetRandomSampler,
|
| 9 |
+
WeightedRandomSampler,
|
| 10 |
+
)
|
| 11 |
+
from torch.utils.data.dataset import (
|
| 12 |
+
ChainDataset,
|
| 13 |
+
ConcatDataset,
|
| 14 |
+
Dataset,
|
| 15 |
+
IterableDataset,
|
| 16 |
+
StackDataset,
|
| 17 |
+
Subset,
|
| 18 |
+
TensorDataset,
|
| 19 |
+
random_split,
|
| 20 |
+
)
|
| 21 |
+
from torch.utils.data.datapipes.datapipe import (
|
| 22 |
+
DFIterDataPipe,
|
| 23 |
+
DataChunk,
|
| 24 |
+
IterDataPipe,
|
| 25 |
+
MapDataPipe,
|
| 26 |
+
)
|
| 27 |
+
from torch.utils.data.dataloader import (
|
| 28 |
+
DataLoader,
|
| 29 |
+
_DatasetKind,
|
| 30 |
+
get_worker_info,
|
| 31 |
+
default_collate,
|
| 32 |
+
default_convert,
|
| 33 |
+
)
|
| 34 |
+
from torch.utils.data.distributed import DistributedSampler
|
| 35 |
+
from torch.utils.data.datapipes._decorator import (
|
| 36 |
+
argument_validation,
|
| 37 |
+
functional_datapipe,
|
| 38 |
+
guaranteed_datapipes_determinism,
|
| 39 |
+
non_deterministic,
|
| 40 |
+
runtime_validation,
|
| 41 |
+
runtime_validation_disabled,
|
| 42 |
+
)
|
| 43 |
+
|
| 44 |
+
__all__ = ['BatchSampler',
|
| 45 |
+
'ChainDataset',
|
| 46 |
+
'ConcatDataset',
|
| 47 |
+
'DFIterDataPipe',
|
| 48 |
+
'DataChunk',
|
| 49 |
+
'DataLoader',
|
| 50 |
+
'Dataset',
|
| 51 |
+
'DistributedSampler',
|
| 52 |
+
'IterDataPipe',
|
| 53 |
+
'IterableDataset',
|
| 54 |
+
'MapDataPipe',
|
| 55 |
+
'RandomSampler',
|
| 56 |
+
'Sampler',
|
| 57 |
+
'SequentialSampler',
|
| 58 |
+
'StackDataset',
|
| 59 |
+
'Subset',
|
| 60 |
+
'SubsetRandomSampler',
|
| 61 |
+
'TensorDataset',
|
| 62 |
+
'WeightedRandomSampler',
|
| 63 |
+
'_DatasetKind',
|
| 64 |
+
'argument_validation',
|
| 65 |
+
'default_collate',
|
| 66 |
+
'default_convert',
|
| 67 |
+
'functional_datapipe',
|
| 68 |
+
'get_worker_info',
|
| 69 |
+
'guaranteed_datapipes_determinism',
|
| 70 |
+
'non_deterministic',
|
| 71 |
+
'random_split',
|
| 72 |
+
'runtime_validation',
|
| 73 |
+
'runtime_validation_disabled']
|
| 74 |
+
|
| 75 |
+
# Please keep this list sorted
|
| 76 |
+
assert __all__ == sorted(__all__)
|
moondream/lib/python3.10/site-packages/torch/utils/data/__pycache__/backward_compatibility.cpython-310.pyc
ADDED
|
Binary file (463 Bytes). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/utils/data/__pycache__/dataloader.cpython-310.pyc
ADDED
|
Binary file (28 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.1 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/collate.cpython-310.pyc
ADDED
|
Binary file (13 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/pin_memory.cpython-310.pyc
ADDED
|
Binary file (3.06 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/signal_handling.cpython-310.pyc
ADDED
|
Binary file (2.64 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/worker.cpython-310.pyc
ADDED
|
Binary file (7.72 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/utils/data/_utils/signal_handling.py
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
r"""Signal handling for multiprocessing data loading.
|
| 2 |
+
|
| 3 |
+
NOTE [ Signal handling in multiprocessing data loading ]
|
| 4 |
+
|
| 5 |
+
In cases like DataLoader, if a worker process dies due to bus error/segfault
|
| 6 |
+
or just hang, the main process will hang waiting for data. This is difficult
|
| 7 |
+
to avoid on PyTorch side as it can be caused by limited shm, or other
|
| 8 |
+
libraries users call in the workers. In this file and `DataLoader.cpp`, we make
|
| 9 |
+
our best effort to provide some error message to users when such unfortunate
|
| 10 |
+
events happen.
|
| 11 |
+
|
| 12 |
+
When a _BaseDataLoaderIter starts worker processes, their pids are registered in a
|
| 13 |
+
defined in `DataLoader.cpp`: id(_BaseDataLoaderIter) => Collection[ Worker pids ]
|
| 14 |
+
via `_set_worker_pids`.
|
| 15 |
+
|
| 16 |
+
When an error happens in a worker process, the main process received a SIGCHLD,
|
| 17 |
+
and Python will eventually call the handler registered below
|
| 18 |
+
(in `_set_SIGCHLD_handler`). In the handler, the `_error_if_any_worker_fails`
|
| 19 |
+
call checks all registered worker pids and raise proper error message to
|
| 20 |
+
prevent main process from hanging waiting for data from worker.
|
| 21 |
+
|
| 22 |
+
Additionally, at the beginning of each worker's `_utils.worker._worker_loop`,
|
| 23 |
+
`_set_worker_signal_handlers` is called to register critical signal handlers
|
| 24 |
+
(e.g., for SIGSEGV, SIGBUS, SIGFPE, SIGTERM) in C, which just prints an error
|
| 25 |
+
message to stderr before triggering the default handler. So a message will also
|
| 26 |
+
be printed from the worker process when it is killed by such signals.
|
| 27 |
+
|
| 28 |
+
See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for the reasoning of
|
| 29 |
+
this signal handling design and other mechanism we implement to make our
|
| 30 |
+
multiprocessing data loading robust to errors.
|
| 31 |
+
"""
|
| 32 |
+
|
| 33 |
+
import signal
|
| 34 |
+
import threading
|
| 35 |
+
from . import IS_WINDOWS
|
| 36 |
+
|
| 37 |
+
# Some of the following imported functions are not used in this file, but are to
|
| 38 |
+
# be used `_utils.signal_handling.XXXXX`.
|
| 39 |
+
from torch._C import _set_worker_pids, _remove_worker_pids # noqa: F401
|
| 40 |
+
from torch._C import _error_if_any_worker_fails, _set_worker_signal_handlers # noqa: F401
|
| 41 |
+
|
| 42 |
+
_SIGCHLD_handler_set = False
|
| 43 |
+
r"""Whether SIGCHLD handler is set for DataLoader worker failures. Only one
|
| 44 |
+
handler needs to be set for all DataLoaders in a process."""
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def _set_SIGCHLD_handler():
|
| 48 |
+
# Windows doesn't support SIGCHLD handler
|
| 49 |
+
if IS_WINDOWS:
|
| 50 |
+
return
|
| 51 |
+
# can't set signal in child threads
|
| 52 |
+
if not isinstance(threading.current_thread(), threading._MainThread): # type: ignore[attr-defined]
|
| 53 |
+
return
|
| 54 |
+
global _SIGCHLD_handler_set
|
| 55 |
+
if _SIGCHLD_handler_set:
|
| 56 |
+
return
|
| 57 |
+
previous_handler = signal.getsignal(signal.SIGCHLD)
|
| 58 |
+
if not callable(previous_handler):
|
| 59 |
+
# This doesn't catch default handler, but SIGCHLD default handler is a
|
| 60 |
+
# no-op.
|
| 61 |
+
previous_handler = None
|
| 62 |
+
|
| 63 |
+
def handler(signum, frame):
|
| 64 |
+
# This following call uses `waitid` with WNOHANG from C side. Therefore,
|
| 65 |
+
# Python can still get and update the process status successfully.
|
| 66 |
+
_error_if_any_worker_fails()
|
| 67 |
+
if previous_handler is not None:
|
| 68 |
+
assert callable(previous_handler)
|
| 69 |
+
previous_handler(signum, frame)
|
| 70 |
+
|
| 71 |
+
signal.signal(signal.SIGCHLD, handler)
|
| 72 |
+
_SIGCHLD_handler_set = True
|
moondream/lib/python3.10/site-packages/torch/utils/data/_utils/worker.py
ADDED
|
@@ -0,0 +1,329 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
r""""Contains definitions of the methods used by the _BaseDataLoaderIter workers.
|
| 2 |
+
|
| 3 |
+
These **needs** to be in global scope since Py2 doesn't support serializing
|
| 4 |
+
static methods.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import random
|
| 9 |
+
import os
|
| 10 |
+
import queue
|
| 11 |
+
from dataclasses import dataclass
|
| 12 |
+
from torch._utils import ExceptionWrapper
|
| 13 |
+
from typing import Optional, Union, TYPE_CHECKING
|
| 14 |
+
from . import signal_handling, MP_STATUS_CHECK_INTERVAL, IS_WINDOWS, HAS_NUMPY
|
| 15 |
+
if TYPE_CHECKING:
|
| 16 |
+
from torch.utils.data import Dataset
|
| 17 |
+
|
| 18 |
+
if IS_WINDOWS:
|
| 19 |
+
import ctypes
|
| 20 |
+
from ctypes.wintypes import DWORD, BOOL, HANDLE
|
| 21 |
+
|
| 22 |
+
# On Windows, the parent ID of the worker process remains unchanged when the manager process
|
| 23 |
+
# is gone, and the only way to check it through OS is to let the worker have a process handle
|
| 24 |
+
# of the manager and ask if the process status has changed.
|
| 25 |
+
class ManagerWatchdog:
|
| 26 |
+
def __init__(self):
|
| 27 |
+
self.manager_pid = os.getppid()
|
| 28 |
+
|
| 29 |
+
# mypy cannot detect this code is windows only
|
| 30 |
+
self.kernel32 = ctypes.WinDLL('kernel32', use_last_error=True) # type: ignore[attr-defined]
|
| 31 |
+
self.kernel32.OpenProcess.argtypes = (DWORD, BOOL, DWORD)
|
| 32 |
+
self.kernel32.OpenProcess.restype = HANDLE
|
| 33 |
+
self.kernel32.WaitForSingleObject.argtypes = (HANDLE, DWORD)
|
| 34 |
+
self.kernel32.WaitForSingleObject.restype = DWORD
|
| 35 |
+
|
| 36 |
+
# Value obtained from https://msdn.microsoft.com/en-us/library/ms684880.aspx
|
| 37 |
+
SYNCHRONIZE = 0x00100000
|
| 38 |
+
self.manager_handle = self.kernel32.OpenProcess(SYNCHRONIZE, 0, self.manager_pid)
|
| 39 |
+
|
| 40 |
+
if not self.manager_handle:
|
| 41 |
+
raise ctypes.WinError(ctypes.get_last_error()) # type: ignore[attr-defined]
|
| 42 |
+
|
| 43 |
+
self.manager_dead = False
|
| 44 |
+
|
| 45 |
+
def is_alive(self):
|
| 46 |
+
if not self.manager_dead:
|
| 47 |
+
# Value obtained from https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032.aspx
|
| 48 |
+
self.manager_dead = self.kernel32.WaitForSingleObject(self.manager_handle, 0) == 0
|
| 49 |
+
return not self.manager_dead
|
| 50 |
+
else:
|
| 51 |
+
class ManagerWatchdog: # type: ignore[no-redef]
|
| 52 |
+
def __init__(self):
|
| 53 |
+
self.manager_pid = os.getppid()
|
| 54 |
+
self.manager_dead = False
|
| 55 |
+
|
| 56 |
+
def is_alive(self):
|
| 57 |
+
if not self.manager_dead:
|
| 58 |
+
self.manager_dead = os.getppid() != self.manager_pid
|
| 59 |
+
return not self.manager_dead
|
| 60 |
+
|
| 61 |
+
_worker_info: Optional["WorkerInfo"] = None
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
class WorkerInfo:
|
| 65 |
+
id: int
|
| 66 |
+
num_workers: int
|
| 67 |
+
seed: int
|
| 68 |
+
dataset: 'Dataset'
|
| 69 |
+
__initialized = False
|
| 70 |
+
|
| 71 |
+
def __init__(self, **kwargs):
|
| 72 |
+
for k, v in kwargs.items():
|
| 73 |
+
setattr(self, k, v)
|
| 74 |
+
self.__keys = tuple(kwargs.keys())
|
| 75 |
+
self.__initialized = True
|
| 76 |
+
|
| 77 |
+
def __setattr__(self, key, val):
|
| 78 |
+
if self.__initialized:
|
| 79 |
+
raise RuntimeError(f"Cannot assign attributes to {self.__class__.__name__} objects")
|
| 80 |
+
return super().__setattr__(key, val)
|
| 81 |
+
|
| 82 |
+
def __repr__(self):
|
| 83 |
+
items = []
|
| 84 |
+
for k in self.__keys:
|
| 85 |
+
items.append(f'{k}={getattr(self, k)}')
|
| 86 |
+
return f"{self.__class__.__name__}({', '.join(items)})"
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def get_worker_info() -> Optional[WorkerInfo]:
|
| 90 |
+
r"""Returns the information about the current
|
| 91 |
+
:class:`~torch.utils.data.DataLoader` iterator worker process.
|
| 92 |
+
|
| 93 |
+
When called in a worker, this returns an object guaranteed to have the
|
| 94 |
+
following attributes:
|
| 95 |
+
|
| 96 |
+
* :attr:`id`: the current worker id.
|
| 97 |
+
* :attr:`num_workers`: the total number of workers.
|
| 98 |
+
* :attr:`seed`: the random seed set for the current worker. This value is
|
| 99 |
+
determined by main process RNG and the worker id. See
|
| 100 |
+
:class:`~torch.utils.data.DataLoader`'s documentation for more details.
|
| 101 |
+
* :attr:`dataset`: the copy of the dataset object in **this** process. Note
|
| 102 |
+
that this will be a different object in a different process than the one
|
| 103 |
+
in the main process.
|
| 104 |
+
|
| 105 |
+
When called in the main process, this returns ``None``.
|
| 106 |
+
|
| 107 |
+
.. note::
|
| 108 |
+
When used in a :attr:`worker_init_fn` passed over to
|
| 109 |
+
:class:`~torch.utils.data.DataLoader`, this method can be useful to
|
| 110 |
+
set up each worker process differently, for instance, using ``worker_id``
|
| 111 |
+
to configure the ``dataset`` object to only read a specific fraction of a
|
| 112 |
+
sharded dataset, or use ``seed`` to seed other libraries used in dataset
|
| 113 |
+
code.
|
| 114 |
+
"""
|
| 115 |
+
return _worker_info
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
r"""Dummy class used to signal the end of an IterableDataset"""
|
| 119 |
+
@dataclass(frozen=True)
|
| 120 |
+
class _IterableDatasetStopIteration:
|
| 121 |
+
worker_id: int
|
| 122 |
+
|
| 123 |
+
r"""Dummy class used to resume the fetching when worker reuse is enabled"""
|
| 124 |
+
@dataclass(frozen=True)
|
| 125 |
+
class _ResumeIteration:
|
| 126 |
+
seed: Optional[int] = None
|
| 127 |
+
|
| 128 |
+
# The function `_generate_state` is adapted from `numpy.random.SeedSequence`
|
| 129 |
+
# from https://github.com/numpy/numpy/blob/main/numpy/random/bit_generator.pyx
|
| 130 |
+
# It's MIT licensed, here is the copyright:
|
| 131 |
+
|
| 132 |
+
# Copyright (c) 2015 Melissa E. O'Neill
|
| 133 |
+
# Copyright (c) 2019 NumPy Developers
|
| 134 |
+
#
|
| 135 |
+
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 136 |
+
# of this software and associated documentation files (the "Software"), to deal
|
| 137 |
+
# in the Software without restriction, including without limitation the rights
|
| 138 |
+
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 139 |
+
# copies of the Software, and to permit persons to whom the Software is
|
| 140 |
+
# furnished to do so, subject to the following conditions:
|
| 141 |
+
#
|
| 142 |
+
# The above copyright notice and this permission notice shall be included in
|
| 143 |
+
# all copies or substantial portions of the Software.
|
| 144 |
+
#
|
| 145 |
+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 146 |
+
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 147 |
+
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 148 |
+
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 149 |
+
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 150 |
+
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 151 |
+
# SOFTWARE.
|
| 152 |
+
|
| 153 |
+
# This function generates an array of int32 as the seed for
|
| 154 |
+
# `numpy.random`, in order to prevent state collision due to same
|
| 155 |
+
# seed and algorithm for `numpy.random` and `random` modules.
|
| 156 |
+
# TODO: Implement `SeedSequence` like object for `torch.random`
|
| 157 |
+
def _generate_state(base_seed, worker_id):
|
| 158 |
+
INIT_A = 0x43b0d7e5
|
| 159 |
+
MULT_A = 0x931e8875
|
| 160 |
+
INIT_B = 0x8b51f9dd
|
| 161 |
+
MULT_B = 0x58f38ded
|
| 162 |
+
MIX_MULT_L = 0xca01f9dd
|
| 163 |
+
MIX_MULT_R = 0x4973f715
|
| 164 |
+
XSHIFT = 4 * 8 // 2
|
| 165 |
+
MASK32 = 0xFFFFFFFF
|
| 166 |
+
|
| 167 |
+
entropy = [worker_id, base_seed & MASK32, base_seed >> 32, 0]
|
| 168 |
+
pool = [0] * 4
|
| 169 |
+
|
| 170 |
+
hash_const_A = INIT_A
|
| 171 |
+
|
| 172 |
+
def hash(value):
|
| 173 |
+
nonlocal hash_const_A
|
| 174 |
+
value = (value ^ hash_const_A) & MASK32
|
| 175 |
+
hash_const_A = (hash_const_A * MULT_A) & MASK32
|
| 176 |
+
value = (value * hash_const_A) & MASK32
|
| 177 |
+
value = (value ^ (value >> XSHIFT)) & MASK32
|
| 178 |
+
return value
|
| 179 |
+
|
| 180 |
+
def mix(x, y):
|
| 181 |
+
result_x = (MIX_MULT_L * x) & MASK32
|
| 182 |
+
result_y = (MIX_MULT_R * y) & MASK32
|
| 183 |
+
result = (result_x - result_y) & MASK32
|
| 184 |
+
result = (result ^ (result >> XSHIFT)) & MASK32
|
| 185 |
+
return result
|
| 186 |
+
|
| 187 |
+
# Add in the entropy to the pool.
|
| 188 |
+
for i in range(len(pool)):
|
| 189 |
+
pool[i] = hash(entropy[i])
|
| 190 |
+
|
| 191 |
+
# Mix all bits together so late bits can affect earlier bits.
|
| 192 |
+
for i_src in range(len(pool)):
|
| 193 |
+
for i_dst in range(len(pool)):
|
| 194 |
+
if i_src != i_dst:
|
| 195 |
+
pool[i_dst] = mix(pool[i_dst], hash(pool[i_src]))
|
| 196 |
+
|
| 197 |
+
hash_const_B = INIT_B
|
| 198 |
+
state = []
|
| 199 |
+
for i_dst in range(4):
|
| 200 |
+
data_val = pool[i_dst]
|
| 201 |
+
data_val = (data_val ^ hash_const_B) & MASK32
|
| 202 |
+
hash_const_B = (hash_const_B * MULT_B) & MASK32
|
| 203 |
+
data_val = (data_val * hash_const_B) & MASK32
|
| 204 |
+
data_val = (data_val ^ (data_val >> XSHIFT)) & MASK32
|
| 205 |
+
state.append(data_val)
|
| 206 |
+
return state
|
| 207 |
+
|
| 208 |
+
def _worker_loop(dataset_kind, dataset, index_queue, data_queue, done_event,
|
| 209 |
+
auto_collation, collate_fn, drop_last, base_seed, init_fn, worker_id,
|
| 210 |
+
num_workers, persistent_workers, shared_seed):
|
| 211 |
+
# See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the
|
| 212 |
+
# logic of this function.
|
| 213 |
+
|
| 214 |
+
try:
|
| 215 |
+
# Initialize C side signal handlers for SIGBUS and SIGSEGV. Python signal
|
| 216 |
+
# module's handlers are executed after Python returns from C low-level
|
| 217 |
+
# handlers, likely when the same fatal signal had already happened
|
| 218 |
+
# again.
|
| 219 |
+
# https://docs.python.org/3/library/signal.html#execution-of-python-signal-handlers
|
| 220 |
+
signal_handling._set_worker_signal_handlers()
|
| 221 |
+
|
| 222 |
+
torch.set_num_threads(1)
|
| 223 |
+
seed = base_seed + worker_id
|
| 224 |
+
random.seed(seed)
|
| 225 |
+
torch.manual_seed(seed)
|
| 226 |
+
if HAS_NUMPY:
|
| 227 |
+
np_seed = _generate_state(base_seed, worker_id)
|
| 228 |
+
import numpy as np
|
| 229 |
+
np.random.seed(np_seed)
|
| 230 |
+
|
| 231 |
+
from torch.utils.data import IterDataPipe
|
| 232 |
+
from torch.utils.data.graph_settings import apply_random_seed
|
| 233 |
+
|
| 234 |
+
shared_rng = torch.Generator()
|
| 235 |
+
if isinstance(dataset, IterDataPipe):
|
| 236 |
+
assert shared_seed is not None
|
| 237 |
+
shared_rng.manual_seed(shared_seed)
|
| 238 |
+
dataset = apply_random_seed(dataset, shared_rng)
|
| 239 |
+
|
| 240 |
+
global _worker_info
|
| 241 |
+
_worker_info = WorkerInfo(id=worker_id, num_workers=num_workers,
|
| 242 |
+
seed=seed, dataset=dataset)
|
| 243 |
+
|
| 244 |
+
from torch.utils.data import _DatasetKind
|
| 245 |
+
|
| 246 |
+
init_exception = None
|
| 247 |
+
|
| 248 |
+
try:
|
| 249 |
+
if init_fn is not None:
|
| 250 |
+
init_fn(worker_id)
|
| 251 |
+
|
| 252 |
+
fetcher = _DatasetKind.create_fetcher(dataset_kind, dataset, auto_collation, collate_fn, drop_last)
|
| 253 |
+
except Exception:
|
| 254 |
+
init_exception = ExceptionWrapper(
|
| 255 |
+
where=f"in DataLoader worker process {worker_id}")
|
| 256 |
+
|
| 257 |
+
# When using Iterable mode, some worker can exit earlier than others due
|
| 258 |
+
# to the IterableDataset behaving differently for different workers.
|
| 259 |
+
# When such things happen, an `_IterableDatasetStopIteration` object is
|
| 260 |
+
# sent over to the main process with the ID of this worker, so that the
|
| 261 |
+
# main process won't send more tasks to this worker, and will send
|
| 262 |
+
# `None` to this worker to properly exit it.
|
| 263 |
+
#
|
| 264 |
+
# Note that we cannot set `done_event` from a worker as it is shared
|
| 265 |
+
# among all processes. Instead, we set the `iteration_end` flag to
|
| 266 |
+
# signify that the iterator is exhausted. When either `done_event` or
|
| 267 |
+
# `iteration_end` is set, we skip all processing step and just wait for
|
| 268 |
+
# `None`.
|
| 269 |
+
iteration_end = False
|
| 270 |
+
|
| 271 |
+
watchdog = ManagerWatchdog()
|
| 272 |
+
|
| 273 |
+
while watchdog.is_alive():
|
| 274 |
+
try:
|
| 275 |
+
r = index_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
|
| 276 |
+
except queue.Empty:
|
| 277 |
+
continue
|
| 278 |
+
if isinstance(r, _ResumeIteration):
|
| 279 |
+
# Acknowledge the main process
|
| 280 |
+
data_queue.put((r, None))
|
| 281 |
+
iteration_end = False
|
| 282 |
+
|
| 283 |
+
if isinstance(dataset, IterDataPipe):
|
| 284 |
+
assert r.seed is not None
|
| 285 |
+
shared_rng.manual_seed(r.seed)
|
| 286 |
+
dataset = apply_random_seed(dataset, shared_rng)
|
| 287 |
+
|
| 288 |
+
# Recreate the fetcher for worker-reuse policy
|
| 289 |
+
fetcher = _DatasetKind.create_fetcher(
|
| 290 |
+
dataset_kind, dataset, auto_collation, collate_fn, drop_last)
|
| 291 |
+
continue
|
| 292 |
+
elif r is None:
|
| 293 |
+
# Received the final signal
|
| 294 |
+
assert done_event.is_set() or iteration_end
|
| 295 |
+
break
|
| 296 |
+
elif done_event.is_set() or iteration_end:
|
| 297 |
+
# `done_event` is set. But I haven't received the final signal
|
| 298 |
+
# (None) yet. I will keep continuing until get it, and skip the
|
| 299 |
+
# processing steps.
|
| 300 |
+
continue
|
| 301 |
+
idx, index = r
|
| 302 |
+
data: Union[_IterableDatasetStopIteration, ExceptionWrapper]
|
| 303 |
+
if init_exception is not None:
|
| 304 |
+
data = init_exception
|
| 305 |
+
init_exception = None
|
| 306 |
+
else:
|
| 307 |
+
try:
|
| 308 |
+
data = fetcher.fetch(index) # type: ignore[possibly-undefined]
|
| 309 |
+
except Exception as e:
|
| 310 |
+
if isinstance(e, StopIteration) and dataset_kind == _DatasetKind.Iterable:
|
| 311 |
+
data = _IterableDatasetStopIteration(worker_id)
|
| 312 |
+
# Set `iteration_end`
|
| 313 |
+
# (1) to save future `next(...)` calls, and
|
| 314 |
+
# (2) to avoid sending multiple `_IterableDatasetStopIteration`s.
|
| 315 |
+
iteration_end = True
|
| 316 |
+
else:
|
| 317 |
+
# It is important that we don't store exc_info in a variable.
|
| 318 |
+
# `ExceptionWrapper` does the correct thing.
|
| 319 |
+
# See NOTE [ Python Traceback Reference Cycle Problem ]
|
| 320 |
+
data = ExceptionWrapper(
|
| 321 |
+
where=f"in DataLoader worker process {worker_id}")
|
| 322 |
+
data_queue.put((idx, data))
|
| 323 |
+
del data, idx, index, r # save memory
|
| 324 |
+
except KeyboardInterrupt:
|
| 325 |
+
# Main process will raise KeyboardInterrupt anyways.
|
| 326 |
+
pass
|
| 327 |
+
if done_event.is_set():
|
| 328 |
+
data_queue.cancel_join_thread()
|
| 329 |
+
data_queue.close()
|
moondream/lib/python3.10/site-packages/torch/utils/data/backward_compatibility.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
|
| 3 |
+
def worker_init_fn(worker_id):
|
| 4 |
+
warnings.warn("Usage of backward_compatibility.worker_init_fn is deprecated"
|
| 5 |
+
" as DataLoader automatically applies sharding in every worker")
|
moondream/lib/python3.10/site-packages/torch/utils/data/dataloader.py
ADDED
|
@@ -0,0 +1,1479 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
r"""Definition of the DataLoader and associated iterators that subclass _BaseDataLoaderIter.
|
| 2 |
+
|
| 3 |
+
To support these two classes, in `./_utils` we define many utility methods and
|
| 4 |
+
functions to be run in multiprocessing. E.g., the data loading worker loop is
|
| 5 |
+
in `./_utils/worker.py`.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import functools
|
| 9 |
+
import itertools
|
| 10 |
+
import logging
|
| 11 |
+
import os
|
| 12 |
+
import queue
|
| 13 |
+
import threading
|
| 14 |
+
import warnings
|
| 15 |
+
|
| 16 |
+
from typing import Any, Callable, Iterable, TypeVar, Generic, List, Optional, Union
|
| 17 |
+
|
| 18 |
+
import multiprocessing as python_multiprocessing
|
| 19 |
+
import torch
|
| 20 |
+
import torch.distributed as dist
|
| 21 |
+
import torch.multiprocessing as multiprocessing
|
| 22 |
+
import torch.utils.data.graph_settings
|
| 23 |
+
|
| 24 |
+
from torch._utils import ExceptionWrapper
|
| 25 |
+
|
| 26 |
+
from . import (
|
| 27 |
+
IterDataPipe,
|
| 28 |
+
MapDataPipe,
|
| 29 |
+
IterableDataset,
|
| 30 |
+
Sampler,
|
| 31 |
+
SequentialSampler,
|
| 32 |
+
RandomSampler,
|
| 33 |
+
BatchSampler,
|
| 34 |
+
Dataset,)
|
| 35 |
+
|
| 36 |
+
from torch.utils.data.datapipes.datapipe import _IterDataPipeSerializationWrapper, _MapDataPipeSerializationWrapper
|
| 37 |
+
|
| 38 |
+
from . import _utils
|
| 39 |
+
|
| 40 |
+
__all__ = [
|
| 41 |
+
"DataLoader",
|
| 42 |
+
"get_worker_info",
|
| 43 |
+
"default_collate",
|
| 44 |
+
"default_convert",
|
| 45 |
+
]
|
| 46 |
+
|
| 47 |
+
T_co = TypeVar('T_co', covariant=True)
|
| 48 |
+
T = TypeVar('T')
|
| 49 |
+
_worker_init_fn_t = Callable[[int], None]
|
| 50 |
+
|
| 51 |
+
# Ideally we would parameterize `DataLoader` by the return type of `collate_fn`, but there is currently no way to have that
|
| 52 |
+
# type parameter set to a default value if the user doesn't pass in a custom 'collate_fn'.
|
| 53 |
+
# See https://github.com/python/mypy/issues/3737.
|
| 54 |
+
_collate_fn_t = Callable[[List[T]], Any]
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
# These functions used to be defined in this file. However, it was moved to
|
| 58 |
+
# _utils/collate.py. Although it is rather hard to access this from user land
|
| 59 |
+
# (one has to explicitly directly `import torch.utils.data.dataloader`), there
|
| 60 |
+
# probably is user code out there using it. This aliasing maintains BC in this
|
| 61 |
+
# aspect.
|
| 62 |
+
default_collate: _collate_fn_t = _utils.collate.default_collate
|
| 63 |
+
default_convert = _utils.collate.default_convert
|
| 64 |
+
|
| 65 |
+
get_worker_info = _utils.worker.get_worker_info
|
| 66 |
+
|
| 67 |
+
logger = logging.getLogger(__name__)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
class _DatasetKind:
|
| 71 |
+
Map = 0
|
| 72 |
+
Iterable = 1
|
| 73 |
+
|
| 74 |
+
@staticmethod
|
| 75 |
+
def create_fetcher(kind, dataset, auto_collation, collate_fn, drop_last):
|
| 76 |
+
if kind == _DatasetKind.Map:
|
| 77 |
+
return _utils.fetch._MapDatasetFetcher(dataset, auto_collation, collate_fn, drop_last)
|
| 78 |
+
else:
|
| 79 |
+
return _utils.fetch._IterableDatasetFetcher(dataset, auto_collation, collate_fn, drop_last)
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
class _InfiniteConstantSampler(Sampler):
|
| 83 |
+
r"""Analogous to ``itertools.repeat(None, None)``.
|
| 84 |
+
|
| 85 |
+
Used as sampler for :class:`~torch.utils.data.IterableDataset`.
|
| 86 |
+
"""
|
| 87 |
+
|
| 88 |
+
def __iter__(self):
|
| 89 |
+
while True:
|
| 90 |
+
yield None
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def _get_distributed_settings():
|
| 94 |
+
if dist.is_available() and dist.is_initialized():
|
| 95 |
+
return dist.get_world_size(), dist.get_rank()
|
| 96 |
+
else:
|
| 97 |
+
return 1, 0
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def _sharding_worker_init_fn(worker_init_fn, world_size, rank_id, worker_id):
|
| 101 |
+
global_worker_id = worker_id
|
| 102 |
+
info = torch.utils.data.get_worker_info()
|
| 103 |
+
assert info is not None
|
| 104 |
+
total_workers = info.num_workers
|
| 105 |
+
datapipe = info.dataset
|
| 106 |
+
assert isinstance(datapipe, (IterDataPipe, MapDataPipe))
|
| 107 |
+
# To distribute elements across distributed process evenly, we should shard data on distributed
|
| 108 |
+
# processes first then shard on worker processes
|
| 109 |
+
total_workers *= world_size
|
| 110 |
+
global_worker_id = global_worker_id * world_size + rank_id
|
| 111 |
+
# For BC, use default SHARDING_PRIORITIES
|
| 112 |
+
torch.utils.data.graph_settings.apply_sharding(datapipe, total_workers, global_worker_id)
|
| 113 |
+
if worker_init_fn is not None:
|
| 114 |
+
worker_init_fn(worker_id)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def _share_dist_seed(generator, pg):
|
| 118 |
+
_shared_seed = torch.empty((), dtype=torch.int64).random_(generator=generator)
|
| 119 |
+
if isinstance(pg, dist.ProcessGroup):
|
| 120 |
+
dist.broadcast(_shared_seed, src=0, group=pg)
|
| 121 |
+
return _shared_seed.item()
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
class DataLoader(Generic[T_co]):
|
| 125 |
+
r"""
|
| 126 |
+
Data loader combines a dataset and a sampler, and provides an iterable over the given dataset.
|
| 127 |
+
|
| 128 |
+
The :class:`~torch.utils.data.DataLoader` supports both map-style and
|
| 129 |
+
iterable-style datasets with single- or multi-process loading, customizing
|
| 130 |
+
loading order and optional automatic batching (collation) and memory pinning.
|
| 131 |
+
|
| 132 |
+
See :py:mod:`torch.utils.data` documentation page for more details.
|
| 133 |
+
|
| 134 |
+
Args:
|
| 135 |
+
dataset (Dataset): dataset from which to load the data.
|
| 136 |
+
batch_size (int, optional): how many samples per batch to load
|
| 137 |
+
(default: ``1``).
|
| 138 |
+
shuffle (bool, optional): set to ``True`` to have the data reshuffled
|
| 139 |
+
at every epoch (default: ``False``).
|
| 140 |
+
sampler (Sampler or Iterable, optional): defines the strategy to draw
|
| 141 |
+
samples from the dataset. Can be any ``Iterable`` with ``__len__``
|
| 142 |
+
implemented. If specified, :attr:`shuffle` must not be specified.
|
| 143 |
+
batch_sampler (Sampler or Iterable, optional): like :attr:`sampler`, but
|
| 144 |
+
returns a batch of indices at a time. Mutually exclusive with
|
| 145 |
+
:attr:`batch_size`, :attr:`shuffle`, :attr:`sampler`,
|
| 146 |
+
and :attr:`drop_last`.
|
| 147 |
+
num_workers (int, optional): how many subprocesses to use for data
|
| 148 |
+
loading. ``0`` means that the data will be loaded in the main process.
|
| 149 |
+
(default: ``0``)
|
| 150 |
+
collate_fn (Callable, optional): merges a list of samples to form a
|
| 151 |
+
mini-batch of Tensor(s). Used when using batched loading from a
|
| 152 |
+
map-style dataset.
|
| 153 |
+
pin_memory (bool, optional): If ``True``, the data loader will copy Tensors
|
| 154 |
+
into device/CUDA pinned memory before returning them. If your data elements
|
| 155 |
+
are a custom type, or your :attr:`collate_fn` returns a batch that is a custom type,
|
| 156 |
+
see the example below.
|
| 157 |
+
drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,
|
| 158 |
+
if the dataset size is not divisible by the batch size. If ``False`` and
|
| 159 |
+
the size of dataset is not divisible by the batch size, then the last batch
|
| 160 |
+
will be smaller. (default: ``False``)
|
| 161 |
+
timeout (numeric, optional): if positive, the timeout value for collecting a batch
|
| 162 |
+
from workers. Should always be non-negative. (default: ``0``)
|
| 163 |
+
worker_init_fn (Callable, optional): If not ``None``, this will be called on each
|
| 164 |
+
worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as
|
| 165 |
+
input, after seeding and before data loading. (default: ``None``)
|
| 166 |
+
multiprocessing_context (str or multiprocessing.context.BaseContext, optional): If
|
| 167 |
+
``None``, the default `multiprocessing context`_ of your operating system will
|
| 168 |
+
be used. (default: ``None``)
|
| 169 |
+
generator (torch.Generator, optional): If not ``None``, this RNG will be used
|
| 170 |
+
by RandomSampler to generate random indexes and multiprocessing to generate
|
| 171 |
+
``base_seed`` for workers. (default: ``None``)
|
| 172 |
+
prefetch_factor (int, optional, keyword-only arg): Number of batches loaded
|
| 173 |
+
in advance by each worker. ``2`` means there will be a total of
|
| 174 |
+
2 * num_workers batches prefetched across all workers. (default value depends
|
| 175 |
+
on the set value for num_workers. If value of num_workers=0 default is ``None``.
|
| 176 |
+
Otherwise, if value of ``num_workers > 0`` default is ``2``).
|
| 177 |
+
persistent_workers (bool, optional): If ``True``, the data loader will not shut down
|
| 178 |
+
the worker processes after a dataset has been consumed once. This allows to
|
| 179 |
+
maintain the workers `Dataset` instances alive. (default: ``False``)
|
| 180 |
+
pin_memory_device (str, optional): the device to :attr:`pin_memory` to if ``pin_memory`` is
|
| 181 |
+
``True``.
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
.. warning:: If the ``spawn`` start method is used, :attr:`worker_init_fn`
|
| 185 |
+
cannot be an unpicklable object, e.g., a lambda function. See
|
| 186 |
+
:ref:`multiprocessing-best-practices` on more details related
|
| 187 |
+
to multiprocessing in PyTorch.
|
| 188 |
+
|
| 189 |
+
.. warning:: ``len(dataloader)`` heuristic is based on the length of the sampler used.
|
| 190 |
+
When :attr:`dataset` is an :class:`~torch.utils.data.IterableDataset`,
|
| 191 |
+
it instead returns an estimate based on ``len(dataset) / batch_size``, with proper
|
| 192 |
+
rounding depending on :attr:`drop_last`, regardless of multi-process loading
|
| 193 |
+
configurations. This represents the best guess PyTorch can make because PyTorch
|
| 194 |
+
trusts user :attr:`dataset` code in correctly handling multi-process
|
| 195 |
+
loading to avoid duplicate data.
|
| 196 |
+
|
| 197 |
+
However, if sharding results in multiple workers having incomplete last batches,
|
| 198 |
+
this estimate can still be inaccurate, because (1) an otherwise complete batch can
|
| 199 |
+
be broken into multiple ones and (2) more than one batch worth of samples can be
|
| 200 |
+
dropped when :attr:`drop_last` is set. Unfortunately, PyTorch can not detect such
|
| 201 |
+
cases in general.
|
| 202 |
+
|
| 203 |
+
See `Dataset Types`_ for more details on these two types of datasets and how
|
| 204 |
+
:class:`~torch.utils.data.IterableDataset` interacts with
|
| 205 |
+
`Multi-process data loading`_.
|
| 206 |
+
|
| 207 |
+
.. warning:: See :ref:`reproducibility`, and :ref:`dataloader-workers-random-seed`, and
|
| 208 |
+
:ref:`data-loading-randomness` notes for random seed related questions.
|
| 209 |
+
|
| 210 |
+
.. _multiprocessing context:
|
| 211 |
+
https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
|
| 212 |
+
"""
|
| 213 |
+
|
| 214 |
+
dataset: Dataset[T_co]
|
| 215 |
+
batch_size: Optional[int]
|
| 216 |
+
num_workers: int
|
| 217 |
+
pin_memory: bool
|
| 218 |
+
drop_last: bool
|
| 219 |
+
timeout: float
|
| 220 |
+
sampler: Union[Sampler, Iterable]
|
| 221 |
+
pin_memory_device: str
|
| 222 |
+
prefetch_factor: Optional[int]
|
| 223 |
+
_iterator : Optional['_BaseDataLoaderIter']
|
| 224 |
+
__initialized = False
|
| 225 |
+
|
| 226 |
+
def __init__(self, dataset: Dataset[T_co], batch_size: Optional[int] = 1,
|
| 227 |
+
shuffle: Optional[bool] = None, sampler: Union[Sampler, Iterable, None] = None,
|
| 228 |
+
batch_sampler: Union[Sampler[List], Iterable[List], None] = None,
|
| 229 |
+
num_workers: int = 0, collate_fn: Optional[_collate_fn_t] = None,
|
| 230 |
+
pin_memory: bool = False, drop_last: bool = False,
|
| 231 |
+
timeout: float = 0, worker_init_fn: Optional[_worker_init_fn_t] = None,
|
| 232 |
+
multiprocessing_context=None, generator=None,
|
| 233 |
+
*, prefetch_factor: Optional[int] = None,
|
| 234 |
+
persistent_workers: bool = False,
|
| 235 |
+
pin_memory_device: str = ""):
|
| 236 |
+
torch._C._log_api_usage_once("python.data_loader")
|
| 237 |
+
|
| 238 |
+
if num_workers < 0:
|
| 239 |
+
raise ValueError('num_workers option should be non-negative; '
|
| 240 |
+
'use num_workers=0 to disable multiprocessing.')
|
| 241 |
+
|
| 242 |
+
if timeout < 0:
|
| 243 |
+
raise ValueError('timeout option should be non-negative')
|
| 244 |
+
|
| 245 |
+
if num_workers == 0 and prefetch_factor is not None:
|
| 246 |
+
raise ValueError('prefetch_factor option could only be specified in multiprocessing.'
|
| 247 |
+
'let num_workers > 0 to enable multiprocessing, otherwise set prefetch_factor to None.')
|
| 248 |
+
elif num_workers > 0 and prefetch_factor is None:
|
| 249 |
+
prefetch_factor = 2
|
| 250 |
+
elif prefetch_factor is not None and prefetch_factor < 0:
|
| 251 |
+
raise ValueError('prefetch_factor option should be non-negative')
|
| 252 |
+
|
| 253 |
+
if persistent_workers and num_workers == 0:
|
| 254 |
+
raise ValueError('persistent_workers option needs num_workers > 0')
|
| 255 |
+
|
| 256 |
+
self.dataset = dataset
|
| 257 |
+
self.num_workers = num_workers
|
| 258 |
+
self.prefetch_factor = prefetch_factor
|
| 259 |
+
self.pin_memory = pin_memory
|
| 260 |
+
self.pin_memory_device = pin_memory_device
|
| 261 |
+
self.timeout = timeout
|
| 262 |
+
self.worker_init_fn = worker_init_fn
|
| 263 |
+
self.multiprocessing_context = multiprocessing_context
|
| 264 |
+
|
| 265 |
+
# Adds forward compatibilities so classic DataLoader can work with DataPipes:
|
| 266 |
+
# _DataPipeSerializationWrapper container makes it easier to serialize without redefining pickler
|
| 267 |
+
if isinstance(self.dataset, IterDataPipe):
|
| 268 |
+
self.dataset = _IterDataPipeSerializationWrapper(self.dataset)
|
| 269 |
+
elif isinstance(self.dataset, MapDataPipe):
|
| 270 |
+
self.dataset = _MapDataPipeSerializationWrapper(self.dataset)
|
| 271 |
+
|
| 272 |
+
# Arg-check dataset related before checking samplers because we want to
|
| 273 |
+
# tell users that iterable-style datasets are incompatible with custom
|
| 274 |
+
# samplers first, so that they don't learn that this combo doesn't work
|
| 275 |
+
# after spending time fixing the custom sampler errors.
|
| 276 |
+
if isinstance(dataset, IterableDataset):
|
| 277 |
+
self._dataset_kind = _DatasetKind.Iterable
|
| 278 |
+
# NOTE [ Custom Samplers and IterableDataset ]
|
| 279 |
+
#
|
| 280 |
+
# `IterableDataset` does not support custom `batch_sampler` or
|
| 281 |
+
# `sampler` since the key is irrelevant (unless we support
|
| 282 |
+
# generator-style dataset one day...).
|
| 283 |
+
#
|
| 284 |
+
# For `sampler`, we always create a dummy sampler. This is an
|
| 285 |
+
# infinite sampler even when the dataset may have an implemented
|
| 286 |
+
# finite `__len__` because in multi-process data loading, naive
|
| 287 |
+
# settings will return duplicated data (which may be desired), and
|
| 288 |
+
# thus using a sampler with length matching that of dataset will
|
| 289 |
+
# cause data lost (you may have duplicates of the first couple
|
| 290 |
+
# batches, but never see anything afterwards). Therefore,
|
| 291 |
+
# `Iterabledataset` always uses an infinite sampler, an instance of
|
| 292 |
+
# `_InfiniteConstantSampler` defined above.
|
| 293 |
+
#
|
| 294 |
+
# A custom `batch_sampler` essentially only controls the batch size.
|
| 295 |
+
# However, it is unclear how useful it would be since an iterable-style
|
| 296 |
+
# dataset can handle that within itself. Moreover, it is pointless
|
| 297 |
+
# in multi-process data loading as the assignment order of batches
|
| 298 |
+
# to workers is an implementation detail so users can not control
|
| 299 |
+
# how to batchify each worker's iterable. Thus, we disable this
|
| 300 |
+
# option. If this turns out to be useful in future, we can re-enable
|
| 301 |
+
# this, and support custom samplers that specify the assignments to
|
| 302 |
+
# specific workers.
|
| 303 |
+
if isinstance(dataset, IterDataPipe):
|
| 304 |
+
if shuffle is not None:
|
| 305 |
+
dataset = torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle)
|
| 306 |
+
# We cannot check `shuffle is not None` here, since previously `shuffle=False` was the default.
|
| 307 |
+
elif shuffle not in {False, None}:
|
| 308 |
+
raise ValueError(
|
| 309 |
+
f"DataLoader with IterableDataset: expected unspecified shuffle option, but got shuffle={shuffle}")
|
| 310 |
+
|
| 311 |
+
if sampler is not None:
|
| 312 |
+
# See NOTE [ Custom Samplers and IterableDataset ]
|
| 313 |
+
raise ValueError(
|
| 314 |
+
f"DataLoader with IterableDataset: expected unspecified sampler option, but got sampler={sampler}")
|
| 315 |
+
elif batch_sampler is not None:
|
| 316 |
+
# See NOTE [ Custom Samplers and IterableDataset ]
|
| 317 |
+
raise ValueError(
|
| 318 |
+
"DataLoader with IterableDataset: expected unspecified "
|
| 319 |
+
f"batch_sampler option, but got batch_sampler={batch_sampler}")
|
| 320 |
+
else:
|
| 321 |
+
shuffle = bool(shuffle)
|
| 322 |
+
self._dataset_kind = _DatasetKind.Map
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
if sampler is not None and shuffle:
|
| 327 |
+
raise ValueError('sampler option is mutually exclusive with '
|
| 328 |
+
'shuffle')
|
| 329 |
+
|
| 330 |
+
if batch_sampler is not None:
|
| 331 |
+
# auto_collation with custom batch_sampler
|
| 332 |
+
if batch_size != 1 or shuffle or sampler is not None or drop_last:
|
| 333 |
+
raise ValueError('batch_sampler option is mutually exclusive '
|
| 334 |
+
'with batch_size, shuffle, sampler, and '
|
| 335 |
+
'drop_last')
|
| 336 |
+
batch_size = None
|
| 337 |
+
drop_last = False
|
| 338 |
+
elif batch_size is None:
|
| 339 |
+
# no auto_collation
|
| 340 |
+
if drop_last:
|
| 341 |
+
raise ValueError('batch_size=None option disables auto-batching '
|
| 342 |
+
'and is mutually exclusive with drop_last')
|
| 343 |
+
|
| 344 |
+
if sampler is None: # give default samplers
|
| 345 |
+
if self._dataset_kind == _DatasetKind.Iterable:
|
| 346 |
+
# See NOTE [ Custom Samplers and IterableDataset ]
|
| 347 |
+
sampler = _InfiniteConstantSampler()
|
| 348 |
+
else: # map-style
|
| 349 |
+
if shuffle:
|
| 350 |
+
sampler = RandomSampler(dataset, generator=generator) # type: ignore[arg-type]
|
| 351 |
+
else:
|
| 352 |
+
sampler = SequentialSampler(dataset) # type: ignore[arg-type]
|
| 353 |
+
|
| 354 |
+
if batch_size is not None and batch_sampler is None:
|
| 355 |
+
# auto_collation without custom batch_sampler
|
| 356 |
+
batch_sampler = BatchSampler(sampler, batch_size, drop_last)
|
| 357 |
+
|
| 358 |
+
self.batch_size = batch_size
|
| 359 |
+
self.drop_last = drop_last
|
| 360 |
+
self.sampler = sampler
|
| 361 |
+
self.batch_sampler = batch_sampler
|
| 362 |
+
self.generator = generator
|
| 363 |
+
|
| 364 |
+
if collate_fn is None:
|
| 365 |
+
if self._auto_collation:
|
| 366 |
+
collate_fn = _utils.collate.default_collate
|
| 367 |
+
else:
|
| 368 |
+
collate_fn = _utils.collate.default_convert
|
| 369 |
+
|
| 370 |
+
self.collate_fn = collate_fn
|
| 371 |
+
self.persistent_workers = persistent_workers
|
| 372 |
+
|
| 373 |
+
self.__initialized = True
|
| 374 |
+
self._IterableDataset_len_called = None # See NOTE [ IterableDataset and __len__ ]
|
| 375 |
+
|
| 376 |
+
self._iterator = None
|
| 377 |
+
|
| 378 |
+
self.check_worker_number_rationality()
|
| 379 |
+
|
| 380 |
+
torch.set_vital('Dataloader', 'enabled', 'True') # type: ignore[attr-defined]
|
| 381 |
+
|
| 382 |
+
def _get_iterator(self) -> '_BaseDataLoaderIter':
|
| 383 |
+
if self.num_workers == 0:
|
| 384 |
+
return _SingleProcessDataLoaderIter(self)
|
| 385 |
+
else:
|
| 386 |
+
self.check_worker_number_rationality()
|
| 387 |
+
return _MultiProcessingDataLoaderIter(self)
|
| 388 |
+
|
| 389 |
+
@property
|
| 390 |
+
def multiprocessing_context(self):
|
| 391 |
+
return self.__multiprocessing_context
|
| 392 |
+
|
| 393 |
+
@multiprocessing_context.setter
|
| 394 |
+
def multiprocessing_context(self, multiprocessing_context):
|
| 395 |
+
if multiprocessing_context is not None:
|
| 396 |
+
if self.num_workers > 0:
|
| 397 |
+
if isinstance(multiprocessing_context, str):
|
| 398 |
+
valid_start_methods = multiprocessing.get_all_start_methods()
|
| 399 |
+
if multiprocessing_context not in valid_start_methods:
|
| 400 |
+
raise ValueError(
|
| 401 |
+
'multiprocessing_context option '
|
| 402 |
+
f'should specify a valid start method in {valid_start_methods!r}, but got '
|
| 403 |
+
f'multiprocessing_context={multiprocessing_context!r}')
|
| 404 |
+
multiprocessing_context = multiprocessing.get_context(multiprocessing_context)
|
| 405 |
+
|
| 406 |
+
if not isinstance(multiprocessing_context, python_multiprocessing.context.BaseContext):
|
| 407 |
+
raise TypeError('multiprocessing_context option should be a valid context '
|
| 408 |
+
'object or a string specifying the start method, but got '
|
| 409 |
+
f'multiprocessing_context={multiprocessing_context}')
|
| 410 |
+
else:
|
| 411 |
+
raise ValueError('multiprocessing_context can only be used with '
|
| 412 |
+
'multi-process loading (num_workers > 0), but got '
|
| 413 |
+
f'num_workers={self.num_workers}')
|
| 414 |
+
|
| 415 |
+
self.__multiprocessing_context = multiprocessing_context
|
| 416 |
+
|
| 417 |
+
def __setattr__(self, attr, val):
|
| 418 |
+
if self.__initialized and attr in (
|
| 419 |
+
'batch_size', 'batch_sampler', 'sampler', 'drop_last', 'dataset', 'persistent_workers'):
|
| 420 |
+
raise ValueError(f'{attr} attribute should not be set after {self.__class__.__name__} is initialized')
|
| 421 |
+
|
| 422 |
+
super().__setattr__(attr, val)
|
| 423 |
+
|
| 424 |
+
# We quote '_BaseDataLoaderIter' since it isn't defined yet and the definition can't be moved up
|
| 425 |
+
# since '_BaseDataLoaderIter' references 'DataLoader'.
|
| 426 |
+
def __iter__(self) -> '_BaseDataLoaderIter':
|
| 427 |
+
# When using a single worker the returned iterator should be
|
| 428 |
+
# created everytime to avoid resetting its state
|
| 429 |
+
# However, in the case of a multiple workers iterator
|
| 430 |
+
# the iterator is only created once in the lifetime of the
|
| 431 |
+
# DataLoader object so that workers can be reused
|
| 432 |
+
if self.persistent_workers and self.num_workers > 0:
|
| 433 |
+
if self._iterator is None:
|
| 434 |
+
self._iterator = self._get_iterator()
|
| 435 |
+
else:
|
| 436 |
+
self._iterator._reset(self)
|
| 437 |
+
return self._iterator
|
| 438 |
+
else:
|
| 439 |
+
return self._get_iterator()
|
| 440 |
+
|
| 441 |
+
@property
|
| 442 |
+
def _auto_collation(self):
|
| 443 |
+
return self.batch_sampler is not None
|
| 444 |
+
|
| 445 |
+
@property
|
| 446 |
+
def _index_sampler(self):
|
| 447 |
+
# The actual sampler used for generating indices for `_DatasetFetcher`
|
| 448 |
+
# (see _utils/fetch.py) to read data at each time. This would be
|
| 449 |
+
# `.batch_sampler` if in auto-collation mode, and `.sampler` otherwise.
|
| 450 |
+
# We can't change `.sampler` and `.batch_sampler` attributes for BC
|
| 451 |
+
# reasons.
|
| 452 |
+
if self._auto_collation:
|
| 453 |
+
return self.batch_sampler
|
| 454 |
+
else:
|
| 455 |
+
return self.sampler
|
| 456 |
+
|
| 457 |
+
def __len__(self) -> int:
|
| 458 |
+
if self._dataset_kind == _DatasetKind.Iterable:
|
| 459 |
+
# NOTE [ IterableDataset and __len__ ]
|
| 460 |
+
#
|
| 461 |
+
# For `IterableDataset`, `__len__` could be inaccurate when one naively
|
| 462 |
+
# does multi-processing data loading, since the samples will be duplicated.
|
| 463 |
+
# However, no real use case should be actually using that behavior, so
|
| 464 |
+
# it should count as a user error. We should generally trust user
|
| 465 |
+
# code to do the proper thing (e.g., configure each replica differently
|
| 466 |
+
# in `__iter__`), and give us the correct `__len__` if they choose to
|
| 467 |
+
# implement it (this will still throw if the dataset does not implement
|
| 468 |
+
# a `__len__`).
|
| 469 |
+
#
|
| 470 |
+
# To provide a further warning, we track if `__len__` was called on the
|
| 471 |
+
# `DataLoader`, save the returned value in `self._len_called`, and warn
|
| 472 |
+
# if the iterator ends up yielding more than this number of samples.
|
| 473 |
+
|
| 474 |
+
# Cannot statically verify that dataset is Sized
|
| 475 |
+
length = self._IterableDataset_len_called = len(self.dataset) # type: ignore[assignment, arg-type]
|
| 476 |
+
if self.batch_size is not None: # IterableDataset doesn't allow custom sampler or batch_sampler
|
| 477 |
+
from math import ceil
|
| 478 |
+
if self.drop_last:
|
| 479 |
+
length = length // self.batch_size
|
| 480 |
+
else:
|
| 481 |
+
length = ceil(length / self.batch_size)
|
| 482 |
+
return length
|
| 483 |
+
else:
|
| 484 |
+
return len(self._index_sampler)
|
| 485 |
+
|
| 486 |
+
def check_worker_number_rationality(self):
|
| 487 |
+
# This function check whether the dataloader's worker number is rational based on
|
| 488 |
+
# current system's resource. Current rule is that if the number of workers this
|
| 489 |
+
# Dataloader will create is bigger than the number of logical cpus that is allowed to
|
| 490 |
+
# use, than we will pop up a warning to let user pay attention.
|
| 491 |
+
#
|
| 492 |
+
# eg. If current system has 2 physical CPUs with 16 cores each. And each core support 2
|
| 493 |
+
# threads, then the total logical cpus here is 2 * 16 * 2 = 64. Let's say current
|
| 494 |
+
# DataLoader process can use half of them which is 32, then the rational max number of
|
| 495 |
+
# worker that initiated from this process is 32.
|
| 496 |
+
# Now, let's say the created DataLoader has num_works = 40, which is bigger than 32.
|
| 497 |
+
# So the warning message is triggered to notify the user to lower the worker number if
|
| 498 |
+
# necessary.
|
| 499 |
+
#
|
| 500 |
+
#
|
| 501 |
+
# [Note] Please note that this function repects `cpuset` only when os.sched_getaffinity is
|
| 502 |
+
# available (available in most of Linux system, but not OSX and Windows).
|
| 503 |
+
# When os.sched_getaffinity is not available, os.cpu_count() is called instead, but
|
| 504 |
+
# it doesn't repect cpuset.
|
| 505 |
+
# We don't take threading into account since each worker process is single threaded
|
| 506 |
+
# at this time.
|
| 507 |
+
#
|
| 508 |
+
# We don't set any threading flags (eg. OMP_NUM_THREADS, MKL_NUM_THREADS, etc)
|
| 509 |
+
# other than `torch.set_num_threads` to 1 in the worker process, if the passing
|
| 510 |
+
# in functions use 3rd party modules that rely on those threading flags to determine
|
| 511 |
+
# how many thread to create (eg. numpy, etc), then it is caller's responsibility to
|
| 512 |
+
# set those flags correctly.
|
| 513 |
+
def _create_warning_msg(num_worker_suggest, num_worker_created, cpuset_checked):
|
| 514 |
+
|
| 515 |
+
suggested_max_worker_msg = ((
|
| 516 |
+
"Our suggested max number of worker in current system is {}{}, which is smaller "
|
| 517 |
+
"than what this DataLoader is going to create.").format(
|
| 518 |
+
num_worker_suggest,
|
| 519 |
+
("" if cpuset_checked else " (`cpuset` is not taken into account)"))
|
| 520 |
+
) if num_worker_suggest is not None else (
|
| 521 |
+
"DataLoader is not able to compute a suggested max number of worker in current system.")
|
| 522 |
+
|
| 523 |
+
warn_msg = (
|
| 524 |
+
"This DataLoader will create {} worker processes in total. {} "
|
| 525 |
+
"Please be aware that excessive worker creation might get DataLoader running slow or even freeze, "
|
| 526 |
+
"lower the worker number to avoid potential slowness/freeze if necessary.").format(
|
| 527 |
+
num_worker_created,
|
| 528 |
+
suggested_max_worker_msg)
|
| 529 |
+
return warn_msg
|
| 530 |
+
|
| 531 |
+
if not self.num_workers or self.num_workers == 0:
|
| 532 |
+
return
|
| 533 |
+
|
| 534 |
+
# try to compute a suggested max number of worker based on system's resource
|
| 535 |
+
max_num_worker_suggest = None
|
| 536 |
+
cpuset_checked = False
|
| 537 |
+
if hasattr(os, 'sched_getaffinity'):
|
| 538 |
+
try:
|
| 539 |
+
max_num_worker_suggest = len(os.sched_getaffinity(0))
|
| 540 |
+
cpuset_checked = True
|
| 541 |
+
except Exception:
|
| 542 |
+
pass
|
| 543 |
+
if max_num_worker_suggest is None:
|
| 544 |
+
# os.cpu_count() could return Optional[int]
|
| 545 |
+
# get cpu count first and check None in order to satisfy mypy check
|
| 546 |
+
cpu_count = os.cpu_count()
|
| 547 |
+
if cpu_count is not None:
|
| 548 |
+
max_num_worker_suggest = cpu_count
|
| 549 |
+
|
| 550 |
+
if max_num_worker_suggest is None:
|
| 551 |
+
warnings.warn(_create_warning_msg(
|
| 552 |
+
max_num_worker_suggest,
|
| 553 |
+
self.num_workers,
|
| 554 |
+
cpuset_checked))
|
| 555 |
+
return
|
| 556 |
+
|
| 557 |
+
if self.num_workers > max_num_worker_suggest:
|
| 558 |
+
warnings.warn(_create_warning_msg(
|
| 559 |
+
max_num_worker_suggest,
|
| 560 |
+
self.num_workers,
|
| 561 |
+
cpuset_checked))
|
| 562 |
+
|
| 563 |
+
|
| 564 |
+
class _BaseDataLoaderIter:
|
| 565 |
+
def __init__(self, loader: DataLoader) -> None:
|
| 566 |
+
self._dataset = loader.dataset
|
| 567 |
+
self._shared_seed = None
|
| 568 |
+
self._pg = None
|
| 569 |
+
if isinstance(self._dataset, IterDataPipe):
|
| 570 |
+
if dist.is_available() and dist.is_initialized():
|
| 571 |
+
self._pg = dist.new_group(backend="gloo")
|
| 572 |
+
self._shared_seed = _share_dist_seed(loader.generator, self._pg)
|
| 573 |
+
shared_rng = torch.Generator()
|
| 574 |
+
shared_rng.manual_seed(self._shared_seed)
|
| 575 |
+
self._dataset = torch.utils.data.graph_settings.apply_random_seed(self._dataset, shared_rng)
|
| 576 |
+
self._dataset_kind = loader._dataset_kind
|
| 577 |
+
self._IterableDataset_len_called = loader._IterableDataset_len_called
|
| 578 |
+
self._auto_collation = loader._auto_collation
|
| 579 |
+
self._drop_last = loader.drop_last
|
| 580 |
+
self._index_sampler = loader._index_sampler
|
| 581 |
+
self._num_workers = loader.num_workers
|
| 582 |
+
ws, rank = _get_distributed_settings()
|
| 583 |
+
self._world_size = ws
|
| 584 |
+
self._rank = rank
|
| 585 |
+
# for other backends, pin_memory_device need to set. if not set
|
| 586 |
+
# default behaviour is CUDA device. if pin_memory_device is selected
|
| 587 |
+
# and pin_memory is not set, the default behaviour false.
|
| 588 |
+
if (len(loader.pin_memory_device) == 0):
|
| 589 |
+
self._pin_memory = loader.pin_memory and torch.cuda.is_available()
|
| 590 |
+
self._pin_memory_device = None
|
| 591 |
+
else:
|
| 592 |
+
if not loader.pin_memory:
|
| 593 |
+
warn_msg = ("pin memory device is set and pin_memory flag is not used then device pinned memory won't be used"
|
| 594 |
+
"please set pin_memory to true, if you need to use the device pin memory")
|
| 595 |
+
warnings.warn(warn_msg)
|
| 596 |
+
|
| 597 |
+
self._pin_memory = loader.pin_memory
|
| 598 |
+
self._pin_memory_device = loader.pin_memory_device
|
| 599 |
+
self._timeout = loader.timeout
|
| 600 |
+
self._collate_fn = loader.collate_fn
|
| 601 |
+
self._sampler_iter = iter(self._index_sampler)
|
| 602 |
+
self._base_seed = torch.empty((), dtype=torch.int64).random_(generator=loader.generator).item()
|
| 603 |
+
self._persistent_workers = loader.persistent_workers
|
| 604 |
+
self._num_yielded = 0
|
| 605 |
+
self._profile_name = f"enumerate(DataLoader)#{self.__class__.__name__}.__next__"
|
| 606 |
+
|
| 607 |
+
def __iter__(self) -> '_BaseDataLoaderIter':
|
| 608 |
+
return self
|
| 609 |
+
|
| 610 |
+
def _reset(self, loader, first_iter=False):
|
| 611 |
+
self._sampler_iter = iter(self._index_sampler)
|
| 612 |
+
self._num_yielded = 0
|
| 613 |
+
self._IterableDataset_len_called = loader._IterableDataset_len_called
|
| 614 |
+
if isinstance(self._dataset, IterDataPipe):
|
| 615 |
+
self._shared_seed = _share_dist_seed(loader.generator, self._pg)
|
| 616 |
+
shared_rng = torch.Generator()
|
| 617 |
+
shared_rng.manual_seed(self._shared_seed)
|
| 618 |
+
self._dataset = torch.utils.data.graph_settings.apply_random_seed(self._dataset, shared_rng)
|
| 619 |
+
|
| 620 |
+
def _next_index(self):
|
| 621 |
+
return next(self._sampler_iter) # may raise StopIteration
|
| 622 |
+
|
| 623 |
+
def _next_data(self):
|
| 624 |
+
raise NotImplementedError
|
| 625 |
+
|
| 626 |
+
def __next__(self) -> Any:
|
| 627 |
+
with torch.autograd.profiler.record_function(self._profile_name):
|
| 628 |
+
if self._sampler_iter is None:
|
| 629 |
+
# TODO(https://github.com/pytorch/pytorch/issues/76750)
|
| 630 |
+
self._reset() # type: ignore[call-arg]
|
| 631 |
+
data = self._next_data()
|
| 632 |
+
self._num_yielded += 1
|
| 633 |
+
if self._dataset_kind == _DatasetKind.Iterable and \
|
| 634 |
+
self._IterableDataset_len_called is not None and \
|
| 635 |
+
self._num_yielded > self._IterableDataset_len_called:
|
| 636 |
+
warn_msg = ("Length of IterableDataset {} was reported to be {} (when accessing len(dataloader)), but {} "
|
| 637 |
+
"samples have been fetched. ").format(self._dataset, self._IterableDataset_len_called,
|
| 638 |
+
self._num_yielded)
|
| 639 |
+
if self._num_workers > 0:
|
| 640 |
+
warn_msg += ("For multiprocessing data-loading, this could be caused by not properly configuring the "
|
| 641 |
+
"IterableDataset replica at each worker. Please see "
|
| 642 |
+
"https://pytorch.org/docs/stable/data.html#torch.utils.data.IterableDataset for examples.")
|
| 643 |
+
warnings.warn(warn_msg)
|
| 644 |
+
return data
|
| 645 |
+
|
| 646 |
+
def __len__(self) -> int:
|
| 647 |
+
return len(self._index_sampler)
|
| 648 |
+
|
| 649 |
+
def __getstate__(self):
|
| 650 |
+
# TODO: add limited pickling support for sharing an iterator
|
| 651 |
+
# across multiple threads for HOGWILD.
|
| 652 |
+
# Probably the best way to do this is by moving the sample pushing
|
| 653 |
+
# to a separate thread and then just sharing the data queue
|
| 654 |
+
# but signalling the end is tricky without a non-blocking API
|
| 655 |
+
raise NotImplementedError("{} cannot be pickled", self.__class__.__name__)
|
| 656 |
+
|
| 657 |
+
|
| 658 |
+
class _SingleProcessDataLoaderIter(_BaseDataLoaderIter):
|
| 659 |
+
def __init__(self, loader):
|
| 660 |
+
super().__init__(loader)
|
| 661 |
+
assert self._timeout == 0
|
| 662 |
+
assert self._num_workers == 0
|
| 663 |
+
|
| 664 |
+
# Adds forward compatibilities so classic DataLoader can work with DataPipes:
|
| 665 |
+
# Taking care of distributed sharding
|
| 666 |
+
if isinstance(self._dataset, (IterDataPipe, MapDataPipe)):
|
| 667 |
+
# For BC, use default SHARDING_PRIORITIES
|
| 668 |
+
torch.utils.data.graph_settings.apply_sharding(self._dataset, self._world_size, self._rank)
|
| 669 |
+
|
| 670 |
+
self._dataset_fetcher = _DatasetKind.create_fetcher(
|
| 671 |
+
self._dataset_kind, self._dataset, self._auto_collation, self._collate_fn, self._drop_last)
|
| 672 |
+
|
| 673 |
+
def _next_data(self):
|
| 674 |
+
index = self._next_index() # may raise StopIteration
|
| 675 |
+
data = self._dataset_fetcher.fetch(index) # may raise StopIteration
|
| 676 |
+
if self._pin_memory:
|
| 677 |
+
data = _utils.pin_memory.pin_memory(data, self._pin_memory_device)
|
| 678 |
+
return data
|
| 679 |
+
|
| 680 |
+
|
| 681 |
+
class _MultiProcessingDataLoaderIter(_BaseDataLoaderIter):
|
| 682 |
+
r"""Iterates once over the DataLoader's dataset, as specified by the sampler."""
|
| 683 |
+
|
| 684 |
+
# NOTE [ Data Loader Multiprocessing Shutdown Logic ]
|
| 685 |
+
#
|
| 686 |
+
# Preliminary:
|
| 687 |
+
#
|
| 688 |
+
# Our data model looks like this (queues are indicated with curly brackets):
|
| 689 |
+
#
|
| 690 |
+
# main process ||
|
| 691 |
+
# | ||
|
| 692 |
+
# {index_queue} ||
|
| 693 |
+
# | ||
|
| 694 |
+
# worker processes || DATA
|
| 695 |
+
# | ||
|
| 696 |
+
# {worker_result_queue} || FLOW
|
| 697 |
+
# | ||
|
| 698 |
+
# pin_memory_thread of main process || DIRECTION
|
| 699 |
+
# | ||
|
| 700 |
+
# {data_queue} ||
|
| 701 |
+
# | ||
|
| 702 |
+
# data output \/
|
| 703 |
+
#
|
| 704 |
+
# P.S. `worker_result_queue` and `pin_memory_thread` part may be omitted if
|
| 705 |
+
# `pin_memory=False`.
|
| 706 |
+
#
|
| 707 |
+
#
|
| 708 |
+
# Terminating multiprocessing logic requires very careful design. In
|
| 709 |
+
# particular, we need to make sure that
|
| 710 |
+
#
|
| 711 |
+
# 1. The iterator gracefully exits the workers when its last reference is
|
| 712 |
+
# gone or it is depleted.
|
| 713 |
+
#
|
| 714 |
+
# In this case, the workers should be gracefully exited because the
|
| 715 |
+
# main process may still need to continue to run, and we want cleaning
|
| 716 |
+
# up code in the workers to be executed (e.g., releasing GPU memory).
|
| 717 |
+
# Naturally, we implement the shutdown logic in `__del__` of
|
| 718 |
+
# DataLoaderIterator.
|
| 719 |
+
#
|
| 720 |
+
# We delay the discussion on the logic in this case until later.
|
| 721 |
+
#
|
| 722 |
+
# 2. The iterator exits the workers when the loader process and/or worker
|
| 723 |
+
# processes exits normally or with error.
|
| 724 |
+
#
|
| 725 |
+
# We set all workers and `pin_memory_thread` to have `daemon=True`.
|
| 726 |
+
#
|
| 727 |
+
# You may ask, why can't we make the workers non-daemonic, and
|
| 728 |
+
# gracefully exit using the same logic as we have in `__del__` when the
|
| 729 |
+
# iterator gets deleted (see 1 above)?
|
| 730 |
+
#
|
| 731 |
+
# First of all, `__del__` is **not** guaranteed to be called when
|
| 732 |
+
# interpreter exits. Even if it is called, by the time it executes,
|
| 733 |
+
# many Python core library resources may already be freed, and even
|
| 734 |
+
# simple things like acquiring an internal lock of a queue may hang.
|
| 735 |
+
# Therefore, in this case, we actually need to prevent `__del__` from
|
| 736 |
+
# being executed, and rely on the automatic termination of daemonic
|
| 737 |
+
# children.
|
| 738 |
+
#
|
| 739 |
+
# Thus, we register an `atexit` hook that sets a global flag
|
| 740 |
+
# `_utils.python_exit_status`. Since `atexit` hooks are executed in the
|
| 741 |
+
# reverse order of registration, we are guaranteed that this flag is
|
| 742 |
+
# set before library resources we use are freed (which, at least in
|
| 743 |
+
# CPython, is done via an `atexit` handler defined in
|
| 744 |
+
# `multiprocessing/util.py`
|
| 745 |
+
# https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/util.py#L320-L362
|
| 746 |
+
# registered when an object requiring this mechanism is first
|
| 747 |
+
# created, e.g., `mp.Queue`
|
| 748 |
+
# https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/context.py#L100-L103
|
| 749 |
+
# https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/queues.py#L29
|
| 750 |
+
# )
|
| 751 |
+
#
|
| 752 |
+
# So in `__del__`, we check if `_utils.python_exit_status` is set or
|
| 753 |
+
# `None` (freed), and perform no-op if so.
|
| 754 |
+
#
|
| 755 |
+
# However, simply letting library clean-up codes run can also be bad,
|
| 756 |
+
# because such codes (i.e., `multiprocessing.util._exit_function()`)
|
| 757 |
+
# include join putting threads for `mp.Queue`, which can be blocking.
|
| 758 |
+
# Hence, the main process putting threads are called with
|
| 759 |
+
# `cancel_join_thread` at creation. See later section
|
| 760 |
+
# [ 3b. A process won't hang when putting into a queue; ]
|
| 761 |
+
# for more details.
|
| 762 |
+
#
|
| 763 |
+
# Here are two example cases where library clean-up codes can run
|
| 764 |
+
# before `__del__` is called:
|
| 765 |
+
#
|
| 766 |
+
# 1. If we hold onto a reference to the iterator, it more often
|
| 767 |
+
# than not tries to do `multiprocessing` library cleaning before
|
| 768 |
+
# clearing the alive referenced objects (https://github.com/pytorch/pytorch/issues/48666)
|
| 769 |
+
# and thus prevents our cleaning-up code to run first.
|
| 770 |
+
#
|
| 771 |
+
# 2. A similar issue araises when a `DataLoader` is used in a subprocess.
|
| 772 |
+
# When a process ends, it shuts the all its daemonic children
|
| 773 |
+
# down with a SIGTERM (instead of joining them without a timeout).
|
| 774 |
+
# Simiarly for threads, but by a different mechanism. This fact,
|
| 775 |
+
# together with a few implementation details of multiprocessing, forces
|
| 776 |
+
# us to make workers daemonic. All of our problems arise when a
|
| 777 |
+
# DataLoader is used in a subprocess, and are caused by multiprocessing
|
| 778 |
+
# code which looks more or less like this:
|
| 779 |
+
#
|
| 780 |
+
# try:
|
| 781 |
+
# your_function_using_a_dataloader()
|
| 782 |
+
# finally:
|
| 783 |
+
# multiprocessing.util._exit_function()
|
| 784 |
+
#
|
| 785 |
+
# The joining/termination mentioned above happens inside
|
| 786 |
+
# `_exit_function()`. Now, if `your_function_using_a_dataloader()`
|
| 787 |
+
# throws, the stack trace stored in the exception will prevent the
|
| 788 |
+
# frame which uses `DataLoaderIter` to be freed. If the frame has any
|
| 789 |
+
# reference to the `DataLoaderIter` (e.g., in a method of the iter),
|
| 790 |
+
# its `__del__`, which starts the shutdown procedure, will not be
|
| 791 |
+
# called. That, in turn, means that workers aren't notified. Attempting
|
| 792 |
+
# to join in `_exit_function` will then result in a hang.
|
| 793 |
+
#
|
| 794 |
+
# For context, `_exit_function` is also registered as an `atexit` call.
|
| 795 |
+
# So it is unclear to me (@ssnl) why this is needed in a finally block.
|
| 796 |
+
# The code dates back to 2008 and there is no comment on the original
|
| 797 |
+
# PEP 371 or patch https://bugs.python.org/issue3050 (containing both
|
| 798 |
+
# the finally block and the `atexit` registration) that explains this.
|
| 799 |
+
#
|
| 800 |
+
#
|
| 801 |
+
# Finally, another choice is to just shutdown workers with logic in 1
|
| 802 |
+
# above whenever we see an error in `next`. This isn't ideal because
|
| 803 |
+
# a. It prevents users from using try-catch to resume data loading.
|
| 804 |
+
# b. It doesn't prevent hanging if users have references to the
|
| 805 |
+
# iterator.
|
| 806 |
+
#
|
| 807 |
+
# 3. All processes exit if any of them die unexpectedly by fatal signals.
|
| 808 |
+
#
|
| 809 |
+
# As shown above, the workers are set as daemonic children of the main
|
| 810 |
+
# process. However, automatic cleaning-up of such child processes only
|
| 811 |
+
# happens if the parent process exits gracefully (e.g., not via fatal
|
| 812 |
+
# signals like SIGKILL). So we must ensure that each process will exit
|
| 813 |
+
# even the process that should send/receive data to/from it were
|
| 814 |
+
# killed, i.e.,
|
| 815 |
+
#
|
| 816 |
+
# a. A process won't hang when getting from a queue.
|
| 817 |
+
#
|
| 818 |
+
# Even with carefully designed data dependencies (i.e., a `put()`
|
| 819 |
+
# always corresponding to a `get()`), hanging on `get()` can still
|
| 820 |
+
# happen when data in queue is corrupted (e.g., due to
|
| 821 |
+
# `cancel_join_thread` or unexpected exit).
|
| 822 |
+
#
|
| 823 |
+
# For child exit, we set a timeout whenever we try to get data
|
| 824 |
+
# from `data_queue`, and check the workers' status on each timeout
|
| 825 |
+
# and error.
|
| 826 |
+
# See `_DataLoaderiter._get_batch()` and
|
| 827 |
+
# `_DataLoaderiter._try_get_data()` for details.
|
| 828 |
+
#
|
| 829 |
+
# Additionally, for child exit on non-Windows platforms, we also
|
| 830 |
+
# register a SIGCHLD handler (which is supported on Windows) on
|
| 831 |
+
# the main process, which checks if any of the workers fail in the
|
| 832 |
+
# (Python) handler. This is more efficient and faster in detecting
|
| 833 |
+
# worker failures, compared to only using the above mechanism.
|
| 834 |
+
# See `DataLoader.cpp` and `_utils/signal_handling.py` for details.
|
| 835 |
+
#
|
| 836 |
+
# For `.get()` calls where the sender(s) is not the workers, we
|
| 837 |
+
# guard them with timeouts, and check the status of the sender
|
| 838 |
+
# when timeout happens:
|
| 839 |
+
# + in the workers, the `_utils.worker.ManagerWatchdog` class
|
| 840 |
+
# checks the status of the main process.
|
| 841 |
+
# + if `pin_memory=True`, when getting from `pin_memory_thread`,
|
| 842 |
+
# check `pin_memory_thread` status periodically until `.get()`
|
| 843 |
+
# returns or see that `pin_memory_thread` died.
|
| 844 |
+
#
|
| 845 |
+
# b. A process won't hang when putting into a queue;
|
| 846 |
+
#
|
| 847 |
+
# We use `mp.Queue` which has a separate background thread to put
|
| 848 |
+
# objects from an unbounded buffer array. The background thread is
|
| 849 |
+
# daemonic and usually automatically joined when the process
|
| 850 |
+
# *exits*.
|
| 851 |
+
#
|
| 852 |
+
# In case that the receiver has ended abruptly while
|
| 853 |
+
# reading from the pipe, the join will hang forever. The usual
|
| 854 |
+
# solution for this in Python is calling `q.cancel_join_thread`,
|
| 855 |
+
# which prevents automatically joining it when finalizing
|
| 856 |
+
# (exiting).
|
| 857 |
+
#
|
| 858 |
+
# Nonetheless, `cancel_join_thread` must only be called when the
|
| 859 |
+
# queue is **not** going to be read from or write into by another
|
| 860 |
+
# process, because it may hold onto a lock or leave corrupted data
|
| 861 |
+
# in the queue, leading other readers/writers to hang.
|
| 862 |
+
#
|
| 863 |
+
# Hence,
|
| 864 |
+
# + For worker processes, we only do so (for their output
|
| 865 |
+
# queues, i.e., `worker_result_queue`) before exiting.
|
| 866 |
+
# + For `pin_memory_thread`, its output queue `data_queue` is a
|
| 867 |
+
# `queue.Queue` that does blocking `put` if the queue is full.
|
| 868 |
+
# So there is no above problem, but as a result, in
|
| 869 |
+
# `_pin_memory_loop`, we do need to wrap the `put` in a loop
|
| 870 |
+
# that breaks not only upon success, but also when the main
|
| 871 |
+
# process stops reading, i.e., is shutting down.
|
| 872 |
+
# + For loader process, we `cancel_join_thread()` for all
|
| 873 |
+
# `_index_queues` because the whole purpose of workers and
|
| 874 |
+
# `pin_memory_thread` is to serve the loader process. If
|
| 875 |
+
# loader process is already exiting, we don't really care if
|
| 876 |
+
# the queues are corrupted.
|
| 877 |
+
#
|
| 878 |
+
#
|
| 879 |
+
# Now let's get back to 1:
|
| 880 |
+
# how we gracefully exit the workers when the last reference to the
|
| 881 |
+
# iterator is gone.
|
| 882 |
+
#
|
| 883 |
+
# To achieve this, we implement the following logic along with the design
|
| 884 |
+
# choices mentioned above:
|
| 885 |
+
#
|
| 886 |
+
# `workers_done_event`:
|
| 887 |
+
# A `multiprocessing.Event` shared among the main process and all worker
|
| 888 |
+
# processes. This is used to signal the workers that the iterator is
|
| 889 |
+
# shutting down. After it is set, they will not send processed data to
|
| 890 |
+
# queues anymore, and only wait for the final `None` before exiting.
|
| 891 |
+
# `done_event` isn't strictly needed. I.e., we can just check for `None`
|
| 892 |
+
# from the input queue, but it allows us to skip wasting resources
|
| 893 |
+
# processing data if we are already shutting down.
|
| 894 |
+
#
|
| 895 |
+
# `pin_memory_thread_done_event`:
|
| 896 |
+
# A `threading.Event` for a similar purpose to that of
|
| 897 |
+
# `workers_done_event`, but is for the `pin_memory_thread`. The reason
|
| 898 |
+
# that separate events are needed is that `pin_memory_thread` reads from
|
| 899 |
+
# the output queue of the workers. But the workers, upon seeing that
|
| 900 |
+
# `workers_done_event` is set, only wants to see the final `None`, and is
|
| 901 |
+
# not required to flush all data in the output queue (e.g., it may call
|
| 902 |
+
# `cancel_join_thread` on that queue if its `IterableDataset` iterator
|
| 903 |
+
# happens to exhaust coincidentally, which is out of the control of the
|
| 904 |
+
# main process). Thus, since we will exit `pin_memory_thread` before the
|
| 905 |
+
# workers (see below), two separete events are used.
|
| 906 |
+
#
|
| 907 |
+
# NOTE: In short, the protocol is that the main process will set these
|
| 908 |
+
# `done_event`s and then the corresponding processes/threads a `None`,
|
| 909 |
+
# and that they may exit at any time after receiving the `None`.
|
| 910 |
+
#
|
| 911 |
+
# NOTE: Using `None` as the final signal is valid, since normal data will
|
| 912 |
+
# always be a 2-tuple with the 1st element being the index of the data
|
| 913 |
+
# transferred (different from dataset index/key), and the 2nd being
|
| 914 |
+
# either the dataset key or the data sample (depending on which part
|
| 915 |
+
# of the data model the queue is at).
|
| 916 |
+
#
|
| 917 |
+
# [ worker processes ]
|
| 918 |
+
# While loader process is alive:
|
| 919 |
+
# Get from `index_queue`.
|
| 920 |
+
# If get anything else,
|
| 921 |
+
# Check `workers_done_event`.
|
| 922 |
+
# If set, continue to next iteration
|
| 923 |
+
# i.e., keep getting until see the `None`, then exit.
|
| 924 |
+
# Otherwise, process data:
|
| 925 |
+
# If is fetching from an `IterableDataset` and the iterator
|
| 926 |
+
# is exhausted, send an `_IterableDatasetStopIteration`
|
| 927 |
+
# object to signal iteration end. The main process, upon
|
| 928 |
+
# receiving such an object, will send `None` to this
|
| 929 |
+
# worker and not use the corresponding `index_queue`
|
| 930 |
+
# anymore.
|
| 931 |
+
# If timed out,
|
| 932 |
+
# No matter `workers_done_event` is set (still need to see `None`)
|
| 933 |
+
# or not, must continue to next iteration.
|
| 934 |
+
# (outside loop)
|
| 935 |
+
# If `workers_done_event` is set, (this can be False with `IterableDataset`)
|
| 936 |
+
# `data_queue.cancel_join_thread()`. (Everything is ending here:
|
| 937 |
+
# main process won't read from it;
|
| 938 |
+
# other workers will also call
|
| 939 |
+
# `cancel_join_thread`.)
|
| 940 |
+
#
|
| 941 |
+
# [ pin_memory_thread ]
|
| 942 |
+
# # No need to check main thread. If this thread is alive, the main loader
|
| 943 |
+
# # thread must be alive, because this thread is set as daemonic.
|
| 944 |
+
# While `pin_memory_thread_done_event` is not set:
|
| 945 |
+
# Get from `worker_result_queue`.
|
| 946 |
+
# If timed out, continue to get in the next iteration.
|
| 947 |
+
# Otherwise, process data.
|
| 948 |
+
# While `pin_memory_thread_done_event` is not set:
|
| 949 |
+
# Put processed data to `data_queue` (a `queue.Queue` with blocking put)
|
| 950 |
+
# If timed out, continue to put in the next iteration.
|
| 951 |
+
# Otherwise, break, i.e., continuing to the out loop.
|
| 952 |
+
#
|
| 953 |
+
# NOTE: we don't check the status of the main thread because
|
| 954 |
+
# 1. if the process is killed by fatal signal, `pin_memory_thread`
|
| 955 |
+
# ends.
|
| 956 |
+
# 2. in other cases, either the cleaning-up in __del__ or the
|
| 957 |
+
# automatic exit of daemonic thread will take care of it.
|
| 958 |
+
# This won't busy-wait either because `.get(timeout)` does not
|
| 959 |
+
# busy-wait.
|
| 960 |
+
#
|
| 961 |
+
# [ main process ]
|
| 962 |
+
# In the DataLoader Iter's `__del__`
|
| 963 |
+
# b. Exit `pin_memory_thread`
|
| 964 |
+
# i. Set `pin_memory_thread_done_event`.
|
| 965 |
+
# ii Put `None` in `worker_result_queue`.
|
| 966 |
+
# iii. Join the `pin_memory_thread`.
|
| 967 |
+
# iv. `worker_result_queue.cancel_join_thread()`.
|
| 968 |
+
#
|
| 969 |
+
# c. Exit the workers.
|
| 970 |
+
# i. Set `workers_done_event`.
|
| 971 |
+
# ii. Put `None` in each worker's `index_queue`.
|
| 972 |
+
# iii. Join the workers.
|
| 973 |
+
# iv. Call `.cancel_join_thread()` on each worker's `index_queue`.
|
| 974 |
+
#
|
| 975 |
+
# NOTE: (c) is better placed after (b) because it may leave corrupted
|
| 976 |
+
# data in `worker_result_queue`, which `pin_memory_thread`
|
| 977 |
+
# reads from, in which case the `pin_memory_thread` can only
|
| 978 |
+
# happen at timing out, which is slow. Nonetheless, same thing
|
| 979 |
+
# happens if a worker is killed by signal at unfortunate times,
|
| 980 |
+
# but in other cases, we are better off having a non-corrupted
|
| 981 |
+
# `worker_result_queue` for `pin_memory_thread`.
|
| 982 |
+
#
|
| 983 |
+
# NOTE: If `pin_memory=False`, there is no `pin_memory_thread` and (b)
|
| 984 |
+
# can be omitted
|
| 985 |
+
#
|
| 986 |
+
# NB: `done_event`s isn't strictly needed. E.g., we can just check for
|
| 987 |
+
# `None` from `index_queue`, but it allows us to skip wasting resources
|
| 988 |
+
# processing indices already in `index_queue` if we are already shutting
|
| 989 |
+
# down.
|
| 990 |
+
|
| 991 |
+
def __init__(self, loader):
|
| 992 |
+
super().__init__(loader)
|
| 993 |
+
|
| 994 |
+
self._prefetch_factor = loader.prefetch_factor
|
| 995 |
+
|
| 996 |
+
assert self._num_workers > 0
|
| 997 |
+
assert self._prefetch_factor > 0
|
| 998 |
+
|
| 999 |
+
if loader.multiprocessing_context is None:
|
| 1000 |
+
multiprocessing_context = multiprocessing
|
| 1001 |
+
else:
|
| 1002 |
+
multiprocessing_context = loader.multiprocessing_context
|
| 1003 |
+
|
| 1004 |
+
self._worker_init_fn = loader.worker_init_fn
|
| 1005 |
+
|
| 1006 |
+
# Adds forward compatibilities so classic DataLoader can work with DataPipes:
|
| 1007 |
+
# Additional worker init function will take care of sharding in MP and Distributed
|
| 1008 |
+
if isinstance(self._dataset, (IterDataPipe, MapDataPipe)):
|
| 1009 |
+
self._worker_init_fn = functools.partial(
|
| 1010 |
+
_sharding_worker_init_fn, self._worker_init_fn, self._world_size, self._rank)
|
| 1011 |
+
|
| 1012 |
+
# No certainty which module multiprocessing_context is
|
| 1013 |
+
self._worker_result_queue = multiprocessing_context.Queue() # type: ignore[var-annotated]
|
| 1014 |
+
self._worker_pids_set = False
|
| 1015 |
+
self._shutdown = False
|
| 1016 |
+
self._workers_done_event = multiprocessing_context.Event()
|
| 1017 |
+
|
| 1018 |
+
self._index_queues = []
|
| 1019 |
+
self._workers = []
|
| 1020 |
+
for i in range(self._num_workers):
|
| 1021 |
+
# No certainty which module multiprocessing_context is
|
| 1022 |
+
index_queue = multiprocessing_context.Queue() # type: ignore[var-annotated]
|
| 1023 |
+
# Need to `cancel_join_thread` here!
|
| 1024 |
+
# See sections (2) and (3b) above.
|
| 1025 |
+
index_queue.cancel_join_thread()
|
| 1026 |
+
w = multiprocessing_context.Process(
|
| 1027 |
+
target=_utils.worker._worker_loop,
|
| 1028 |
+
args=(self._dataset_kind, self._dataset, index_queue,
|
| 1029 |
+
self._worker_result_queue, self._workers_done_event,
|
| 1030 |
+
self._auto_collation, self._collate_fn, self._drop_last,
|
| 1031 |
+
self._base_seed, self._worker_init_fn, i, self._num_workers,
|
| 1032 |
+
self._persistent_workers, self._shared_seed))
|
| 1033 |
+
w.daemon = True
|
| 1034 |
+
# NB: Process.start() actually take some time as it needs to
|
| 1035 |
+
# start a process and pass the arguments over via a pipe.
|
| 1036 |
+
# Therefore, we only add a worker to self._workers list after
|
| 1037 |
+
# it started, so that we do not call .join() if program dies
|
| 1038 |
+
# before it starts, and __del__ tries to join but will get:
|
| 1039 |
+
# AssertionError: can only join a started process.
|
| 1040 |
+
w.start()
|
| 1041 |
+
self._index_queues.append(index_queue)
|
| 1042 |
+
self._workers.append(w)
|
| 1043 |
+
|
| 1044 |
+
if self._pin_memory:
|
| 1045 |
+
self._pin_memory_thread_done_event = threading.Event()
|
| 1046 |
+
|
| 1047 |
+
# Queue is not type-annotated
|
| 1048 |
+
self._data_queue = queue.Queue() # type: ignore[var-annotated]
|
| 1049 |
+
if self._pin_memory_device == "xpu":
|
| 1050 |
+
current_device = torch.xpu.current_device() # type: ignore[attr-defined]
|
| 1051 |
+
elif self._pin_memory_device == torch._C._get_privateuse1_backend_name():
|
| 1052 |
+
custom_device_mod = getattr(torch, torch._C._get_privateuse1_backend_name())
|
| 1053 |
+
current_device = custom_device_mod.current_device()
|
| 1054 |
+
else:
|
| 1055 |
+
current_device = torch.cuda.current_device() # choose cuda for default
|
| 1056 |
+
pin_memory_thread = threading.Thread(
|
| 1057 |
+
target=_utils.pin_memory._pin_memory_loop,
|
| 1058 |
+
args=(self._worker_result_queue, self._data_queue,
|
| 1059 |
+
current_device,
|
| 1060 |
+
self._pin_memory_thread_done_event, self._pin_memory_device))
|
| 1061 |
+
pin_memory_thread.daemon = True
|
| 1062 |
+
pin_memory_thread.start()
|
| 1063 |
+
# Similar to workers (see comment above), we only register
|
| 1064 |
+
# pin_memory_thread once it is started.
|
| 1065 |
+
self._pin_memory_thread = pin_memory_thread
|
| 1066 |
+
else:
|
| 1067 |
+
self._data_queue = self._worker_result_queue # type: ignore[assignment]
|
| 1068 |
+
|
| 1069 |
+
# In some rare cases, persistent workers (daemonic processes)
|
| 1070 |
+
# would be terminated before `__del__` of iterator is invoked
|
| 1071 |
+
# when main process exits
|
| 1072 |
+
# It would cause failure when pin_memory_thread tries to read
|
| 1073 |
+
# corrupted data from worker_result_queue
|
| 1074 |
+
# atexit is used to shutdown thread and child processes in the
|
| 1075 |
+
# right sequence before main process exits
|
| 1076 |
+
if self._persistent_workers and self._pin_memory:
|
| 1077 |
+
import atexit
|
| 1078 |
+
for w in self._workers:
|
| 1079 |
+
atexit.register(_MultiProcessingDataLoaderIter._clean_up_worker, w)
|
| 1080 |
+
|
| 1081 |
+
# .pid can be None only before process is spawned (not the case, so ignore)
|
| 1082 |
+
_utils.signal_handling._set_worker_pids(id(self), tuple(w.pid for w in self._workers)) # type: ignore[misc]
|
| 1083 |
+
_utils.signal_handling._set_SIGCHLD_handler()
|
| 1084 |
+
self._worker_pids_set = True
|
| 1085 |
+
self._reset(loader, first_iter=True)
|
| 1086 |
+
|
| 1087 |
+
def _reset(self, loader, first_iter=False):
|
| 1088 |
+
super()._reset(loader, first_iter)
|
| 1089 |
+
self._send_idx = 0 # idx of the next task to be sent to workers
|
| 1090 |
+
self._rcvd_idx = 0 # idx of the next task to be returned in __next__
|
| 1091 |
+
# information about data not yet yielded, i.e., tasks w/ indices in range [rcvd_idx, send_idx).
|
| 1092 |
+
# map: task idx => - (worker_id,) if data isn't fetched (outstanding)
|
| 1093 |
+
# \ (worker_id, data) if data is already fetched (out-of-order)
|
| 1094 |
+
self._task_info = {}
|
| 1095 |
+
self._tasks_outstanding = 0 # always equal to count(v for v in task_info.values() if len(v) == 1)
|
| 1096 |
+
# A list of booleans representing whether each worker still has work to
|
| 1097 |
+
# do, i.e., not having exhausted its iterable dataset object. It always
|
| 1098 |
+
# contains all `True`s if not using an iterable-style dataset
|
| 1099 |
+
# (i.e., if kind != Iterable).
|
| 1100 |
+
# Not that this indicates that a worker still has work to do *for this epoch*.
|
| 1101 |
+
# It does not mean that a worker is dead. In case of `_persistent_workers`,
|
| 1102 |
+
# the worker will be reset to available in the next epoch.
|
| 1103 |
+
self._workers_status = [True for i in range(self._num_workers)]
|
| 1104 |
+
# Reset the worker queue cycle so it resumes next epoch at worker 0
|
| 1105 |
+
self._worker_queue_idx_cycle = itertools.cycle(range(self._num_workers))
|
| 1106 |
+
# We resume the prefetching in case it was enabled
|
| 1107 |
+
if not first_iter:
|
| 1108 |
+
for idx in range(self._num_workers):
|
| 1109 |
+
self._index_queues[idx].put(_utils.worker._ResumeIteration(self._shared_seed))
|
| 1110 |
+
resume_iteration_cnt = self._num_workers
|
| 1111 |
+
while resume_iteration_cnt > 0:
|
| 1112 |
+
return_idx, return_data = self._get_data()
|
| 1113 |
+
if isinstance(return_idx, _utils.worker._ResumeIteration):
|
| 1114 |
+
assert return_data is None
|
| 1115 |
+
resume_iteration_cnt -= 1
|
| 1116 |
+
# prime the prefetch loop
|
| 1117 |
+
for _ in range(self._prefetch_factor * self._num_workers):
|
| 1118 |
+
self._try_put_index()
|
| 1119 |
+
|
| 1120 |
+
def _try_get_data(self, timeout=_utils.MP_STATUS_CHECK_INTERVAL):
|
| 1121 |
+
# Tries to fetch data from `self._data_queue` once for a given timeout.
|
| 1122 |
+
# This can also be used as inner loop of fetching without timeout, with
|
| 1123 |
+
# the sender status as the loop condition.
|
| 1124 |
+
#
|
| 1125 |
+
# This raises a `RuntimeError` if any worker died expectedly. This error
|
| 1126 |
+
# can come from either the SIGCHLD handler in `_utils/signal_handling.py`
|
| 1127 |
+
# (only for non-Windows platforms), or the manual check below on errors
|
| 1128 |
+
# and timeouts.
|
| 1129 |
+
#
|
| 1130 |
+
# Returns a 2-tuple:
|
| 1131 |
+
# (bool: whether successfully get data, any: data if successful else None)
|
| 1132 |
+
try:
|
| 1133 |
+
data = self._data_queue.get(timeout=timeout)
|
| 1134 |
+
return (True, data)
|
| 1135 |
+
except Exception as e:
|
| 1136 |
+
# At timeout and error, we manually check whether any worker has
|
| 1137 |
+
# failed. Note that this is the only mechanism for Windows to detect
|
| 1138 |
+
# worker failures.
|
| 1139 |
+
failed_workers = []
|
| 1140 |
+
for worker_id, w in enumerate(self._workers):
|
| 1141 |
+
if self._workers_status[worker_id] and not w.is_alive():
|
| 1142 |
+
failed_workers.append(w)
|
| 1143 |
+
self._mark_worker_as_unavailable(worker_id)
|
| 1144 |
+
if len(failed_workers) > 0:
|
| 1145 |
+
pids_str = ', '.join(str(w.pid) for w in failed_workers)
|
| 1146 |
+
raise RuntimeError(f'DataLoader worker (pid(s) {pids_str}) exited unexpectedly') from e
|
| 1147 |
+
if isinstance(e, queue.Empty):
|
| 1148 |
+
return (False, None)
|
| 1149 |
+
import tempfile
|
| 1150 |
+
import errno
|
| 1151 |
+
try:
|
| 1152 |
+
# Raise an exception if we are this close to the FDs limit.
|
| 1153 |
+
# Apparently, trying to open only one file is not a sufficient
|
| 1154 |
+
# test.
|
| 1155 |
+
# See NOTE [ DataLoader on Linux and open files limit ]
|
| 1156 |
+
fds_limit_margin = 10
|
| 1157 |
+
fs = [tempfile.NamedTemporaryFile() for i in range(fds_limit_margin)]
|
| 1158 |
+
except OSError as e:
|
| 1159 |
+
if e.errno == errno.EMFILE:
|
| 1160 |
+
raise RuntimeError(
|
| 1161 |
+
"Too many open files. Communication with the"
|
| 1162 |
+
" workers is no longer possible. Please increase the"
|
| 1163 |
+
" limit using `ulimit -n` in the shell or change the"
|
| 1164 |
+
" sharing strategy by calling"
|
| 1165 |
+
" `torch.multiprocessing.set_sharing_strategy('file_system')`"
|
| 1166 |
+
" at the beginning of your code") from None
|
| 1167 |
+
raise
|
| 1168 |
+
|
| 1169 |
+
# NOTE [ DataLoader on Linux and open files limit ]
|
| 1170 |
+
#
|
| 1171 |
+
# On Linux when DataLoader is used with multiprocessing we pass the data between
|
| 1172 |
+
# the root process and the workers through SHM files. We remove those files from
|
| 1173 |
+
# the filesystem as soon as they are created and keep them alive by
|
| 1174 |
+
# passing around their file descriptors through AF_UNIX sockets. (See
|
| 1175 |
+
# docs/source/multiprocessing.rst and 'Multiprocessing Technical Notes` in
|
| 1176 |
+
# the wiki (https://github.com/pytorch/pytorch/wiki).)
|
| 1177 |
+
#
|
| 1178 |
+
# This sometimes leads us to exceeding the open files limit. When that happens,
|
| 1179 |
+
# and the offending file descriptor is coming over a socket, the `socket` Python
|
| 1180 |
+
# package silently strips the file descriptor from the message, setting only the
|
| 1181 |
+
# `MSG_CTRUNC` flag (which might be a bit misleading since the manpage says that
|
| 1182 |
+
# it _indicates that some control data were discarded due to lack of space in
|
| 1183 |
+
# the buffer for ancillary data_). This might reflect the C implementation of
|
| 1184 |
+
# AF_UNIX sockets.
|
| 1185 |
+
#
|
| 1186 |
+
# This behaviour can be reproduced with the script and instructions at the
|
| 1187 |
+
# bottom of this note.
|
| 1188 |
+
#
|
| 1189 |
+
# When that happens, the standard Python `multiprocessing` (and not
|
| 1190 |
+
# `torch.multiprocessing`) raises a `RuntimeError: received 0 items of ancdata`
|
| 1191 |
+
#
|
| 1192 |
+
# Sometimes, instead of the FD being stripped, you may get an `OSError:
|
| 1193 |
+
# Too many open files`, both in the script below and in DataLoader. However,
|
| 1194 |
+
# this is rare and seems to be nondeterministic.
|
| 1195 |
+
#
|
| 1196 |
+
#
|
| 1197 |
+
# #!/usr/bin/env python3
|
| 1198 |
+
# import sys
|
| 1199 |
+
# import socket
|
| 1200 |
+
# import os
|
| 1201 |
+
# import array
|
| 1202 |
+
# import shutil
|
| 1203 |
+
# import socket
|
| 1204 |
+
#
|
| 1205 |
+
#
|
| 1206 |
+
# if len(sys.argv) != 4:
|
| 1207 |
+
# print("Usage: ", sys.argv[0], " tmp_dirname iteration (send|recv)")
|
| 1208 |
+
# sys.exit(1)
|
| 1209 |
+
#
|
| 1210 |
+
# if __name__ == '__main__':
|
| 1211 |
+
# dirname = sys.argv[1]
|
| 1212 |
+
# sock_path = dirname + "/sock"
|
| 1213 |
+
# iterations = int(sys.argv[2])
|
| 1214 |
+
# def dummy_path(i):
|
| 1215 |
+
# return dirname + "/" + str(i) + ".dummy"
|
| 1216 |
+
#
|
| 1217 |
+
#
|
| 1218 |
+
# if sys.argv[3] == 'send':
|
| 1219 |
+
# while not os.path.exists(sock_path):
|
| 1220 |
+
# pass
|
| 1221 |
+
# client = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
|
| 1222 |
+
# client.connect(sock_path)
|
| 1223 |
+
# for i in range(iterations):
|
| 1224 |
+
# fd = os.open(dummy_path(i), os.O_WRONLY | os.O_CREAT)
|
| 1225 |
+
# ancdata = array.array('i', [fd])
|
| 1226 |
+
# msg = bytes([i % 256])
|
| 1227 |
+
# print("Sending fd ", fd, " (iteration #", i, ")")
|
| 1228 |
+
# client.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, ancdata)])
|
| 1229 |
+
#
|
| 1230 |
+
#
|
| 1231 |
+
# else:
|
| 1232 |
+
# assert sys.argv[3] == 'recv'
|
| 1233 |
+
#
|
| 1234 |
+
# if os.path.exists(dirname):
|
| 1235 |
+
# raise Exception("Directory exists")
|
| 1236 |
+
#
|
| 1237 |
+
# os.mkdir(dirname)
|
| 1238 |
+
#
|
| 1239 |
+
# print("Opening socket...")
|
| 1240 |
+
# server = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
|
| 1241 |
+
# server.bind(sock_path)
|
| 1242 |
+
#
|
| 1243 |
+
# print("Listening...")
|
| 1244 |
+
# for i in range(iterations):
|
| 1245 |
+
# a = array.array('i')
|
| 1246 |
+
# msg, ancdata, flags, addr = server.recvmsg(1, socket.CMSG_SPACE(a.itemsize))
|
| 1247 |
+
# assert(len(ancdata) == 1)
|
| 1248 |
+
# cmsg_level, cmsg_type, cmsg_data = ancdata[0]
|
| 1249 |
+
# a.frombytes(cmsg_data)
|
| 1250 |
+
# print("Received fd ", a[0], " (iteration #", i, ")")
|
| 1251 |
+
#
|
| 1252 |
+
# shutil.rmtree(dirname)
|
| 1253 |
+
#
|
| 1254 |
+
# Steps to reproduce:
|
| 1255 |
+
#
|
| 1256 |
+
# 1. Run two shells and set lower file descriptor limit in the receiving one:
|
| 1257 |
+
# (shell1) ulimit -n 1020
|
| 1258 |
+
# (shell2) ulimit -n 1022
|
| 1259 |
+
#
|
| 1260 |
+
# 2. Run the script above with the `recv` option in the first shell
|
| 1261 |
+
# (shell1) ./test_socket.py sock_tmp 1017 recv
|
| 1262 |
+
#
|
| 1263 |
+
# 3. Run the script with the `send` option in the second shell:
|
| 1264 |
+
# (shell2) ./test_socket.py sock_tmp 1017 send
|
| 1265 |
+
|
| 1266 |
+
def _get_data(self):
    # Fetches data from `self._data_queue`.
    #
    # We check workers' status every `MP_STATUS_CHECK_INTERVAL` seconds,
    # which we achieve by running `self._try_get_data(timeout=MP_STATUS_CHECK_INTERVAL)`
    # in a loop. This is the only mechanism to detect worker failures for
    # Windows. For other platforms, a SIGCHLD handler is also used for
    # worker failure detection.
    #
    # If `pin_memory=True`, we also need to check if `pin_memory_thread` has
    # died at timeouts.
    if self._timeout > 0:
        # A user-specified timeout bounds the wait for a single fetch.
        success, data = self._try_get_data(self._timeout)
        if success:
            return data
        else:
            raise RuntimeError(f'DataLoader timed out after {self._timeout} seconds')
    elif self._pin_memory:
        # Poll while the pinning thread is alive; each `_try_get_data()` call
        # waits at most its default status-check interval before re-checking.
        while self._pin_memory_thread.is_alive():
            success, data = self._try_get_data()
            if success:
                return data
        else:
            # while condition is false, i.e., pin_memory_thread died.
            raise RuntimeError('Pin memory thread exited unexpectedly')
        # In this case, `self._data_queue` is a `queue.Queue`. But we don't
        # need to call `.task_done()` because we don't use `.join()`.
    else:
        # No timeout and no pinning thread: retry until a fetch succeeds.
        while True:
            success, data = self._try_get_data()
            if success:
                return data
|
| 1298 |
+
|
| 1299 |
+
def _next_data(self):
    # Return the next batch in sending order (`self._rcvd_idx`), buffering
    # results that arrive out of order in `self._task_info`.
    while True:
        # If the worker responsible for `self._rcvd_idx` has already ended
        # and was unable to fulfill this task (due to exhausting an `IterableDataset`),
        # we try to advance `self._rcvd_idx` to find the next valid index.
        #
        # This part needs to run in the loop because both the `self._get_data()`
        # call and `_IterableDatasetStopIteration` check below can mark
        # extra worker(s) as dead.
        while self._rcvd_idx < self._send_idx:
            info = self._task_info[self._rcvd_idx]
            worker_id = info[0]
            # `info` grows to length 2 once the result has been received
            # (see the out-of-order buffering below).
            if len(info) == 2 or self._workers_status[worker_id]:  # has data or is still active
                break
            del self._task_info[self._rcvd_idx]
            self._rcvd_idx += 1
        else:
            # no valid `self._rcvd_idx` is found (i.e., didn't break)
            if not self._persistent_workers:
                self._shutdown_workers()
            raise StopIteration

        # Now `self._rcvd_idx` is the batch index we want to fetch

        # Check if the next sample has already been generated
        if len(self._task_info[self._rcvd_idx]) == 2:
            data = self._task_info.pop(self._rcvd_idx)[1]
            return self._process_data(data)

        assert not self._shutdown and self._tasks_outstanding > 0
        idx, data = self._get_data()
        self._tasks_outstanding -= 1
        if self._dataset_kind == _DatasetKind.Iterable:
            # Check for _IterableDatasetStopIteration
            if isinstance(data, _utils.worker._IterableDatasetStopIteration):
                if self._persistent_workers:
                    # Keep the worker process alive for the next epoch; only
                    # mark it as done for this one.
                    self._workers_status[data.worker_id] = False
                else:
                    self._mark_worker_as_unavailable(data.worker_id)
                self._try_put_index()
                continue

        if idx != self._rcvd_idx:
            # store out-of-order samples
            self._task_info[idx] += (data,)
        else:
            del self._task_info[idx]
            return self._process_data(data)
|
| 1347 |
+
|
| 1348 |
+
def _try_put_index(self):
    """Fetch the next index batch from the sampler and hand it to an active worker.

    Silently does nothing when the sampler is exhausted or when no worker is
    still running; otherwise records the dispatch in `self._task_info` and
    advances the bookkeeping counters.
    """
    assert self._tasks_outstanding < self._prefetch_factor * self._num_workers

    try:
        next_batch_index = self._next_index()
    except StopIteration:
        # Sampler exhausted: nothing left to dispatch.
        return

    # Walk at most `num_workers` positions of the round-robin cycle looking
    # for a worker that is still active.
    active_queue_idx = None
    for _ in range(self._num_workers):
        candidate = next(self._worker_queue_idx_cycle)
        if self._workers_status[candidate]:
            active_queue_idx = candidate
            break
    if active_queue_idx is None:
        # Every worker has shut down; the index is dropped.
        return

    self._index_queues[active_queue_idx].put((self._send_idx, next_batch_index))
    # Record which worker owns this task; the result tuple grows to length 2
    # once data arrives.
    self._task_info[self._send_idx] = (active_queue_idx,)
    self._tasks_outstanding += 1
    self._send_idx += 1
|
| 1367 |
+
|
| 1368 |
+
def _process_data(self, data):
    # Account for the received result and immediately schedule the next index
    # so workers stay busy (keeps the prefetch pipeline full).
    self._rcvd_idx += 1
    self._try_put_index()
    if isinstance(data, ExceptionWrapper):
        # Re-raise an exception that was captured inside a worker process.
        data.reraise()
    return data
|
| 1374 |
+
|
| 1375 |
+
def _mark_worker_as_unavailable(self, worker_id, shutdown=False):
    # Mark a worker as having finished its work e.g., due to
    # exhausting an `IterableDataset`. This should be used only when this
    # `_MultiProcessingDataLoaderIter` is going to continue running.

    assert self._workers_status[worker_id] or (self._persistent_workers and shutdown)

    # Signal termination to that specific worker.
    q = self._index_queues[worker_id]
    # Indicate that no more data will be put on this queue by the current
    # process.
    q.put(None)

    # Note that we don't actually join the worker here, nor do we remove the
    # worker's pid from C side struct because (1) joining may be slow, and
    # (2) since we don't join, the worker may still raise error, and we
    # prefer capturing those, rather than ignoring them, even though they
    # are raised after the worker has finished its job.
    # Joining is deferred to `_shutdown_workers`, which is called when
    # all workers finish their jobs (e.g., `IterableDataset` replicas) or
    # when this iterator is garbage collected.

    self._workers_status[worker_id] = False

    # During a full shutdown the done event must already be set (and only then).
    assert self._workers_done_event.is_set() == shutdown
|
| 1400 |
+
|
| 1401 |
+
def _shutdown_workers(self):
    # Called when shutting down this `_MultiProcessingDataLoaderIter`.
    # See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on
    # the logic of this function.
    if _utils is None or _utils.python_exit_status is True or _utils.python_exit_status is None:
        # See (2) of the note. If Python is shutting down, do no-op.
        return
    # Normal exit when last reference is gone / iterator is depleted.
    # See (1) and the second half of the note.
    if not self._shutdown:
        # `self._shutdown` makes this method idempotent.
        self._shutdown = True
        try:
            # Exit `pin_memory_thread` first because exiting workers may leave
            # corrupted data in `worker_result_queue` which `pin_memory_thread`
            # reads from.
            if hasattr(self, '_pin_memory_thread'):
                # Use hasattr in case error happens before we set the attribute.
                self._pin_memory_thread_done_event.set()
                # Send something to pin_memory_thread in case it is waiting
                # so that it can wake up and check `pin_memory_thread_done_event`
                self._worker_result_queue.put((None, None))
                self._pin_memory_thread.join()
                self._worker_result_queue.cancel_join_thread()
                self._worker_result_queue.close()

            # Exit workers now.
            self._workers_done_event.set()
            for worker_id in range(len(self._workers)):
                # Get number of workers from `len(self._workers)` instead of
                # `self._num_workers` in case we error before starting all
                # workers.
                # If we are using workers_status with persistent_workers
                # we have to shut it down because the worker is paused
                if self._persistent_workers or self._workers_status[worker_id]:
                    self._mark_worker_as_unavailable(worker_id, shutdown=True)
            for w in self._workers:
                # We should be able to join here, but in case anything went
                # wrong, we set a timeout and if the workers fail to join,
                # they are killed in the `finally` block.
                w.join(timeout=_utils.MP_STATUS_CHECK_INTERVAL)
            for q in self._index_queues:
                q.cancel_join_thread()
                q.close()
        finally:
            # Even though all this function does is putting into queues that
            # we have called `cancel_join_thread` on, weird things can
            # happen when a worker is killed by a signal, e.g., hanging in
            # `Event.set()`. So we need to guard this with SIGCHLD handler,
            # and remove pids from the C side data structure only at the
            # end.
            #
            # FIXME: Unfortunately, for Windows, we are missing a worker
            # error detection mechanism here in this function, as it
            # doesn't provide a SIGCHLD handler.
            if self._worker_pids_set:
                _utils.signal_handling._remove_worker_pids(id(self))
                self._worker_pids_set = False
            for w in self._workers:
                if w.is_alive():
                    # Existing mechanisms try to make the workers exit
                    # peacefully, but in case that we unfortunately reach
                    # here, which we shouldn't, (e.g., pytorch/pytorch#39570),
                    # we kill the worker.
                    w.terminate()
|
| 1468 |
+
|
| 1469 |
+
# staticmethod is used to remove reference to `_MultiProcessingDataLoaderIter`
@staticmethod
def _clean_up_worker(w):
    # Best-effort teardown of a single worker process: wait up to
    # `MP_STATUS_CHECK_INTERVAL` seconds for it to join, then force-kill
    # it if it is still alive.
    try:
        w.join(timeout=_utils.MP_STATUS_CHECK_INTERVAL)
    finally:
        if w.is_alive():
            w.terminate()
|
| 1477 |
+
|
| 1478 |
+
def __del__(self):
    # Tear down worker processes/threads when the iterator is garbage
    # collected; `_shutdown_workers` is idempotent via `self._shutdown`.
    self._shutdown_workers()
|
moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from . import iter
|
| 2 |
+
from . import map
|
| 3 |
+
from . import dataframe
|
moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (268 Bytes). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/_decorator.cpython-310.pyc
ADDED
|
Binary file (6.05 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/datapipe.cpython-310.pyc
ADDED
|
Binary file (16.1 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/_hook_iterator.py
ADDED
|
@@ -0,0 +1,248 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import inspect
|
| 2 |
+
import functools
|
| 3 |
+
from enum import Enum
|
| 4 |
+
|
| 5 |
+
import torch.autograd
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class _SnapshotState(Enum):
    r"""
    These are the snapshotting-related states that IterDataPipes can be in.

    `NotStarted` - allows you to restore a snapshot and create an iterator with reset
    `Restored` - cannot restore again, allows you to create an iterator without resetting the DataPipe
    `Iterating` - can restore, will reset if you create a new iterator
    """

    NotStarted = 0  # initial state: no iterator has been created yet
    Restored = 1    # state was just restored from a snapshot
    Iterating = 2   # an iterator is (or has been) actively consuming the pipe
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def _simplify_obj_name(obj) -> str:
|
| 23 |
+
"""Simplify the display strings of objects for the purpose of rendering within DataPipe error messages."""
|
| 24 |
+
if inspect.isfunction(obj):
|
| 25 |
+
return obj.__name__
|
| 26 |
+
else:
|
| 27 |
+
return repr(obj)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def _strip_datapipe_from_name(name: str) -> str:
|
| 31 |
+
return name.replace("IterDataPipe", "").replace("MapDataPipe", "")
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def _generate_input_args_string(obj):
    """Generate a ``name=value`` string for the constructor arguments of *obj*.

    Only attributes whose names match parameters of ``obj.__class__``'s
    signature are included; values are rendered via ``_simplify_obj_name``.
    Attributes come from ``inspect.getmembers`` and are therefore sorted by name.
    """
    ctor_param_names = set(inspect.signature(obj.__class__).parameters.keys())
    rendered_pairs = [
        f'{attr_name}={_simplify_obj_name(attr_value)}'
        for attr_name, attr_value in inspect.getmembers(obj)
        if attr_name in ctor_param_names
    ]
    return ', '.join(rendered_pairs)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def _generate_iterdatapipe_msg(datapipe, simplify_dp_name: bool = False):
    """Format *datapipe* as ``ClassName(arg=..., ...)`` for error/profiler messages.

    With ``simplify_dp_name=True`` the 'IterDataPipe'/'MapDataPipe' suffix is
    stripped from the rendered class name.
    """
    rendered = f"{datapipe.__class__.__name__}({_generate_input_args_string(datapipe)})"
    return _strip_datapipe_from_name(rendered) if simplify_dp_name else rendered
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def _gen_invalid_iterdatapipe_msg(datapipe):
    """Return the error message for a second iterator being created on *datapipe*.

    Fix: the original message read "This may be caused multiple references",
    which is ungrammatical; "caused by" is the intended phrasing.
    """
    return ("This iterator has been invalidated because another iterator has been created "
            f"from the same IterDataPipe: {_generate_iterdatapipe_msg(datapipe)}\n"
            "This may be caused by multiple references to the same IterDataPipe. We recommend "
            "using `.fork()` if that is necessary.")
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
# Appended to every iterator-invalidation error below to point users at the
# upstream tracking issue for the single-iterator constraint.
_feedback_msg = ("\nFor feedback regarding this single iterator per IterDataPipe constraint, feel free "
                 "to comment on this issue: https://github.com/pytorch/data/issues/45.")
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def _check_iterator_valid(datapipe, iterator_id, next_method_exists=False) -> None:
    r"""
    Given an instance of a DataPipe and an iterator ID, check if the IDs match, and if not, raises an exception.

    In the case of ChildDataPipe, the ID gets compared to the one stored in `main_datapipe` as well.

    Fix: the user-facing note previously read "inside your IterDataPipe's a
    `__next__` method" — the stray "a" is removed.
    """
    if next_method_exists:
        # This is the case where `IterDataPipe` has both `__iter__` and `__next__`.
        # The `_valid_iterator_id` should either be never set (`None`), or set by at most one
        # iterator (`0`). Otherwise, it means there are multiple iterators.
        if datapipe._valid_iterator_id is not None and datapipe._valid_iterator_id != 0:
            extra_msg = "\nNote that this exception is raised inside your IterDataPipe's `__next__` method"
            raise RuntimeError(_gen_invalid_iterdatapipe_msg(datapipe) + extra_msg + _feedback_msg)
    elif hasattr(datapipe, "_is_child_datapipe") and datapipe._is_child_datapipe is True:
        # Child pipes delegate validity tracking to their shared `main_datapipe`.
        if hasattr(datapipe, "_check_valid_iterator_id"):
            if not datapipe._check_valid_iterator_id(iterator_id):
                raise RuntimeError("This iterator has been invalidated, because a new iterator has been created "
                                   f"from one of the ChildDataPipes of "
                                   f"{_generate_iterdatapipe_msg(datapipe.main_datapipe)}." + _feedback_msg)
        else:
            raise RuntimeError("ChildDataPipe must have method `_check_valid_iterator_id`.")
    elif datapipe._valid_iterator_id != iterator_id:
        raise RuntimeError(_gen_invalid_iterdatapipe_msg(datapipe) + _feedback_msg)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def _set_datapipe_valid_iterator_id(datapipe):
|
| 91 |
+
"""Given a DataPipe, updates its valid iterator ID and reset the DataPipe."""
|
| 92 |
+
if hasattr(datapipe, "_is_child_datapipe") and datapipe._is_child_datapipe is True:
|
| 93 |
+
if hasattr(datapipe, "_set_main_datapipe_valid_iterator_id"):
|
| 94 |
+
datapipe._set_main_datapipe_valid_iterator_id() # reset() is called within this method when appropriate
|
| 95 |
+
else:
|
| 96 |
+
raise RuntimeError("ChildDataPipe must have method `_set_main_datapipe_valid_iterator_id`.")
|
| 97 |
+
else:
|
| 98 |
+
if datapipe._valid_iterator_id is None:
|
| 99 |
+
datapipe._valid_iterator_id = 0
|
| 100 |
+
else:
|
| 101 |
+
datapipe._valid_iterator_id += 1
|
| 102 |
+
datapipe.reset()
|
| 103 |
+
return datapipe._valid_iterator_id
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def hook_iterator(namespace):
    r"""
    Define a hook that is applied to all `__iter__` of metaclass `_DataPipeMeta`.

    This is done for the purpose of profiling and checking if an iterator is still valid.
    """

    def profiler_record_fn_context(datapipe):
        # Lazily compute (and cache on the instance) a simplified display name,
        # then open a profiler record scope under that name.
        if not hasattr(datapipe, "_profile_name"):
            datapipe._profile_name = _generate_iterdatapipe_msg(datapipe, simplify_dp_name=True)
        return torch.autograd.profiler.record_function(datapipe._profile_name)

    class IteratorDecorator:
        r"""
        Wrap the iterator, modifying its `__next__` method.

        This decorator is applied to DataPipes whose `__iter__` method is NOT a generator function.
        Such an `__iter__` method commonly returns `self`, but not necessarily.
        """

        def __init__(self, iterator, datapipe, iterator_id, has_next_method):
            self.iterator = iterator
            self.datapipe = datapipe
            self.iterator_id = iterator_id
            self._profiler_enabled = torch.autograd._profiler_enabled()
            # Check if `__iter__` returns `self` and `DataPipe` has `__next__`
            self.self_and_has_next_method = self.iterator is self.datapipe and has_next_method

        def __iter__(self):
            return self

        def _get_next(self):
            """Return next with logic related to iterator validity, profiler, and incrementation of samples yielded."""
            _check_iterator_valid(self.datapipe, self.iterator_id)
            result = next(self.iterator)
            if not self.self_and_has_next_method:
                # When `__next__` exists on the DataPipe itself, the wrapped
                # `__next__` already counts samples; avoid double-counting here.
                self.datapipe._number_of_samples_yielded += 1
            return result

        def __next__(self):
            # TODO: Add try-except to in-place reduce traceback from the Exception
            # See: https://github.com/pytorch/data/issues/284
            if self._profiler_enabled:
                with profiler_record_fn_context(self.datapipe):
                    return self._get_next()
            else:  # Decided against using `contextlib.nullcontext` for performance reasons
                return self._get_next()

        def __getattr__(self, name):
            # Delegate any other attribute access to the wrapped iterator.
            return getattr(self.iterator, name)

    func = namespace['__iter__']

    # ``__iter__`` of IterDataPipe is a generator function
    if inspect.isgeneratorfunction(func):
        @functools.wraps(func)
        def wrap_generator(*args, **kwargs):
            gen = func(*args, **kwargs)
            datapipe = args[0]
            if datapipe._fast_forward_iterator:
                # Snapshot restore path: drain the pre-advanced iterator
                # instead of the fresh generator.
                it = datapipe._fast_forward_iterator
                datapipe._fast_forward_iterator = None
                datapipe._snapshot_state = _SnapshotState.Iterating
                while True:
                    try:
                        yield next(it)
                    except StopIteration:
                        return
            iterator_id = _set_datapipe_valid_iterator_id(datapipe)  # This ID is tied to each created iterator
            _profiler_enabled = torch.autograd._profiler_enabled()
            try:
                if _profiler_enabled:
                    with profiler_record_fn_context(datapipe):
                        response = gen.send(None)
                else:
                    response = gen.send(None)

                while True:
                    datapipe._number_of_samples_yielded += 1
                    request = yield response
                    # Pass through here every time `__next__` is called
                    if _profiler_enabled:
                        with profiler_record_fn_context(datapipe):
                            _check_iterator_valid(datapipe, iterator_id)
                            response = gen.send(request)
                    else:  # Decided against using `contextlib.nullcontext` for performance reasons
                        _check_iterator_valid(datapipe, iterator_id)
                        response = gen.send(request)
            except StopIteration as e:
                return
            except Exception as e:
                # TODO: Simplify the traceback message to skip over `response = gen.send(None)`
                # Part of https://github.com/pytorch/data/issues/284
                datapipe = args[0]
                msg = "thrown by __iter__ of"
                single_iterator_msg = "single iterator per IterDataPipe constraint"
                # Annotate the exception with the originating DataPipe, unless
                # a message mentioning it (or the single-iterator constraint)
                # has already been attached further down the pipeline.
                if hasattr(e.args, '__len__'):
                    full_msg = f"{msg} {datapipe.__class__.__name__}({_generate_input_args_string(datapipe)})"
                    if len(e.args) == 0 or not isinstance(e.args[0], str):  # If an exception message doesn't exist
                        e.args = (f'\nThis exception is {full_msg}',)
                    elif msg not in e.args[0] and single_iterator_msg not in e.args[0]:
                        e.args = (e.args[0] + f'\nThis exception is {full_msg}',) + e.args[1:]
                raise

        namespace['__iter__'] = wrap_generator
    else:  # ``__iter__`` of IterDataPipe is NOT a generator function
        # IterDataPipe is an iterator with both ``__iter__`` and ``__next__``
        # And ``__iter__`` may or may not return `self`
        if '__next__' in namespace:  # If `__next__` exists, put a wrapper around it
            next_func = namespace['__next__']

            @functools.wraps(next_func)
            def wrap_next(*args, **kwargs):
                datapipe = args[0]
                if torch.autograd._profiler_enabled():
                    with profiler_record_fn_context(datapipe):
                        result = next_func(*args, **kwargs)
                else:
                    result = next_func(*args, **kwargs)
                datapipe._number_of_samples_yielded += 1
                return result

            namespace['__next__'] = wrap_next

            # Note that if the `__next__` and `__iter__` do something completely unrelated. It may cause issue but
            # the user will be violating the iterator protocol. Potential issue:
            # 1. Valid iterator ID may not update or checked properly
            # 2. The number of samples yielded will be miscounted

        # Regardless if `__next__` exists or not, `__iter__` needs a wrapper to track the number of valid iterators
        @functools.wraps(func)
        def wrap_iter(*args, **kwargs):
            iter_ret = func(*args, **kwargs)
            datapipe = args[0]
            datapipe._snapshot_state = _SnapshotState.Iterating
            if datapipe._fast_forward_iterator:
                # Snapshot restore path: return the pre-advanced iterator
                # directly (no validity-checking decorator in this case).
                iter_ret = datapipe._fast_forward_iterator
                datapipe._fast_forward_iterator = None
                return iter_ret
            iterator_id = _set_datapipe_valid_iterator_id(datapipe)  # This ID is tied to each created iterator
            return IteratorDecorator(iter_ret, datapipe, iterator_id, '__next__' in namespace)

        namespace['__iter__'] = wrap_iter
|
moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/_typing.py
ADDED
|
@@ -0,0 +1,430 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Taking reference from official Python typing
|
| 2 |
+
# https://github.com/python/cpython/blob/master/Lib/typing.py
|
| 3 |
+
|
| 4 |
+
import collections
|
| 5 |
+
import functools
|
| 6 |
+
import numbers
|
| 7 |
+
import sys
|
| 8 |
+
|
| 9 |
+
from torch.utils.data.datapipes._hook_iterator import hook_iterator, _SnapshotState
|
| 10 |
+
from typing import (Any, Dict, Iterator, Generic, List, Set, Tuple, TypeVar, Union,
|
| 11 |
+
get_type_hints)
|
| 12 |
+
from typing import _eval_type, _tp_cache, _type_check, _type_repr # type: ignore[attr-defined]
|
| 13 |
+
from typing import ForwardRef
|
| 14 |
+
|
| 15 |
+
# TODO: Use TypeAlias when Python 3.6 is deprecated
|
| 16 |
+
# Please check [Note: TypeMeta and TypeAlias]
|
| 17 |
+
# In case of metaclass conflict due to ABCMeta or _ProtocolMeta
|
| 18 |
+
# For Python 3.9, only Protocol in typing uses metaclass
|
| 19 |
+
from abc import ABCMeta
|
| 20 |
+
from typing import _GenericAlias # type: ignore[attr-defined, no-redef]
|
| 21 |
+
|
| 22 |
+
class GenericMeta(ABCMeta):  # type: ignore[no-redef]
    # Placeholder metaclass: newer Python removed `typing.GenericMeta`, so a
    # plain ABCMeta subclass stands in for it here (see the TypeAlias note in
    # the imports above).
    pass
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class Integer(numbers.Integral):
    # Abstract stand-in for the builtin `int` in TYPE2ABC below, so subtype
    # checks compare against the numeric tower rather than the concrete type.
    pass
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class Boolean(numbers.Integral):
    # Abstract stand-in for the builtin `bool` in TYPE2ABC below. It derives
    # from Integral, mirroring `bool` being a subtype of `int`.
    pass
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
# Python 'type' object is not subscriptable
# Tuple[int, List, dict] -> valid
# tuple[int, list, dict] -> invalid
# Map Python 'type' to abstract base class
# Concrete builtin types are normalized to abstract equivalents so that
# `issubtype` compares against the typing/numbers hierarchies instead of
# exact concrete types.
TYPE2ABC = {
    bool: Boolean,
    int: Integer,
    float: numbers.Real,
    complex: numbers.Complex,
    dict: Dict,
    list: List,
    set: Set,
    tuple: Tuple,
    None: type(None),  # a bare `None` annotation means NoneType
}
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def issubtype(left, right, recursive=True):
    r"""
    Check if the left-side type is a subtype of the right-side type.

    If any of type is a composite type like `Union` and `TypeVar` with
    bounds, it would be expanded into a list of types and check all
    of left-side types are subtypes of either one from right-side types.
    """
    # Normalize concrete builtins (int, list, ...) to their abstract forms.
    left = TYPE2ABC.get(left, left)
    right = TYPE2ABC.get(right, right)

    if right is Any or left == right:
        return True

    # A bare `Generic[...]` on the right accepts anything.
    if isinstance(right, _GenericAlias):
        if getattr(right, '__origin__', None) is Generic:
            return True

    # `left == right` was already ruled out above, so NoneType on the right
    # cannot match anything else.
    if right == type(None):
        return False

    # Right-side type
    constraints = _decompose_type(right)

    # An unconstrained TypeVar (empty constraints) or Any accepts everything.
    if len(constraints) == 0 or Any in constraints:
        return True

    if left is Any:
        return False

    # Left-side type
    variants = _decompose_type(left)

    # all() will return True for empty variants
    if len(variants) == 0:
        return False

    # Every variant of the left type must satisfy at least one constraint.
    return all(_issubtype_with_constraints(variant, constraints, recursive) for variant in variants)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def _decompose_type(t, to_list=True):
    """Expand a `TypeVar` or `Union` into its member types, normalized via TYPE2ABC.

    For any other type: return a one-element list when ``to_list`` is true,
    otherwise ``None`` (signalling "not a composite type" to the caller).
    """
    if isinstance(t, TypeVar):
        bound = t.__bound__
        # An unconstrained TypeVar (e.g. T_co) has an empty __constraints__ tuple.
        members = [bound] if bound is not None else list(t.__constraints__)
    elif hasattr(t, '__origin__') and t.__origin__ == Union:
        members = t.__args__
    elif to_list:
        members = [t]
    else:
        return None
    # Ignored: Generator has incompatible item type "object"; expected "Type[Any]"
    return [TYPE2ABC.get(m, m) for m in members]  # type: ignore[misc]
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def _issubtype_with_constraints(variant, constraints, recursive=True):
    r"""
    Check if the variant is a subtype of either one from constraints.

    For composite types like `Union` and `TypeVar` with bounds, they
    would be expanded for testing.
    """
    # Exact membership in the constraint list short-circuits everything else.
    if variant in constraints:
        return True

    # [Note: Subtype for Union and TypeVar]
    # Python typing is able to flatten Union[Union[...]] or Union[TypeVar].
    # But it couldn't flatten the following scenarios:
    # - Union[int, TypeVar[Union[...]]]
    # - TypeVar[TypeVar[...]]
    # So, variant and each constraint may be a TypeVar or a Union.
    # In these cases, all of the inner types from the variant are required to be
    # extracted and verified as a subtype of any constraint. And, all of the
    # inner types from any constraint being a TypeVar or a Union are
    # also required to be extracted and verified if the variant belongs to
    # any of them.

    # Variant
    vs = _decompose_type(variant, to_list=False)

    # Variant is TypeVar or Union: every inner type must satisfy the constraints.
    if vs is not None:
        return all(_issubtype_with_constraints(v, constraints, recursive) for v in vs)

    # Variant is not TypeVar or Union: split it into origin and args so it can
    # be compared structurally against each constraint below.
    if hasattr(variant, '__origin__') and variant.__origin__ is not None:
        v_origin = variant.__origin__
        # In Python-3.9 typing library untyped generics do not have args
        v_args = getattr(variant, "__args__", None)
    else:
        v_origin = variant
        v_args = None

    # Constraints
    for constraint in constraints:
        cs = _decompose_type(constraint, to_list=False)

        # Constraint is TypeVar or Union: matching any inner type suffices.
        if cs is not None:
            if _issubtype_with_constraints(variant, cs, recursive):
                return True
        # Constraint is not TypeVar or Union
        else:
            # __origin__ can be None for plain list, tuple, ... in Python 3.6
            if hasattr(constraint, '__origin__') and constraint.__origin__ is not None:
                c_origin = constraint.__origin__
                if v_origin == c_origin:
                    # Origins match; a shallow (non-recursive) check stops here.
                    if not recursive:
                        return True
                    # In Python-3.9 typing library untyped generics do not have args
                    c_args = getattr(constraint, "__args__", None)
                    # An unparameterized constraint (e.g. plain `List`) accepts any args.
                    if c_args is None or len(c_args) == 0:
                        return True
                    # Both sides parameterized: compare args pairwise.
                    if v_args is not None and len(v_args) == len(c_args) and \
                            all(issubtype(v_arg, c_arg) for v_arg, c_arg in zip(v_args, c_args)):
                        return True
            # Tuple[int] -> Tuple
            else:
                if v_origin == constraint:
                    return True

    return False
def issubinstance(data, data_type):
    r"""
    Return True when *data* is an instance of the (possibly generic) *data_type*.

    The outer type is checked first; for tuple/list/set/dict the element
    types are then verified recursively against the generic arguments,
    when present.
    """
    if not issubtype(type(data), data_type, recursive=False):
        return False

    # In Python-3.9 typing library __args__ attribute is not defined for untyped generics
    args = getattr(data_type, "__args__", None)
    untyped = args is None or len(args) == 0

    if isinstance(data, tuple):
        if untyped:
            return True
        if len(args) != len(data):
            return False
        return all(issubinstance(d, a) for d, a in zip(data, args))

    if isinstance(data, (list, set)):
        if untyped:
            return True
        elem_t = args[0]
        return all(issubinstance(d, elem_t) for d in data)

    if isinstance(data, dict):
        if untyped:
            return True
        key_t, val_t = args
        return all(issubinstance(k, key_t) and issubinstance(v, val_t)
                   for k, v in data.items())

    # Non-container data: the outer type check above is sufficient.
    return True
# [Note: TypeMeta and TypeAlias]
# In order to keep compatibility for Python 3.6, use Meta for the typing.
# TODO: When PyTorch drops support for Python 3.6, this can be converted
# to the Alias system, using `__class_getitem__` for DataPipe. The typing
# system would then gain the benefits of better performance and of resolved
# metaclass conflicts, as elaborated in https://www.python.org/dev/peps/pep-0560/
class _DataPipeType:
    r"""Save type annotation in `param`."""

    def __init__(self, param):
        # The wrapped type annotation (a plain type or a typing construct).
        self.param = param

    def __repr__(self):
        return _type_repr(self.param)

    def __eq__(self, other):
        if isinstance(other, _DataPipeType):
            return self.param == other.param
        return NotImplemented

    def __hash__(self):
        return hash(self.param)

    def issubtype(self, other):
        r"""
        Check whether `self.param` is a subtype of `other`.

        `other` may be another `_DataPipeType` or a plain `type`; anything
        else raises `TypeError`.
        """
        if isinstance(other, _DataPipeType):
            # An unsubscripted Generic on the other side accepts any type.
            # BUGFIX: this check previously ran before the isinstance check,
            # so `other.param` raised AttributeError whenever `other` was a
            # plain `type` instead of reaching the branches below.
            if isinstance(other.param, _GenericAlias):
                if getattr(other.param, '__origin__', None) is Generic:
                    return True
            return issubtype(self.param, other.param)
        if isinstance(other, type):
            return issubtype(self.param, other)
        raise TypeError(f"Expected '_DataPipeType' or 'type', but found {type(other)}")

    def issubtype_of_instance(self, other):
        # Instance-level check against the stored annotation.
        return issubinstance(other, self.param)
| 243 |
+
# Default type for DataPipe without annotation
# Covariant TypeVar used as the parameter of the default `Generic` annotation.
T_co = TypeVar('T_co', covariant=True)
# Default annotation object (`Generic[T_co]`) wrapped for DataPipe classes
# that declare no explicit type.
_DEFAULT_TYPE = _DataPipeType(Generic[T_co])
class _DataPipeMeta(GenericMeta):
    r"""
    Metaclass for `DataPipe`.

    Add `type` attribute and `__init_subclass__` based on the type, and validate the return hint of `__iter__`.

    Note that there is subclass `_IterDataPipeMeta` specifically for `IterDataPipe`.
    """

    # Type annotation attached to the class (see `_DataPipeType`).
    type: _DataPipeType

    def __new__(cls, name, bases, namespace, **kwargs):
        # NOTE(review): this early return makes everything below unreachable;
        # the TODO below acknowledges this is deliberate for now.
        return super().__new__(cls, name, bases, namespace, **kwargs)  # type: ignore[call-overload]

        # TODO: the statements below are not reachable by design as there is a bug and typing is low priority for now.
        cls.__origin__ = None
        # A subclass that explicitly declares `type` keeps it as-is.
        if 'type' in namespace:
            return super().__new__(cls, name, bases, namespace, **kwargs)  # type: ignore[call-overload]

        namespace['__type_class__'] = False
        # For plain derived class without annotation
        for base in bases:
            if isinstance(base, _DataPipeMeta):
                return super().__new__(cls, name, bases, namespace, **kwargs)  # type: ignore[call-overload]

        # Root DataPipe class: install the default type and the subclass hook.
        namespace.update({'type': _DEFAULT_TYPE,
                          '__init_subclass__': _dp_init_subclass})
        return super().__new__(cls, name, bases, namespace, **kwargs)  # type: ignore[call-overload]

    def __init__(self, name, bases, namespace, **kwargs):
        super().__init__(name, bases, namespace, **kwargs)  # type: ignore[call-overload]

    # TODO: Fix isinstance bug
    @_tp_cache
    def _getitem_(self, params):
        # `DataPipe[None]` is rejected outright.
        if params is None:
            raise TypeError(f'{self.__name__}[t]: t can not be None')
        # A string parameter becomes a forward reference, resolved later.
        if isinstance(params, str):
            params = ForwardRef(params)
        if not isinstance(params, tuple):
            params = (params, )

        msg = f"{self.__name__}[t]: t must be a type"
        params = tuple(_type_check(p, msg) for p in params)

        if isinstance(self.type.param, _GenericAlias):
            orig = getattr(self.type.param, '__origin__', None)
            if isinstance(orig, type) and orig is not Generic:
                # Re-subscript the existing generic annotation with the new params.
                p = self.type.param[params]  # type: ignore[index]
                t = _DataPipeType(p)
                # Strip the old '[<type>]' suffix (its length plus the two
                # brackets) from the class name before appending the new one.
                l = len(str(self.type)) + 2
                name = self.__name__[:-l]
                name = name + '[' + str(t) + ']'
                bases = (self,) + self.__bases__
                return self.__class__(name, bases,
                                      {'__init_subclass__': _dp_init_subclass,
                                       'type': t,
                                       '__type_class__': True})

        if len(params) > 1:
            raise TypeError(f'Too many parameters for {self} actual {len(params)}, expected 1')

        t = _DataPipeType(params[0])

        # A subscript must narrow (be a subtype of) the current annotation.
        if not t.issubtype(self.type):
            raise TypeError(f'Can not subclass a DataPipe[{t}] from DataPipe[{self.type}]')

        # Types are equal, fast path for inheritance
        if self.type == t:
            return self

        name = self.__name__ + '[' + str(t) + ']'
        bases = (self,) + self.__bases__

        return self.__class__(name, bases,
                              {'__init_subclass__': _dp_init_subclass,
                               '__type_class__': True,
                               'type': t})

    # TODO: Fix isinstance bug
    def _eq_(self, other):
        if not isinstance(other, _DataPipeMeta):
            return NotImplemented
        # Unparameterized classes compare by identity only.
        if self.__origin__ is None or other.__origin__ is None:  # type: ignore[has-type]
            return self is other
        return (self.__origin__ == other.__origin__  # type: ignore[has-type]
                and self.type == other.type)

    # TODO: Fix isinstance bug
    def _hash_(self):
        return hash((self.__name__, self.type))
class _IterDataPipeMeta(_DataPipeMeta):
    r"""
    Metaclass for `IterDataPipe`; inherits from `_DataPipeMeta`.

    Installs behaviors specific to `IterDataPipe` on the class under
    construction: wraps `reset` so it is conditional on the snapshot
    state, and hooks `__iter__`.
    """

    def __new__(cls, name, bases, namespace, **kwargs):

        if 'reset' in namespace:
            original_reset = namespace['reset']

            @functools.wraps(original_reset)
            def conditional_reset(*args, **kwargs):
                r"""
                Execute the DataPipe's `reset()` only while `_SnapshotState` is `Iterating` or `NotStarted`.

                A recently restored DataPipe thereby preserves its restored
                state during the initial `__iter__` call.
                """
                pipe = args[0]
                resettable_states = (_SnapshotState.Iterating, _SnapshotState.NotStarted)
                if pipe._snapshot_state in resettable_states:
                    # `NotStarted` must reset as well, since the pipe's
                    # `source_datapipe` might have already begun iterating.
                    pipe._number_of_samples_yielded = 0
                    pipe._fast_forward_iterator = None
                    original_reset(*args, **kwargs)
                pipe._snapshot_state = _SnapshotState.Iterating

            namespace['reset'] = conditional_reset

        if '__iter__' in namespace:
            hook_iterator(namespace)

        return super().__new__(cls, name, bases, namespace, **kwargs)  # type: ignore[call-overload]
def _dp_init_subclass(sub_cls, *args, **kwargs):
    r"""
    `__init_subclass__` hook installed on DataPipe classes.

    Attaches `reinforce_type` to the subclass, resolves a string
    (`ForwardRef`) type annotation, and validates that the return hint of
    `__iter__` is an `Iterator` whose item type is a subtype of the
    declared DataPipe type.
    """
    # Add function for datapipe instance to reinforce the type
    sub_cls.reinforce_type = reinforce_type

    # TODO:
    # - add global switch for type checking at compile-time

    # Ignore internal type class
    if getattr(sub_cls, '__type_class__', False):
        return

    # Check if the string type is valid
    if isinstance(sub_cls.type.param, ForwardRef):
        # Resolve the forward reference in the subclass's module namespace.
        base_globals = sys.modules[sub_cls.__module__].__dict__
        try:
            param = _eval_type(sub_cls.type.param, base_globals, locals())
            sub_cls.type.param = param
        except TypeError as e:
            raise TypeError(f"{sub_cls.type.param.__forward_arg__} is not supported by Python typing") from e

    if '__iter__' in sub_cls.__dict__:
        iter_fn = sub_cls.__dict__['__iter__']
        hints = get_type_hints(iter_fn)
        if 'return' in hints:
            return_hint = hints['return']
            # Plain Return Hint for Python 3.6
            if return_hint == Iterator:
                return
            # The return hint must be Iterator (typing or collections.abc flavor).
            if not (hasattr(return_hint, '__origin__') and
                    (return_hint.__origin__ == Iterator or
                     return_hint.__origin__ == collections.abc.Iterator)):
                raise TypeError("Expected 'Iterator' as the return annotation for `__iter__` of {}"
                                ", but found {}".format(sub_cls.__name__, _type_repr(hints['return'])))
            # The yielded item type must narrow the declared DataPipe type.
            data_type = return_hint.__args__[0]
            if not issubtype(data_type, sub_cls.type.param):
                raise TypeError("Expected return type of '__iter__' as a subtype of {}, but found {}"
                                " for {}".format(sub_cls.type, _type_repr(data_type), sub_cls.__name__))
def reinforce_type(self, expected_type):
    r"""
    Reinforce the type for this DataPipe instance.

    `expected_type` is required to be a subtype of the instance's original
    type hint; the instance's type is narrowed to it, and `self` is
    returned so calls can be chained.
    """
    # A bare tuple of types is interpreted as Tuple[...] of those types.
    if isinstance(expected_type, tuple):
        expected_type = Tuple[expected_type]
    _type_check(expected_type, msg="'expected_type' must be a type")

    if issubtype(expected_type, self.type.param):
        self.type = _DataPipeType(expected_type)
        return self

    raise TypeError(f"Expected 'expected_type' as subtype of {self.type}, but found {_type_repr(expected_type)}")
|