diff --git a/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation_cuda_dispatch.h b/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6f1588b08b51eff84953b137aa4914d36e294b14 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor _addmm_activation(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false); +TORCH_API at::Tensor & _addmm_activation_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false); +TORCH_API at::Tensor & _addmm_activation_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/_foobar.h b/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/_foobar.h new file mode 100644 index 0000000000000000000000000000000000000000..3471ab1dd447a6f9bac485e0ae1c871a99c94ceb --- /dev/null +++ 
b/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/_foobar.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor +inline at::Tensor _foobar(const at::Tensor & self, bool arg1=true, bool arg2=true, bool arg3=true) { + return at::_ops::_foobar::call(self, arg1, arg2, arg3); +} + +// aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _foobar_out(at::Tensor & out, const at::Tensor & self, bool arg1=true, bool arg2=true, bool arg3=true) { + return at::_ops::_foobar_out::call(self, arg1, arg2, arg3, out); +} +// aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & _foobar_outf(const at::Tensor & self, bool arg1, bool arg2, bool arg3, at::Tensor & out) { + return at::_ops::_foobar_out::call(self, arg1, arg2, arg3, out); +} + +} diff --git a/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_cuda_dispatch.h b/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f4578ae046b5c70c40cb5323324efbe6900c3d4a --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor _upsample_nearest_exact1d(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional scales=::std::nullopt); +TORCH_API at::Tensor _upsample_nearest_exact1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional scales=::std::nullopt); +TORCH_API at::Tensor & _upsample_nearest_exact1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, ::std::optional scales=::std::nullopt); +TORCH_API at::Tensor & _upsample_nearest_exact1d_outf(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional scales, at::Tensor & out); +TORCH_API at::Tensor & _upsample_nearest_exact1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional scales=::std::nullopt); +TORCH_API at::Tensor & _upsample_nearest_exact1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional scales, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/chain_matmul_native.h b/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/chain_matmul_native.h new file mode 100644 index 0000000000000000000000000000000000000000..5349fc337a9324ad01300190555c83c8ed4d4874 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/chain_matmul_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor chain_matmul(at::TensorList matrices); +TORCH_API at::Tensor & chain_matmul_out(at::TensorList matrices, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_cpu_dispatch.h 
b/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3e584ba74bba0d7f11b7de0e3b04fd2c6da0ce7f --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor lcm(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & lcm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & lcm_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & lcm_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/resolve_conj.h b/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/resolve_conj.h new file mode 100644 index 0000000000000000000000000000000000000000..f352b2a46b16e0f5e6f23c01f24aed0ca4767ca2 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/resolve_conj.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::resolve_conj(Tensor(a) self) -> Tensor(a) +inline at::Tensor resolve_conj(const 
at::Tensor & self) { + return at::_ops::resolve_conj::call(self); +} + +} diff --git a/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_cuda_dispatch.h b/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..15e8582a6f13d701aeb1674dbffb775a81d0090b --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor special_chebyshev_polynomial_t(const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_chebyshev_polynomial_t_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_chebyshev_polynomial_t_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/triangular_solve_meta_dispatch.h b/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/triangular_solve_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..615866269f1ff66aae727d7e4c392bf1d2c18a05 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/torch/include/ATen/ops/triangular_solve_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API ::std::tuple triangular_solve(const at::Tensor & self, const at::Tensor & A, bool upper=true, bool transpose=false, bool unitriangular=false); +TORCH_API ::std::tuple triangular_solve_out(at::Tensor & X, at::Tensor & M, const at::Tensor & self, const at::Tensor & A, bool upper=true, bool transpose=false, bool unitriangular=false); +TORCH_API ::std::tuple triangular_solve_outf(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular, at::Tensor & X, at::Tensor & M); + +} // namespace meta +} // namespace at diff --git a/moondream/lib/python3.10/site-packages/torch/distributed/_tools/memory_tracker.py b/moondream/lib/python3.10/site-packages/torch/distributed/_tools/memory_tracker.py new file mode 100644 index 0000000000000000000000000000000000000000..96a3fa497c04c6419b74d8095ac4d3b4f1f41fa2 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/distributed/_tools/memory_tracker.py @@ -0,0 +1,299 @@ +from collections import defaultdict + +from itertools import chain + +import pickle + +from typing import ( + Any, + Callable, + Dict, + List, + no_type_check, + Sequence, +) + +import torch +import torch.nn as nn +from torch.utils.hooks import RemovableHandle +from torch.utils._python_dispatch import TorchDispatchMode + + +BYTES_PER_MB = 1024 * 1024.0 + + +class MemoryProfileDispatchMode(TorchDispatchMode): + """Run in ``TorchDispatchMode`` to get memory stats at operator level.""" + + def __init__(self, memory_tracker) -> None: + self.memory_tracker = memory_tracker + + def __torch_dispatch__(self, func, types, args=..., kwargs=None): + rs = func(*args, **kwargs) + if func == torch.ops.aten.detach.default: + return rs + func_name: str = ( + self.memory_tracker._cur_module_name + + "." 
+ + func.__name__ + + "_" + + str(self.memory_tracker._operator_names[func.__name__]) + ) + self.memory_tracker._operator_names[func.__name__] = ( + self.memory_tracker._operator_names[func.__name__] + 1 + ) + self.memory_tracker._record_memory_stats(func_name) + + return rs + + +class MemoryTracker: + """ + Collect and plot the memory stats at operator level. + + Includes ``memories_allocated``, ``memories_active`` and ``memories_reserved``. + It also prints a summary for the top 20 operators that generate the most memories. + + Example usage: + + >>> # xdoctest: +SKIP(failing) + >>> net.cuda() + >>> input = input.cuda() + + >>> mem_tracker = MemoryTracker() + >>> mem_tracker.start_monitor(net) + + >>> net.zero_grad(True) + >>> loss = net(input) + >>> if isinstance(loss, dict): + >>> loss = loss['out'] + >>> loss.sum().backward() + >>> net.zero_grad(set_to_none=True) + + >>> mem_tracker.stop() + >>> mem_tracker.summary() + >>> mem_tracker.show_traces() + """ + + def __init__(self) -> None: + torch._C._log_api_usage_once("torch.distributed.memory_tracker") + self._hooks: List[RemovableHandle] = [] + self._operator_names: Dict[str, int] = defaultdict(int) + self.memories_allocated: Dict[int, Dict[str, float]] = defaultdict() + self.memories_active: Dict[int, Dict[str, float]] = defaultdict() + self.memories_reserved: Dict[int, Dict[str, float]] = defaultdict() + self._markers: Dict[str, int] = defaultdict(int) + self._cur_module_name: str = "" + self._op_index: int = 0 + self._num_cuda_retries: int = 0 + + @no_type_check + def start_monitor(self, root_module: nn.Module) -> None: + """ + Register module hooks and entering ``MemoryProfileDispatchMode``. + + This enables operator level memory stats can be tracked during module runtime. 
+ """ + self._clear_state() + root_module.__setattr__("_memory_tracker_is_root", True) + for name, m in root_module.named_modules(): + if m is not root_module: + m.__setattr__("_memory_tracker_is_root", False) + # fused_proxy_group does not support hooks + if ".fused_proxy_grouped_embedding_bag" in name: + continue + # hook ordering with other hooks added by users is not managed, so + # the memory stats tracked here may not completely accurate. + h1 = m.register_forward_pre_hook(self._create_pre_forward_hook(name)) + h2 = m.register_forward_hook(self._create_post_forward_hook(name)) + # it does not work well with jagged tensor somehow, the root cause is not + # clear and remove it for now as it does not really capture important info. + # h3 = m.register_backward_hook(self._create_backward_hook(name)) + self._hooks.extend([h1, h2]) + torch.cuda.empty_cache() + assert getattr(self, "profile_mode", None) is None + self.profile_mode = MemoryProfileDispatchMode(self) + self.profile_mode.__enter__() + + @no_type_check + def stop(self) -> None: + """ + Remove module hooks and exit ``MemoryProfileDispatchMode`` to stop tracking memory stats at operator level. + + Get some aggregated stats when the memory_tracker() is enabled, like cuda ``num_alloc_retries``. + """ + self._num_cuda_retries = torch.cuda.memory_stats().get("num_alloc_retries", 0) + + for h in self._hooks: + h.remove() + self._hooks.clear() + assert getattr(self, "profile_mode", None) is not None + self.profile_mode.__exit__(None, None, None) + self.profile_mode = None + + @no_type_check + def summary(self, top: int = 20) -> None: + """ + Print out the top operators that generate the most memories. + + The number of the top operators can be configured. 
+ """ + op_diff: Dict[str, float] = defaultdict(float) + op_name, previous_allocated_memory = self.memories_allocated[0] + for i in range(1, self._op_index): + op_name, current_allocated_memory = self.memories_allocated[i] + op_diff[op_name] = current_allocated_memory - previous_allocated_memory + previous_allocated_memory = current_allocated_memory + + print("------------------------------------------------") + print(f"The number of cuda retries are: {self._num_cuda_retries}") + print(f"Top {top} ops that generates memory are:") + for k, v in sorted(op_diff.items(), key=lambda item: item[1], reverse=True)[ + :top + ]: + print(f"{k}: {v}MB") + print("------------------------------------------------") + + @no_type_check + def show_traces(self, path: str = "") -> None: + import matplotlib.pyplot as plt + + def _plot_figure(x, y_values, labels): + min_val = min(list(chain(*y_values))) * 0.999 + max_val = max(list(chain(*y_values))) * 1.001 + plt.figure() + for y, label in zip(y_values, labels): + plt.plot(x, y, label=label) + plt.xlabel("# Operator Calls") + plt.ylabel("Memory (MB)") + plt.legend() + for marker_name, marker in self._markers.items(): + if marker_name == "fw_bw_boundary": + plt.plot( + [marker, marker], + [min_val, max_val], + "r", + lw=2, + label=marker_name, + ) + else: + plt.plot( + [marker, marker], + [min_val, max_val], + "k-", + lw=2, + label=marker_name, + ) + + if path != "": + self.load(path) + + y_1 = [gb for (name, gb) in self.memories_allocated.values()] + y_2 = [gb for (name, gb) in self.memories_active.values()] + y_3 = [gb for (name, gb) in self.memories_reserved.values()] + x = list(range(len(y_1))) + # Split figures when there is big difference between + # "reserved_memory" and "allocated_memory" or "active_memory". 
+ _plot_figure( + x, + [list(y_1), list(y_2), list(y_3)], + ["allocated_memory", "active_memory", "reserved_memory"], + ) + _plot_figure(x, [list(y_1)], ["allocated_memory"]) + _plot_figure(x, [list(y_2)], ["active_memory"]) + _plot_figure(x, [list(y_3)], ["reserved_memory"]) + + def save_stats(self, path: str) -> None: + """Save the stats using pickle during runtime if users want to plot the traces in other places like notebook.""" + stats = { + "memories_allocated": self.memories_allocated, + "memories_active": self.memories_active, + "memories_reserved": self.memories_reserved, + "markers": self._markers, + "num_alloc_retries": self._num_cuda_retries, + } + + with open(path, "wb") as f: + pickle.dump(stats, f, pickle.HIGHEST_PROTOCOL) + + def load(self, path: str) -> None: + """Load the pickled memory stats to plot the traces or print the summary.""" + with open(path, "rb") as f: + stats = pickle.load(f) + + self.memories_allocated = stats["memories_allocated"] + self.memories_active = stats["memories_active"] + self.memories_reserved = stats["memories_reserved"] + self._markers = stats["markers"] + self._num_cuda_retries = stats["num_alloc_retries"] + + def _create_pre_forward_hook(self, name: str) -> Callable: + """Prefix operator name with current module and 'forward', and insert 'fw_start' marker at forward pass start.""" + def _pre_forward_hook(module: nn.Module, inputs: Any) -> None: + self._cur_module_name = f"{name}.forward" + if ( + hasattr(module, "_memory_tracker_is_root") + and module._memory_tracker_is_root + ): + self._add_marker("fw_start") + + return _pre_forward_hook + + def _create_post_forward_hook(self, name: str) -> Callable: + """Insert the marker 'fw_bw_boundary' at the boundary of forward and backward pass.""" + + def _post_forward_hook( + module: nn.Module, + inputs: Sequence[torch.Tensor], + outputs: Sequence[torch.Tensor], + ) -> None: + if ( + hasattr(module, "_memory_tracker_is_root") + and module._memory_tracker_is_root + ): + 
self._add_marker("fw_bw_boundary") + + return _post_forward_hook + + def _create_backward_hook(self, name: str) -> Callable: + """Insert the current module name with backward prefix for the operator name.""" + + def _backward_hook( + module: nn.Module, grad_input: torch.Tensor, grad_output: torch.Tensor + ) -> None: + self._cur_module_name = f"{name}.backward" + + return _backward_hook + + @no_type_check + def _record_memory_stats(self, fn_name: str) -> None: + """ + Record current memory allocated, current memory active and current memory reserved. + + The memory stats dict is indexed with ``self._op_index``. + """ + memory_allocated: float = torch.cuda.memory_allocated() / BYTES_PER_MB + memory_reserved: float = torch.cuda.memory_reserved() / BYTES_PER_MB + memory_active: float = ( + torch.cuda.memory_stats().get("active_bytes.all.current", 0) / BYTES_PER_MB + ) + self.memories_allocated[self._op_index] = (fn_name, memory_allocated) + self.memories_reserved[self._op_index] = (fn_name, memory_reserved) + self.memories_active[self._op_index] = (fn_name, memory_active) + self._op_index += 1 + + def _add_marker(self, marker_name: str) -> None: + """Set the marker's x-axis value.""" + marker_val = len(self.memories_allocated.values()) + self._markers[marker_name] = marker_val + + def _clear_state(self) -> None: + """Clear states when start_monitor() is called.""" + self._operator_names.clear() + self.memories_allocated.clear() + self.memories_active.clear() + self.memories_reserved.clear() + self._markers.clear() + self._cur_module_name = "" + self._op_index = 0 + self._num_cuda_retries = 0 diff --git a/moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_fsdp_extensions.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_fsdp_extensions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c3922e421f3e5585924862ac1be7edc3f98bf88 Binary files /dev/null and 
b/moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_fsdp_extensions.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_limiter_utils.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_limiter_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe025785d5dff3354d8a64dbd952b7aa65439b45 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_limiter_utils.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_shard_utils.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_shard_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a48f3ba262752a28951c3d95d8d5d2164ebc933 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_shard_utils.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/api.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68595a3c217bb20e2f5989ebd7d0c945dfeae361 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/api.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/sharded_grad_scaler.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/sharded_grad_scaler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4961fed4712a1a4a82b4beea302eea021863d393 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/sharded_grad_scaler.cpython-310.pyc differ 
diff --git a/moondream/lib/python3.10/site-packages/torch/distributed/fsdp/_optim_utils.py b/moondream/lib/python3.10/site-packages/torch/distributed/fsdp/_optim_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..682a7f2b299a9316ff842c5c402b42aad7b31800 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/distributed/fsdp/_optim_utils.py @@ -0,0 +1,2086 @@ +import copy +import functools +import logging +import warnings +from contextlib import ExitStack +from dataclasses import dataclass, field +from typing import ( + Any, + cast, + Dict, + Iterable, + Iterator, + List, + NamedTuple, + no_type_check, + Optional, + Sequence, + Set, + Tuple, + Union, +) + +import torch +import torch.distributed as dist +import torch.distributed.fsdp._traversal_utils as traversal_utils +import torch.nn as nn +from torch.distributed._shard.sharded_tensor import ShardedTensor +from torch.distributed._state_dict_utils import _gather_state_dict +from torch.distributed._tensor import DTensor, Replicate +from torch.distributed.distributed_c10d import _get_pg_default_device +from torch.distributed.fsdp._common_utils import ( + _apply_to_modules, + _FSDPState, + _get_module_fsdp_state_if_fully_sharded_module, + _get_param_to_fqns, + _module_handle, + _named_parameters_with_duplicates, + clean_tensor_name, +) +from torch.distributed.fsdp._debug_utils import SimpleProfiler +from torch.distributed.fsdp._flat_param import FlatParameter, FlatParamHandle +from torch.distributed.fsdp._fsdp_extensions import ( + _ext_chunk_dtensor, + _ext_chunk_tensor, +) +from torch.distributed.fsdp._runtime_utils import ( + _lazy_init, + _reset_flat_param_grad_info_if_needed, +) +from torch.distributed.fsdp.api import ( + ShardingStrategy, + StateDictSettings, + StateDictType, +) +from torch.utils._pytree import tree_map_only + + +logger = logging.getLogger(__name__) + + +@dataclass +class FSDPParamInfo: + state: _FSDPState + handle: FlatParamHandle + param_indices: Dict[str, 
int] + param_requires_grad: List[bool] + + +def sorted_items(dictionary: Dict[str, Any]) -> Iterator[Tuple[str, Any]]: + keys = sorted(dictionary.keys()) + for k in keys: + yield k, dictionary[k] + + +@dataclass +class _ConsolidatedOptimState: + """ + This holds the consolidated optimizer state on the target rank. Positive- + dimension tensor state is communicated across ranks, while zero-dimension + tensor state and non-tensor state is taken directly from the target rank. + + PyTorch version 1.12 moved to using zero-dimension tensors for scalar + values, but user implemented optimizers may still use float (i.e. a + non-tensor). Thus, we support both and handle them identically. + + Attributes: + tensor_state (Dict[str, torch.Tensor]): Mapping from positive-dimension + tensor state name to the unsharded flat tensor representing the + state. + zero_dim_tensor_state (Dict[str, torch.Tensor]): Mapping from zero- + dimension tensor state name to its value. + non_tensor_state (Dict[str, Any]): Mapping from non-tensor state + name to its value. + """ + + tensor_state: Dict[str, torch.Tensor] = field(default_factory=dict) + zero_dim_tensor_state: Dict[str, torch.Tensor] = field(default_factory=dict) + non_tensor_state: Dict[str, Any] = field(default_factory=dict) + + +class _PosDimTensorInfo(NamedTuple): + """ + Meatadata for positive-dimension tensors used internally for + :meth:`scatter_full_optim_state_dict`. + + Attributes: + shape (torch.Size): Sharded tensor shape (which is equal to the + unsharded tensor shape if the tensor is optimizer state for a + non-FSDP parameter and is hence not sharded). + dtype (torch.dtype): Data type of the tensor. + """ + + shape: torch.Size + dtype: torch.dtype + + +class _OptimStateKey(NamedTuple): + """ + This represents an optimizer state key that may be used commonly across + ranks. It is based on the unflattened parameter names rather than parameter + IDs to make it independent of each rank's own optimizer construction. 
+ """ + + unflat_param_names: Tuple[str, ...] + is_fsdp_managed: bool + + +def _unflatten_optim_state( + fsdp_param_info: FSDPParamInfo, + flat_param_state: Dict[str, Any], + to_save: bool, + shard_state: bool, + cpu_offload: bool, +) -> List[Dict[str, Any]]: + """ + Unflattens the optimizer state, consisting of the "state" part and the + "param_groups" part. Unflattening the "state" part involves consolidating + the state on the target rank and remapping from flattened to unflattened + parameter IDs, and the "param_groups" part only involves remapping from + flattened to unflattened parameter IDs. + + Args: + fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a + mapping from FQN to original parameter index. + flat_param_state (Dict[str, Any]): Entry for the flat parameter in the + "state" part of the optimizer state dict. + to_save (bool): Whether to save the state on this rank. + + Returns: + List[Dict[str, Any]]: A :class:`list` holding the entries in the + "state" part of the optimizer state dict corresponding to the + unflattened parameters comprising the flat parameter if on the target + rank or an empty :class:`list` otherwise. The final optimizer state + dict will need to map these entries using the proper unflattened + parameter IDs. + """ + assert ( + not shard_state or to_save + ), "If ``shard_state`` is True, ``to_save`` has to be True." 
+ consolidated_state = _communicate_optim_state( + fsdp_param_info, + flat_param_state, + ) + if to_save: + unflat_param_state = _unflatten_communicated_optim_state( + fsdp_param_info, + consolidated_state, + shard_state, + ) + for optim_state in unflat_param_state: + # We can't use .items() below cuz we'd run into a concurrent modification error + if cpu_offload: + for key in list(optim_state.keys()): + state = optim_state[key] + if not isinstance(state, torch.Tensor): + continue + optim_state[key] = state.cpu() + return unflat_param_state + else: + return [] + + +def _is_zero_dim_tensor(x: Any) -> bool: + return torch.is_tensor(x) and x.dim() == 0 + + +def _communicate_optim_state( + fsdp_param_info: FSDPParamInfo, + flat_param_state: Dict[str, Any], +) -> _ConsolidatedOptimState: + """ + Communicates the optimizer state for a flat parameter across ranks. All + ranks will hold the entire non-sharded optimizer state on GPU. + + If ``N`` is the number of tensor optimizer states in the optimizer state + dict, then the communication complexity is 0 if ``N = 0`` and ``N + 1`` + otherwise (where the plus 1 comes from all-gathering the padding per rank). + + Args: + fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a + mapping from FQN to original parameter index. + flat_param_state (Dict[str, Any]): The entry in the "state" part of the + optimizer state dict corresponding to the flat parameter. + + Returns: + ConsolidatedOptimState: Consolidated optimizer state for the target + flat parameter. 
def _unflatten_communicated_optim_state(
    fsdp_param_info: FSDPParamInfo,
    state: _ConsolidatedOptimState,
    shard_state: bool,
) -> List[Dict[str, Any]]:
    """
    Unflattens the communicated optimizer state (given by ``tensor_state``,
    ``non_tensor_state``, and ``zero_dim_tensor_state``) for a single flat
    parameter. This should only be called on the target rank.

    Args:
        fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a
            mapping from FQN to original parameter index.
        state (_ConsolidatedOptimState): Consolidated optimizer state.
        shard_state (bool): If ``True``, each positive-dimension tensor state
            view is re-chunked across ranks (as a ``DTensor`` when the state
            dict config enables ``_use_dtensor``, otherwise via
            ``_ext_chunk_tensor``); if ``False``, the full unflattened view is
            kept.

    Returns:
        List[Dict[str, Any]]: A :class:`list` holding the entries in the
        "state" part of the optimizer state dict corresponding to the
        unflattened parameters comprising the flat parameter. The final
        optimizer state dict will need to map these entries using the proper
        unflattened parameter IDs.
    """
    fsdp_state = fsdp_param_info.state
    handle = fsdp_param_info.handle
    flat_param = handle.flat_param
    unflat_param_state: List[Dict[str, Any]] = []
    # Cache of state-name -> iterator of unflattened views so that the views
    # are generated once per state name, not once per (param, state) pair
    flat_param_views: Dict[str, Iterator] = {}
    num_unflat_params = flat_param._num_params
    tensor_state, zero_dim_tensor_state, non_tensor_state = (
        state.tensor_state,
        state.zero_dim_tensor_state,
        state.non_tensor_state,
    )

    for _ in range(num_unflat_params):
        unflat_state_param = {}
        # Add positive-dimension tensor state: unflatten with views
        for state_name, flat_tensor in sorted_items(tensor_state):
            views_generated = state_name in flat_param_views
            if not views_generated:
                views = handle._get_unflat_views(flat_tensor)
                flat_param_views[state_name] = views
            else:
                views = flat_param_views[state_name]
            # Each call to `next` yields the view for the next original param
            optim_state: Union[torch.Tensor, ShardedTensor, DTensor] = next(views)
            if shard_state:
                osd_config = fsdp_state._optim_state_dict_config
                if getattr(osd_config, "_use_dtensor", False):
                    assert fsdp_state._device_mesh is not None
                    optim_state = _ext_chunk_dtensor(
                        optim_state,
                        fsdp_state.rank,
                        fsdp_state._device_mesh,
                        fsdp_state._fsdp_extension,
                    )
                else:
                    assert fsdp_state.process_group is not None
                    optim_state = _ext_chunk_tensor(
                        optim_state,
                        fsdp_state.rank,
                        fsdp_state.world_size,
                        fsdp_state._device_handle.device_count(),
                        fsdp_state.process_group,
                        fsdp_state._fsdp_extension,
                    )
            unflat_state_param[state_name] = optim_state

        # Add zero-dimension tensor state: take the target rank's value
        for state_name, zero_dim_tensor in sorted_items(zero_dim_tensor_state):
            unflat_state_param[state_name] = zero_dim_tensor
        # Add non-tensor state: take the target rank's value
        for state_name, non_tensor in sorted_items(non_tensor_state):
            unflat_state_param[state_name] = non_tensor
        unflat_param_state.append(unflat_state_param)
    return unflat_param_state
def _broadcast_processed_state(
    fsdp_state: _FSDPState,
    optim_state: Dict[str, Any],
    group: Optional[dist.ProcessGroup],
) -> Dict[str, Any]:
    """
    Broadcasts rank 0's processed optimizer state to all ranks, replacing
    every positive-dimension tensor with a lightweight ``_PosDimTensorInfo``
    (shape + dtype) placeholder so that only metadata travels over the wire;
    zero-dimension tensors are moved to CPU and sent by value.

    Rank 0 returns its original ``optim_state``; other ranks return the
    broadcast (placeholder-bearing) copy.
    """
    objects: List[Any] = [None]
    if fsdp_state.rank == 0:
        objects[0] = tree_map_only(
            torch.Tensor,
            lambda v: v.cpu() if v.dim() == 0 else _PosDimTensorInfo(v.shape, v.dtype),  # type: ignore[union-attr]
            optim_state,
        )
    dist.broadcast_object_list(objects, src=0, group=group)
    if fsdp_state.rank == 0:
        return optim_state
    else:
        return objects[0]


def _broadcast_state(
    fsdp_state: _FSDPState, state: Any, group: Optional[dist.ProcessGroup]
) -> Any:
    """
    Broadcasts a single positive-dimension tensor state value from rank 0 to
    all ranks. Non-tensor and zero-dimension values are returned unchanged
    (they were already communicated by ``_broadcast_processed_state``); on
    non-zero ranks a ``_PosDimTensorInfo`` placeholder is materialized into a
    zero-filled buffer that receives the broadcast.
    """
    if fsdp_state.rank == 0:
        if not isinstance(state, torch.Tensor) or state.dim() == 0:
            return state
        tensor = state.to(fsdp_state.compute_device)
    else:
        if isinstance(state, torch.Tensor):
            # BUGFIX: the message was a plain string containing
            # "{state.shape()}" -- it was never formatted and would have
            # raised TypeError ("Size is not callable") if it had been.
            assert state.dim() == 0, (
                "For non-zero ranks, a tensor state should have zero dimension, "
                f"but got the state with shape {state.shape}."
            )
            return state
        elif not isinstance(state, _PosDimTensorInfo):
            return state
        tensor = torch.zeros(
            state.shape, dtype=state.dtype, device=fsdp_state.compute_device
        )
    dist.broadcast(tensor, src=0, group=group)
    return tensor


def _shard_orig_param_state(
    fsdp_param_info: FSDPParamInfo,
    fqn: str,
    optim_state: Dict[str, Any],
) -> Dict[str, Any]:
    """
    Shard the optimizer state for the original parameter with the name ``fqn``.
    This API should only be used when ``use_orig_params`` is True.

    Returns an empty dict when the parameter has no state or no part of it
    lives in this rank's shard.
    """
    if not optim_state:
        return {}
    fsdp_state = fsdp_param_info.state
    flat_param = fsdp_param_info.handle.flat_param
    param_idx = fsdp_param_info.param_indices[fqn]
    shard_param_info = flat_param._shard_param_infos[param_idx]  # type: ignore[attr-defined]
    optim_state = _gather_state_dict(
        optim_state, pg=fsdp_state.process_group, device=fsdp_state.compute_device
    )
    if not shard_param_info.in_shard:
        return {}
    # Flatten and shard the state: keep only this rank's slice of each
    # positive-dimension tensor (inclusive end index, hence the `+ 1`).
    new_optim_state: Dict[str, Any] = {}
    intra_param_start_idx = shard_param_info.intra_param_start_idx
    intra_param_end_idx = shard_param_info.intra_param_end_idx
    for state_name, value in optim_state.items():
        if (
            torch.is_tensor(value)
            and value.dim() > 0
            and fsdp_state.sharding_strategy != ShardingStrategy.NO_SHARD
        ):
            value = value.flatten()[intra_param_start_idx : intra_param_end_idx + 1].clone()  # type: ignore[operator]
        new_optim_state[state_name] = value
    return new_optim_state
def _flatten_optim_state_dict(
    optim_state_dict: Dict[str, Any],
    model: nn.Module,
    use_orig_params: bool = False,
    optim: Optional[torch.optim.Optimizer] = None,
    rank0_only: bool = False,
    group: Optional[dist.ProcessGroup] = None,
) -> Dict[str, Any]:
    """
    Flattens the full optimizer state dict, still keying by unflattened parameter
    names.

    If ``use_orig_params`` is True, each rank will have all FSDP-managed
    parameters but some of these parameters may be empty due to the sharding.
    For a regular optim.Optimizer, states for those empty parameters will
    not be initialized. So, when aggregating the FQNs across ranks, no assert
    will be raised on a rank even if it does not have all the states -- it is
    valid and FSDP knows how to aggregate them. However, FSDP has to ignore
    handling those parameters that are not managed by FSDP and do not exist on
    the local rank -- they are managed by other parallelism and FSDP does not
    know how to handle/aggregate them.

    Note that ``_flatten_tensor_optim_state`` does not need ``optim`` to
    flatten/shard the state. However, NamedOptimizer and KeyedOptimizer require
    all the states even if the corresponding parameters are empty. To this end,
    ``optim`` will be used to get the initial state of the empty parameters.
    ``optim`` should only be non-None if the ``optim`` is KeyedOptimizer or
    NamedOptimizer.

    Returns:
        Dict[str, Any]: The flattened optimizer state dict.
    """
    SimpleProfiler.reset()

    unflat_osd = optim_state_dict
    if "state" not in unflat_osd and not rank0_only:
        # BUGFIX: the two string fragments previously concatenated without a
        # space ('..."state"to be...').
        raise ValueError(
            '`optim_state_dict` must have the keys "state" '
            "to be a valid optimizer state dict"
        )
    param_to_fqns = _get_param_to_fqns(model)
    fqn_to_fsdp_param_info = _get_fqn_to_fsdp_param_info(model)
    fsdp_state = next(iter(fqn_to_fsdp_param_info.values())).state

    # Broadcast unflat_osd without non-scalar tensor if rank0_only is True.
    if rank0_only:
        unflat_osd = _broadcast_processed_state(fsdp_state, unflat_osd, group=group)

    # Construct the "state" part
    flat_osd_state: Dict[Union[_OptimStateKey, str], Any] = {}
    unflat_osd_state = unflat_osd["state"]
    # Track keys that belong to no parameter (user-defined state) by removing
    # every parameter FQN we process below
    all_state_keys = set(unflat_osd_state.keys())

    for param, fqns in param_to_fqns.items():
        fqn = fqns[0]
        if fqn not in unflat_osd_state:
            continue
        all_state_keys.difference_update(fqns)

        if rank0_only:
            # Materialize the placeholder tensors broadcast earlier
            for fqn in fqns:
                if not unflat_osd_state[fqn]:
                    continue
                for state_name in unflat_osd_state[fqn].keys():
                    unflat_osd_state[fqn][state_name] = _broadcast_state(
                        fsdp_state, unflat_osd_state[fqn][state_name], group=group
                    )
            fqn = fqns[0]
        if fqn in fqn_to_fsdp_param_info:
            fsdp_param_info = fqn_to_fsdp_param_info[fqn]
            if use_orig_params:
                with SimpleProfiler.profile(SimpleProfiler.Type.RESHARDING):
                    flat_state = _shard_orig_param_state(
                        fsdp_param_info,
                        fqn,
                        unflat_osd_state[fqn],
                    )
            else:
                flat_state = _flatten_optim_state(
                    fsdp_param_info,
                    unflat_osd_state,
                    fqns,
                )
            key = _OptimStateKey(tuple(fqns), True)
            # Only include non-empty states since as expected by
            # `torch.optim.Optimizer` s unless the optimizer is KeyedOptimizer
            # or NamedOptimizer.
            if flat_state:
                flat_osd_state[key] = flat_state
            elif use_orig_params:
                assert (
                    len(fqns) == 1
                ), f"use_orig_params is True but there are multiple FQNs, {fqns}."
                if optim is not None:  # NamedOptimizer or KeyedOptimizer case.
                    state = optim.state.get(param, None)  # type: ignore[call-overload]
                    if state is not None:
                        flat_osd_state[key] = copy.deepcopy(state)
                    else:
                        warnings.warn(
                            f"optim_state[{key}] is not on rank{fsdp_state.rank}."
                        )

            else:
                raise RuntimeError(
                    f"The state of {key} is empty. This should happen when "
                    "use_orig_params=True."
                )
        else:  # do not flatten non-FSDP parameters' states
            assert len(fqns) == 1
            key = _OptimStateKey(tuple(fqns), False)
            flat_osd_state[key] = copy.copy(unflat_osd_state[fqn])

        if rank0_only:
            # Free (non-rank-0) or restore-to-CPU (rank 0) the broadcast
            # tensors so the caller's state dict is left unaffected
            for fqn in fqns:
                if not unflat_osd_state[fqn]:
                    continue
                for state_name, param_state in list(unflat_osd_state[fqn].items()):
                    if fsdp_state.rank > 0:
                        # Deference the tensor so that PyTorch can collect the memory.
                        del unflat_osd_state[fqn][state_name]
                    else:
                        # Move the tensor in the original osd back to CPU to make the
                        # original osd unaffected.
                        unflat_osd_state[fqn][state_name] = unflat_osd_state[fqn][
                            state_name
                        ].cpu()

    # Handle user-defined state, states that are not associated with parameters.
    for key in all_state_keys:
        user_state = unflat_osd_state[key]
        if isinstance(user_state, torch.Tensor) and rank0_only and use_orig_params:
            user_state = _broadcast_state(fsdp_state, user_state, group=group)
        flat_osd_state[key] = copy.copy(user_state)

    SimpleProfiler.dump_and_reset("FSDP _flatten_optim_state_dict() profiling: ")
    # Construct the "param_groups" part -- copy as is since it will be
    # rekeyed later according to the target rank's optimizer
    # Only copy param_groups if it exists in unflat_osd
    if "param_groups" in unflat_osd:
        flat_osd_param_groups = copy.deepcopy(unflat_osd["param_groups"])
        return {"state": flat_osd_state, "param_groups": flat_osd_param_groups}
    else:
        return {"state": flat_osd_state}
def _flatten_optim_state(
    fsdp_param_info: FSDPParamInfo,
    unflat_osd_state: Dict[str, Dict[str, Any]],
    unflat_param_names: List[str],
) -> Dict[str, Any]:
    """
    Flattens the optimizer state in ``full_optim_state_dict`` for a single
    flat parameter in ``fsdp_param_info`` corresponding to the unflattened
    parameter names in ``unflat_param_names``.

    Args:
        fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a
            mapping from FQN to original parameter index.
        unflat_osd_state (Dict[str, Dict[str, Any]]): The "state" part of the
            optimizer state dict corresponding to the unflattened parameters.
        unflat_param_names (List[str]): A :class:`list` of unflattened
            parameter names corresponding to the flat parameter ``flat_param``.

    Returns:
        Dict[str, Any]: A :class:`dict` mapping state names to their values for
        a particular flat parameter. The sharded optimizer state dict's "state"
        part will map a key to this returned value.
    """
    fsdp_state = fsdp_param_info.state
    handle = fsdp_param_info.handle
    flat_param = handle.flat_param
    num_unflat_params = len(unflat_param_names)
    assert num_unflat_params > 0, (
        "Expects at least one unflattened parameter corresponding to the "
        "flat parameter"
    )
    unflat_param_shapes = flat_param._shapes
    num_unflat_param_shapes = len(unflat_param_shapes)
    assert (
        num_unflat_params == num_unflat_param_shapes
    ), f"Expects {num_unflat_params} shapes but got {num_unflat_param_shapes}"

    # Check if these unflattened parameters have any optimizer state
    has_state = [
        bool(unflat_param_name in unflat_osd_state)
        for unflat_param_name in unflat_param_names
    ]
    # If none of the unflattened parameters comprising this flat parameter have
    # any state, then we do not want an entry in the optimizer state dict
    if not any(has_state):
        return {}  # no need to flatten any state
    # There may still be some unflattened parameters with state and some
    # without
    unflat_param_states = [
        _gather_state_dict(
            unflat_osd_state[unflat_param_name],
            pg=fsdp_state.process_group,
            device=fsdp_state.compute_device,
        )
        if unflat_param_name in unflat_osd_state
        else None
        for unflat_param_name in unflat_param_names
    ]
    # Check that the unflattened parameters have the same state names
    state_names = None
    for unflat_param_state in unflat_param_states:
        if unflat_param_state is None:
            continue
        if state_names is None:
            state_names = set(unflat_param_state.keys())
        else:
            if state_names != set(unflat_param_state.keys()):
                raise ValueError(
                    "Differing optimizer state names for the unflattened "
                    f"parameters: {unflat_param_names}"
                )
    assert state_names is not None

    # Flatten the state
    flat_state: Dict[str, Any] = {}
    for state_name in state_names:
        state_values = [
            unflat_param_state[state_name] if unflat_param_state is not None else None
            for unflat_param_state in unflat_param_states
        ]
        non_none_state_values = [v for v in state_values if v is not None]
        # If all ranks have None, this is a None value
        if not non_none_state_values:
            flat_state[state_name] = None
            continue
        # Classify the state values: all must be positive-dimension tensors,
        # all zero-dimension tensors, or all non-tensors (mixed kinds error)
        are_pos_dim_tensors = are_zero_dim_tensors = are_non_tensors = True
        for v in non_none_state_values:
            are_pos_dim_tensors &= torch.is_tensor(v) and v.dim() > 0
            are_zero_dim_tensors &= _is_zero_dim_tensor(v)
            are_non_tensors &= not torch.is_tensor(v)
        types = {type(v) for v in non_none_state_values}
        if len(types) != 1 or not (
            are_pos_dim_tensors or are_zero_dim_tensors or are_non_tensors
        ):
            raise ValueError(
                f"Differing optimizer state types for state {state_name}, "
                f"values {non_none_state_values}, and unflattened parameter "
                f"names {unflat_param_names}"
            )
        if are_pos_dim_tensors:
            flat_tensor = _flatten_tensor_optim_state(
                state_name,
                state_values,
                unflat_param_names,
                unflat_param_shapes,
                handle,
            )
            # Shard the flattened tensor immediately to minimize max memory
            # usage
            if (
                fsdp_state.world_size != 1
                and fsdp_state.sharding_strategy != ShardingStrategy.NO_SHARD
            ):
                sharded_flat_tensor, _ = FlatParamHandle._get_shard(
                    flat_tensor,
                    fsdp_state.rank,
                    fsdp_state.world_size,
                )
            else:
                sharded_flat_tensor = flat_tensor
            flat_state[state_name] = sharded_flat_tensor
        elif are_zero_dim_tensors:
            flat_state[state_name] = _flatten_zero_dim_tensor_optim_state(
                state_name,
                state_values,
                unflat_param_names,
            )
        else:
            assert are_non_tensors
            flat_state[state_name] = _flatten_non_tensor_optim_state(
                state_name,
                state_values,
                unflat_param_names,
            )

    return flat_state
def _flatten_tensor_optim_state(
    state_name: str,
    pos_dim_tensors: List[torch.Tensor],
    unflat_param_names: List[str],
    unflat_param_shapes: Sequence[torch.Size],
    handle: FlatParamHandle,
) -> torch.Tensor:
    """
    Flattens the positive-dimension tensor optimizer state given by the values
    ``tensors`` for the state ``state_name`` for a single flat parameter
    from ``handle`` corresponding to the unflattened parameter names
    ``unflat_param_names`` and unflatted parameter shapes
    ``unflat_param_shapes``. This flattens each unflattened parameter's tensor
    state into one tensor.

    NOTE: We use zero tensors for any unflattened parameters without state
    since some value is required to fill those entries. This assumes that the
    zero tensor is mathematically equivalent to having no state, which is true
    for Adam's "exp_avg" and "exp_avg_sq" but may not be true for all
    optimizers.

    Args:
        state_name (str): Optimizer state name.
        pos_dim_tensors (List[torch.Tensor]): Positive-dimension tensor
            optimizer state values for the unflattened parameters corresponding
            to the single flat parameter.
        unflat_param_names (List[str]): A :class:`list` of unflattened
            parameter names corresponding to the single flat parameter.
        unflat_param_shapes (List[torch.Size]): Unflattened parameter shapes
            corresponding to the single flat parameter.
        handle (FlatParamHandle): The flat parameter's handle.

    Returns:
        torch.Tensor: A flat tensor containing the optimizer state
        corresponding to ``state_name`` constructed by concatenating the
        unflattened parameter tensor states in ``pos_dim_tensors`` (using zero
        tensors for any unflattened parameters without the state).
    """
    flat_param = handle.flat_param
    non_none_tensors = [t for t in pos_dim_tensors if t is not None]
    # Check that all are tensors with the same dtype
    dtypes = {t.dtype for t in non_none_tensors}
    if len(dtypes) != 1:
        raise ValueError(
            "All unflattened parameters comprising a single flat "
            "parameter must have positive-dimension tensor state with the "
            f"same dtype but got dtypes {dtypes} for state {state_name} and "
            f"unflattened parameter names {unflat_param_names}"
        )
    dtype = next(iter(dtypes))
    # Check that each tensor state matches its parameter's shape
    for tensor, shape in zip(pos_dim_tensors, unflat_param_shapes):
        if tensor is None and len(shape) == 0:
            raise ValueError("Flattening a zero-dimension parameter is not supported")
        elif tensor is not None and tensor.shape != shape:
            raise ValueError(
                "Tensor optimizer state does not have same shape as its "
                f"parameter: {tensor.shape} {shape}"
            )
    # Flatten the tensor states: we do not need to add any right-hand-side
    # padding since the flat optimizer state tensor is sharded via
    # `_get_shard()`, which pads the shard as needed (just like for the flat
    # parameter)
    cpu_device = torch.device("cpu")
    # Missing states are filled with zeros of the parameter's shape/dtype
    tensors_to_flatten = [
        torch.flatten(state_value.to(cpu_device))
        if state_value is not None
        else torch.flatten(
            torch.zeros(
                size=shape,
                dtype=dtype,
                device=cpu_device,
            )
        )
        for state_value, shape in zip(pos_dim_tensors, unflat_param_shapes)
    ]
    flat_tensor = handle.flatten_tensors(tensors_to_flatten, handle._aligned_numel)
    # Sanity check: the flattened state must match the flat parameter's
    # unpadded unsharded size
    flat_param_shape = flat_param._unpadded_unsharded_size  # type: ignore[attr-defined]
    assert flat_tensor.shape == flat_param_shape, (
        f"tensor optim state: {flat_tensor.shape} "
        f"flat parameter: {flat_param_shape}"
    )
    return flat_tensor
zero-dimension tensor optimizer state given by the values + ``zero_dim_tensors`` for the state ``state_name`` for a single flat + parameter corresponding to the unflattened parameter names + ``unflat_param_names`` by enforcing that all tensors are the same and using + that common value. + + NOTE: The requirement that the tensors are the same across all unflattened + parameters comprising the flat parameter is needed to maintain the + invariant that FSDP performs the same computation as its non-sharded + equivalent. This means that none of the unflattened parameters can be + missing this state since imposing a value may differ from having no value. + For example, for Adam's "step", no value means maximum bias correction, + while having some positive value means less bias correction. + + Args: + state_name (str): Optimizer state name. + zero_dim_tensors (List[torch.Tensor]): Zero-dimension optimizer state + for the unflattened parameters corresponding to the single + flat parameter. + unflat_param_names (List[str]): A :class:`list` of unflattened + parameter names corresponding to the single flat parameter. + + Returns: + torch.Tensor: A zero-dimensional tensor giving the value of the state + ``state_name`` for all unflattened parameters corresponding to the + names ``unflat_param_names``. 
+ """ + non_none_tensors = [t for t in zero_dim_tensors if t is not None] + # Enforce that all have the same value and dtype + values_set = {t.item() if t is not None else None for t in zero_dim_tensors} + dtypes = {t.dtype if t is not None else None for t in zero_dim_tensors} + if ( + len(non_none_tensors) != len(zero_dim_tensors) + or len(values_set) != 1 + or len(dtypes) != 1 + ): + raise ValueError( + "All unflattened parameters comprising a single flat " + "parameter must have scalar state with the same value and dtype " + f"but got values {values_set} and dtypes {dtypes} for state " + f"{state_name} and unflattened parameter names " + f"{unflat_param_names}" + ) + value = next(iter(values_set)) + dtype = next(iter(dtypes)) + return torch.tensor(value, dtype=dtype, device=torch.device("cpu")) + + +def _flatten_non_tensor_optim_state( + state_name: str, + non_tensors: List[Any], + unflat_param_names: List[str], +) -> Any: + """ + Flattens the non-tensor optimizer state given by the values ``non_tensors`` + for the state ``state_name`` for a single flat parameter corresponding + to the unflattened parameter names ``unflat_param_names`` by enforcing that + all values are the same and using that common value. + + See the note in :func:`_flatten_zero_dim_tensor_optim_state`. + + Args: + state_name (str): Optimizer state name. + non_tensors (List[Any]): Non-tensor optimizer state for the unflattened + parameters corresponding to the single flat parameter. + unflat_param_names (List[str]): A :class:`list` of unflattened + parameter names corresponding to the single flat parameter. + + Returns: + Any: A non-tensor giving the value of the state ``state_name`` for all + unflattened parameters corresponding to the names + ``unflat_param_names``. 
+ """ + non_none_non_tensors = [nt for nt in non_tensors if nt is not None] + # Enforce that all have the same value (same type already checked) + non_tensor_set = set(non_tensors) + if len(non_none_non_tensors) != len(non_tensors) or len(non_tensor_set) != 1: + raise ValueError( + "All unflattened parameters comprising a single flat " + "parameter must have scalar state with the same value and dtype " + f"but got values {non_tensor_set} for state {state_name} and " + f"unflattened parameter names {unflat_param_names}" + ) + non_tensor = next(iter(non_tensor_set)) + return non_tensor + + +def _rekey_sharded_optim_state_dict( + sharded_osd: Dict[str, Any], + model: nn.Module, + optim: torch.optim.Optimizer, + optim_input: Optional[ + Union[ + List[Dict[str, Any]], + Iterable[nn.Parameter], + ] + ], + using_optim_input: bool, + is_named_optimizer: bool = False, +) -> Dict[str, Any]: + """ + Rekeys the optimizer state dict from unflattened parameter names to flat + parameter IDs according to the calling rank's ``optim``, which may be + different across ranks. In particular, the unflattened parameter names are + represented as :class:`_OptimStateKey` s. 
def _rekey_sharded_optim_state_dict(
    sharded_osd: Dict[str, Any],
    model: nn.Module,
    optim: torch.optim.Optimizer,
    optim_input: Optional[
        Union[
            List[Dict[str, Any]],
            Iterable[nn.Parameter],
        ]
    ],
    using_optim_input: bool,
    is_named_optimizer: bool = False,
) -> Dict[str, Any]:
    """
    Rekeys the optimizer state dict from unflattened parameter names to flat
    parameter IDs according to the calling rank's ``optim``, which may be
    different across ranks. In particular, the unflattened parameter names are
    represented as :class:`_OptimStateKey` s.
    """
    param_to_fqns = _get_param_to_fqns(model)
    flat_param_to_fqn = _get_flat_param_to_fqn(model)
    # Choose the key source: the legacy `optim_input` path or the
    # optimizer/NamedOptimizer path
    param_to_param_key: Dict[nn.Parameter, Union[int, str]] = cast(
        Dict[nn.Parameter, Union[int, str]],
        (
            _get_param_to_param_id_from_optim_input(model, optim_input)
            if using_optim_input
            else _get_param_to_param_key(
                optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn
            )
        ),
    )
    # All parameter keys in `param_to_param_key` should be in
    # `param_to_fqns` -- strict inequality follows when not all parameters are
    # passed to the optimizer
    assert len(param_to_param_key) <= len(param_to_fqns)

    unflat_param_names_to_flat_param_key: Dict[
        Tuple[str, ...], Union[int, str]
    ] = {}  # for "state"
    unflat_param_name_to_flat_param_key: Dict[
        str, Union[int, str]
    ] = {}  # for "param_groups"
    for param, unflat_param_names in param_to_fqns.items():
        if param not in param_to_param_key:
            # This parameter was not passed to the optimizer
            continue
        flat_param_key = param_to_param_key[param]
        unflat_param_names_to_flat_param_key[tuple(unflat_param_names)] = flat_param_key
        for unflat_param_name in unflat_param_names:
            unflat_param_name_to_flat_param_key[unflat_param_name] = flat_param_key

    sharded_osd_state = sharded_osd["state"]
    rekeyed_osd_state: Dict[Union[str, int], Any] = {}
    for key, param_state in sharded_osd_state.items():
        # String keys are user-defined state (not `_OptimStateKey`s) and are
        # passed through unchanged
        if isinstance(key, str):
            rekeyed_osd_state[key] = param_state
            continue
        # NOTE(review): when the lookup misses, the FQN tuple itself is used
        # as the key (the `.get` default) rather than raising -- presumably to
        # tolerate parameters absent from this rank's optimizer; confirm.
        flat_param_key = unflat_param_names_to_flat_param_key.get(
            key.unflat_param_names, key.unflat_param_names
        )
        rekeyed_osd_state[flat_param_key] = param_state

    # Only process param_groups if it exists in sharded_osd
    if "param_groups" in sharded_osd:
        rekeyed_osd_param_groups: List[Dict[str, Any]] = []
        for unflat_param_group in sharded_osd["param_groups"]:
            flat_param_group = copy.deepcopy(unflat_param_group)
            flat_param_keys = sorted(
                {
                    unflat_param_name_to_flat_param_key[unflat_param_name]
                    for unflat_param_name in unflat_param_group["params"]
                }
            )
            flat_param_group["params"] = flat_param_keys
            rekeyed_osd_param_groups.append(flat_param_group)
        return {"state": rekeyed_osd_state, "param_groups": rekeyed_osd_param_groups}
    else:
        return {"state": rekeyed_osd_state}
+ """ + # Assume the standard case of passing `model.parameters()` to the optimizer + # if `optim_input` is not specified + if optim_input is None: + return dict(enumerate(model.parameters())) + try: + params = cast(List[nn.Parameter], list(optim_input)) + except TypeError as e: + raise TypeError( + "Optimizer input should be an iterable of Tensors or dicts, " + f"but got {optim_input}" + ) from e + if len(params) == 0: + raise ValueError("Optimizer input should not be empty") + + # Check if the optimizer input represents tensors or parameter groups + all_tensors = True + all_dicts = True + for param in params: + all_tensors &= isinstance(param, torch.Tensor) + all_dicts &= isinstance(param, dict) + if not all_tensors and not all_dicts: + raise TypeError("Optimizer input should be an iterable of Tensors or dicts") + if all_tensors: + return dict(enumerate(params)) + assert all_dicts + param_id_to_param: List[nn.Parameter] = [] + for param_group in params: + has_params_key = "params" in param_group # type: ignore[operator] + assert has_params_key, ( + 'A parameter group should map "params" to a list of the ' + "parameters in the group" + ) + # Implicitly map `flat_param_id` (current length of the list) to + # `param` + param_id_to_param.extend(param_group["params"]) # type: ignore[index] + return dict(enumerate(param_id_to_param)) + + +def _get_flat_param_to_fqn(model: torch.nn.Module) -> Dict[FlatParameter, str]: + """ + Constructs a mapping from ``FlatParameter`` to a cleaned (devoid of prefixes + from wrappers) fully qualified name (FQN). Note that this FQN is "non-canonical" + because ``FlatParameter`` s do not come from the original module but are + registered only after FSDP has been applied. This function returns the FSDP-given + name for the ``FlatParameter`` (usually module._flat_param) as opposed to the + canonical FQNs returned for ``FlatParameter`` s in ``_common_utils._get_param_to_fqns(...)``). 
+ + Consequently, this function will only return a non-empty mapping if FSDP was + applied with ``use_orig_params=False`` as, otherwise, the original parameters + are used within the module and there would be no ``FlatParameter`` s in the module. + + """ + + def module_fn(module, prefix, tree_level, flat_param_to_fqn): + for param_name, param in _named_parameters_with_duplicates( + module, recurse=False + ): + if not isinstance(param, FlatParameter): + continue + fqn = clean_tensor_name(prefix + param_name) + flat_param_to_fqn[param] = fqn + + def return_fn(flat_param_to_fqn): + return flat_param_to_fqn + + flat_param_to_fqn_ret: Dict[FlatParameter, str] = {} + return _apply_to_modules( + model, + module_fn, + return_fn, + [fqn for fqn, _ in _named_parameters_with_duplicates(model)], + flat_param_to_fqn_ret, + ) + + +def _get_param_key_to_param( + optim: torch.optim.Optimizer, + model: Optional[nn.Module] = None, + is_named_optimizer: bool = False, + param_to_fqns: Optional[Dict[nn.Parameter, List[str]]] = None, + flat_param_to_fqn: Optional[Dict[FlatParameter, str]] = None, +) -> Dict[Union[int, str], nn.Parameter]: + """ + Constructs a mapping from parameter keys to parameters. For the regular + optimizers, the keys are parameter IDs. For NamedOptimizer, the keys + are FQNs. This API may be used both for models with ``FlatParameter`` s and + without. + """ + clean_fqn_to_curr_fqn: Dict[str, str] = {} + if is_named_optimizer: + assert ( + param_to_fqns is not None and flat_param_to_fqn is not None + ), "The optimizer is a NamedOptimizer, `param_to_fqns` must not be None." 
+ assert model is not None + for key, _ in _named_parameters_with_duplicates(model): + clean_fqn_to_curr_fqn[clean_tensor_name(key)] = key + + param_key_to_param: Dict[Union[str, int], nn.Parameter] = {} + pid = 0 + for param_group in optim.param_groups: + if is_named_optimizer: + for param in param_group["params"]: + assert flat_param_to_fqn is not None + if param in flat_param_to_fqn: + # FlatParameter case + key = flat_param_to_fqn[param] + else: + assert param_to_fqns is not None + # use_orig_params case + assert len(param_to_fqns[param]) == 1 + key = param_to_fqns[param][0] + try: + key = clean_fqn_to_curr_fqn[key] + except KeyError as e: + raise KeyError( + f"Can't find {key} from {list(clean_fqn_to_curr_fqn.keys())}." + ) from e + param_key_to_param[key] = param + else: + for param in param_group["params"]: + param_key_to_param[pid] = param + pid += 1 + + return param_key_to_param + + +def _get_param_to_param_key( + optim: torch.optim.Optimizer, + model: Optional[nn.Module] = None, + is_named_optimizer: bool = False, + param_to_fqns: Optional[Dict[nn.Parameter, List[str]]] = None, + flat_param_to_fqn: Optional[Dict[FlatParameter, str]] = None, +) -> Dict[nn.Parameter, Union[int, str]]: + """ + Constructs the inverse mapping of :func:`_get_param_key_to_param`. This API + only supports the case where `optim` is a regular optimizer, not NamedOptimizer. + So the parameter keys will be parameter ids. 
+ """ + param_id_to_param = _get_param_key_to_param( + optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn + ) + return {param: param_id for param_id, param in param_id_to_param.items()} + + +def _get_param_to_param_id_from_optim_input( + model: nn.Module, + optim_input: Optional[ + Union[ + List[Dict[str, Any]], + Iterable[nn.Parameter], + ] + ] = None, +) -> Dict[nn.Parameter, int]: + """Constructs the inverse mapping of :func:`_get_param_id_to_param_from_optim_input`.""" + param_id_to_param = _get_param_id_to_param_from_optim_input(model, optim_input) + return {param: param_id for param_id, param in param_id_to_param.items()} + + +def _check_missing_keys_on_rank( + r0_optim_state_keys: List[_OptimStateKey], + optim_state_key_to_param_key: Dict[_OptimStateKey, Union[str, int]], + param_key_to_param: Dict[Union[str, int], nn.Parameter], + group: Optional[dist.ProcessGroup], +) -> None: + # Ensure that all ranks have at least the optimizer states needed by + # rank 0's optimizer + missing_keys: List[_OptimStateKey] = [] + for r0_optim_state_key in r0_optim_state_keys: + if r0_optim_state_key not in optim_state_key_to_param_key: + # A parameter from rank 0's optimizer does not exist for this + # rank's optimizer + missing_keys.append(r0_optim_state_key) + continue + param_key = optim_state_key_to_param_key[r0_optim_state_key] + if isinstance(param_key, int): + assert param_key >= 0 and param_key < len( + param_key_to_param + ), "Check the `param_key_to_param` construction" + # We cannot use FSDPState.compute_device as this API is a global view. 
+ device = _get_pg_default_device(group) + num_missing = torch.tensor([len(missing_keys)], dtype=torch.int32, device=device) + dist.all_reduce(num_missing, group=group) + if num_missing.item() > 0: + obj_list = [None for _ in range(dist.get_world_size(group))] + dist.all_gather_object(obj_list, missing_keys, group=group) + error_msg = ( + "FSDP currently requires each rank to have at least the " + "optimizer states needed by rank 0's optimizer but some ranks " + "are missing some of those states" + ) + for rank, keys in enumerate(obj_list): + keys = cast(List[_OptimStateKey], keys) + if len(keys) > 0: + error_msg += ( + f"\nRank {rank} is missing states for the parameters: " + f"{[key.unflat_param_names for key in keys]}" + ) + raise RuntimeError(error_msg) + + +def _map_param_key_to_optim_keys( + optim_state_dict: Dict[str, Any], + group: Optional[dist.ProcessGroup], + param_key_to_param: Dict[Union[int, str], nn.Parameter], + param_to_fqns: Dict[nn.Parameter, List[str]], + fqn_to_fsdp_param_info: Dict[str, FSDPParamInfo], + merge_keys: bool = False, +) -> Tuple[List[_OptimStateKey], Dict[_OptimStateKey, Union[int, str]]]: + """ + Construct the local mapping between the ``_OptimStateKey`` and parameter keys + and all the ``_OptimStateKey`` across ranks. If ``merge_keys`` is False, rank0 + must contain all the ``_OptimStateKey``, an exception will be raised otherwise. + Note that ``merge_keys`` should equal to ``use_orig_params``. 
+ """ + rank = dist.get_rank(group) + optim_state_key_to_param_key: Dict[_OptimStateKey, Union[int, str]] = {} # local + all_optim_state_keys: List[_OptimStateKey] = [] + + for param_key, param in param_key_to_param.items(): + # Do not include parameters without state to avoid empty mappings + # just like in normal `torch.optim.Optimizer.state_dict()` + if param_key not in optim_state_dict["state"]: + continue + fqns = param_to_fqns[param] + is_fsdp_managed = isinstance(param, FlatParameter) + if is_fsdp_managed: + assert fqns[0] in fqn_to_fsdp_param_info, ( + fqns[0], + list(fqn_to_fsdp_param_info.keys()), + ) + is_fsdp_managed = fqns[0] in fqn_to_fsdp_param_info + optim_state_key = _OptimStateKey( + unflat_param_names=tuple(fqns), + is_fsdp_managed=is_fsdp_managed, + ) + if rank == 0 or merge_keys: + all_optim_state_keys.append(optim_state_key) + optim_state_key_to_param_key[optim_state_key] = param_key + + if merge_keys: + all_keys: List[List[_OptimStateKey]] = [ + [] for _ in range(dist.get_world_size(group)) + ] + dist.all_gather_object(all_keys, all_optim_state_keys, group=group) + merge_all_optim_state_keys = [ + key for local_keys in all_keys for key in local_keys + ] + all_optim_state_keys = sorted(set(merge_all_optim_state_keys)) + else: + key_obj_list: List[Optional[List[_OptimStateKey]]] = ( + [all_optim_state_keys] if rank == 0 else [None] + ) + dist.broadcast_object_list(key_obj_list, src=0, group=group) + assert key_obj_list[0] is not None + all_optim_state_keys = key_obj_list[0] + _check_missing_keys_on_rank( + all_optim_state_keys, + optim_state_key_to_param_key, + param_key_to_param, + group, + ) + + return all_optim_state_keys, optim_state_key_to_param_key + + +def _unflatten_param_groups( + state_dict: Dict[str, Any], + param_key_to_param: Dict[Union[int, str], nn.Parameter], + param_to_fqns: Dict[nn.Parameter, List[str]], +) -> List[Dict[str, Any]]: + param_groups: List[Dict[str, Any]] = [] + for flat_param_group in state_dict["param_groups"]: 
+        unflat_param_group = copy.deepcopy(flat_param_group)
+        param_group_params = [
+            param_key_to_param[flat_param_key]
+            for flat_param_key in flat_param_group["params"]
+        ]
+        nested_unflat_param_names = [
+            param_to_fqns[param] for param in param_group_params
+        ]
+        unflat_param_group["params"] = [
+            unflat_param_name
+            for unflat_param_names in nested_unflat_param_names
+            for unflat_param_name in unflat_param_names
+        ]  # flatten the list of lists
+        param_groups.append(unflat_param_group)
+    return param_groups
+
+
+def _is_named_optimizer(optim_state_dict: Dict[str, Any]) -> bool:
+    """
+    Returns whether the state_dict is from a NamedOptimizer.
+    This function checks that the keys in the state_dict['state'] are strings
+    (which usually are FQNs) versus integers (which usually refer to param_ids
+    from a vanilla torch.optim.Optimizer).
+    """
+    state = optim_state_dict.get("state", None)
+    if not state:
+        # If we cannot find a state, assume it is not NamedOptimizer as
+        # NamedOptimizer has eager initialization.
+        return False
+    try:
+        key = next(iter(state.keys()))
+    except Exception as e:
+        # NOTE(review): re-raises as a bare ``Exception`` whose message is the
+        # entire state_dict — consider a narrower error type with a shorter
+        # message.
+        raise Exception(optim_state_dict) from e
+    return isinstance(key, str)
+
+
+@dataclass
+class StateInfo:
+    # The keys of these dictionaries are state names, e.g., `exp_avg`.
+    tensors: Dict[str, _PosDimTensorInfo]
+    scalar_tensors: Dict[str, torch.Tensor]
+    non_tensors: Dict[str, Any]
+
+
+def _allgather_state_info(
+    fsdp_state: _FSDPState,
+    input_states: Dict[str, Any],
+) -> List[Dict[str, StateInfo]]:
+    """
+    Given the ``input_states``, allgather StateInfo for each state. The function
+    uses all_gather_object to gather StateInfo so no GPU tensors are sent.
+    """
+
+    processed_state_dict: Dict[str, StateInfo] = {}
+    gathered_state_info: List[Dict[str, StateInfo]] = [
+        {} for _ in range(fsdp_state.world_size)
+    ]
+
+    for fqn, optim_state in input_states.items():
+        # Allgather the scalar tensor states, non-tensor states, and tensor metadata.
+        processed_state = StateInfo({}, {}, {})
+        for state_name, value in sorted_items(optim_state):
+            if torch.is_tensor(value):
+                if value.dim() == 0:
+                    # Ensure that `step` is on CPU.
+                    processed_state.scalar_tensors[state_name] = value.cpu()
+                else:
+                    # Positive-dimension tensors are not sent here; only their
+                    # metadata (shape/dtype) is exchanged via all_gather_object.
+                    processed_state.tensors[state_name] = _PosDimTensorInfo(
+                        value.shape, value.dtype
+                    )
+            else:
+                processed_state.non_tensors[state_name] = value
+        processed_state_dict[fqn] = processed_state
+    dist.all_gather_object(
+        gathered_state_info,
+        processed_state_dict,
+        group=fsdp_state.process_group,
+    )
+    return gathered_state_info
+
+
+def _convert_all_state_info(
+    fsdp_param_info: FSDPParamInfo,
+    gathered_state_info: List[Dict[str, StateInfo]],
+    input_states: Dict[str, Any],
+    output_states: Dict[str, Dict[str, Any]],
+) -> Tuple[Optional[torch.dtype], Dict[str, List[Optional[torch.Tensor]]]]:
+    """
+    Given the ``gathered_state_info`` and ``input_states``, this API converts
+    the StateInfo into the original state if the state is not a non-scalar
+    tensor. For a multi-dimensional tensor, the local state will be stored in
+    ``state_buffers`` in the correct order for the later allgather.
+    """
+
+    state_buffers: Dict[str, List[Optional[torch.Tensor]]] = {}
+
+    for fqn, gathered_state in output_states.items():
+        state_info = [s[fqn] for s in gathered_state_info]
+        # Union of tensor-state names seen on any rank, in sorted order.
+        all_tensor_states = sorted(
+            {n for state in state_info for n in state.tensors.keys()}
+        )
+        empty_ranks: Set[int] = set()
+        dtype: Optional[torch.dtype] = None
+        # First check all the non-scalar states and get the information of
+        # states on each rank.
+ for state_name in all_tensor_states: + numels = [] + _empty_ranks: Set[int] = set() + for rank, object_state in enumerate(state_info): + numels.append(0) + info = object_state.tensors.get(state_name, None) + if info is not None: + numels[-1] = info.shape.numel() + if not dtype: + dtype = info.dtype + else: + assert dtype == info.dtype + if numels[-1] == 0: + _empty_ranks.add(rank) + + assert not empty_ranks or empty_ranks == _empty_ranks + empty_ranks = _empty_ranks + if state_name not in state_buffers: + state_buffers[state_name] = [ + None for _ in fsdp_param_info.param_indices + ] + local_state = input_states[fqn].get(state_name, None) + # N.B. We need to move the state to compute_device. The reason is + # not yet clear and we need to figure out why the state may be on a + # different device. + if local_state is not None: + local_state = local_state.to(fsdp_param_info.state.compute_device) + state_buffers[state_name][fsdp_param_info.param_indices[fqn]] = local_state + + # Restoring the scalar and non-tensor states. If the corresponding + # non-scalar states do not exist on the rank, we also skip the scalar + # non-tensor states on that rank. + for rank, object_state in enumerate(state_info): + if rank in empty_ranks: + continue + for name, non_tensor_value in object_state.non_tensors.items(): + curr_non_tensor_value = gathered_state.get(name, None) + assert ( + curr_non_tensor_value is None + or curr_non_tensor_value == non_tensor_value + ), ( + f"Rank {rank} has different values for {name}: {non_tensor_value}." + + f" Other ranks: {curr_non_tensor_value}" + ) + gathered_state[name] = non_tensor_value + + for name, scalar_tensor_value in object_state.scalar_tensors.items(): + curr_scalar_tensor_value = gathered_state.get(name, None) + assert curr_scalar_tensor_value is None or torch.equal( + scalar_tensor_value, curr_scalar_tensor_value + ), ( + f"Rank {rank} has different values for {name}: {scalar_tensor_value}." 
+ + f" Other ranks: {curr_scalar_tensor_value}" + ) + gathered_state[name] = scalar_tensor_value + + return dtype, state_buffers # type: ignore[possibly-undefined] + + +def _unflatten_orig_param_states( + fsdp_param_info: FSDPParamInfo, + output_states: Dict[str, Dict[str, Any]], + state_name: str, + shard_state: bool, + to_save: bool, + cpu_offload: bool, +) -> None: + """ + Given a output state dict, ``output_states``, which the keys are FQNs to the + original parameters (not FlatParameters nor parmeter ID), and the values + are gathered states, unflatten the states to the original dimensions. + + This function performs the unflattening process in-place. + """ + if not to_save: + return + flat_param = fsdp_param_info.handle.flat_param + fsdp_state = fsdp_param_info.state + for fqn, gathered_state in output_states.items(): + value = gathered_state[state_name] + param_idx = fsdp_param_info.param_indices[fqn] + + # TODO: This solution is not general and only apply to PTD TP solution. + if isinstance(value, DTensor): + placement = value.placements[0] + # If gathered state is a DTensor and its TP placement is not Replicate(), we need to + # gather the tensor on its TP dimension before chunking them into DTensor again. + if placement != Replicate(): + placement_dim = placement.dim # type: ignore[attr-defined] + value_local = value.redistribute(placements=(Replicate(),)) + reshape_size = list(flat_param._shapes[param_idx]) + reshape_size[placement_dim] *= value.device_mesh.size(0) + reshape_size = torch.Size(reshape_size) + value = value.reshape(reshape_size) + # If gathered state is a replicate DTensor, we directly reshape it. + else: + value = value.reshape(flat_param._shapes[param_idx]) + else: + # If gathered state is a tensor, we directly reshape it into unflatten state. 
+ value = value.reshape(flat_param._shapes[param_idx]) + + if shard_state: + osd_config = fsdp_state._optim_state_dict_config + if getattr(osd_config, "_use_dtensor", False): + assert fsdp_state._device_mesh is not None + value = _ext_chunk_dtensor( + value, + fsdp_state.rank, + fsdp_state._device_mesh, + fsdp_state._fsdp_extension, + ) + else: + assert fsdp_state.process_group is not None + value = _ext_chunk_tensor( + value, + fsdp_state.rank, + fsdp_state.world_size, + fsdp_state._device_handle.device_count(), + fsdp_state.process_group, + fsdp_state._fsdp_extension, + ) + elif not cpu_offload: + with SimpleProfiler.profile("clone"): + value = value.detach().clone() + + if cpu_offload: + with SimpleProfiler.profile(SimpleProfiler.Type.D2H): + value = value.cpu() + gathered_state[state_name] = value + + +def _allgather_orig_param_states( + fsdp_param_info: FSDPParamInfo, + gathered_state_info: List[Dict[str, StateInfo]], + input_states: Dict[str, Any], + shard_state: bool, + to_save: bool, + cpu_offload: bool, +) -> Dict[str, Dict[str, Any]]: + """ + Given the ``gathered_state_info`` and ``input_states``, the API allgathers + all tensor states and restore non-tensor states from ``gathered_state_info``. + """ + fsdp_state = fsdp_param_info.state + if fsdp_state.rank == 0 and dist.get_debug_level() == dist.DebugLevel.DETAIL: + logger.warning( + "CUDA Memory Summary before calling to _allgather_orig_param_states %s", + torch.cuda.memory_summary(), + ) + + output_states: Dict[str, Dict[str, Any]] = {fqn: {} for fqn in input_states.keys()} + + dtype, state_buffers = _convert_all_state_info( + fsdp_param_info, gathered_state_info, input_states, output_states + ) + + if len(state_buffers) == 0: + return output_states + + has_state_params: List[bool] = [ + True if fqn in output_states else False + for fqn, idx in fsdp_param_info.param_indices.items() + ] + + # Loop through the ``state_buffers`` and construct the flattened, concatenated, + # sharded states. 
The size of the constructed state will be the same size as + # flat_param (also sharded). + # Then we perform an allgather_into_tensor to get the full flat_param state. + # The full flat_param state is the result of concatenation of multiple states + # the order of of flat_param._fqns. + # The final step is to split the flat_param state into original param states + # and return the result. + flat_param = fsdp_param_info.handle.flat_param + empty_func = functools.partial( + torch.empty, dtype=dtype, device=fsdp_state.compute_device + ) + gathered_tensor = empty_func(flat_param._padded_unsharded_size) + # Synchronize can be slow but this will be easier for us to debug. + torch.cuda.synchronize() + for state_name, buffers in state_buffers.items(): + local_buffers: List[torch.Tensor] = [] + begin = fsdp_state.rank * flat_param._sharded_size.numel() + # End is inclusive. + end = begin + flat_param._sharded_size.numel() - 1 + # param_idx corresponds to the parameter index in the FlatParameter. + mem_offset, param_idx = 0, 0 + for numel, is_padding in zip( + flat_param._numels_with_padding, flat_param._is_padding_mask + ): + frozen_and_no_state = not is_padding and ( + not fsdp_param_info.param_requires_grad[param_idx] + and not has_state_params[param_idx] + ) + + if is_padding or frozen_and_no_state: + # This memory range is a padding or the param is frozen and does + # not require gradient. For the later case, we treat it as a + # padding and add empty values to the local_buffers. + + padding_begin, padding_end = mem_offset, mem_offset + numel - 1 + if padding_begin <= begin <= padding_end: + # The range is an align padding before the first parameter in + # the shard. The shard includes parts of this align padding. + padding_len = ( + padding_end - begin + 1 + if end >= padding_end + else end - begin + 1 + ) + elif padding_begin <= end <= padding_end: + # The range is an align padding after the last parameter in + # the shard. 
The shard includes parts of this align padding. + padding_len = ( + end - padding_begin + 1 + if begin <= padding_begin + else end - begin + 1 + ) + elif begin < padding_begin <= padding_end < end: + # The range is an align padding that is completely in the + # shard. + padding_len = numel + else: + padding_len = 0 + if padding_len: + local_buffers.append(empty_func(padding_len)) + + if not is_padding: + # This memory range is a parameter in FlatParameter. So there + # should be an corresponding state in the optimizer unless the + # parameter is frozen, which we treat it as a padding above. + + # We need to check if this rank owns the buffer. If this is None: + # 1.) the rank does not own any part of the original parameter. + # As a result, there is no corresponding optimizer state on + # the rank as well. + # 2.) the parameter is frozen AND no optimizer state for the + # parameter. If a parameter is frozen, there can still be + # optimizer state if the parameter is not frozen in the + # previous steps. + if buffers[param_idx] is not None: + local_buffers.append(cast(torch.Tensor, buffers[param_idx])) + param_idx += 1 + + mem_offset += numel + + shard_numel_padded = flat_param._sharded_size.numel() - ( + sum(t.numel() for t in local_buffers) + ) + + assert flat_param._shard_numel_padded == shard_numel_padded, ( + "Manually calculated _sharded_numel_padded is incorrect. " + f"_shard_numel_padded={flat_param._shard_numel_padded}, " + f"shard_numel_padded={shard_numel_padded}, " + f"_sharded_size.numel={flat_param._sharded_size.numel()}, " + f"_numels_with_padding={flat_param._numels_with_padding}, " + f"begin={begin}, end={end}," + ) + if shard_numel_padded > 0: + # Add right-handed padding. + local_buffers.append(empty_func(shard_numel_padded)) + local_shard = torch.cat(local_buffers) + assert local_shard.numel() * fsdp_state.world_size == gathered_tensor.numel(), ( + "The size of local shard times the world size should equal to the " + "gathered tensor size. 
The inconsistency may be from a bug of " + "FlatParameter's metadata or the reconstruction logic in optimizer " + "state dict." + ) + torch.cuda.synchronize() + with SimpleProfiler.profile(SimpleProfiler.Type.ALLGATHER): + dist.all_gather_into_tensor( + gathered_tensor, local_shard, group=fsdp_state.process_group + ) + # Synchronize can be slow but this will be easier for us to debug. + torch.cuda.synchronize() + + unpadded_tensor = gathered_tensor[: flat_param._unpadded_unsharded_size.numel()] + flat_param_handle = fsdp_param_info.handle + orig_states = flat_param_handle._get_unflat_views_aligned(unpadded_tensor) + assert len(orig_states) == len(fsdp_param_info.param_indices), ( + "The number of parameters from FlatParameter is not consistent to " + "the number of states used by optimizer state dict reconstruction " + "logic." + ) + for fqn, idx in fsdp_param_info.param_indices.items(): + if fsdp_param_info.param_requires_grad[idx] or fqn in output_states: + output_states[fqn][state_name] = orig_states[idx] + + _unflatten_orig_param_states( + fsdp_param_info, + output_states, + state_name, + shard_state, + to_save, + cpu_offload, + ) + + del gathered_tensor + return output_states + + +def _gather_all_orig_param_state( + fsdp_param_info: FSDPParamInfo, + input_states: Dict[str, Any], + shard_state: bool, + to_save: bool, + cpu_offload: bool, +) -> Dict[str, Any]: + """ + Given a optimizer state dict, ``input_states``, which the keys are FQNs to the + original parameters (not FlatParameters nor parmeter ID), gather all the + states and unflatten them to the original dimensions. Note that all the + params referred by the ``input_states`` must be managed by FSDP. 
+ """ + fsdp_state = fsdp_param_info.state + if ( + fsdp_state.world_size == 1 + or fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD + ): + return input_states if to_save else {} + + with SimpleProfiler.profile(SimpleProfiler.Type.RESHARDING): + with SimpleProfiler.profile(SimpleProfiler.Type.ALLGATHER_OBJ): + gathered_state_info = _allgather_state_info(fsdp_state, input_states) + output_states = _allgather_orig_param_states( + fsdp_param_info, + gathered_state_info, + input_states, + shard_state, + to_save, + cpu_offload, + ) + if to_save: + for key, idx in fsdp_param_info.param_indices.items(): + if key in output_states: + continue + if not fsdp_param_info.param_requires_grad[idx]: + continue + + raise RuntimeError( + f"{key} is not in the output state. " + "The FSDPParamInfo has the param keys " + f"{sorted(fsdp_param_info.param_indices.keys())} while " + "the output_states has the param keys " + f"{sorted(output_states.keys())}." + ) + return output_states + else: + return {} + + +def _convert_state_with_orig_params( + all_optim_state_keys: List[_OptimStateKey], + optim_state_key_to_param_key: Dict[_OptimStateKey, Union[int, str]], + fqn_to_fsdp_param_info: Dict[str, FSDPParamInfo], + optim_state_dict: Dict[Union[str, int], Any], + to_save: bool, + shard_state: bool, + cpu_offload: bool = True, +) -> Dict[str, Any]: + fsdp_osd_state: Dict[str, Any] = {} + # This variable is used to deduplicate the FSDPParamInfo as one FSDPParamInfo + # usually corresponds to multiple parameters. We could not use FSDPParamInfo + # as the key because FSDPParamInfo is not hashable. As a result, we fall back + # to `id(FSDPParamInfo)`, which the type is an integer. 
+ all_states: Dict[int, Dict[str, Any]] = {} + # Iterate in rank 0's flat parameter ID order to ensure aligned all-gathers + # across ranks + for optim_state_key in all_optim_state_keys: + param_key: Union[str, int, None] = optim_state_key_to_param_key.get( + optim_state_key, None + ) + + if param_key is None and not optim_state_key.is_fsdp_managed: + continue + + if optim_state_key.is_fsdp_managed: + fqn = optim_state_key.unflat_param_names[0] + fsdp_param_info = fqn_to_fsdp_param_info.get(fqn, None) + if fsdp_param_info is None: + # This can happen if the not all FSDP instances have all the + # parameters. This can happen with FSDP + some MPMD style + # parallelism. + + # TODO: it is unclear if we need to do the same check with + # non-FSDP managed keys. + continue + state = {} if param_key is None else optim_state_dict[param_key] + if id(fsdp_param_info) not in all_states: + all_states[id(fsdp_param_info)] = {} + all_states[id(fsdp_param_info)][fqn] = state + + elif to_save: + assert len(optim_state_key.unflat_param_names) == 1 + unflat_param_name = optim_state_key.unflat_param_names[0] + with SimpleProfiler.profile("none_fsdp_managed_copy"): + param_key = cast(Union[str, int], param_key) + fsdp_osd_state[unflat_param_name] = copy.copy( + optim_state_dict[param_key] + ) + if cpu_offload: + for state_name, value in sorted_items( + fsdp_osd_state[unflat_param_name] + ): + if not torch.is_tensor(value): + continue + fsdp_osd_state[unflat_param_name][state_name] = value.cpu() + + # Instead of gathering the state of each parameter individually, we perform + # the gathering all at once to speed up the process. + for _all_states in all_states.values(): + fqn = next(iter(_all_states.keys())) + fsdp_param_info = fqn_to_fsdp_param_info[fqn] + assert len(fsdp_param_info.param_requires_grad) > 0, ( + "With use_orig_params, FSDPParamInfo should have requires_grad " + "information. However, the length is zero." 
+ ) + for key, idx in fsdp_param_info.param_indices.items(): + if key in _all_states: + continue + if not fsdp_param_info.param_requires_grad[idx]: + continue + raise RuntimeError( + f"{key} is not in the optimizer state. " + "The FSDPParamInfo has the param keys " + f"{sorted(fsdp_param_info.param_indices.keys())} while " + "the optimizer has the param keys " + f"{sorted(_all_states.keys())}." + ) + fsdp_osd_state.update( + _gather_all_orig_param_state( + fsdp_param_info, + _all_states, + shard_state, + to_save, + cpu_offload, + ) + ) + + return fsdp_osd_state + + +def _convert_state_with_flat_params( + all_optim_state_keys: List[_OptimStateKey], + optim_state_key_to_param_key: Dict[_OptimStateKey, Union[int, str]], + fqn_to_fsdp_param_info: Dict[str, FSDPParamInfo], + optim_state_dict: Dict[Union[str, int], Any], + to_save: bool, + shard_state: bool, + cpu_offload: bool = True, +) -> Dict[str, Any]: + fsdp_osd_state: Dict[str, Any] = {} + # Iterate in rank 0's flat parameter ID order to ensure aligned all-gathers + # across ranks + for optim_state_key in all_optim_state_keys: + param_key: Union[str, int, None] = optim_state_key_to_param_key.get( + optim_state_key, None + ) + + assert param_key is not None, ( + "If use_orig_params is False, we must be able to find the " + f"corresponding param id. {optim_state_key} {param_key}" + ) + + if optim_state_key.is_fsdp_managed: + # If there are multiple unflat_param_names (not use_orig_params), + # they share the same FSDPParamInfo. So the first unflat_param_name + # is sufficient to fetch the FSDPParamInfo. 
+ fqn = optim_state_key.unflat_param_names[0] + fsdp_param_info = fqn_to_fsdp_param_info[fqn] + unflat_state = _unflatten_optim_state( + fsdp_param_info, + optim_state_dict[param_key], + to_save, + shard_state, + cpu_offload, + ) + if to_save: + assert len(unflat_state) == len(optim_state_key.unflat_param_names) + for unflat_param_name, unflat_param_state in zip( + optim_state_key.unflat_param_names, + unflat_state, + ): + fsdp_osd_state[unflat_param_name] = unflat_param_state + elif to_save: + assert len(optim_state_key.unflat_param_names) == 1 + unflat_param_name = optim_state_key.unflat_param_names[0] + fsdp_osd_state[unflat_param_name] = copy.copy(optim_state_dict[param_key]) + if cpu_offload: + for state_name, value in sorted_items( + fsdp_osd_state[unflat_param_name] + ): + if not torch.is_tensor(value): + continue + fsdp_osd_state[unflat_param_name][state_name] = value.cpu() + + return fsdp_osd_state + + +@torch.no_grad() +def _optim_state_dict( + model: nn.Module, + optim: torch.optim.Optimizer, + optim_state_dict: Dict[str, Any], + optim_input: Optional[ + Union[ + List[Dict[str, Any]], + Iterable[nn.Parameter], + ] + ], + rank0_only: bool, + shard_state: bool, + group: Optional[dist.ProcessGroup], + using_optim_input: bool, + use_orig_params: bool = False, + cpu_offload: bool = True, +) -> Dict[str, Any]: + """ + Consolidates the optimizer state and returns it as a :class:`dict` + following the convention of :meth:`torch.optim.Optimizer.state_dict`, + i.e. with keys ``"state"`` and ``"param_groups"``. + The flat parameters in ``FSDP`` modules contained in ``model`` are mapped + back to their unflattened parameters. + + Parameter keys are not well-defined. For a regular optimizer, the optimizer + state_dict contains a mapping from parameter IDs to parameter states. + Parameter IDs are the order of parameters in ``optim.param_groups()`` across + all the groups. 
This API also allows user to pass ``optim_input`` for the + mapping between parameters and parameter IDs. Using ``optim_input`` is being + deprecated. + + If the optimizer is a ``NamedOptimizer``, the optimizer state_dict does not + contain parameter IDs mapping but a mapping from parameter FQNs to parameter + states. This API finds the mapping from FQNs to parameters if the optimizer + is a ``NamedOptimizer``. + + If ``use_orig_params`` is True, each rank will have all FSDP-managed + parameters but some of these parameters may be empty due to the sharding. + For a regular optim.Optimizer, states for those empty parameters will + not be initialized. So, when aggregating the FQNs across ranks, no assert + will be raised on a rank even if it does not have all the states -- it is + valid and FSDP knows how to aggregate them. However, FSDP has to ignore + handling those parameters that are not managed by FSDP and do not exist on + the local rank -- those are managed by other parallelisms and FSDP does not + know how to handle/aggregate them. + + Args: + model (nn.Module): Root module (which may or may not be a + :class:`FullyShardedDataParallel` instance) whose parameters + were passed into the optimizer ``optim``. + optim (torch.optim.Optimizer): Optimizer for ``model`` 's + parameters. + rank0_only (bool): If ``True``, saves the populated :class:`dict` + only on rank 0; if ``False``, saves it on all ranks. (Default: + ``True``) + shard_state (bool): If ``True``, shard and distribute all + non-zero-dimension states. + + Returns: + Dict[str, Any]: A :class:`dict` containing the optimizer state for + ``model`` 's original unflattened parameters and including keys + "state" and "param_groups" following the convention of + :meth:`torch.optim.Optimizer.state_dict`. If ``rank0_only=False``, + then nonzero ranks return an empty :class:`dict`. 
+ """ + SimpleProfiler.reset() + cm = ExitStack() + cm.enter_context(SimpleProfiler.profile(SimpleProfiler.Type.ALL)) + _reset_flat_param_grad_info_if_needed(traversal_utils._get_fsdp_handles(model)) + to_save = not rank0_only or dist.get_rank(group) == 0 or shard_state + + with SimpleProfiler.profile("preprocessing"): + param_to_fqns = _get_param_to_fqns(model) + flat_param_to_fqn = _get_flat_param_to_fqn(model) + is_named_optimizer = _is_named_optimizer(optim_state_dict) + + param_key_to_param = cast( + Dict[Union[int, str], nn.Parameter], + ( + _get_param_id_to_param_from_optim_input(model, optim_input) + if using_optim_input + else _get_param_key_to_param( + optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn + ) + ), + ) + fqn_to_fsdp_param_info = _get_fqn_to_fsdp_param_info(model) + + with SimpleProfiler.profile("preprocessing_with_comm"): + ( + all_optim_state_keys, + optim_state_key_to_param_key, + ) = _map_param_key_to_optim_keys( + optim_state_dict, + group, + param_key_to_param, + param_to_fqns, + fqn_to_fsdp_param_info, + merge_keys=use_orig_params, + ) + + with SimpleProfiler.profile("state_converting"): + convert_fn = ( + _convert_state_with_orig_params + if use_orig_params + else _convert_state_with_flat_params + ) + fsdp_osd_state = convert_fn( + all_optim_state_keys, + optim_state_key_to_param_key, + fqn_to_fsdp_param_info, + optim_state_dict["state"], + to_save, + shard_state, + cpu_offload, + ) + + # At this point, communication is complete and ranks can return early if nothing + # will be saved on that rank. + if not to_save: + return {} + + fsdp_osd: Dict[str, Any] = {"state": fsdp_osd_state} + + flat_param_fqns = set(flat_param_to_fqn.values()) + for key, value in optim_state_dict["state"].items(): + if key in fsdp_osd_state: + continue + if key in flat_param_fqns: + continue + if key in param_key_to_param: + continue + # This key is not recognized by FSDP. 
It may be a user-defined state + # or some parameters state that FSDP is unable to map from + # ``optim.param_groups``. + warnings.warn( + f"Found a optim state, {key}, that FSDP cannot process. FSDP " + "will directly copy everything to the returned state_dict. In " + "most cases, this is a user-defined state that is not " + "associated with any particular parameter. Another possible " + "case is this state is managed by TorchRec. Otherwise, there may " + " be a mismatched assumption of optim_state_dict of this mode." + ) + fsdp_osd_state[key] = value + + if "param_groups" in optim_state_dict: + fsdp_osd["param_groups"] = _unflatten_param_groups( + optim_state_dict, param_key_to_param, param_to_fqns + ) + + cm.close() + SimpleProfiler.dump_and_reset("FSDP _optim_state_dict() profiling: ") + + return fsdp_osd + + +def _get_fqn_to_fsdp_param_info(model: nn.Module) -> Dict[str, FSDPParamInfo]: + """ + Construct the mapping from a param's fqn to its corresponding ``FSDPParamInfo`` + if the param is managed by FSDP. Shared parameters, or original parameters that + are shared across multiple nn.Modules, are required to belong to one and only + one FSDP instance and thus correspond to one ``FlatParameter``. Within the one + ``FlatParameter``, ``FlatParameter._fqns`` only stores the first FQN of a shared + parameter. Thus, the keys in the mapping are guaranteed to map to unique parameters. 
+ """ + + def module_fn(module, prefix, tree_level, fqn_to_param_info): + fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module) + if fsdp_state is None: + return + _lazy_init(fsdp_state, module) + handle = _module_handle(fsdp_state, module) + if not handle: + return + flat_param = handle.flat_param + fsdp_param_info = FSDPParamInfo(fsdp_state, handle, {}, []) + # NOTE: `idx` indexes into the data structures *without* padding + # elements + for idx, local_fqn in enumerate(flat_param._fqns): + fqn = clean_tensor_name(prefix + local_fqn) + if fqn in fqn_to_param_info: + assert fqn_to_param_info[fqn].handle.flat_param is flat_param, fqn + fqn_to_param_info[fqn] = fsdp_param_info + fsdp_param_info.param_indices[fqn] = idx + if flat_param._params is not None: + fsdp_param_info.param_requires_grad.append( + flat_param._params[idx].requires_grad + ) + + def return_fn(fqn_to_param_info): + return fqn_to_param_info + + fqn_to_param_info: Dict[str, FSDPParamInfo] = {} + # FlatParameter._fqns stores the local fqn, starting from the root of the + # FSDP. Using _apply_to_modules() with model (may not be the FSDP root + # module) allows us to construct the global fqn. + return _apply_to_modules( + model, + module_fn, + return_fn, + [fqn for fqn, _ in _named_parameters_with_duplicates(model)], + fqn_to_param_info, + ) + + +@no_type_check +def _set_optim_use_dtensor( + fsdp_state: _FSDPState, + state_dict_settings: StateDictSettings, +) -> None: + # If device_mesh is passed in when initalizing FSDP, we automatically turn the + # _use_dtensor flag to be true for ShardedOptimStateDictConfig() if state_dict_type + # has to be set to SHARDED_STATE_DICT. 
+ if getattr(fsdp_state, "_device_mesh", None): + state_dict_type = state_dict_settings.state_dict_type + if state_dict_type == StateDictType.LOCAL_STATE_DICT: + raise RuntimeError( + "Found state_dict_type LOCAL_STATE_DICT.", + "DeviceMesh is not compatible with LOCAL_STATE_DICT.", + "Please set state_dict_type to SHARDED_STATE_DICT to get DTensor state_dict.", + ) + else: + state_dict_settings.optim_state_dict_config._use_dtensor = True diff --git a/moondream/lib/python3.10/site-packages/torch/distributed/fsdp/_wrap_utils.py b/moondream/lib/python3.10/site-packages/torch/distributed/fsdp/_wrap_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..16f521f65b8d95ccbb719ebdfa916ea1b6efbedc --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/distributed/fsdp/_wrap_utils.py @@ -0,0 +1,262 @@ +import collections +import functools +import inspect +import warnings +from functools import partial +from typing import Any, Callable, Dict, List, Set, Tuple, Type, Union + +import torch.nn as nn +from torch.distributed.fsdp._common_utils import ( + _get_module_fsdp_state, + _override_module_mixed_precision, +) + +from torch.distributed.fsdp.wrap import ( + _construct_wrap_fn, + _or_policy, + _Policy, + _post_order_apply, + _recursive_wrap, + _run_mixed_precision_override_policy, + _wrap_module_cls_individually, +) + + +def _auto_wrap( + root_module: nn.Module, + policy: Union[Callable, _Policy], + ignored_modules: Set[nn.Module], + ignored_params: Set[nn.Parameter], + root_kwargs: Dict[str, Any], + fsdp_fn: Callable, # e.g. `FullyShardedDataParallel` or `fully_shard` +): + """ + Auto wraps modules in ``root_module`` 's tree according to ``policy`` + following a post-order traversal. + + Precondition: ``root_kwargs`` should contain all arguments except + ``module``. This function accepts the kwargs dict directly since it gets + forwarded into the post-order traversal function. 
+ """ + mixed_precision = root_kwargs["mixed_precision"] + is_wrapper = inspect.isclass(fsdp_fn) + # TODO: We may relax this no-nested-wrapping constraint to support manual + # wrapping followed by auto wrapping. + _check_nested_wrapping(root_module) + + if isinstance(policy, _Policy): + root_kwargs["auto_wrap_policy" if is_wrapper else "policy"] = None + target_module_to_kwargs = policy._run_policy( + root_module, ignored_modules, root_kwargs + ) + if mixed_precision is not None: + target_module_to_kwargs = _run_mixed_precision_override_policy( + root_module, + mixed_precision._module_classes_to_ignore, + ignored_modules, + root_kwargs, + target_module_to_kwargs, + ) + overridden_module_classes = _override_module_mixed_precision( + root_module, mixed_precision._module_classes_to_ignore + ) + _warn_on_overridden_mixed_precision(overridden_module_classes) + use_orig_params = root_kwargs.get("use_orig_params", False) + _validate_frozen_params( + root_module, + set(target_module_to_kwargs.keys()), + ignored_params, + use_orig_params, + ) + wrap_fn = _construct_wrap_fn(root_module, target_module_to_kwargs, fsdp_fn) + _post_order_apply(root_module, wrap_fn) + return + + recursive_wrap_kwargs = { + "module": root_module, + "auto_wrap_policy": policy, + "wrapper_cls": fsdp_fn, + "ignored_modules": ignored_modules, + "ignored_params": ignored_params, + "only_wrap_children": True, + } + if mixed_precision is not None: + # Wrap modules of the ignored types separately and register forward + # hooks to cast to fp32 and back to the original dtype, respectively + overridden_module_classes = _override_module_mixed_precision( + root_module, mixed_precision._module_classes_to_ignore + ) + policy = functools.partial( + _or_policy, + policies=[ + policy, + partial( + _wrap_module_cls_individually, + module_classes=mixed_precision._module_classes_to_ignore, + ), + ], + ) + recursive_wrap_kwargs["auto_wrap_policy"] = policy + 
_warn_on_overridden_mixed_precision(overridden_module_classes) + _recursive_wrap(**recursive_wrap_kwargs, **root_kwargs) # type: ignore[arg-type] + + +def _check_nested_wrapping(root_module: nn.Module): + for module_name, module in root_module.named_modules(): + if _get_module_fsdp_state(module) is not None: + raise ValueError( + "FSDP auto wrapping requires modules to not already have " + f"FSDP applied but found {module_name} in\n{root_module}" + ) + + +def _warn_on_overridden_mixed_precision( + overridden_module_classes: Set[Type[nn.Module]], +): + if len(overridden_module_classes) == 0: + return + warnings.warn( + "Both mixed precision and an auto_wrap_policy were specified to FSDP, " + f"where the wrapped module has submodules of type:\n{overridden_module_classes}\n" + "These modules will be wrapped as separate FSDP instacnes with mixed " + "precision disabled." + ) + + +def _validate_frozen_params( + root_module: nn.Module, + modules_to_wrap: Set[nn.Module], + ignored_params: Set[nn.Parameter], + use_orig_params: bool, +): + """ + This checks that, given ``modules_to_wrap``, each module would manage + parameters that are uniformly frozen or non-frozen. This uniformity + requirement is strict for ``use_orig_params=False`` (hard error) and highly + recommended for ``use_orig_params=True`` (user warning). 
+ """ + post_order_named_modules = _get_post_order_named_modules(root_module) + visited_modules: Set[nn.Module] = set() + for module_name, module in post_order_named_modules: + if module in modules_to_wrap: + param_to_fqn = _get_managed_param_to_fqn( + module, ignored_params, visited_modules, module_name + ) + frozen_param_fqns: List[str] = [] + frozen_param_numel = 0 + nonfrozen_param_fqns: List[str] = [] + nonfrozen_param_numel = 0 + for param, fqn in param_to_fqn.items(): + if param.requires_grad: + nonfrozen_param_fqns.append(fqn) + nonfrozen_param_numel += param.numel() + else: + frozen_param_fqns.append(fqn) + frozen_param_numel += param.numel() + if len(frozen_param_fqns) > 0 and len(nonfrozen_param_fqns) > 0: + msg = f"{module_name} has both parameters with requires_grad=True and False." + if use_orig_params: + total_param_numel = frozen_param_numel + nonfrozen_param_numel + msg += ( + " We do not recommend wrapping such modules since " + "the gradient memory usage will be higher than expected " + f"({total_param_numel} numel instead of {nonfrozen_param_numel} numel " + "before sharding via reduce-scatter). " + ) + else: + msg += " FSDP does not support wrapping such modules when use_orig_params=False. " + msg += "If possible, wrap the frozen parameters with FSDP separately.\n" + msg += ( + f"The following parameters have requires_grad=True:\n{nonfrozen_param_fqns}\n" + f"The following parameters have requires_grad=False:\n{frozen_param_fqns}" + ) + if use_orig_params: + warnings.warn(msg) + else: + raise ValueError(msg) + + +def _get_post_order_named_modules( + root_module: nn.Module, +) -> List[Tuple[str, nn.Module]]: + """ + This returns the named modules following a post-order traversal, which is a + valid reverse topological sort. 
We achieve this using the reverse of a + stack-based DFS order instead of reversing ``root_module.named_modules()`` + since the former gives the modules in registration order at each level in + the module tree (as opposed to the reverse), which allows us to error/warn + on the first registered module that violates the condition. + + For example, consider the following module structure: + M( + S1(), + S2( + SS1(), + SS2(), + ), + S3(), + ) + The reverse DFS order is [S1, SS1, SS2, S2, S3, M], while the reverse + ``named_modules()`` order is [S3, SS2, SS1, S2, S1, M]. + """ + visited_modules = {root_module} + stack = [("", root_module)] + # Append and reverse at the end for linear-time algorithm + reverse_post_order_named_modules: List[Tuple[str, nn.Module]] = [] + while stack: + module_name, module = stack.pop() + reverse_post_order_named_modules.append((module_name, module)) + for child_module_name, child_module in module.named_children(): + if child_module is None: # only for overrides of `named_children()` + continue + if child_module not in visited_modules: + visited_modules.add(child_module) + if module_name != "": + child_module_name = module_name + "." + child_module_name + stack.append((child_module_name, child_module)) + post_order_named_modules = list(reversed(reverse_post_order_named_modules)) + return post_order_named_modules + + +def _get_managed_param_to_fqn( + module_to_wrap: nn.Module, + ignored_params: Set[nn.Parameter], + visited_modules: Set[nn.Module], + root_prefix: str, +) -> Dict[nn.Parameter, str]: + """ + This returns a dict that maps managed parameter to its FQN for the given + ``module_to_wrap``. The dict's keys are exactly the parameters that would + be managed by the module, where this is achieved by calling this function + on the modules to wrap in reverse topological order, destructively updating + ``visited_modules``, and not traversing into those modules. 
The FQNs are + prefixed from the root (via ``root_prefix``) to be more informative. + + NOTE: This function is meant to be called pre-wrapping and iteratively in + reverse topological order to cover the full module tree. This differs from + the ``_get_param_to_fqn()`` function meant to be called post-wrapping and + on the full module tree in one shot. Given those differences, we do not try + to unify the two. + """ + param_to_fqn: Dict[nn.Parameter, str] = {} + # Run BFS (or any tree traversal works) + queue = collections.deque([(module_to_wrap, root_prefix)]) + visited_modules.add(module_to_wrap) + while queue: + module, prefix = queue.popleft() + for param_name, param in module.named_parameters(recurse=False): + if param not in ignored_params: + fqn = param_name if prefix == "" else prefix + "." + param_name + param_to_fqn[param] = fqn + for child_module_name, child_module in module.named_children(): + if child_module is None: # only for overrides of `named_children()` + continue + if child_module not in visited_modules: + visited_modules.add(child_module) + child_prefix = ( + child_module_name + if prefix == "" + else prefix + "." 
+ child_module_name + ) + queue.append((child_module, child_prefix)) + return param_to_fqn diff --git a/moondream/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/_utils.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6b45df495b11681a076db1a9bab0928a0123cb0 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/_utils.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/input_reshard.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/input_reshard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26461205e5ef299f7ac3e0a3f83dff38fcd637a9 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/input_reshard.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/torch/masked/__pycache__/__init__.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/masked/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13ec582ed5c596c33ac91a253bcfc1d7c9900be2 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/masked/__pycache__/__init__.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/__init__.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f78d542918c8a1ac37f5344e7c97718e295ab85a Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/binary.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/binary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad6737b4551dff47ace5714f3732821b5b035c38 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/binary.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/passthrough.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/passthrough.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12a132b998c0489818ab06df561e467dcaaf9d3a Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/passthrough.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/reductions.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/reductions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c44804d9e7d3f87f8c81729b05a1af27614428c3 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/reductions.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/passthrough.py b/moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/passthrough.py new file mode 100644 index 0000000000000000000000000000000000000000..91c9e5f81830e953b2d7c6ebc58f05e4c7fe1ecf --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/passthrough.py @@ -0,0 +1,43 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +""" +These are functions that should simply be applied to both mask and data. +Take select or stack as an example. 
This operation can be applied to +both the mask and data of a MaskedTensor and the result wrapped into +a new MaskedTensor as a result. +""" + +import torch + +from .core import _map_mt_args_kwargs, _wrap_result + +__all__ = [] # type: ignore[var-annotated] + + +PASSTHROUGH_FNS = [ + torch.ops.aten.select, + torch.ops.aten.transpose, + torch.ops.aten.split, + torch.ops.aten.t, + torch.ops.aten.slice, + torch.ops.aten.slice_backward, + torch.ops.aten.select_backward, + torch.ops.aten.index, + torch.ops.aten.expand, + torch.ops.aten.view, + torch.ops.aten._unsafe_view, + torch.ops.aten._reshape_alias, + torch.ops.aten.cat, + torch.ops.aten.unsqueeze, +] + + +def _is_pass_through_fn(fn): + return fn in PASSTHROUGH_FNS + + +def _apply_pass_through_fn(fn, *args, **kwargs): + data_args, data_kwargs = _map_mt_args_kwargs(args, kwargs, lambda x: x.get_data()) + result_data = fn(*data_args, **data_kwargs) + mask_args, mask_kwargs = _map_mt_args_kwargs(args, kwargs, lambda x: x.get_mask()) + result_mask = fn(*mask_args, **mask_kwargs) + return _wrap_result(result_data, result_mask) diff --git a/moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/reductions.py b/moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/reductions.py new file mode 100644 index 0000000000000000000000000000000000000000..737f4b240beb91bca5b8b5fe46cc45dd4dce9c63 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/masked/maskedtensor/reductions.py @@ -0,0 +1,173 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates + +import warnings + +import torch + +from .core import is_masked_tensor +from .creation import as_masked_tensor, masked_tensor + +__all__ = [] # type: ignore[var-annotated] + + +def _masked_all_all(data, mask=None): + if mask is None: + return data.all() + return data.masked_fill(~mask, True).all() + + +def _masked_all_dim(data, dim, keepdim=False, mask=None): + if mask is None: + return torch.all(data, dim=dim, keepdim=keepdim) + return torch.all(data.masked_fill(~mask, True), dim=dim, keepdim=keepdim) + + +def _masked_all(*args, **kwargs): + if len(args) == 1 and len(kwargs) == 1: + return _masked_all_all(args[0], mask=kwargs["mask"]) + return _masked_all_dim(*args, **kwargs) + + +def _multidim_any(mask, dim, keepdim): + if isinstance(dim, int): + return _multidim_any(mask, [dim], keepdim) + for d in sorted(dim, reverse=True): + mask = torch.any(mask, dim=d, keepdim=keepdim) + return mask + + +def _get_masked_fn(fn): + if fn == "all": + return _masked_all + return getattr(torch.masked, fn) + + +def _torch_reduce_all(fn): + def reduce_all(self): + masked_fn = _get_masked_fn(fn) + data = self.get_data() + mask = self.get_mask().values() if self.is_sparse else self.get_mask() + # When reduction is "all", then torch.argmin/torch.argmax needs to return the index of the + # element corresponding to the min/max, but this operation isn't supported correctly for sparse layouts. + # Therefore, this implementation calculates it using the strides. 
+ if fn == "all": + result_data = masked_fn(data, mask=mask) + + elif fn in {"argmin", "argmax"} and self.is_sparse_coo(): + sparse_idx = masked_fn(data.values(), mask=mask).to(dtype=torch.int) + indices = ( + data.to_sparse_coo().indices() + if not self.is_sparse_coo() + else data.indices() + ) + idx = indices.unbind(1)[sparse_idx] + stride = data.size().numel() / torch.tensor( + data.size(), device=data.device + ).cumprod(0) + result_data = torch.sum(idx * stride) + + # we simply pass in the values for sparse COO/CSR tensors + elif self.is_sparse: + result_data = masked_fn(masked_tensor(data.values(), mask)) + + else: + result_data = masked_fn(self, mask=mask) + + return as_masked_tensor(result_data, torch.any(mask)) + + return reduce_all + + +def _torch_reduce_dim(fn): + def reduce_dim(self, dim, keepdim=False, dtype=None): + if self.is_sparse: + msg = ( + f"The sparse version of {fn} is not implemented in reductions.\n" + "If you would like this operator to be supported, please file an issue for a feature request at " + "https://github.com/pytorch/maskedtensor/issues with a minimal reproducible code snippet.\n" + "In the case that the semantics for the operator are not trivial, it would be appreciated " + "to also include a proposal for the semantics." 
+ ) + warnings.warn(msg) + return NotImplemented + if not is_masked_tensor(self): + raise TypeError("Input to reduce_dim must be a MaskedTensor") + + masked_fn = _get_masked_fn(fn) + data = self.get_data() + mask = self.get_mask() + if fn == "all": + result_data = masked_fn(data, dim=dim, keepdim=keepdim, mask=mask) + else: + result_data = masked_fn( + self, dim=dim, keepdim=keepdim, dtype=dtype, mask=self.get_mask() + ) + return as_masked_tensor(result_data, _multidim_any(mask, dim, keepdim)) + + return reduce_dim + + +def _torch_reduce(fn): + def reduce_fn(*args, **kwargs): + if len(args) == 1 and len(kwargs) == 0: + return _torch_reduce_all(fn)(args[0]) + return _torch_reduce_dim(fn)(*args, **kwargs) + + return reduce_fn + + +def _reduce_dim_args(input, dim, keepdim=False, dtype=None): + return input, dim, keepdim, dtype + + +def _torch_grad_reduce(fn): + def grad_reduce(*args, **kwargs): + if len(args) == 1 and len(kwargs) == 0: + return _torch_reduce_all(fn)(args[0]) + # TODO: autograd.Function doesn't support kwarg + input, dim, keepdim, dtype = _reduce_dim_args(*args, **kwargs) + return _torch_reduce_dim(fn)(input, dim, keepdim, dtype) + + return grad_reduce + + +REDUCE_NAMES = [ + "sum", + "mean", + "amin", + "amax", + "argmin", + "argmax", + "prod", + "all", + "norm", + "var", + "std", +] + +NATIVE_REDUCE_MAP = { + getattr(torch.ops.aten, name): _torch_reduce(name) for name in REDUCE_NAMES +} +TORCH_REDUCE_MAP = { + getattr(torch, name): _torch_grad_reduce(name) for name in REDUCE_NAMES +} +TENSOR_REDUCE_MAP = { + getattr(torch.Tensor, name): _torch_grad_reduce(name) for name in REDUCE_NAMES +} + +NATIVE_REDUCE_FNS = list(NATIVE_REDUCE_MAP.keys()) +TORCH_REDUCE_FNS = list(TORCH_REDUCE_MAP.keys()) +TENSOR_REDUCE_FNS = list(TENSOR_REDUCE_MAP.keys()) + +def _is_reduction(fn): + return fn in NATIVE_REDUCE_MAP or fn in TORCH_REDUCE_MAP or fn in TENSOR_REDUCE_MAP + + +def _apply_reduction(fn, *args, **kwargs): + if fn in NATIVE_REDUCE_MAP: + return 
NATIVE_REDUCE_MAP[fn](*args, **kwargs) + if fn in TORCH_REDUCE_MAP: + return TORCH_REDUCE_MAP[fn](*args, **kwargs) + if fn in TENSOR_REDUCE_MAP: + return TENSOR_REDUCE_MAP[fn](*args, **kwargs) + return NotImplemented diff --git a/moondream/lib/python3.10/site-packages/torch/utils/__pycache__/throughput_benchmark.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/utils/__pycache__/throughput_benchmark.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2b72b05d128250033871a6e62e0fe20b4b66c20 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/utils/__pycache__/throughput_benchmark.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/torch/utils/__pycache__/weak.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/utils/__pycache__/weak.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13223fb0e9c5e07033d4970696ee382b492a7392 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/utils/__pycache__/weak.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/reference.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/reference.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a5a53a3e9f3fb188d1f335d870bf28421360822 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/reference.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/singleton_int.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/singleton_int.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6ee2d63650eb6d9551be1d3ec168ca166cc0f1c Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/singleton_int.cpython-310.pyc 
differ diff --git a/moondream/lib/python3.10/site-packages/torch/utils/_sympy/singleton_int.py b/moondream/lib/python3.10/site-packages/torch/utils/_sympy/singleton_int.py new file mode 100644 index 0000000000000000000000000000000000000000..870bda554e74808d9423e1d79ebf2dfbdee93f91 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/utils/_sympy/singleton_int.py @@ -0,0 +1,94 @@ +import sympy +from sympy.multipledispatch import dispatch + +__all__ = ["SingletonInt"] + + +class SingletonInt(sympy.AtomicExpr): + # This is probably not super important unless we are in multiple dispatch + # situations with other more exotic Expr types. + _op_priority = 99999 + + def __new__(cls, *args, coeff=None, **kwargs): + instance = super().__new__(cls, *args, **kwargs) + return instance + + # The semantics of this class should match that of NestedIntSymNodeImpl in + # c10/core/NestedIntSymNodeImpl.h + def __init__(self, val, *, coeff=1): + self._val = val + self._coeff = coeff + super().__init__() + + # See NOTE [ Inequalities with nested int ] + def _eval_Eq(self, other): + if ( + isinstance(other, SingletonInt) + and other._val == self._val + and self._coeff == other._coeff + ): + return sympy.true + else: + return sympy.false + + # This is necessary so that calling expr.free_symbols on exprs that contain + # this Singleton does not error + @property + def free_symbols(self): + return set() + + def __mul__(self, other): + if isinstance(other, SingletonInt): + raise ValueError( + "SingletonInt cannot be multiplied by another SingletonInt" + ) + return SingletonInt(self._val, coeff=self._coeff * other) + + def __rmul__(self, other): + if isinstance(other, SingletonInt): + raise ValueError( + "SingletonInt cannot be multiplied by another SingletonInt" + ) + return SingletonInt(self._val, coeff=self._coeff * other) + + # Make sure we promptly raise an error instead of falling back to building + # an expression tree. There are probably more ops, how can we be exhaustive? 
+ def __add__(self, other): + raise NotImplementedError("NYI") + + def __sub__(self, other): + raise NotImplementedError("NYI") + + def __truediv__(self, other): + raise NotImplementedError("NYI") + + def __floordiv__(self, other): + raise NotImplementedError("NYI") + + def __mod__(self, other): + raise NotImplementedError("NYI") + + +# See NOTE [ Inequalities with nested int ] +@dispatch(sympy.Integer, SingletonInt) +def _eval_is_ge(a, b): + if a < 2: + return sympy.false + raise ValueError("Symbolic SingletonInt: Relation is indeterminate") + + +@dispatch(SingletonInt, sympy.Integer) # type: ignore[no-redef] +def _eval_is_ge(a, b): # noqa: F811 + if b <= 2: + return sympy.true + raise ValueError("Symbolic SingletonInt: Relation is indeterminate") + + +@dispatch(SingletonInt, SingletonInt) # type: ignore[no-redef] +def _eval_is_ge(a, b): # noqa: F811 + if a._val == b._val: + if a._coeff >= b._coeff: + return sympy.true + else: + return sympy.false + raise ValueError("Symbolic SingletonInt: Relation is indeterminate") diff --git a/moondream/lib/python3.10/site-packages/torch/utils/_sympy/solve.py b/moondream/lib/python3.10/site-packages/torch/utils/_sympy/solve.py new file mode 100644 index 0000000000000000000000000000000000000000..4d1113bea891df2be89e7bb324a558a73afc0425 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/utils/_sympy/solve.py @@ -0,0 +1,175 @@ +import logging + +from typing import Dict, Optional, Tuple, Type + +import sympy + +from torch.utils._sympy.functions import FloorDiv + +log = logging.getLogger(__name__) + +_MIRROR_REL_OP: Dict[Type[sympy.Basic], Type[sympy.Rel]] = { + sympy.Eq: sympy.Eq, + sympy.Ne: sympy.Ne, + sympy.Ge: sympy.Le, + sympy.Gt: sympy.Lt, + sympy.Le: sympy.Ge, + sympy.Lt: sympy.Gt, +} + +INEQUALITY_TYPES = (sympy.Gt, sympy.Ge, sympy.Lt, sympy.Le) + + +def mirror_rel_op(type: Type) -> Optional[Type[sympy.Rel]]: + return _MIRROR_REL_OP.get(type, None) + + +# Tries to simplify 'expr', so as to leave only 'thing' 
in the left-hand side. +# +# Returns a tuple of: +# 1. The simplified expression +# 2. The expression on the right-hand side +# +# Returns 'None' if it can't reach a state where the only thing in the left +# hand side is 'thing'. +# +# 'trials': number of times 'try_solve' will try to isolate 'thing' to the +# left-hand side. +# +# 'floordiv_inequality': flag to enable conversion of 'FloorDiv' into +# inequalities. +def try_solve( + expr: sympy.Basic, + thing: sympy.Basic, + trials: int = 5, + floordiv_inequality: bool = True, +) -> Optional[Tuple[sympy.Rel, sympy.Basic]]: + mirror = mirror_rel_op(type(expr)) + + # Ignore unsupported expressions: + # - Those that are not relational operations + # - Those that don't have a mirror (just avoiding unexpected classes) + if not isinstance(expr, sympy.Rel) or mirror is None: + log.debug("expression with unsupported type: %s", type(expr)) + return None + + lhs_has_thing = expr.lhs.has(thing) + rhs_has_thing = expr.rhs.has(thing) + + # Give up when 'thing' appears on both sides of the relational expression. + # That is because, as is, we assume the thing we are trying to isolate is + # only on the right-hand side. + if lhs_has_thing and rhs_has_thing: + log.debug("thing (%s) found in both sides of expression: %s", thing, expr) + return None + + # Try considering both LHS and RHS by mirroring the original expression: + # a < b ==> b > a + expressions = [] + + # Add each version of 'expr' if 'thing' is in its left-hand side. + if lhs_has_thing: + expressions.append(expr) + if rhs_has_thing: + expressions.append(mirror(expr.rhs, expr.lhs)) + + for e in expressions: + if e is None: + continue + + assert isinstance(e, sympy.Rel) + + for _ in range(trials): + trial = _try_isolate_lhs(e, thing, floordiv_inequality=floordiv_inequality) + # Stop if there was no change in this trial. + if trial == e: + break + e = trial # type: ignore[assignment] + + # Return if we were able to isolate 'thing' on the left-hand side. 
+ if isinstance(e, sympy.Rel) and e.lhs == thing: + return e, e.rhs + + return None + + +def _try_isolate_lhs( + expr: sympy.Basic, thing: sympy.Basic, floordiv_inequality: bool +) -> sympy.Basic: + e = expr + op = type(expr) + + if isinstance(e, sympy.Rel): + # Move any constants in the left-hand side to the right-hand side. + lhs_not_thing = ( + sum([a for a in e.lhs.args if not a.has(thing)]) + if isinstance(e.lhs, sympy.Add) + else 0 + ) + e = op(expr.lhs - lhs_not_thing, expr.rhs - lhs_not_thing) # type: ignore[attr-defined] + + # Divide both sides by the factors that don't contain thing. + if isinstance(e, sympy.Rel) and isinstance(e.lhs, sympy.Mul): + lhs, rhs = e.args + other = sympy.Mul(*[a for a in lhs.args if not a.has(thing)]) + + # If we can't tell whether 'other' is negative or positive, we do nothing. + # That is because we don't know whether we have mirror the operation or not. + if not (isinstance(e, INEQUALITY_TYPES) and other.is_negative is None): + # Divide both sides by 'other'. + lhs = lhs / other + rhs = rhs / other + + # If 'e' is an inequality and 'other' is negative, we have to + # mirror the expression. 
+ if isinstance(e, INEQUALITY_TYPES) and other.is_negative: + op = mirror_rel_op(op) # type: ignore[assignment] + + assert op is not None + e = op(lhs, rhs) + + ################################################################################ + # left-hand side is FloorDiv + ################################################################################ + # + # Given the expression: a // b op c + # where 'op' is a relational operation, these rules only work if: + # - b > 0 + # - c is an integer + if ( + floordiv_inequality + and isinstance(e, sympy.Rel) + and isinstance(e.lhs, FloorDiv) + and e.lhs.divisor.is_positive + and e.rhs.is_integer + ): + # a // b == expr + # => a >= (b * expr) and a < (b * (expr + 1)) + if isinstance(expr, sympy.Eq): + numerator, denominator = e.lhs.args + return sympy.And( + sympy.Ge(numerator, (e.rhs * denominator)), # type: ignore[arg-type] + sympy.Lt(numerator, ((e.rhs + 1) * denominator)), # type: ignore[arg-type] + ) + # a // b != expr + # => a < (b * expr) or a >= (b * (expr + 1)) + if isinstance(expr, sympy.Ne): + numerator, denominator = e.lhs.args + return sympy.Or( + sympy.Lt(numerator, (e.rhs * denominator)), # type: ignore[arg-type] + sympy.Ge(numerator, ((e.rhs + 1) * denominator)), # type: ignore[arg-type] + ) + # The transformations below only work if b is positive. + # Note: we only have this information for constants. 
+ # a // b > expr => a >= b * (expr + 1) + # a // b >= expr => a >= b * expr + if isinstance(expr, (sympy.Gt, sympy.Ge)): + quotient = e.rhs if isinstance(expr, sympy.Ge) else (e.rhs + 1) # type: ignore[arg-type] + return sympy.Ge(e.lhs.args[0], (quotient * e.lhs.args[1])) # type: ignore[arg-type] + # a // b < expr => a < b * expr + # a // b <= expr => a < b * (expr + 1) + if isinstance(expr, (sympy.Lt, sympy.Le)): + quotient = e.rhs if isinstance(expr, sympy.Lt) else (e.rhs + 1) # type: ignore[arg-type] + return sympy.Lt(e.lhs.args[0], (quotient * e.lhs.args[1])) # type: ignore[arg-type] + + return e diff --git a/moondream/lib/python3.10/site-packages/torch/utils/_sympy/value_ranges.py b/moondream/lib/python3.10/site-packages/torch/utils/_sympy/value_ranges.py new file mode 100644 index 0000000000000000000000000000000000000000..d6f4ca9a6b77d7f5f9178fa3e05f842ee324edf2 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/utils/_sympy/value_ranges.py @@ -0,0 +1,782 @@ +from __future__ import annotations + +import dataclasses +import itertools +import sympy +from sympy.logic.boolalg import BooleanAtom, Boolean as SympyBoolean +import operator +import math +import logging +import torch +from typing import Dict, Optional, SupportsFloat, TypeVar, Generic, Union, overload, Callable, TYPE_CHECKING +from typing_extensions import TypeGuard + +from torch._prims_common import dtype_to_type +from .interp import sympy_interp +from .functions import Round, RoundDecimal + +log = logging.getLogger(__name__) + +__all__ = ["ValueRanges", "ValueRangeAnalysis", "bound_sympy"] + +_T = TypeVar('_T', sympy.Expr, SympyBoolean) + +class ValueRangeError(RuntimeError): + pass + + +# Like sympify, but supports less stuff, and also ensures that direct +# sympy expressions don't have free variables +def simple_sympify(e): + if isinstance(e, bool): + return sympy.true if e else sympy.false + elif isinstance(e, int): + return sympy.Integer(e) + elif isinstance(e, float): + # infinity 
is special; we use it to bracket integers as well + if math.isinf(e): + return sympy.oo if e > 0 else -sympy.oo + return sympy.Float(e) + elif isinstance(e, sympy.Expr): + assert e.is_number, e + # NaNs can occur when doing things like 0 * sympy.oo, but it is better + # if the operator notices this and takes care of it, because sometimes + # the NaN is inappropriate (for example, for ints, the [-oo, oo] range + # should go to zero when multiplied with [0, 0]) + assert e != sympy.nan + return e + elif isinstance(e, BooleanAtom): + return e + else: + raise AssertionError(f"not simple sympy type {type(e)}: {e}") + + +# Sympy atomics only. Unlike <=, it also works on Sympy bools. +def sympy_generic_le(lower, upper): + if isinstance(lower, sympy.Expr): + assert isinstance(upper, sympy.Expr) + return lower <= upper + else: + # only negative condition is True > False + assert isinstance(lower, SympyBoolean) and isinstance(upper, SympyBoolean) + return not (lower and not upper) + + +def vr_is_bool(vr: ValueRanges[_T]) -> TypeGuard[ValueRanges[SympyBoolean]]: + return vr.is_bool + + +def vr_is_expr(vr: ValueRanges[_T]) -> TypeGuard[ValueRanges[sympy.Expr]]: + return not vr.is_bool + + +ExprIn = Union[int, float, sympy.Expr] +BoolIn = Union[bool, SympyBoolean] +AllIn = Union[ExprIn, BoolIn] +ExprFn = Callable[[sympy.Expr], sympy.Expr] +ExprFn2 = Callable[[sympy.Expr, sympy.Expr], sympy.Expr] +BoolFn = Callable[[SympyBoolean], SympyBoolean] +BoolFn2 = Callable[[SympyBoolean, SympyBoolean], SympyBoolean] +AllFn = Union[ExprFn, BoolFn] +AllFn2 = Union[ExprFn2, BoolFn2] + + +@dataclasses.dataclass(frozen=True) +class ValueRanges(Generic[_T]): + if TYPE_CHECKING: + # ruff doesn't understand circular references but mypy does + ExprVR = ValueRanges[sympy.Expr] # noqa: F821 + BoolVR = ValueRanges[SympyBoolean] # noqa: F821 + AllVR = Union[ExprVR, BoolVR] + + # Although the type signature here suggests you can pass any + # sympy expression, in practice the analysis here only works + 
# with constant sympy expressions + lower: _T + upper: _T + is_bool: bool + + @overload + def __init__(self: ValueRanges[sympy.Expr], lower: ExprIn, upper: ExprIn) -> None: + ... + + @overload + def __init__(self: ValueRanges[SympyBoolean], lower: BoolIn, upper: BoolIn) -> None: + ... + + def __init__(self, lower: AllIn, upper: AllIn) -> None: + lower = simple_sympify(lower) + upper = simple_sympify(upper) + # TODO: when the bounds have free variables, this may be + # nontrivial to actually verify + if not sympy_generic_le(lower, upper): + raise ValueRangeError(f"Invalid ranges [{lower}:{upper}]") + # Because this is a frozen class + object.__setattr__(self, "lower", lower) + object.__setattr__(self, "upper", upper) + object.__setattr__(self, "is_bool", isinstance(lower, SympyBoolean)) + assert isinstance(upper, SympyBoolean) == self.is_bool + + def boolify(self) -> ValueRanges[SympyBoolean]: + if vr_is_bool(self): + return self + elif self == ValueRanges.unknown(): + return ValueRanges.unknown_bool() + else: + raise AssertionError(f"not bool like {self}") + + def __contains__(self, x: AllIn) -> bool: + x = simple_sympify(x) + return sympy_generic_le(self.lower, x) and sympy_generic_le(x, self.upper) + + def issubset(self, other): + return sympy_generic_le(other.lower, self.lower) and sympy_generic_le(self.upper, other.upper) + + def tighten(self, other) -> ValueRanges: + """Given two ValueRanges, returns their intersection""" + return self & other + + # Intersection + @overload + def __and__(self: ValueRanges[sympy.Expr], other: ValueRanges[sympy.Expr]) -> ValueRanges[sympy.Expr]: + ... + + @overload + def __and__(self: ValueRanges[SympyBoolean], other: ValueRanges[SympyBoolean]) -> ValueRanges[SympyBoolean]: + ... 
+ + def __and__(self: AllVR, other: AllVR) -> AllVR: + if other == ValueRanges.unknown(): + return self + if self == ValueRanges.unknown(): + return other + assert self.is_bool == other.is_bool, (self, other) + if self.is_bool: + return ValueRanges(sympy.Or(self.lower, other.lower), sympy.And(self.upper, other.upper)) + else: + return ValueRanges(sympy.Max(self.lower, other.lower), sympy.Min(self.upper, other.upper)) + + # Union + @overload + def __or__(self: ValueRanges[sympy.Expr], other: ValueRanges[sympy.Expr]) -> ValueRanges[sympy.Expr]: + ... + + @overload + def __or__(self: ValueRanges[SympyBoolean], other: ValueRanges[SympyBoolean]) -> ValueRanges[SympyBoolean]: + ... + + def __or__(self: AllVR, other: AllVR) -> AllVR: + if ValueRanges.unknown() in (self, other): + return ValueRanges.unknown() + assert self.is_bool == other.is_bool, (self, other) + if self.is_bool: + return ValueRanges(sympy.And(self.lower, other.lower), sympy.Or(self.upper, other.upper)) + else: + return ValueRanges(sympy.Min(self.lower, other.lower), sympy.Max(self.upper, other.upper)) + + def is_singleton(self) -> bool: + return self.lower == self.upper + + # TODO: this doesn't work with bools but arguably it should + @staticmethod + def unknown() -> ValueRanges[sympy.Expr]: + return ValueRanges(-sympy.oo, sympy.oo) + + @staticmethod + def unknown_bool() -> ValueRanges[SympyBoolean]: + return ValueRanges(sympy.false, sympy.true) + + @overload + @staticmethod + # work around the fact that bool and int overlap + def wrap(arg: Union[ExprIn, ExprVR]) -> ExprVR: # type: ignore[overload-overlap] + ... + + @overload + @staticmethod + def wrap(arg: Union[BoolIn, BoolVR]) -> BoolVR: + ... 
+ + @staticmethod + def wrap(arg: Union[AllIn, AllVR]) -> AllVR: + if isinstance(arg, ValueRanges): + return arg + # arg is either ExprIn or BoolIn, but we don't know it here + return ValueRanges(arg, arg) # type: ignore[arg-type] + + @staticmethod + def increasing_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR: + """Increasing: x <= y => f(x) <= f(y).""" + x = ValueRanges.wrap(x) + return ValueRanges(fn(x.lower), fn(x.upper)) + + @overload + @staticmethod + def decreasing_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR: + ... + + @overload + @staticmethod + def decreasing_map(x: Union[BoolIn, BoolVR], fn: BoolFn) -> BoolVR: + ... + + @staticmethod + def decreasing_map(x: Union[AllIn, AllVR], fn: AllFn) -> AllVR: + """Decreasing: x <= y => f(x) >= f(y).""" + x = ValueRanges.wrap(x) + # consistently either Expr or Bool, but we don't know it here + return ValueRanges(fn(x.upper), fn(x.lower)) # type: ignore[arg-type] + + @staticmethod + def monotone_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR: + """It's increasing or decreasing.""" + x = ValueRanges.wrap(x) + l = fn(x.lower) + u = fn(x.upper) + return ValueRanges(min(l, u), max(l, u)) + + @staticmethod + def convex_min_zero_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR: + """Fn is convex and has a minimum at 0.""" + x = ValueRanges.wrap(x) + if 0 in x: + return ValueRanges(0, max(fn(x.lower), fn(x.upper))) + else: + return ValueRanges.monotone_map(x, fn) + + @overload + @staticmethod + def coordinatewise_increasing_map(x: Union[ExprIn, ExprVR], y: Union[ExprIn, ExprVR], fn: ExprFn2) -> ExprVR: + ... + + @overload + @staticmethod + def coordinatewise_increasing_map(x: Union[BoolIn, BoolVR], y: Union[BoolIn, BoolVR], fn: BoolFn2) -> BoolVR: + ... + + @staticmethod + def coordinatewise_increasing_map(x: Union[AllIn, AllVR], y: Union[AllIn, AllVR], fn: AllFn2) -> AllVR: + """ + It's increasing on each coordinate. 
+ + Mathematically: + For every 1 <= i <= n and x_i <= y_i we have that + f(x1, .., xn) <= f(x1, , yi, ..., xn) + """ + x, y = ValueRanges.wrap(x), ValueRanges.wrap(y) + return ValueRanges( + fn(x.lower, y.lower), # type: ignore[arg-type] + fn(x.upper, y.upper), # type: ignore[arg-type] + ) + + @classmethod + def coordinatewise_monotone_map(cls, x, y, fn): + """It's increasing or decreasing on each coordinate.""" + x, y = cls.wrap(x), cls.wrap(y) + products = [ + fn(a, b) + for a, b in itertools.product([x.lower, x.upper], [y.lower, y.upper]) + ] + return ValueRanges(min(products), max(products)) + +class SymPyValueRangeAnalysis: + """ + It gives bounds on a SymPy operator given bounds on its arguments + See the function `bound_sympy` for a function that applies this logic to a full SymPy expression + """ + + @staticmethod + def constant(value, dtype): + # NB: value is NOT a sympy expression, it's a constant! + is_python = isinstance(value, (int, float, bool)) + assert is_python or isinstance(value, (BooleanAtom, sympy.Integer, sympy.Number)) + + # using nan makes subsequent computation throw, and for the purposes of optimization + # returning -math.inf - math.inf is equivalent to giving up + if isinstance(value, SupportsFloat) and math.isnan(value): + return ValueRanges.unknown() + + if is_python: + type_ = dtype_to_type(dtype) + value = type_(value) + else: + # We do a type check on a best-effort basis + # We don't want to force a cast to sympy.Float if the value is Rational to avoid losing precision + if dtype == torch.bool: + assert isinstance(value, BooleanAtom) + elif dtype.is_floating_point: + assert not value.is_finite or value.is_real + else: + # dtype is intXX + assert value.is_integer + + return ValueRanges.wrap(value) + + @staticmethod + def not_(a): + a = ValueRanges.wrap(a) + a = a.boolify() + assert a.is_bool + return ValueRanges.decreasing_map(a, sympy.Not) + + @staticmethod + def or_(a, b): + return ValueRanges.coordinatewise_increasing_map(a, b, 
sympy.Or) + + @staticmethod + def and_(a, b): + return ValueRanges.coordinatewise_increasing_map(a, b, sympy.And) + + @staticmethod + def eq(a, b): + a = ValueRanges.wrap(a) + b = ValueRanges.wrap(b) + if a.is_singleton() and b.is_singleton() and a.lower == b.lower: + return ValueRanges.wrap(sympy.true) + elif a.lower > b.upper or b.lower > a.upper: # ranges disjoint + return ValueRanges.wrap(sympy.false) + return ValueRanges(sympy.false, sympy.true) + + @classmethod + def ne(cls, a, b): + return cls.not_(cls.eq(a, b)) + + @classmethod + def lt(cls, a, b): + a = ValueRanges.wrap(a) + b = ValueRanges.wrap(b) + assert a.is_bool == b.is_bool + if a.is_bool: + return cls.and_(cls.not_(a), b) + else: + if a.upper < b.lower: + return ValueRanges.wrap(sympy.true) + elif a.lower >= b.upper: + return ValueRanges.wrap(sympy.false) + return ValueRanges(sympy.false, sympy.true) + + @classmethod + def gt(cls, a, b): + return cls.lt(b, a) + + @classmethod + def le(cls, a, b): + return cls.not_(cls.gt(a, b)) + + @classmethod + def ge(cls, a, b): + return cls.not_(cls.lt(a, b)) + + @staticmethod + def add(a, b): + return ValueRanges.coordinatewise_increasing_map(a, b, operator.add) + + @classmethod + def mul(cls, a, b): + a = ValueRanges.wrap(a) + b = ValueRanges.wrap(b) + + assert a.is_bool == b.is_bool + if a.is_bool: + return cls.and_(a, b) + + def safe_mul(a, b): + # Make unknown() * wrap(0) == wrap(0) + if a == 0: + return a + elif b == 0: + return b + else: + return a * b + + return ValueRanges.coordinatewise_monotone_map(a, b, safe_mul) + + @classmethod + def div(cls, a, b): + return cls.truediv(a, b) + + @staticmethod + def truediv(a, b): + a = ValueRanges.wrap(a) + b = ValueRanges.wrap(b) + if 0 in b or ((-sympy.oo in a or sympy.oo in a) and (-sympy.oo in b or sympy.oo in b)): + return ValueRanges.unknown() + else: + return ValueRanges.coordinatewise_monotone_map(a, b, operator.truediv) + + @staticmethod + def floordiv(a, b): + a = ValueRanges.wrap(a) + b = 
ValueRanges.wrap(b) + if 0 in b or ((-sympy.oo in a or sympy.oo in a) and (-sympy.oo in b or sympy.oo in b)): + return ValueRanges.unknown() + else: + return ValueRanges.coordinatewise_monotone_map(a, b, operator.floordiv) + + @staticmethod + def mod(x, y): + x = ValueRanges.wrap(x) + y = ValueRanges.wrap(y) + if x.is_singleton() and y.is_singleton() and y.lower != 0: + return ValueRanges.wrap(x.lower % y.lower) + if y.lower <= 0: + return ValueRanges.unknown() + return ValueRanges(0, y.upper) + + @classmethod + def modular_indexing(cls, a, b, c): + return cls.mod(cls.floordiv(a, b), c) + + @classmethod + def is_non_overlapping_and_dense_indicator(cls, *args): + return ValueRanges.unknown() + + @classmethod + def pow(cls, a, b): + def is_integer(val): + return isinstance(val, int) or ( + hasattr(val, "is_integer") and val.is_integer + ) + + a = ValueRanges.wrap(a) + b = ValueRanges.wrap(b) + # Not implemented yet. It's a bit tricky + # If you want to implement it, compute the partial derivatives of a ** b + # and check the ranges where the function is increasing / decreasing + # Another non-tight way of doing this is defaulting to doing noting that for a > 0, a ** b == exp(b * log(a)) + # If this second option is implemented, by carefult about the types and possible infinities here and there. 
+ if not b.is_singleton(): + return ValueRanges.unknown() + + b = b.lower + if a.is_singleton(): + a = a.lower + r = a ** b + if not r.is_finite: + return ValueRanges.unknown() + return ValueRanges.wrap(r) + + if b == 0: + if not a.lower.is_finite: + return ValueRanges.unknown() + type_ = sympy.Float if a.lower.is_real else sympy.Integer + return ValueRanges.wrap(type_(1)) + + if b < 0: + a = cls.reciprocal(a) + b = -b + + if a == ValueRanges.unknown(): + return ValueRanges.unknown() + + # Here b > 0 + if not is_integer(b): + # If the base is positive, then we're good, otherwise nothing's defined + if a.lower >= 0: + return ValueRanges.increasing_map(a, lambda x: x ** b) + else: + return ValueRanges.unknown() + else: + # b > 0 integer + if b % 2 == 0: + # x^n where n is even + return ValueRanges.convex_min_zero_map(a, lambda x: x ** b) + else: + # x^n where n is odd + return ValueRanges.increasing_map(a, lambda x: x ** b) + + @staticmethod + def reciprocal(x): + """ Needed as it's used in pow, but it won't appear on a SymPy expression """ + x = ValueRanges.wrap(x) + if 0 in x: + return ValueRanges.unknown() + else: + return ValueRanges.decreasing_map(x, lambda y: 1 / y) + + @staticmethod + def abs(x): + return ValueRanges.convex_min_zero_map(x, abs) + + @staticmethod + def exp(x): + return ValueRanges.increasing_map(x, sympy.functions.elementary.exponential.exp) + + @staticmethod + def log(x): + x = ValueRanges.wrap(x) + if x.lower <= 0: + return ValueRanges.unknown() + return ValueRanges.increasing_map(x, sympy.log) + + @classmethod + def minimum(cls, a, b): + return cls.min_or_max(a, b, sympy.Min) + + @classmethod + def maximum(cls, a, b): + return cls.min_or_max(a, b, sympy.Max) + + @staticmethod + def min_or_max(a, b, fn): + a = ValueRanges.wrap(a) + b = ValueRanges.wrap(b) + + # Performs upcasting first + def fn_(x: sympy.Expr, y: sympy.Expr) -> sympy.Expr: + # Poorman's version of upcasting in Sympy + # Inf is not a float... 
+ if x.is_Integer and y.is_Integer: + result_type = sympy.Integer + elif x.is_rational and y.is_rational: + result_type = sympy.Rational + else: + assert x.is_real or not x.is_finite or y.is_real or not y.is_finite + result_type = sympy.Float + return fn(result_type(x), result_type(y)) + + return ValueRanges.coordinatewise_increasing_map(a, b, fn_) + + @classmethod + def floor(cls, x): + return ValueRanges.increasing_map(x, sympy.functions.elementary.integers.floor) + + @classmethod + def ceil(cls, x): + return ValueRanges.increasing_map(x, sympy.functions.elementary.integers.ceiling) + + @classmethod + def round(cls, number, ndigits=None): + if ndigits is None: + fn = Round + else: + assert ndigits.is_singleton() + ndigits = ndigits.lower + # We can't use functools.partial here since sympy doesn't support keyword arguments, but we have to bind + # the second parameter. + fn = lambda number: RoundDecimal(number, ndigits) # type: ignore[misc, assignment] # noqa: E731 + + return ValueRanges.increasing_map(number, fn) + + # It's used in some models on symints + @staticmethod + def sqrt(x): + x = ValueRanges.wrap(x) + if x.lower < 0: + return ValueRanges.unknown() + return ValueRanges.increasing_map(x, sympy.sqrt) + + @staticmethod + def where(a, b, c): + b = ValueRanges.wrap(b) + c = ValueRanges.wrap(c) + a = a.boolify() + assert b.is_bool == c.is_bool + if b.is_bool: + return ValueRanges(sympy.And(b.lower, c.lower), sympy.Or(b.upper, c.upper)) + else: + return ValueRanges(sympy.Min(b.lower, c.lower), sympy.Max(b.upper, c.upper)) + + # expr_cond_pair is used to represent a single (expr, condition) pair in piecewise. 
+ # We just return the value range of the expression and its corresponding condition as a tuple + # and defer the analysis to piecewise + @staticmethod + def expr_cond_pair(a, b): + b = b.boolify() + return (a, b) + + # piecewise function can be used to convert a SymBool to SymInt: + # int_expr = Piecewise((1, bool_expr), (0, True)), it evalutes to 1 when sym_bool is True and 0 otherwise. + # + # ranges is a sequence of (expr_range, condition_range) pairs. The range pair is constructed in expr_cond_pair. + # The ValueRange of Piecewise is just the union of all expr ranges whose condition expr can be True. + @staticmethod + def piecewise(*ranges): + init_range = None + for expr_range, cond_range in ranges: + if sympy.true in cond_range: + if init_range is None: + init_range = expr_range + else: + init_range = init_range | expr_range + return init_range + + @staticmethod + def cos(x): + # TODO: We should tighten value ranges + # If input range span is pi + 2*pi*k, then output range is (-1, 1) + # otherwise the minimum of the value of the function on the extremes + return ValueRanges(-1.0, 1.0) + + @staticmethod + def cosh(x): + x = ValueRanges.wrap(x) + if x.lower > 0: + return ValueRanges.increasing_map(x, sympy.cosh) + elif x.upper < 0: + return ValueRanges.decreasing_map(x, sympy.cosh) + return ValueRanges(0.0, sympy.oo) + + @staticmethod + def sin(x): + # TODO: We should tighten value ranges + # See details on cos + return ValueRanges(-1.0, 1.0) + + @staticmethod + def sinh(x): + return ValueRanges.increasing_map(x, sympy.sinh) + + @staticmethod + def tan(x): + return ValueRanges(-sympy.oo, sympy.oo) + + @staticmethod + def tanh(x): + return ValueRanges.increasing_map(x, sympy.tanh) + + @staticmethod + def asin(x): + x = ValueRanges.wrap(x) + if -1 <= x.lower and x.upper <= 1: + return ValueRanges.increasing_map(x, sympy.asin) + return ValueRanges.unknown() + + @staticmethod + def acos(x): + x = ValueRanges.wrap(x) + if -1 <= x.lower and x.upper <= 1: + return 
ValueRanges.decreasing_map(x, sympy.acos) + return ValueRanges.unknown() + + @staticmethod + def atan(x): + return ValueRanges.increasing_map(x, sympy.atan) + + +class ValueRangeAnalysis(SymPyValueRangeAnalysis): + def __init__(self): + self.name = "ValueRangeAnalysis" + boolean_operators = ( + "xor", + "logical_and", + "logical_or", + "logical_not", + ) + for op in boolean_operators: + setattr(self, op, self.bool_handler) + + @staticmethod + def bool_handler(*args, **kwargs): + # just assuming bools can have both values + return ValueRanges(sympy.false, sympy.true) # type: ignore[arg-type] + + @staticmethod + def default_handler(*args, **kwargs): + # many ops are unlikely to show up in optimizable indexing compute, + # so we dont have full coverage + return ValueRanges.unknown() + + def load(self, name: str, index: sympy.Expr): + return ValueRanges.unknown() + + def store(self, name, index, value, mode=None): + return + + def reduction(self, name, dtype, src_dtype, reduction_type, index, value): + return ValueRanges.unknown() + + def index_expr(self, index, dtype): + assert isinstance(index, ValueRanges) + return index + + @staticmethod + def to_dtype(x, dtype: torch.dtype, src_dtype: Optional[torch.dtype] = None): + x = ValueRanges.wrap(x) + + if dtype == torch.bool: + if x.is_singleton(): + return ValueRanges.wrap(x.lower != 0) + elif 0 not in x: + return ValueRanges.wrap(sympy.true) + else: + return ValueRanges(sympy.false, sympy.true) + + def cast(x, dtype): + # dtype is int or float + if dtype.is_floating_point: + return sympy.Float(x) + else: + try: + return sympy.Integer(x) + except TypeError: + # inf cannot be cast to Integer + return x + + if x.is_bool: + if x.is_singleton(): + val = 1 if x.lower else 0 + return ValueRanges.wrap(cast(val, dtype)) + else: + return ValueRanges(cast(0, dtype), cast(1, dtype)) + else: + # int to float or float to int + return ValueRanges(cast(x.lower, dtype), cast(x.upper, dtype)) + + @staticmethod + def square(x): + return 
ValueRanges.convex_min_zero_map(x, lambda y: y * y) + + @staticmethod + def neg(x): + return ValueRanges.decreasing_map(x, operator.neg) + + @classmethod + def truncdiv(cls, a, b): + x = cls.truediv(a, b) + if x == ValueRanges.unknown(): + return x + + def trunc(x): + return sympy.Integer(x) if x.is_finite else x + + return ValueRanges.increasing_map(x, trunc) + + @classmethod + def sub(cls, a, b): + return cls.add(a, cls.neg(b)) + + def __getattr__(self, name): + log.debug("unhandled ValueRange op %s", name) + return self.default_handler + + +def bound_sympy(expr: sympy.Expr, ranges: Optional[Dict[sympy.Symbol, ValueRanges]] = None) -> ValueRanges: + if isinstance(expr, sympy.Number): + return ValueRanges.wrap(expr) + + ranges = ranges or {} + + # If there's a tracing context, augment available constrained ranges. + context = torch._guards.TracingContext.try_get() + if context and context.fake_mode.shape_env: + ranges = {**context.fake_mode.shape_env.var_to_range, **ranges} + + unbounded_vars = expr.free_symbols - ranges.keys() + if unbounded_vars: + # Give some bounds to the free variables via their SymPy assumptions + # TODO A better way of doing this would be to assign them a range upon creation, as + # size variables can come with a lower bound of 2, as we specialise on 0 and 1 + unbounded_ranges: Dict[sympy.Symbol, ValueRanges] = {} + for s in unbounded_vars: + assert s.is_integer # type: ignore[attr-defined] + if s.is_positive: # type: ignore[attr-defined] + lower = 1 + elif s.is_nonnegative: # type: ignore[attr-defined] + lower = 0 + else: + lower = -math.inf # type: ignore[assignment] + unbounded_ranges[s] = ValueRanges(lower, math.inf) # type: ignore[index] + ranges = {**ranges, **unbounded_ranges} + + return sympy_interp(SymPyValueRangeAnalysis, ranges, expr) diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/__init__.py b/moondream/lib/python3.10/site-packages/torch/utils/data/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..c3c5a6fb866dc770cbc7bce8b77e819b02865f11 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/utils/data/__init__.py @@ -0,0 +1,76 @@ +# TODO(VitalyFedyunin): Rearranging this imports leads to crash, +# need to cleanup dependencies and fix it +from torch.utils.data.sampler import ( + BatchSampler, + RandomSampler, + Sampler, + SequentialSampler, + SubsetRandomSampler, + WeightedRandomSampler, +) +from torch.utils.data.dataset import ( + ChainDataset, + ConcatDataset, + Dataset, + IterableDataset, + StackDataset, + Subset, + TensorDataset, + random_split, +) +from torch.utils.data.datapipes.datapipe import ( + DFIterDataPipe, + DataChunk, + IterDataPipe, + MapDataPipe, +) +from torch.utils.data.dataloader import ( + DataLoader, + _DatasetKind, + get_worker_info, + default_collate, + default_convert, +) +from torch.utils.data.distributed import DistributedSampler +from torch.utils.data.datapipes._decorator import ( + argument_validation, + functional_datapipe, + guaranteed_datapipes_determinism, + non_deterministic, + runtime_validation, + runtime_validation_disabled, +) + +__all__ = ['BatchSampler', + 'ChainDataset', + 'ConcatDataset', + 'DFIterDataPipe', + 'DataChunk', + 'DataLoader', + 'Dataset', + 'DistributedSampler', + 'IterDataPipe', + 'IterableDataset', + 'MapDataPipe', + 'RandomSampler', + 'Sampler', + 'SequentialSampler', + 'StackDataset', + 'Subset', + 'SubsetRandomSampler', + 'TensorDataset', + 'WeightedRandomSampler', + '_DatasetKind', + 'argument_validation', + 'default_collate', + 'default_convert', + 'functional_datapipe', + 'get_worker_info', + 'guaranteed_datapipes_determinism', + 'non_deterministic', + 'random_split', + 'runtime_validation', + 'runtime_validation_disabled'] + +# Please keep this list sorted +assert __all__ == sorted(__all__) diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/__pycache__/backward_compatibility.cpython-310.pyc 
b/moondream/lib/python3.10/site-packages/torch/utils/data/__pycache__/backward_compatibility.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a286b5c5fd60d3ddf04fcee8e13cb4e46a22ed74 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/utils/data/__pycache__/backward_compatibility.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/__pycache__/dataloader.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/utils/data/__pycache__/dataloader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ef9f052fea1ce0769e602db912b1425b52e5191 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/utils/data/__pycache__/dataloader.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/__init__.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..788d842a42e89b64030eaa3fe13c1073e8bbeed8 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/collate.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/collate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..391d1ace4c9ec2a53e74197706e910e20fa7c14c Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/collate.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/pin_memory.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/pin_memory.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..5fdd4447a0b24be9c641f08a71aed8e04060f6e2 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/pin_memory.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/signal_handling.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/signal_handling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63e0ff0dbe2bd9c36fe9172fe9daf03611a089ec Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/signal_handling.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/worker.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/worker.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28bd681b88b5ac8423109208afbbdb04c894c636 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/worker.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/_utils/signal_handling.py b/moondream/lib/python3.10/site-packages/torch/utils/data/_utils/signal_handling.py new file mode 100644 index 0000000000000000000000000000000000000000..da8f3780bed253e39a055345febfaad82035b0ed --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/utils/data/_utils/signal_handling.py @@ -0,0 +1,72 @@ +r"""Signal handling for multiprocessing data loading. + +NOTE [ Signal handling in multiprocessing data loading ] + +In cases like DataLoader, if a worker process dies due to bus error/segfault +or just hang, the main process will hang waiting for data. This is difficult +to avoid on PyTorch side as it can be caused by limited shm, or other +libraries users call in the workers. 
In this file and `DataLoader.cpp`, we make +our best effort to provide some error message to users when such unfortunate +events happen. + +When a _BaseDataLoaderIter starts worker processes, their pids are registered in a +defined in `DataLoader.cpp`: id(_BaseDataLoaderIter) => Collection[ Worker pids ] +via `_set_worker_pids`. + +When an error happens in a worker process, the main process received a SIGCHLD, +and Python will eventually call the handler registered below +(in `_set_SIGCHLD_handler`). In the handler, the `_error_if_any_worker_fails` +call checks all registered worker pids and raise proper error message to +prevent main process from hanging waiting for data from worker. + +Additionally, at the beginning of each worker's `_utils.worker._worker_loop`, +`_set_worker_signal_handlers` is called to register critical signal handlers +(e.g., for SIGSEGV, SIGBUS, SIGFPE, SIGTERM) in C, which just prints an error +message to stderr before triggering the default handler. So a message will also +be printed from the worker process when it is killed by such signals. + +See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for the reasoning of +this signal handling design and other mechanism we implement to make our +multiprocessing data loading robust to errors. +""" + +import signal +import threading +from . import IS_WINDOWS + +# Some of the following imported functions are not used in this file, but are to +# be used `_utils.signal_handling.XXXXX`. +from torch._C import _set_worker_pids, _remove_worker_pids # noqa: F401 +from torch._C import _error_if_any_worker_fails, _set_worker_signal_handlers # noqa: F401 + +_SIGCHLD_handler_set = False +r"""Whether SIGCHLD handler is set for DataLoader worker failures. 
def _set_SIGCHLD_handler():
    """Install, at most once per process, a SIGCHLD handler that raises a
    descriptive error when any registered DataLoader worker dies, instead of
    letting the main process hang waiting for data."""
    # Windows doesn't support SIGCHLD handler
    if IS_WINDOWS:
        return
    # can't set signal in child threads
    if not isinstance(threading.current_thread(), threading._MainThread):  # type: ignore[attr-defined]
        return
    global _SIGCHLD_handler_set
    if _SIGCHLD_handler_set:
        return
    prev_handler = signal.getsignal(signal.SIGCHLD)
    if not callable(prev_handler):
        # This doesn't catch the default handler, but the SIGCHLD default
        # handler is a no-op anyway.
        prev_handler = None

    def handler(signum, frame):
        # This call uses `waitid` with WNOHANG from the C side, so Python can
        # still get and update the process status successfully afterwards.
        _error_if_any_worker_fails()
        if prev_handler is not None:
            assert callable(prev_handler)
            prev_handler(signum, frame)

    signal.signal(signal.SIGCHLD, handler)
    _SIGCHLD_handler_set = True
import signal_handling, MP_STATUS_CHECK_INTERVAL, IS_WINDOWS, HAS_NUMPY +if TYPE_CHECKING: + from torch.utils.data import Dataset + +if IS_WINDOWS: + import ctypes + from ctypes.wintypes import DWORD, BOOL, HANDLE + + # On Windows, the parent ID of the worker process remains unchanged when the manager process + # is gone, and the only way to check it through OS is to let the worker have a process handle + # of the manager and ask if the process status has changed. + class ManagerWatchdog: + def __init__(self): + self.manager_pid = os.getppid() + + # mypy cannot detect this code is windows only + self.kernel32 = ctypes.WinDLL('kernel32', use_last_error=True) # type: ignore[attr-defined] + self.kernel32.OpenProcess.argtypes = (DWORD, BOOL, DWORD) + self.kernel32.OpenProcess.restype = HANDLE + self.kernel32.WaitForSingleObject.argtypes = (HANDLE, DWORD) + self.kernel32.WaitForSingleObject.restype = DWORD + + # Value obtained from https://msdn.microsoft.com/en-us/library/ms684880.aspx + SYNCHRONIZE = 0x00100000 + self.manager_handle = self.kernel32.OpenProcess(SYNCHRONIZE, 0, self.manager_pid) + + if not self.manager_handle: + raise ctypes.WinError(ctypes.get_last_error()) # type: ignore[attr-defined] + + self.manager_dead = False + + def is_alive(self): + if not self.manager_dead: + # Value obtained from https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032.aspx + self.manager_dead = self.kernel32.WaitForSingleObject(self.manager_handle, 0) == 0 + return not self.manager_dead +else: + class ManagerWatchdog: # type: ignore[no-redef] + def __init__(self): + self.manager_pid = os.getppid() + self.manager_dead = False + + def is_alive(self): + if not self.manager_dead: + self.manager_dead = os.getppid() != self.manager_pid + return not self.manager_dead + +_worker_info: Optional["WorkerInfo"] = None + + +class WorkerInfo: + id: int + num_workers: int + seed: int + dataset: 'Dataset' + __initialized = False + + def __init__(self, **kwargs): + for k, v in 
kwargs.items(): + setattr(self, k, v) + self.__keys = tuple(kwargs.keys()) + self.__initialized = True + + def __setattr__(self, key, val): + if self.__initialized: + raise RuntimeError(f"Cannot assign attributes to {self.__class__.__name__} objects") + return super().__setattr__(key, val) + + def __repr__(self): + items = [] + for k in self.__keys: + items.append(f'{k}={getattr(self, k)}') + return f"{self.__class__.__name__}({', '.join(items)})" + + +def get_worker_info() -> Optional[WorkerInfo]: + r"""Returns the information about the current + :class:`~torch.utils.data.DataLoader` iterator worker process. + + When called in a worker, this returns an object guaranteed to have the + following attributes: + + * :attr:`id`: the current worker id. + * :attr:`num_workers`: the total number of workers. + * :attr:`seed`: the random seed set for the current worker. This value is + determined by main process RNG and the worker id. See + :class:`~torch.utils.data.DataLoader`'s documentation for more details. + * :attr:`dataset`: the copy of the dataset object in **this** process. Note + that this will be a different object in a different process than the one + in the main process. + + When called in the main process, this returns ``None``. + + .. note:: + When used in a :attr:`worker_init_fn` passed over to + :class:`~torch.utils.data.DataLoader`, this method can be useful to + set up each worker process differently, for instance, using ``worker_id`` + to configure the ``dataset`` object to only read a specific fraction of a + sharded dataset, or use ``seed`` to seed other libraries used in dataset + code. 
+ """ + return _worker_info + + +r"""Dummy class used to signal the end of an IterableDataset""" +@dataclass(frozen=True) +class _IterableDatasetStopIteration: + worker_id: int + +r"""Dummy class used to resume the fetching when worker reuse is enabled""" +@dataclass(frozen=True) +class _ResumeIteration: + seed: Optional[int] = None + +# The function `_generate_state` is adapted from `numpy.random.SeedSequence` +# from https://github.com/numpy/numpy/blob/main/numpy/random/bit_generator.pyx +# It's MIT licensed, here is the copyright: + +# Copyright (c) 2015 Melissa E. O'Neill +# Copyright (c) 2019 NumPy Developers +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# This function generates an array of int32 as the seed for +# `numpy.random`, in order to prevent state collision due to same +# seed and algorithm for `numpy.random` and `random` modules. 
+# TODO: Implement `SeedSequence` like object for `torch.random` +def _generate_state(base_seed, worker_id): + INIT_A = 0x43b0d7e5 + MULT_A = 0x931e8875 + INIT_B = 0x8b51f9dd + MULT_B = 0x58f38ded + MIX_MULT_L = 0xca01f9dd + MIX_MULT_R = 0x4973f715 + XSHIFT = 4 * 8 // 2 + MASK32 = 0xFFFFFFFF + + entropy = [worker_id, base_seed & MASK32, base_seed >> 32, 0] + pool = [0] * 4 + + hash_const_A = INIT_A + + def hash(value): + nonlocal hash_const_A + value = (value ^ hash_const_A) & MASK32 + hash_const_A = (hash_const_A * MULT_A) & MASK32 + value = (value * hash_const_A) & MASK32 + value = (value ^ (value >> XSHIFT)) & MASK32 + return value + + def mix(x, y): + result_x = (MIX_MULT_L * x) & MASK32 + result_y = (MIX_MULT_R * y) & MASK32 + result = (result_x - result_y) & MASK32 + result = (result ^ (result >> XSHIFT)) & MASK32 + return result + + # Add in the entropy to the pool. + for i in range(len(pool)): + pool[i] = hash(entropy[i]) + + # Mix all bits together so late bits can affect earlier bits. + for i_src in range(len(pool)): + for i_dst in range(len(pool)): + if i_src != i_dst: + pool[i_dst] = mix(pool[i_dst], hash(pool[i_src])) + + hash_const_B = INIT_B + state = [] + for i_dst in range(4): + data_val = pool[i_dst] + data_val = (data_val ^ hash_const_B) & MASK32 + hash_const_B = (hash_const_B * MULT_B) & MASK32 + data_val = (data_val * hash_const_B) & MASK32 + data_val = (data_val ^ (data_val >> XSHIFT)) & MASK32 + state.append(data_val) + return state + +def _worker_loop(dataset_kind, dataset, index_queue, data_queue, done_event, + auto_collation, collate_fn, drop_last, base_seed, init_fn, worker_id, + num_workers, persistent_workers, shared_seed): + # See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the + # logic of this function. + + try: + # Initialize C side signal handlers for SIGBUS and SIGSEGV. 
Python signal + # module's handlers are executed after Python returns from C low-level + # handlers, likely when the same fatal signal had already happened + # again. + # https://docs.python.org/3/library/signal.html#execution-of-python-signal-handlers + signal_handling._set_worker_signal_handlers() + + torch.set_num_threads(1) + seed = base_seed + worker_id + random.seed(seed) + torch.manual_seed(seed) + if HAS_NUMPY: + np_seed = _generate_state(base_seed, worker_id) + import numpy as np + np.random.seed(np_seed) + + from torch.utils.data import IterDataPipe + from torch.utils.data.graph_settings import apply_random_seed + + shared_rng = torch.Generator() + if isinstance(dataset, IterDataPipe): + assert shared_seed is not None + shared_rng.manual_seed(shared_seed) + dataset = apply_random_seed(dataset, shared_rng) + + global _worker_info + _worker_info = WorkerInfo(id=worker_id, num_workers=num_workers, + seed=seed, dataset=dataset) + + from torch.utils.data import _DatasetKind + + init_exception = None + + try: + if init_fn is not None: + init_fn(worker_id) + + fetcher = _DatasetKind.create_fetcher(dataset_kind, dataset, auto_collation, collate_fn, drop_last) + except Exception: + init_exception = ExceptionWrapper( + where=f"in DataLoader worker process {worker_id}") + + # When using Iterable mode, some worker can exit earlier than others due + # to the IterableDataset behaving differently for different workers. + # When such things happen, an `_IterableDatasetStopIteration` object is + # sent over to the main process with the ID of this worker, so that the + # main process won't send more tasks to this worker, and will send + # `None` to this worker to properly exit it. + # + # Note that we cannot set `done_event` from a worker as it is shared + # among all processes. Instead, we set the `iteration_end` flag to + # signify that the iterator is exhausted. 
When either `done_event` or + # `iteration_end` is set, we skip all processing step and just wait for + # `None`. + iteration_end = False + + watchdog = ManagerWatchdog() + + while watchdog.is_alive(): + try: + r = index_queue.get(timeout=MP_STATUS_CHECK_INTERVAL) + except queue.Empty: + continue + if isinstance(r, _ResumeIteration): + # Acknowledge the main process + data_queue.put((r, None)) + iteration_end = False + + if isinstance(dataset, IterDataPipe): + assert r.seed is not None + shared_rng.manual_seed(r.seed) + dataset = apply_random_seed(dataset, shared_rng) + + # Recreate the fetcher for worker-reuse policy + fetcher = _DatasetKind.create_fetcher( + dataset_kind, dataset, auto_collation, collate_fn, drop_last) + continue + elif r is None: + # Received the final signal + assert done_event.is_set() or iteration_end + break + elif done_event.is_set() or iteration_end: + # `done_event` is set. But I haven't received the final signal + # (None) yet. I will keep continuing until get it, and skip the + # processing steps. + continue + idx, index = r + data: Union[_IterableDatasetStopIteration, ExceptionWrapper] + if init_exception is not None: + data = init_exception + init_exception = None + else: + try: + data = fetcher.fetch(index) # type: ignore[possibly-undefined] + except Exception as e: + if isinstance(e, StopIteration) and dataset_kind == _DatasetKind.Iterable: + data = _IterableDatasetStopIteration(worker_id) + # Set `iteration_end` + # (1) to save future `next(...)` calls, and + # (2) to avoid sending multiple `_IterableDatasetStopIteration`s. + iteration_end = True + else: + # It is important that we don't store exc_info in a variable. + # `ExceptionWrapper` does the correct thing. 
+ # See NOTE [ Python Traceback Reference Cycle Problem ] + data = ExceptionWrapper( + where=f"in DataLoader worker process {worker_id}") + data_queue.put((idx, data)) + del data, idx, index, r # save memory + except KeyboardInterrupt: + # Main process will raise KeyboardInterrupt anyways. + pass + if done_event.is_set(): + data_queue.cancel_join_thread() + data_queue.close() diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/backward_compatibility.py b/moondream/lib/python3.10/site-packages/torch/utils/data/backward_compatibility.py new file mode 100644 index 0000000000000000000000000000000000000000..be97f016a0917a771970843a4ba70deb68cdd60d --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/utils/data/backward_compatibility.py @@ -0,0 +1,5 @@ +import warnings + +def worker_init_fn(worker_id): + warnings.warn("Usage of backward_compatibility.worker_init_fn is deprecated" + " as DataLoader automatically applies sharding in every worker") diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/dataloader.py b/moondream/lib/python3.10/site-packages/torch/utils/data/dataloader.py new file mode 100644 index 0000000000000000000000000000000000000000..f18bb602b50da3a23f9521ac003efbdeade2794d --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/utils/data/dataloader.py @@ -0,0 +1,1479 @@ +r"""Definition of the DataLoader and associated iterators that subclass _BaseDataLoaderIter. + +To support these two classes, in `./_utils` we define many utility methods and +functions to be run in multiprocessing. E.g., the data loading worker loop is +in `./_utils/worker.py`. 
+""" + +import functools +import itertools +import logging +import os +import queue +import threading +import warnings + +from typing import Any, Callable, Iterable, TypeVar, Generic, List, Optional, Union + +import multiprocessing as python_multiprocessing +import torch +import torch.distributed as dist +import torch.multiprocessing as multiprocessing +import torch.utils.data.graph_settings + +from torch._utils import ExceptionWrapper + +from . import ( + IterDataPipe, + MapDataPipe, + IterableDataset, + Sampler, + SequentialSampler, + RandomSampler, + BatchSampler, + Dataset,) + +from torch.utils.data.datapipes.datapipe import _IterDataPipeSerializationWrapper, _MapDataPipeSerializationWrapper + +from . import _utils + +__all__ = [ + "DataLoader", + "get_worker_info", + "default_collate", + "default_convert", +] + +T_co = TypeVar('T_co', covariant=True) +T = TypeVar('T') +_worker_init_fn_t = Callable[[int], None] + +# Ideally we would parameterize `DataLoader` by the return type of `collate_fn`, but there is currently no way to have that +# type parameter set to a default value if the user doesn't pass in a custom 'collate_fn'. +# See https://github.com/python/mypy/issues/3737. +_collate_fn_t = Callable[[List[T]], Any] + + +# These functions used to be defined in this file. However, it was moved to +# _utils/collate.py. Although it is rather hard to access this from user land +# (one has to explicitly directly `import torch.utils.data.dataloader`), there +# probably is user code out there using it. This aliasing maintains BC in this +# aspect. 
+default_collate: _collate_fn_t = _utils.collate.default_collate +default_convert = _utils.collate.default_convert + +get_worker_info = _utils.worker.get_worker_info + +logger = logging.getLogger(__name__) + + +class _DatasetKind: + Map = 0 + Iterable = 1 + + @staticmethod + def create_fetcher(kind, dataset, auto_collation, collate_fn, drop_last): + if kind == _DatasetKind.Map: + return _utils.fetch._MapDatasetFetcher(dataset, auto_collation, collate_fn, drop_last) + else: + return _utils.fetch._IterableDatasetFetcher(dataset, auto_collation, collate_fn, drop_last) + + +class _InfiniteConstantSampler(Sampler): + r"""Analogous to ``itertools.repeat(None, None)``. + + Used as sampler for :class:`~torch.utils.data.IterableDataset`. + """ + + def __iter__(self): + while True: + yield None + + +def _get_distributed_settings(): + if dist.is_available() and dist.is_initialized(): + return dist.get_world_size(), dist.get_rank() + else: + return 1, 0 + + +def _sharding_worker_init_fn(worker_init_fn, world_size, rank_id, worker_id): + global_worker_id = worker_id + info = torch.utils.data.get_worker_info() + assert info is not None + total_workers = info.num_workers + datapipe = info.dataset + assert isinstance(datapipe, (IterDataPipe, MapDataPipe)) + # To distribute elements across distributed process evenly, we should shard data on distributed + # processes first then shard on worker processes + total_workers *= world_size + global_worker_id = global_worker_id * world_size + rank_id + # For BC, use default SHARDING_PRIORITIES + torch.utils.data.graph_settings.apply_sharding(datapipe, total_workers, global_worker_id) + if worker_init_fn is not None: + worker_init_fn(worker_id) + + +def _share_dist_seed(generator, pg): + _shared_seed = torch.empty((), dtype=torch.int64).random_(generator=generator) + if isinstance(pg, dist.ProcessGroup): + dist.broadcast(_shared_seed, src=0, group=pg) + return _shared_seed.item() + + +class DataLoader(Generic[T_co]): + r""" + Data loader 
combines a dataset and a sampler, and provides an iterable over the given dataset. + + The :class:`~torch.utils.data.DataLoader` supports both map-style and + iterable-style datasets with single- or multi-process loading, customizing + loading order and optional automatic batching (collation) and memory pinning. + + See :py:mod:`torch.utils.data` documentation page for more details. + + Args: + dataset (Dataset): dataset from which to load the data. + batch_size (int, optional): how many samples per batch to load + (default: ``1``). + shuffle (bool, optional): set to ``True`` to have the data reshuffled + at every epoch (default: ``False``). + sampler (Sampler or Iterable, optional): defines the strategy to draw + samples from the dataset. Can be any ``Iterable`` with ``__len__`` + implemented. If specified, :attr:`shuffle` must not be specified. + batch_sampler (Sampler or Iterable, optional): like :attr:`sampler`, but + returns a batch of indices at a time. Mutually exclusive with + :attr:`batch_size`, :attr:`shuffle`, :attr:`sampler`, + and :attr:`drop_last`. + num_workers (int, optional): how many subprocesses to use for data + loading. ``0`` means that the data will be loaded in the main process. + (default: ``0``) + collate_fn (Callable, optional): merges a list of samples to form a + mini-batch of Tensor(s). Used when using batched loading from a + map-style dataset. + pin_memory (bool, optional): If ``True``, the data loader will copy Tensors + into device/CUDA pinned memory before returning them. If your data elements + are a custom type, or your :attr:`collate_fn` returns a batch that is a custom type, + see the example below. + drop_last (bool, optional): set to ``True`` to drop the last incomplete batch, + if the dataset size is not divisible by the batch size. If ``False`` and + the size of dataset is not divisible by the batch size, then the last batch + will be smaller. 
(default: ``False``) + timeout (numeric, optional): if positive, the timeout value for collecting a batch + from workers. Should always be non-negative. (default: ``0``) + worker_init_fn (Callable, optional): If not ``None``, this will be called on each + worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as + input, after seeding and before data loading. (default: ``None``) + multiprocessing_context (str or multiprocessing.context.BaseContext, optional): If + ``None``, the default `multiprocessing context`_ of your operating system will + be used. (default: ``None``) + generator (torch.Generator, optional): If not ``None``, this RNG will be used + by RandomSampler to generate random indexes and multiprocessing to generate + ``base_seed`` for workers. (default: ``None``) + prefetch_factor (int, optional, keyword-only arg): Number of batches loaded + in advance by each worker. ``2`` means there will be a total of + 2 * num_workers batches prefetched across all workers. (default value depends + on the set value for num_workers. If value of num_workers=0 default is ``None``. + Otherwise, if value of ``num_workers > 0`` default is ``2``). + persistent_workers (bool, optional): If ``True``, the data loader will not shut down + the worker processes after a dataset has been consumed once. This allows to + maintain the workers `Dataset` instances alive. (default: ``False``) + pin_memory_device (str, optional): the device to :attr:`pin_memory` to if ``pin_memory`` is + ``True``. + + + .. warning:: If the ``spawn`` start method is used, :attr:`worker_init_fn` + cannot be an unpicklable object, e.g., a lambda function. See + :ref:`multiprocessing-best-practices` on more details related + to multiprocessing in PyTorch. + + .. warning:: ``len(dataloader)`` heuristic is based on the length of the sampler used. 
+ When :attr:`dataset` is an :class:`~torch.utils.data.IterableDataset`, + it instead returns an estimate based on ``len(dataset) / batch_size``, with proper + rounding depending on :attr:`drop_last`, regardless of multi-process loading + configurations. This represents the best guess PyTorch can make because PyTorch + trusts user :attr:`dataset` code in correctly handling multi-process + loading to avoid duplicate data. + + However, if sharding results in multiple workers having incomplete last batches, + this estimate can still be inaccurate, because (1) an otherwise complete batch can + be broken into multiple ones and (2) more than one batch worth of samples can be + dropped when :attr:`drop_last` is set. Unfortunately, PyTorch can not detect such + cases in general. + + See `Dataset Types`_ for more details on these two types of datasets and how + :class:`~torch.utils.data.IterableDataset` interacts with + `Multi-process data loading`_. + + .. warning:: See :ref:`reproducibility`, and :ref:`dataloader-workers-random-seed`, and + :ref:`data-loading-randomness` notes for random seed related questions. + + .. 
_multiprocessing context: + https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods + """ + + dataset: Dataset[T_co] + batch_size: Optional[int] + num_workers: int + pin_memory: bool + drop_last: bool + timeout: float + sampler: Union[Sampler, Iterable] + pin_memory_device: str + prefetch_factor: Optional[int] + _iterator : Optional['_BaseDataLoaderIter'] + __initialized = False + + def __init__(self, dataset: Dataset[T_co], batch_size: Optional[int] = 1, + shuffle: Optional[bool] = None, sampler: Union[Sampler, Iterable, None] = None, + batch_sampler: Union[Sampler[List], Iterable[List], None] = None, + num_workers: int = 0, collate_fn: Optional[_collate_fn_t] = None, + pin_memory: bool = False, drop_last: bool = False, + timeout: float = 0, worker_init_fn: Optional[_worker_init_fn_t] = None, + multiprocessing_context=None, generator=None, + *, prefetch_factor: Optional[int] = None, + persistent_workers: bool = False, + pin_memory_device: str = ""): + torch._C._log_api_usage_once("python.data_loader") + + if num_workers < 0: + raise ValueError('num_workers option should be non-negative; ' + 'use num_workers=0 to disable multiprocessing.') + + if timeout < 0: + raise ValueError('timeout option should be non-negative') + + if num_workers == 0 and prefetch_factor is not None: + raise ValueError('prefetch_factor option could only be specified in multiprocessing.' 
+ 'let num_workers > 0 to enable multiprocessing, otherwise set prefetch_factor to None.') + elif num_workers > 0 and prefetch_factor is None: + prefetch_factor = 2 + elif prefetch_factor is not None and prefetch_factor < 0: + raise ValueError('prefetch_factor option should be non-negative') + + if persistent_workers and num_workers == 0: + raise ValueError('persistent_workers option needs num_workers > 0') + + self.dataset = dataset + self.num_workers = num_workers + self.prefetch_factor = prefetch_factor + self.pin_memory = pin_memory + self.pin_memory_device = pin_memory_device + self.timeout = timeout + self.worker_init_fn = worker_init_fn + self.multiprocessing_context = multiprocessing_context + + # Adds forward compatibilities so classic DataLoader can work with DataPipes: + # _DataPipeSerializationWrapper container makes it easier to serialize without redefining pickler + if isinstance(self.dataset, IterDataPipe): + self.dataset = _IterDataPipeSerializationWrapper(self.dataset) + elif isinstance(self.dataset, MapDataPipe): + self.dataset = _MapDataPipeSerializationWrapper(self.dataset) + + # Arg-check dataset related before checking samplers because we want to + # tell users that iterable-style datasets are incompatible with custom + # samplers first, so that they don't learn that this combo doesn't work + # after spending time fixing the custom sampler errors. + if isinstance(dataset, IterableDataset): + self._dataset_kind = _DatasetKind.Iterable + # NOTE [ Custom Samplers and IterableDataset ] + # + # `IterableDataset` does not support custom `batch_sampler` or + # `sampler` since the key is irrelevant (unless we support + # generator-style dataset one day...). + # + # For `sampler`, we always create a dummy sampler. 
This is an + # infinite sampler even when the dataset may have an implemented + # finite `__len__` because in multi-process data loading, naive + # settings will return duplicated data (which may be desired), and + # thus using a sampler with length matching that of dataset will + # cause data lost (you may have duplicates of the first couple + # batches, but never see anything afterwards). Therefore, + # `Iterabledataset` always uses an infinite sampler, an instance of + # `_InfiniteConstantSampler` defined above. + # + # A custom `batch_sampler` essentially only controls the batch size. + # However, it is unclear how useful it would be since an iterable-style + # dataset can handle that within itself. Moreover, it is pointless + # in multi-process data loading as the assignment order of batches + # to workers is an implementation detail so users can not control + # how to batchify each worker's iterable. Thus, we disable this + # option. If this turns out to be useful in future, we can re-enable + # this, and support custom samplers that specify the assignments to + # specific workers. + if isinstance(dataset, IterDataPipe): + if shuffle is not None: + dataset = torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle) + # We cannot check `shuffle is not None` here, since previously `shuffle=False` was the default. 
+ elif shuffle not in {False, None}: + raise ValueError( + f"DataLoader with IterableDataset: expected unspecified shuffle option, but got shuffle={shuffle}") + + if sampler is not None: + # See NOTE [ Custom Samplers and IterableDataset ] + raise ValueError( + f"DataLoader with IterableDataset: expected unspecified sampler option, but got sampler={sampler}") + elif batch_sampler is not None: + # See NOTE [ Custom Samplers and IterableDataset ] + raise ValueError( + "DataLoader with IterableDataset: expected unspecified " + f"batch_sampler option, but got batch_sampler={batch_sampler}") + else: + shuffle = bool(shuffle) + self._dataset_kind = _DatasetKind.Map + + + + if sampler is not None and shuffle: + raise ValueError('sampler option is mutually exclusive with ' + 'shuffle') + + if batch_sampler is not None: + # auto_collation with custom batch_sampler + if batch_size != 1 or shuffle or sampler is not None or drop_last: + raise ValueError('batch_sampler option is mutually exclusive ' + 'with batch_size, shuffle, sampler, and ' + 'drop_last') + batch_size = None + drop_last = False + elif batch_size is None: + # no auto_collation + if drop_last: + raise ValueError('batch_size=None option disables auto-batching ' + 'and is mutually exclusive with drop_last') + + if sampler is None: # give default samplers + if self._dataset_kind == _DatasetKind.Iterable: + # See NOTE [ Custom Samplers and IterableDataset ] + sampler = _InfiniteConstantSampler() + else: # map-style + if shuffle: + sampler = RandomSampler(dataset, generator=generator) # type: ignore[arg-type] + else: + sampler = SequentialSampler(dataset) # type: ignore[arg-type] + + if batch_size is not None and batch_sampler is None: + # auto_collation without custom batch_sampler + batch_sampler = BatchSampler(sampler, batch_size, drop_last) + + self.batch_size = batch_size + self.drop_last = drop_last + self.sampler = sampler + self.batch_sampler = batch_sampler + self.generator = generator + + if collate_fn is 
None: + if self._auto_collation: + collate_fn = _utils.collate.default_collate + else: + collate_fn = _utils.collate.default_convert + + self.collate_fn = collate_fn + self.persistent_workers = persistent_workers + + self.__initialized = True + self._IterableDataset_len_called = None # See NOTE [ IterableDataset and __len__ ] + + self._iterator = None + + self.check_worker_number_rationality() + + torch.set_vital('Dataloader', 'enabled', 'True') # type: ignore[attr-defined] + + def _get_iterator(self) -> '_BaseDataLoaderIter': + if self.num_workers == 0: + return _SingleProcessDataLoaderIter(self) + else: + self.check_worker_number_rationality() + return _MultiProcessingDataLoaderIter(self) + + @property + def multiprocessing_context(self): + return self.__multiprocessing_context + + @multiprocessing_context.setter + def multiprocessing_context(self, multiprocessing_context): + if multiprocessing_context is not None: + if self.num_workers > 0: + if isinstance(multiprocessing_context, str): + valid_start_methods = multiprocessing.get_all_start_methods() + if multiprocessing_context not in valid_start_methods: + raise ValueError( + 'multiprocessing_context option ' + f'should specify a valid start method in {valid_start_methods!r}, but got ' + f'multiprocessing_context={multiprocessing_context!r}') + multiprocessing_context = multiprocessing.get_context(multiprocessing_context) + + if not isinstance(multiprocessing_context, python_multiprocessing.context.BaseContext): + raise TypeError('multiprocessing_context option should be a valid context ' + 'object or a string specifying the start method, but got ' + f'multiprocessing_context={multiprocessing_context}') + else: + raise ValueError('multiprocessing_context can only be used with ' + 'multi-process loading (num_workers > 0), but got ' + f'num_workers={self.num_workers}') + + self.__multiprocessing_context = multiprocessing_context + + def __setattr__(self, attr, val): + if self.__initialized and attr in ( + 
'batch_size', 'batch_sampler', 'sampler', 'drop_last', 'dataset', 'persistent_workers'): + raise ValueError(f'{attr} attribute should not be set after {self.__class__.__name__} is initialized') + + super().__setattr__(attr, val) + + # We quote '_BaseDataLoaderIter' since it isn't defined yet and the definition can't be moved up + # since '_BaseDataLoaderIter' references 'DataLoader'. + def __iter__(self) -> '_BaseDataLoaderIter': + # When using a single worker the returned iterator should be + # created everytime to avoid resetting its state + # However, in the case of a multiple workers iterator + # the iterator is only created once in the lifetime of the + # DataLoader object so that workers can be reused + if self.persistent_workers and self.num_workers > 0: + if self._iterator is None: + self._iterator = self._get_iterator() + else: + self._iterator._reset(self) + return self._iterator + else: + return self._get_iterator() + + @property + def _auto_collation(self): + return self.batch_sampler is not None + + @property + def _index_sampler(self): + # The actual sampler used for generating indices for `_DatasetFetcher` + # (see _utils/fetch.py) to read data at each time. This would be + # `.batch_sampler` if in auto-collation mode, and `.sampler` otherwise. + # We can't change `.sampler` and `.batch_sampler` attributes for BC + # reasons. + if self._auto_collation: + return self.batch_sampler + else: + return self.sampler + + def __len__(self) -> int: + if self._dataset_kind == _DatasetKind.Iterable: + # NOTE [ IterableDataset and __len__ ] + # + # For `IterableDataset`, `__len__` could be inaccurate when one naively + # does multi-processing data loading, since the samples will be duplicated. + # However, no real use case should be actually using that behavior, so + # it should count as a user error. 
We should generally trust user + # code to do the proper thing (e.g., configure each replica differently + # in `__iter__`), and give us the correct `__len__` if they choose to + # implement it (this will still throw if the dataset does not implement + # a `__len__`). + # + # To provide a further warning, we track if `__len__` was called on the + # `DataLoader`, save the returned value in `self._len_called`, and warn + # if the iterator ends up yielding more than this number of samples. + + # Cannot statically verify that dataset is Sized + length = self._IterableDataset_len_called = len(self.dataset) # type: ignore[assignment, arg-type] + if self.batch_size is not None: # IterableDataset doesn't allow custom sampler or batch_sampler + from math import ceil + if self.drop_last: + length = length // self.batch_size + else: + length = ceil(length / self.batch_size) + return length + else: + return len(self._index_sampler) + + def check_worker_number_rationality(self): + # This function check whether the dataloader's worker number is rational based on + # current system's resource. Current rule is that if the number of workers this + # Dataloader will create is bigger than the number of logical cpus that is allowed to + # use, than we will pop up a warning to let user pay attention. + # + # eg. If current system has 2 physical CPUs with 16 cores each. And each core support 2 + # threads, then the total logical cpus here is 2 * 16 * 2 = 64. Let's say current + # DataLoader process can use half of them which is 32, then the rational max number of + # worker that initiated from this process is 32. + # Now, let's say the created DataLoader has num_works = 40, which is bigger than 32. + # So the warning message is triggered to notify the user to lower the worker number if + # necessary. + # + # + # [Note] Please note that this function repects `cpuset` only when os.sched_getaffinity is + # available (available in most of Linux system, but not OSX and Windows). 
+ # When os.sched_getaffinity is not available, os.cpu_count() is called instead, but + # it doesn't repect cpuset. + # We don't take threading into account since each worker process is single threaded + # at this time. + # + # We don't set any threading flags (eg. OMP_NUM_THREADS, MKL_NUM_THREADS, etc) + # other than `torch.set_num_threads` to 1 in the worker process, if the passing + # in functions use 3rd party modules that rely on those threading flags to determine + # how many thread to create (eg. numpy, etc), then it is caller's responsibility to + # set those flags correctly. + def _create_warning_msg(num_worker_suggest, num_worker_created, cpuset_checked): + + suggested_max_worker_msg = (( + "Our suggested max number of worker in current system is {}{}, which is smaller " + "than what this DataLoader is going to create.").format( + num_worker_suggest, + ("" if cpuset_checked else " (`cpuset` is not taken into account)")) + ) if num_worker_suggest is not None else ( + "DataLoader is not able to compute a suggested max number of worker in current system.") + + warn_msg = ( + "This DataLoader will create {} worker processes in total. 
{} " + "Please be aware that excessive worker creation might get DataLoader running slow or even freeze, " + "lower the worker number to avoid potential slowness/freeze if necessary.").format( + num_worker_created, + suggested_max_worker_msg) + return warn_msg + + if not self.num_workers or self.num_workers == 0: + return + + # try to compute a suggested max number of worker based on system's resource + max_num_worker_suggest = None + cpuset_checked = False + if hasattr(os, 'sched_getaffinity'): + try: + max_num_worker_suggest = len(os.sched_getaffinity(0)) + cpuset_checked = True + except Exception: + pass + if max_num_worker_suggest is None: + # os.cpu_count() could return Optional[int] + # get cpu count first and check None in order to satisfy mypy check + cpu_count = os.cpu_count() + if cpu_count is not None: + max_num_worker_suggest = cpu_count + + if max_num_worker_suggest is None: + warnings.warn(_create_warning_msg( + max_num_worker_suggest, + self.num_workers, + cpuset_checked)) + return + + if self.num_workers > max_num_worker_suggest: + warnings.warn(_create_warning_msg( + max_num_worker_suggest, + self.num_workers, + cpuset_checked)) + + +class _BaseDataLoaderIter: + def __init__(self, loader: DataLoader) -> None: + self._dataset = loader.dataset + self._shared_seed = None + self._pg = None + if isinstance(self._dataset, IterDataPipe): + if dist.is_available() and dist.is_initialized(): + self._pg = dist.new_group(backend="gloo") + self._shared_seed = _share_dist_seed(loader.generator, self._pg) + shared_rng = torch.Generator() + shared_rng.manual_seed(self._shared_seed) + self._dataset = torch.utils.data.graph_settings.apply_random_seed(self._dataset, shared_rng) + self._dataset_kind = loader._dataset_kind + self._IterableDataset_len_called = loader._IterableDataset_len_called + self._auto_collation = loader._auto_collation + self._drop_last = loader.drop_last + self._index_sampler = loader._index_sampler + self._num_workers = loader.num_workers + ws, 
rank = _get_distributed_settings() + self._world_size = ws + self._rank = rank + # for other backends, pin_memory_device need to set. if not set + # default behaviour is CUDA device. if pin_memory_device is selected + # and pin_memory is not set, the default behaviour false. + if (len(loader.pin_memory_device) == 0): + self._pin_memory = loader.pin_memory and torch.cuda.is_available() + self._pin_memory_device = None + else: + if not loader.pin_memory: + warn_msg = ("pin memory device is set and pin_memory flag is not used then device pinned memory won't be used" + "please set pin_memory to true, if you need to use the device pin memory") + warnings.warn(warn_msg) + + self._pin_memory = loader.pin_memory + self._pin_memory_device = loader.pin_memory_device + self._timeout = loader.timeout + self._collate_fn = loader.collate_fn + self._sampler_iter = iter(self._index_sampler) + self._base_seed = torch.empty((), dtype=torch.int64).random_(generator=loader.generator).item() + self._persistent_workers = loader.persistent_workers + self._num_yielded = 0 + self._profile_name = f"enumerate(DataLoader)#{self.__class__.__name__}.__next__" + + def __iter__(self) -> '_BaseDataLoaderIter': + return self + + def _reset(self, loader, first_iter=False): + self._sampler_iter = iter(self._index_sampler) + self._num_yielded = 0 + self._IterableDataset_len_called = loader._IterableDataset_len_called + if isinstance(self._dataset, IterDataPipe): + self._shared_seed = _share_dist_seed(loader.generator, self._pg) + shared_rng = torch.Generator() + shared_rng.manual_seed(self._shared_seed) + self._dataset = torch.utils.data.graph_settings.apply_random_seed(self._dataset, shared_rng) + + def _next_index(self): + return next(self._sampler_iter) # may raise StopIteration + + def _next_data(self): + raise NotImplementedError + + def __next__(self) -> Any: + with torch.autograd.profiler.record_function(self._profile_name): + if self._sampler_iter is None: + # 
TODO(https://github.com/pytorch/pytorch/issues/76750) + self._reset() # type: ignore[call-arg] + data = self._next_data() + self._num_yielded += 1 + if self._dataset_kind == _DatasetKind.Iterable and \ + self._IterableDataset_len_called is not None and \ + self._num_yielded > self._IterableDataset_len_called: + warn_msg = ("Length of IterableDataset {} was reported to be {} (when accessing len(dataloader)), but {} " + "samples have been fetched. ").format(self._dataset, self._IterableDataset_len_called, + self._num_yielded) + if self._num_workers > 0: + warn_msg += ("For multiprocessing data-loading, this could be caused by not properly configuring the " + "IterableDataset replica at each worker. Please see " + "https://pytorch.org/docs/stable/data.html#torch.utils.data.IterableDataset for examples.") + warnings.warn(warn_msg) + return data + + def __len__(self) -> int: + return len(self._index_sampler) + + def __getstate__(self): + # TODO: add limited pickling support for sharing an iterator + # across multiple threads for HOGWILD. 
+ # Probably the best way to do this is by moving the sample pushing + # to a separate thread and then just sharing the data queue + # but signalling the end is tricky without a non-blocking API + raise NotImplementedError("{} cannot be pickled", self.__class__.__name__) + + +class _SingleProcessDataLoaderIter(_BaseDataLoaderIter): + def __init__(self, loader): + super().__init__(loader) + assert self._timeout == 0 + assert self._num_workers == 0 + + # Adds forward compatibilities so classic DataLoader can work with DataPipes: + # Taking care of distributed sharding + if isinstance(self._dataset, (IterDataPipe, MapDataPipe)): + # For BC, use default SHARDING_PRIORITIES + torch.utils.data.graph_settings.apply_sharding(self._dataset, self._world_size, self._rank) + + self._dataset_fetcher = _DatasetKind.create_fetcher( + self._dataset_kind, self._dataset, self._auto_collation, self._collate_fn, self._drop_last) + + def _next_data(self): + index = self._next_index() # may raise StopIteration + data = self._dataset_fetcher.fetch(index) # may raise StopIteration + if self._pin_memory: + data = _utils.pin_memory.pin_memory(data, self._pin_memory_device) + return data + + +class _MultiProcessingDataLoaderIter(_BaseDataLoaderIter): + r"""Iterates once over the DataLoader's dataset, as specified by the sampler.""" + + # NOTE [ Data Loader Multiprocessing Shutdown Logic ] + # + # Preliminary: + # + # Our data model looks like this (queues are indicated with curly brackets): + # + # main process || + # | || + # {index_queue} || + # | || + # worker processes || DATA + # | || + # {worker_result_queue} || FLOW + # | || + # pin_memory_thread of main process || DIRECTION + # | || + # {data_queue} || + # | || + # data output \/ + # + # P.S. `worker_result_queue` and `pin_memory_thread` part may be omitted if + # `pin_memory=False`. + # + # + # Terminating multiprocessing logic requires very careful design. In + # particular, we need to make sure that + # + # 1. 
The iterator gracefully exits the workers when its last reference is + # gone or it is depleted. + # + # In this case, the workers should be gracefully exited because the + # main process may still need to continue to run, and we want cleaning + # up code in the workers to be executed (e.g., releasing GPU memory). + # Naturally, we implement the shutdown logic in `__del__` of + # DataLoaderIterator. + # + # We delay the discussion on the logic in this case until later. + # + # 2. The iterator exits the workers when the loader process and/or worker + # processes exits normally or with error. + # + # We set all workers and `pin_memory_thread` to have `daemon=True`. + # + # You may ask, why can't we make the workers non-daemonic, and + # gracefully exit using the same logic as we have in `__del__` when the + # iterator gets deleted (see 1 above)? + # + # First of all, `__del__` is **not** guaranteed to be called when + # interpreter exits. Even if it is called, by the time it executes, + # many Python core library resources may already be freed, and even + # simple things like acquiring an internal lock of a queue may hang. + # Therefore, in this case, we actually need to prevent `__del__` from + # being executed, and rely on the automatic termination of daemonic + # children. + # + # Thus, we register an `atexit` hook that sets a global flag + # `_utils.python_exit_status`. 
Since `atexit` hooks are executed in the + # reverse order of registration, we are guaranteed that this flag is + # set before library resources we use are freed (which, at least in + # CPython, is done via an `atexit` handler defined in + # `multiprocessing/util.py` + # https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/util.py#L320-L362 + # registered when an object requiring this mechanism is first + # created, e.g., `mp.Queue` + # https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/context.py#L100-L103 + # https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/queues.py#L29 + # ) + # + # So in `__del__`, we check if `_utils.python_exit_status` is set or + # `None` (freed), and perform no-op if so. + # + # However, simply letting library clean-up codes run can also be bad, + # because such codes (i.e., `multiprocessing.util._exit_function()`) + # include join putting threads for `mp.Queue`, which can be blocking. + # Hence, the main process putting threads are called with + # `cancel_join_thread` at creation. See later section + # [ 3b. A process won't hang when putting into a queue; ] + # for more details. + # + # Here are two example cases where library clean-up codes can run + # before `__del__` is called: + # + # 1. If we hold onto a reference to the iterator, it more often + # than not tries to do `multiprocessing` library cleaning before + # clearing the alive referenced objects (https://github.com/pytorch/pytorch/issues/48666) + # and thus prevents our cleaning-up code to run first. + # + # 2. A similar issue araises when a `DataLoader` is used in a subprocess. + # When a process ends, it shuts the all its daemonic children + # down with a SIGTERM (instead of joining them without a timeout). + # Simiarly for threads, but by a different mechanism. 
This fact, + # together with a few implementation details of multiprocessing, forces + # us to make workers daemonic. All of our problems arise when a + # DataLoader is used in a subprocess, and are caused by multiprocessing + # code which looks more or less like this: + # + # try: + # your_function_using_a_dataloader() + # finally: + # multiprocessing.util._exit_function() + # + # The joining/termination mentioned above happens inside + # `_exit_function()`. Now, if `your_function_using_a_dataloader()` + # throws, the stack trace stored in the exception will prevent the + # frame which uses `DataLoaderIter` to be freed. If the frame has any + # reference to the `DataLoaderIter` (e.g., in a method of the iter), + # its `__del__`, which starts the shutdown procedure, will not be + # called. That, in turn, means that workers aren't notified. Attempting + # to join in `_exit_function` will then result in a hang. + # + # For context, `_exit_function` is also registered as an `atexit` call. + # So it is unclear to me (@ssnl) why this is needed in a finally block. + # The code dates back to 2008 and there is no comment on the original + # PEP 371 or patch https://bugs.python.org/issue3050 (containing both + # the finally block and the `atexit` registration) that explains this. + # + # + # Finally, another choice is to just shutdown workers with logic in 1 + # above whenever we see an error in `next`. This isn't ideal because + # a. It prevents users from using try-catch to resume data loading. + # b. It doesn't prevent hanging if users have references to the + # iterator. + # + # 3. All processes exit if any of them die unexpectedly by fatal signals. + # + # As shown above, the workers are set as daemonic children of the main + # process. However, automatic cleaning-up of such child processes only + # happens if the parent process exits gracefully (e.g., not via fatal + # signals like SIGKILL). 
So we must ensure that each process will exit + # even the process that should send/receive data to/from it were + # killed, i.e., + # + # a. A process won't hang when getting from a queue. + # + # Even with carefully designed data dependencies (i.e., a `put()` + # always corresponding to a `get()`), hanging on `get()` can still + # happen when data in queue is corrupted (e.g., due to + # `cancel_join_thread` or unexpected exit). + # + # For child exit, we set a timeout whenever we try to get data + # from `data_queue`, and check the workers' status on each timeout + # and error. + # See `_DataLoaderiter._get_batch()` and + # `_DataLoaderiter._try_get_data()` for details. + # + # Additionally, for child exit on non-Windows platforms, we also + # register a SIGCHLD handler (which is supported on Windows) on + # the main process, which checks if any of the workers fail in the + # (Python) handler. This is more efficient and faster in detecting + # worker failures, compared to only using the above mechanism. + # See `DataLoader.cpp` and `_utils/signal_handling.py` for details. + # + # For `.get()` calls where the sender(s) is not the workers, we + # guard them with timeouts, and check the status of the sender + # when timeout happens: + # + in the workers, the `_utils.worker.ManagerWatchdog` class + # checks the status of the main process. + # + if `pin_memory=True`, when getting from `pin_memory_thread`, + # check `pin_memory_thread` status periodically until `.get()` + # returns or see that `pin_memory_thread` died. + # + # b. A process won't hang when putting into a queue; + # + # We use `mp.Queue` which has a separate background thread to put + # objects from an unbounded buffer array. The background thread is + # daemonic and usually automatically joined when the process + # *exits*. + # + # In case that the receiver has ended abruptly while + # reading from the pipe, the join will hang forever. 
The usual + # solution for this in Python is calling `q.cancel_join_thread`, + # which prevents automatically joining it when finalizing + # (exiting). + # + # Nonetheless, `cancel_join_thread` must only be called when the + # queue is **not** going to be read from or write into by another + # process, because it may hold onto a lock or leave corrupted data + # in the queue, leading other readers/writers to hang. + # + # Hence, + # + For worker processes, we only do so (for their output + # queues, i.e., `worker_result_queue`) before exiting. + # + For `pin_memory_thread`, its output queue `data_queue` is a + # `queue.Queue` that does blocking `put` if the queue is full. + # So there is no above problem, but as a result, in + # `_pin_memory_loop`, we do need to wrap the `put` in a loop + # that breaks not only upon success, but also when the main + # process stops reading, i.e., is shutting down. + # + For loader process, we `cancel_join_thread()` for all + # `_index_queues` because the whole purpose of workers and + # `pin_memory_thread` is to serve the loader process. If + # loader process is already exiting, we don't really care if + # the queues are corrupted. + # + # + # Now let's get back to 1: + # how we gracefully exit the workers when the last reference to the + # iterator is gone. + # + # To achieve this, we implement the following logic along with the design + # choices mentioned above: + # + # `workers_done_event`: + # A `multiprocessing.Event` shared among the main process and all worker + # processes. This is used to signal the workers that the iterator is + # shutting down. After it is set, they will not send processed data to + # queues anymore, and only wait for the final `None` before exiting. + # `done_event` isn't strictly needed. I.e., we can just check for `None` + # from the input queue, but it allows us to skip wasting resources + # processing data if we are already shutting down. 
+ # + # `pin_memory_thread_done_event`: + # A `threading.Event` for a similar purpose to that of + # `workers_done_event`, but is for the `pin_memory_thread`. The reason + # that separate events are needed is that `pin_memory_thread` reads from + # the output queue of the workers. But the workers, upon seeing that + # `workers_done_event` is set, only wants to see the final `None`, and is + # not required to flush all data in the output queue (e.g., it may call + # `cancel_join_thread` on that queue if its `IterableDataset` iterator + # happens to exhaust coincidentally, which is out of the control of the + # main process). Thus, since we will exit `pin_memory_thread` before the + # workers (see below), two separete events are used. + # + # NOTE: In short, the protocol is that the main process will set these + # `done_event`s and then the corresponding processes/threads a `None`, + # and that they may exit at any time after receiving the `None`. + # + # NOTE: Using `None` as the final signal is valid, since normal data will + # always be a 2-tuple with the 1st element being the index of the data + # transferred (different from dataset index/key), and the 2nd being + # either the dataset key or the data sample (depending on which part + # of the data model the queue is at). + # + # [ worker processes ] + # While loader process is alive: + # Get from `index_queue`. + # If get anything else, + # Check `workers_done_event`. + # If set, continue to next iteration + # i.e., keep getting until see the `None`, then exit. + # Otherwise, process data: + # If is fetching from an `IterableDataset` and the iterator + # is exhausted, send an `_IterableDatasetStopIteration` + # object to signal iteration end. The main process, upon + # receiving such an object, will send `None` to this + # worker and not use the corresponding `index_queue` + # anymore. 
+ # If timed out, + # No matter `workers_done_event` is set (still need to see `None`) + # or not, must continue to next iteration. + # (outside loop) + # If `workers_done_event` is set, (this can be False with `IterableDataset`) + # `data_queue.cancel_join_thread()`. (Everything is ending here: + # main process won't read from it; + # other workers will also call + # `cancel_join_thread`.) + # + # [ pin_memory_thread ] + # # No need to check main thread. If this thread is alive, the main loader + # # thread must be alive, because this thread is set as daemonic. + # While `pin_memory_thread_done_event` is not set: + # Get from `worker_result_queue`. + # If timed out, continue to get in the next iteration. + # Otherwise, process data. + # While `pin_memory_thread_done_event` is not set: + # Put processed data to `data_queue` (a `queue.Queue` with blocking put) + # If timed out, continue to put in the next iteration. + # Otherwise, break, i.e., continuing to the out loop. + # + # NOTE: we don't check the status of the main thread because + # 1. if the process is killed by fatal signal, `pin_memory_thread` + # ends. + # 2. in other cases, either the cleaning-up in __del__ or the + # automatic exit of daemonic thread will take care of it. + # This won't busy-wait either because `.get(timeout)` does not + # busy-wait. + # + # [ main process ] + # In the DataLoader Iter's `__del__` + # b. Exit `pin_memory_thread` + # i. Set `pin_memory_thread_done_event`. + # ii Put `None` in `worker_result_queue`. + # iii. Join the `pin_memory_thread`. + # iv. `worker_result_queue.cancel_join_thread()`. + # + # c. Exit the workers. + # i. Set `workers_done_event`. + # ii. Put `None` in each worker's `index_queue`. + # iii. Join the workers. + # iv. Call `.cancel_join_thread()` on each worker's `index_queue`. 
    #
    # NOTE: (c) is better placed after (b) because it may leave corrupted
    #       data in `worker_result_queue`, which `pin_memory_thread`
    #       reads from, in which case the `pin_memory_thread` can only
    #       happen at timing out, which is slow. Nonetheless, same thing
    #       happens if a worker is killed by signal at unfortunate times,
    #       but in other cases, we are better off having a non-corrupted
    #       `worker_result_queue` for `pin_memory_thread`.
    #
    # NOTE: If `pin_memory=False`, there is no `pin_memory_thread` and (b)
    #       can be omitted
    #
    # NB: `done_event`s isn't strictly needed. E.g., we can just check for
    #     `None` from `index_queue`, but it allows us to skip wasting resources
    #     processing indices already in `index_queue` if we are already shutting
    #     down.

    def __init__(self, loader):
        # Spawn worker processes, optionally the pin-memory thread, install
        # signal handling, then prime the prefetch pipeline via `_reset`.
        # Statement ORDER here is load-bearing (see NOTEs above); do not reorder.
        super().__init__(loader)

        self._prefetch_factor = loader.prefetch_factor

        assert self._num_workers > 0
        assert self._prefetch_factor > 0

        if loader.multiprocessing_context is None:
            multiprocessing_context = multiprocessing
        else:
            multiprocessing_context = loader.multiprocessing_context

        self._worker_init_fn = loader.worker_init_fn

        # Adds forward compatibilities so classic DataLoader can work with DataPipes:
        #   Additional worker init function will take care of sharding in MP and Distributed
        if isinstance(self._dataset, (IterDataPipe, MapDataPipe)):
            self._worker_init_fn = functools.partial(
                _sharding_worker_init_fn, self._worker_init_fn, self._world_size, self._rank)

        # No certainty which module multiprocessing_context is
        self._worker_result_queue = multiprocessing_context.Queue()  # type: ignore[var-annotated]
        self._worker_pids_set = False
        self._shutdown = False
        self._workers_done_event = multiprocessing_context.Event()

        self._index_queues = []
        self._workers = []
        for i in range(self._num_workers):
            # No certainty which module multiprocessing_context is
            index_queue = multiprocessing_context.Queue()  # type: ignore[var-annotated]
            # Need to `cancel_join_thread` here!
            # See sections (2) and (3b) above.
            index_queue.cancel_join_thread()
            w = multiprocessing_context.Process(
                target=_utils.worker._worker_loop,
                args=(self._dataset_kind, self._dataset, index_queue,
                      self._worker_result_queue, self._workers_done_event,
                      self._auto_collation, self._collate_fn, self._drop_last,
                      self._base_seed, self._worker_init_fn, i, self._num_workers,
                      self._persistent_workers, self._shared_seed))
            w.daemon = True
            # NB: Process.start() actually take some time as it needs to
            #     start a process and pass the arguments over via a pipe.
            #     Therefore, we only add a worker to self._workers list after
            #     it started, so that we do not call .join() if program dies
            #     before it starts, and __del__ tries to join but will get:
            #     AssertionError: can only join a started process.
            w.start()
            self._index_queues.append(index_queue)
            self._workers.append(w)

        if self._pin_memory:
            self._pin_memory_thread_done_event = threading.Event()

            # Queue is not type-annotated
            self._data_queue = queue.Queue()  # type: ignore[var-annotated]
            # Pick the device whose pinned-memory API the pin thread should use;
            # falls back to CUDA when no explicit pin_memory_device is given.
            if self._pin_memory_device == "xpu":
                current_device = torch.xpu.current_device()  # type: ignore[attr-defined]
            elif self._pin_memory_device == torch._C._get_privateuse1_backend_name():
                custom_device_mod = getattr(torch, torch._C._get_privateuse1_backend_name())
                current_device = custom_device_mod.current_device()
            else:
                current_device = torch.cuda.current_device()  # choose cuda for default
            pin_memory_thread = threading.Thread(
                target=_utils.pin_memory._pin_memory_loop,
                args=(self._worker_result_queue, self._data_queue,
                      current_device,
                      self._pin_memory_thread_done_event, self._pin_memory_device))
            pin_memory_thread.daemon = True
            pin_memory_thread.start()
            # Similar to workers (see comment above), we only register
            # pin_memory_thread once it is started.
            self._pin_memory_thread = pin_memory_thread
        else:
            self._data_queue = self._worker_result_queue  # type: ignore[assignment]

        # In some rare cases, persistent workers (daemonic processes)
        # would be terminated before `__del__` of iterator is invoked
        # when main process exits
        # It would cause failure when pin_memory_thread tries to read
        # corrupted data from worker_result_queue
        # atexit is used to shutdown thread and child processes in the
        # right sequence before main process exits
        if self._persistent_workers and self._pin_memory:
            import atexit
            for w in self._workers:
                atexit.register(_MultiProcessingDataLoaderIter._clean_up_worker, w)

        # .pid can be None only before process is spawned (not the case, so ignore)
        _utils.signal_handling._set_worker_pids(id(self), tuple(w.pid for w in self._workers))  # type: ignore[misc]
        _utils.signal_handling._set_SIGCHLD_handler()
        self._worker_pids_set = True
        self._reset(loader, first_iter=True)

    def _reset(self, loader, first_iter=False):
        # Reset per-epoch bookkeeping; for persistent workers (not first_iter)
        # also wake every worker with a `_ResumeIteration` sentinel and wait for
        # each acknowledgement before priming the prefetch loop.
        super()._reset(loader, first_iter)
        self._send_idx = 0  # idx of the next task to be sent to workers
        self._rcvd_idx = 0  # idx of the next task to be returned in __next__
        # information about data not yet yielded, i.e., tasks w/ indices in range [rcvd_idx, send_idx).
        # map: task idx => - (worker_id,)        if data isn't fetched (outstanding)
        #                  \ (worker_id, data)   if data is already fetched (out-of-order)
        self._task_info = {}
        self._tasks_outstanding = 0  # always equal to count(v for v in task_info.values() if len(v) == 1)
        # A list of booleans representing whether each worker still has work to
        # do, i.e., not having exhausted its iterable dataset object. It always
        # contains all `True`s if not using an iterable-style dataset
        # (i.e., if kind != Iterable).
        # Not that this indicates that a worker still has work to do *for this epoch*.
        # It does not mean that a worker is dead. In case of `_persistent_workers`,
        # the worker will be reset to available in the next epoch.
        self._workers_status = [True for i in range(self._num_workers)]
        # Reset the worker queue cycle so it resumes next epoch at worker 0
        self._worker_queue_idx_cycle = itertools.cycle(range(self._num_workers))
        # We resume the prefetching in case it was enabled
        if not first_iter:
            for idx in range(self._num_workers):
                self._index_queues[idx].put(_utils.worker._ResumeIteration(self._shared_seed))
            resume_iteration_cnt = self._num_workers
            while resume_iteration_cnt > 0:
                return_idx, return_data = self._get_data()
                if isinstance(return_idx, _utils.worker._ResumeIteration):
                    assert return_data is None
                    resume_iteration_cnt -= 1
        # prime the prefetch loop
        for _ in range(self._prefetch_factor * self._num_workers):
            self._try_put_index()

    def _try_get_data(self, timeout=_utils.MP_STATUS_CHECK_INTERVAL):
        # Tries to fetch data from `self._data_queue` once for a given timeout.
        # This can also be used as inner loop of fetching without timeout, with
        # the sender status as the loop condition.
        #
        # This raises a `RuntimeError` if any worker died unexpectedly. This error
        # can come from either the SIGCHLD handler in `_utils/signal_handling.py`
        # (only for non-Windows platforms), or the manual check below on errors
        # and timeouts.
        #
        # Returns a 2-tuple:
        #   (bool: whether successfully get data, any: data if successful else None)
        try:
            data = self._data_queue.get(timeout=timeout)
            return (True, data)
        except Exception as e:
            # At timeout and error, we manually check whether any worker has
            # failed. Note that this is the only mechanism for Windows to detect
            # worker failures.
            failed_workers = []
            for worker_id, w in enumerate(self._workers):
                if self._workers_status[worker_id] and not w.is_alive():
                    failed_workers.append(w)
                    self._mark_worker_as_unavailable(worker_id)
            if len(failed_workers) > 0:
                pids_str = ', '.join(str(w.pid) for w in failed_workers)
                raise RuntimeError(f'DataLoader worker (pid(s) {pids_str}) exited unexpectedly') from e
            if isinstance(e, queue.Empty):
                # Plain timeout with all workers alive: report "no data yet".
                return (False, None)
            import tempfile
            import errno
            try:
                # Raise an exception if we are this close to the FDs limit.
                # Apparently, trying to open only one file is not a sufficient
                # test.
                # See NOTE [ DataLoader on Linux and open files limit ]
                fds_limit_margin = 10
                fs = [tempfile.NamedTemporaryFile() for i in range(fds_limit_margin)]
            except OSError as e:
                if e.errno == errno.EMFILE:
                    raise RuntimeError(
                        "Too many open files. Communication with the"
                        " workers is no longer possible. Please increase the"
                        " limit using `ulimit -n` in the shell or change the"
                        " sharing strategy by calling"
                        " `torch.multiprocessing.set_sharing_strategy('file_system')`"
                        " at the beginning of your code") from None
                raise

# NOTE [ DataLoader on Linux and open files limit ]
#
# On Linux when DataLoader is used with multiprocessing we pass the data between
# the root process and the workers through SHM files. We remove those files from
# the filesystem as soon as they are created and keep them alive by
# passing around their file descriptors through AF_UNIX sockets. (See
# docs/source/multiprocessing.rst and 'Multiprocessing Technical Notes` in
# the wiki (https://github.com/pytorch/pytorch/wiki).)
#
# This sometimes leads us to exceeding the open files limit.
When that happens, +# and the offending file descriptor is coming over a socket, the `socket` Python +# package silently strips the file descriptor from the message, setting only the +# `MSG_CTRUNC` flag (which might be a bit misleading since the manpage says that +# it _indicates that some control data were discarded due to lack of space in +# the buffer for ancillary data_). This might reflect the C implementation of +# AF_UNIX sockets. +# +# This behaviour can be reproduced with the script and instructions at the +# bottom of this note. +# +# When that happens, the standard Python `multiprocessing` (and not +# `torch.multiprocessing`) raises a `RuntimeError: received 0 items of ancdata` +# +# Sometimes, instead of the FD being stripped, you may get an `OSError: +# Too many open files`, both in the script below and in DataLoader. However, +# this is rare and seems to be nondeterministic. +# +# +# #!/usr/bin/env python3 +# import sys +# import socket +# import os +# import array +# import shutil +# import socket +# +# +# if len(sys.argv) != 4: +# print("Usage: ", sys.argv[0], " tmp_dirname iteration (send|recv)") +# sys.exit(1) +# +# if __name__ == '__main__': +# dirname = sys.argv[1] +# sock_path = dirname + "/sock" +# iterations = int(sys.argv[2]) +# def dummy_path(i): +# return dirname + "/" + str(i) + ".dummy" +# +# +# if sys.argv[3] == 'send': +# while not os.path.exists(sock_path): +# pass +# client = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) +# client.connect(sock_path) +# for i in range(iterations): +# fd = os.open(dummy_path(i), os.O_WRONLY | os.O_CREAT) +# ancdata = array.array('i', [fd]) +# msg = bytes([i % 256]) +# print("Sending fd ", fd, " (iteration #", i, ")") +# client.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, ancdata)]) +# +# +# else: +# assert sys.argv[3] == 'recv' +# +# if os.path.exists(dirname): +# raise Exception("Directory exists") +# +# os.mkdir(dirname) +# +# print("Opening socket...") +# server = 
socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) +# server.bind(sock_path) +# +# print("Listening...") +# for i in range(iterations): +# a = array.array('i') +# msg, ancdata, flags, addr = server.recvmsg(1, socket.CMSG_SPACE(a.itemsize)) +# assert(len(ancdata) == 1) +# cmsg_level, cmsg_type, cmsg_data = ancdata[0] +# a.frombytes(cmsg_data) +# print("Received fd ", a[0], " (iteration #", i, ")") +# +# shutil.rmtree(dirname) +# +# Steps to reproduce: +# +# 1. Run two shells and set lower file descriptor limit in the receiving one: +# (shell1) ulimit -n 1020 +# (shell2) ulimit -n 1022 +# +# 2. Run the script above with the `recv` option in the first shell +# (shell1) ./test_socket.py sock_tmp 1017 recv +# +# 3. Run the script with the `send` option in the second shell: +# (shell2) ./test_socket.py sock_tmp 1017 send + + def _get_data(self): + # Fetches data from `self._data_queue`. + # + # We check workers' status every `MP_STATUS_CHECK_INTERVAL` seconds, + # which we achieve by running `self._try_get_data(timeout=MP_STATUS_CHECK_INTERVAL)` + # in a loop. This is the only mechanism to detect worker failures for + # Windows. For other platforms, a SIGCHLD handler is also used for + # worker failure detection. + # + # If `pin_memory=True`, we also need check if `pin_memory_thread` had + # died at timeouts. + if self._timeout > 0: + success, data = self._try_get_data(self._timeout) + if success: + return data + else: + raise RuntimeError(f'DataLoader timed out after {self._timeout} seconds') + elif self._pin_memory: + while self._pin_memory_thread.is_alive(): + success, data = self._try_get_data() + if success: + return data + else: + # while condition is false, i.e., pin_memory_thread died. + raise RuntimeError('Pin memory thread exited unexpectedly') + # In this case, `self._data_queue` is a `queue.Queue`,. But we don't + # need to call `.task_done()` because we don't use `.join()`. 
+ else: + while True: + success, data = self._try_get_data() + if success: + return data + + def _next_data(self): + while True: + # If the worker responsible for `self._rcvd_idx` has already ended + # and was unable to fulfill this task (due to exhausting an `IterableDataset`), + # we try to advance `self._rcvd_idx` to find the next valid index. + # + # This part needs to run in the loop because both the `self._get_data()` + # call and `_IterableDatasetStopIteration` check below can mark + # extra worker(s) as dead. + while self._rcvd_idx < self._send_idx: + info = self._task_info[self._rcvd_idx] + worker_id = info[0] + if len(info) == 2 or self._workers_status[worker_id]: # has data or is still active + break + del self._task_info[self._rcvd_idx] + self._rcvd_idx += 1 + else: + # no valid `self._rcvd_idx` is found (i.e., didn't break) + if not self._persistent_workers: + self._shutdown_workers() + raise StopIteration + + # Now `self._rcvd_idx` is the batch index we want to fetch + + # Check if the next sample has already been generated + if len(self._task_info[self._rcvd_idx]) == 2: + data = self._task_info.pop(self._rcvd_idx)[1] + return self._process_data(data) + + assert not self._shutdown and self._tasks_outstanding > 0 + idx, data = self._get_data() + self._tasks_outstanding -= 1 + if self._dataset_kind == _DatasetKind.Iterable: + # Check for _IterableDatasetStopIteration + if isinstance(data, _utils.worker._IterableDatasetStopIteration): + if self._persistent_workers: + self._workers_status[data.worker_id] = False + else: + self._mark_worker_as_unavailable(data.worker_id) + self._try_put_index() + continue + + if idx != self._rcvd_idx: + # store out-of-order samples + self._task_info[idx] += (data,) + else: + del self._task_info[idx] + return self._process_data(data) + + def _try_put_index(self): + assert self._tasks_outstanding < self._prefetch_factor * self._num_workers + + try: + index = self._next_index() + except StopIteration: + return + for _ in 
range(self._num_workers): # find the next active worker, if any + worker_queue_idx = next(self._worker_queue_idx_cycle) + if self._workers_status[worker_queue_idx]: + break + else: + # not found (i.e., didn't break) + return + + self._index_queues[worker_queue_idx].put((self._send_idx, index)) # type: ignore[possibly-undefined] + self._task_info[self._send_idx] = (worker_queue_idx,) + self._tasks_outstanding += 1 + self._send_idx += 1 + + def _process_data(self, data): + self._rcvd_idx += 1 + self._try_put_index() + if isinstance(data, ExceptionWrapper): + data.reraise() + return data + + def _mark_worker_as_unavailable(self, worker_id, shutdown=False): + # Mark a worker as having finished its work e.g., due to + # exhausting an `IterableDataset`. This should be used only when this + # `_MultiProcessingDataLoaderIter` is going to continue running. + + assert self._workers_status[worker_id] or (self._persistent_workers and shutdown) + + # Signal termination to that specific worker. + q = self._index_queues[worker_id] + # Indicate that no more data will be put on this queue by the current + # process. + q.put(None) + + # Note that we don't actually join the worker here, nor do we remove the + # worker's pid from C side struct because (1) joining may be slow, and + # (2) since we don't join, the worker may still raise error, and we + # prefer capturing those, rather than ignoring them, even though they + # are raised after the worker has finished its job. + # Joinning is deferred to `_shutdown_workers`, which it is called when + # all workers finish their jobs (e.g., `IterableDataset` replicas) or + # when this iterator is garbage collected. + + self._workers_status[worker_id] = False + + assert self._workers_done_event.is_set() == shutdown + + def _shutdown_workers(self): + # Called when shutting down this `_MultiProcessingDataLoaderIter`. + # See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on + # the logic of this function. 
+ if _utils is None or _utils.python_exit_status is True or _utils.python_exit_status is None: + # See (2) of the note. If Python is shutting down, do no-op. + return + # Normal exit when last reference is gone / iterator is depleted. + # See (1) and the second half of the note. + if not self._shutdown: + self._shutdown = True + try: + # Normal exit when last reference is gone / iterator is depleted. + # See (1) and the second half of the note. + + # Exit `pin_memory_thread` first because exiting workers may leave + # corrupted data in `worker_result_queue` which `pin_memory_thread` + # reads from. + if hasattr(self, '_pin_memory_thread'): + # Use hasattr in case error happens before we set the attribute. + self._pin_memory_thread_done_event.set() + # Send something to pin_memory_thread in case it is waiting + # so that it can wake up and check `pin_memory_thread_done_event` + self._worker_result_queue.put((None, None)) + self._pin_memory_thread.join() + self._worker_result_queue.cancel_join_thread() + self._worker_result_queue.close() + + # Exit workers now. + self._workers_done_event.set() + for worker_id in range(len(self._workers)): + # Get number of workers from `len(self._workers)` instead of + # `self._num_workers` in case we error before starting all + # workers. + # If we are using workers_status with persistent_workers + # we have to shut it down because the worker is paused + if self._persistent_workers or self._workers_status[worker_id]: + self._mark_worker_as_unavailable(worker_id, shutdown=True) + for w in self._workers: + # We should be able to join here, but in case anything went + # wrong, we set a timeout and if the workers fail to join, + # they are killed in the `finally` block. 
+ w.join(timeout=_utils.MP_STATUS_CHECK_INTERVAL) + for q in self._index_queues: + q.cancel_join_thread() + q.close() + finally: + # Even though all this function does is putting into queues that + # we have called `cancel_join_thread` on, weird things can + # happen when a worker is killed by a signal, e.g., hanging in + # `Event.set()`. So we need to guard this with SIGCHLD handler, + # and remove pids from the C side data structure only at the + # end. + # + # FIXME: Unfortunately, for Windows, we are missing a worker + # error detection mechanism here in this function, as it + # doesn't provide a SIGCHLD handler. + if self._worker_pids_set: + _utils.signal_handling._remove_worker_pids(id(self)) + self._worker_pids_set = False + for w in self._workers: + if w.is_alive(): + # Existing mechanisms try to make the workers exit + # peacefully, but in case that we unfortunately reach + # here, which we shouldn't, (e.g., pytorch/pytorch#39570), + # we kill the worker. + w.terminate() + + # staticmethod is used to remove reference to `_MultiProcessingDataLoaderIter` + @staticmethod + def _clean_up_worker(w): + try: + w.join(timeout=_utils.MP_STATUS_CHECK_INTERVAL) + finally: + if w.is_alive(): + w.terminate() + + def __del__(self): + self._shutdown_workers() diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/__init__.py b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f19389e21bfefff0ea2705680d0c133730cfa228 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/__init__.py @@ -0,0 +1,3 @@ +from . import iter +from . import map +from . 
import inspect
import functools
from enum import Enum

import torch.autograd


class _SnapshotState(Enum):
    r"""
    These are the snapshotting-related states that IterDataPipes can be in.

    `NotStarted` - allows you to restore a snapshot and create an iterator with reset
    `Restored` - cannot restore again, allows you to create an iterator without resetting the DataPipe
    `Iterating` - can restore, will reset if you create a new iterator
    """

    NotStarted = 0
    Restored = 1
    Iterating = 2


def _simplify_obj_name(obj) -> str:
    """Simplify the display strings of objects for the purpose of rendering within DataPipe error messages."""
    # Plain functions render as their bare name; everything else uses repr().
    return obj.__name__ if inspect.isfunction(obj) else repr(obj)


def _strip_datapipe_from_name(name: str) -> str:
    # Drop the DataPipe suffixes so profiler labels stay short.
    return name.replace("IterDataPipe", "").replace("MapDataPipe", "")


def _generate_input_args_string(obj):
    """Generate a string for the input arguments of an object."""
    sig = inspect.signature(obj.__class__)
    param_names = set(sig.parameters.keys())
    pairs = [(attr, _simplify_obj_name(val))
             for attr, val in inspect.getmembers(obj)
             if attr in param_names]
    return ', '.join(f'{attr}={val}' for attr, val in pairs)


def _generate_iterdatapipe_msg(datapipe, simplify_dp_name: bool = False):
    output_string = f"{datapipe.__class__.__name__}({_generate_input_args_string(datapipe)})"
    if simplify_dp_name:
        output_string = _strip_datapipe_from_name(output_string)
    return output_string


def _gen_invalid_iterdatapipe_msg(datapipe):
    return ("This iterator has been invalidated because another iterator has been created "
            f"from the same IterDataPipe: {_generate_iterdatapipe_msg(datapipe)}\n"
            "This may be caused multiple references to the same IterDataPipe. We recommend "
            "using `.fork()` if that is necessary.")


_feedback_msg = ("\nFor feedback regarding this single iterator per IterDataPipe constraint, feel free "
                 "to comment on this issue: https://github.com/pytorch/data/issues/45.")


def _check_iterator_valid(datapipe, iterator_id, next_method_exists=False) -> None:
    r"""
    Given an instance of a DataPipe and an iterator ID, check if the IDs match, and if not, raises an exception.

    In the case of ChildDataPipe, the ID gets compared to the one stored in `main_datapipe` as well.
    """
    if next_method_exists:
        # `IterDataPipe` defines both `__iter__` and `__next__`. Its
        # `_valid_iterator_id` must either never have been set (`None`) or
        # have been set by at most one iterator (`0`); anything else means
        # multiple iterators exist.
        if datapipe._valid_iterator_id is not None and datapipe._valid_iterator_id != 0:
            extra_msg = "\nNote that this exception is raised inside your IterDataPipe's a `__next__` method"
            raise RuntimeError(_gen_invalid_iterdatapipe_msg(datapipe) + extra_msg + _feedback_msg)
    elif hasattr(datapipe, "_is_child_datapipe") and datapipe._is_child_datapipe is True:
        if hasattr(datapipe, "_check_valid_iterator_id"):
            if not datapipe._check_valid_iterator_id(iterator_id):
                raise RuntimeError("This iterator has been invalidated, because a new iterator has been created "
                                   f"from one of the ChildDataPipes of "
                                   f"{_generate_iterdatapipe_msg(datapipe.main_datapipe)}." + _feedback_msg)
        else:
            raise RuntimeError("ChildDataPipe must have method `_check_valid_iterator_id`.")
    elif datapipe._valid_iterator_id != iterator_id:
        raise RuntimeError(_gen_invalid_iterdatapipe_msg(datapipe) + _feedback_msg)


def _set_datapipe_valid_iterator_id(datapipe):
    """Given a DataPipe, updates its valid iterator ID and reset the DataPipe."""
    if hasattr(datapipe, "_is_child_datapipe") and datapipe._is_child_datapipe is True:
        if hasattr(datapipe, "_set_main_datapipe_valid_iterator_id"):
            # reset() is invoked inside this method when appropriate.
            datapipe._set_main_datapipe_valid_iterator_id()
        else:
            raise RuntimeError("ChildDataPipe must have method `_set_main_datapipe_valid_iterator_id`.")
    else:
        if datapipe._valid_iterator_id is None:
            datapipe._valid_iterator_id = 0
        else:
            datapipe._valid_iterator_id += 1
        datapipe.reset()
    return datapipe._valid_iterator_id


def hook_iterator(namespace):
    r"""
    Define a hook that is applied to all `__iter__` of metaclass `_DataPipeMeta`.

    This is done for the purpose of profiling and checking if an iterator is still valid.
    """

    def profiler_record_fn_context(datapipe):
        # Cache the (potentially expensive to build) profiler label.
        if not hasattr(datapipe, "_profile_name"):
            datapipe._profile_name = _generate_iterdatapipe_msg(datapipe, simplify_dp_name=True)
        return torch.autograd.profiler.record_function(datapipe._profile_name)

    class IteratorDecorator:
        r"""
        Wrap the iterator and modifying its `__next__` method.

        This decorator is applied to DataPipes of which `__iter__` method is NOT a generator function.
        Those `__iter__` method commonly returns `self` but not necessarily.
        """

        def __init__(self, iterator, datapipe, iterator_id, has_next_method):
            self.iterator = iterator
            self.datapipe = datapipe
            self.iterator_id = iterator_id
            self._profiler_enabled = torch.autograd._profiler_enabled()
            # True when `__iter__` returned `self` and the DataPipe defines
            # `__next__` — in that case `__next__` does the sample counting.
            self.self_and_has_next_method = self.iterator is self.datapipe and has_next_method

        def __iter__(self):
            return self

        def _get_next(self):
            """Return next with logic related to iterator validity, profiler, and incrementation of samples yielded."""
            _check_iterator_valid(self.datapipe, self.iterator_id)
            result = next(self.iterator)
            if not self.self_and_has_next_method:
                self.datapipe._number_of_samples_yielded += 1
            return result

        def __next__(self):
            # TODO: Add try-except to in-place reduce traceback from the Exception
            # See: https://github.com/pytorch/data/issues/284
            if self._profiler_enabled:
                with profiler_record_fn_context(self.datapipe):
                    return self._get_next()
            else:  # Decided against using `contextlib.nullcontext` for performance reasons
                return self._get_next()

        def __getattr__(self, name):
            # Fall through to the wrapped iterator for everything else.
            return getattr(self.iterator, name)

    func = namespace['__iter__']

    if inspect.isgeneratorfunction(func):
        # ``__iter__`` of IterDataPipe is a generator function.
        @functools.wraps(func)
        def wrap_generator(*args, **kwargs):
            gen = func(*args, **kwargs)
            datapipe = args[0]
            if datapipe._fast_forward_iterator:
                it = datapipe._fast_forward_iterator
                datapipe._fast_forward_iterator = None
                datapipe._snapshot_state = _SnapshotState.Iterating
                while True:
                    try:
                        yield next(it)
                    except StopIteration:
                        return
            # This ID is tied to each created iterator.
            iterator_id = _set_datapipe_valid_iterator_id(datapipe)
            _profiler_enabled = torch.autograd._profiler_enabled()
            try:
                if _profiler_enabled:
                    with profiler_record_fn_context(datapipe):
                        response = gen.send(None)
                else:
                    response = gen.send(None)

                while True:
                    datapipe._number_of_samples_yielded += 1
                    request = yield response
                    # Pass through here every time `__next__` is called.
                    if _profiler_enabled:
                        with profiler_record_fn_context(datapipe):
                            _check_iterator_valid(datapipe, iterator_id)
                            response = gen.send(request)
                    else:  # Decided against using `contextlib.nullcontext` for performance reasons
                        _check_iterator_valid(datapipe, iterator_id)
                        response = gen.send(request)
            except StopIteration as e:
                return
            except Exception as e:
                # TODO: Simplify the traceback message to skip over `response = gen.send(None)`
                # Part of https://github.com/pytorch/data/issues/284
                datapipe = args[0]
                msg = "thrown by __iter__ of"
                single_iterator_msg = "single iterator per IterDataPipe constraint"
                if hasattr(e.args, '__len__'):
                    full_msg = f"{msg} {datapipe.__class__.__name__}({_generate_input_args_string(datapipe)})"
                    if len(e.args) == 0 or not isinstance(e.args[0], str):  # If an exception message doesn't exist
                        e.args = (f'\nThis exception is {full_msg}',)
                    elif msg not in e.args[0] and single_iterator_msg not in e.args[0]:
                        e.args = (e.args[0] + f'\nThis exception is {full_msg}',) + e.args[1:]
                raise

        namespace['__iter__'] = wrap_generator
    else:
        # ``__iter__`` of IterDataPipe is NOT a generator function. It is an
        # iterator with both ``__iter__`` and ``__next__``, and ``__iter__``
        # may or may not return `self`.
        if '__next__' in namespace:  # If `__next__` exists, put a wrapper around it
            next_func = namespace['__next__']

            @functools.wraps(next_func)
            def wrap_next(*args, **kwargs):
                datapipe = args[0]
                if torch.autograd._profiler_enabled():
                    with profiler_record_fn_context(datapipe):
                        result = next_func(*args, **kwargs)
                else:
                    result = next_func(*args, **kwargs)
                datapipe._number_of_samples_yielded += 1
                return result

            namespace['__next__'] = wrap_next

            # Note that if `__next__` and `__iter__` do something completely
            # unrelated, it may cause issues — but the user would then be
            # violating the iterator protocol. Potential issues:
            # 1. Valid iterator ID may not be updated or checked properly
            # 2. The number of samples yielded will be miscounted

        # Regardless of whether `__next__` exists, `__iter__` needs a wrapper
        # to track the number of valid iterators.
        @functools.wraps(func)
        def wrap_iter(*args, **kwargs):
            iter_ret = func(*args, **kwargs)
            datapipe = args[0]
            datapipe._snapshot_state = _SnapshotState.Iterating
            if datapipe._fast_forward_iterator:
                iter_ret = datapipe._fast_forward_iterator
                datapipe._fast_forward_iterator = None
                return iter_ret
            # This ID is tied to each created iterator.
            iterator_id = _set_datapipe_valid_iterator_id(datapipe)
            return IteratorDecorator(iter_ret, datapipe, iterator_id, '__next__' in namespace)

        namespace['__iter__'] = wrap_iter
+from typing import _GenericAlias # type: ignore[attr-defined, no-redef] + +class GenericMeta(ABCMeta): # type: ignore[no-redef] + pass + + +class Integer(numbers.Integral): + pass + + +class Boolean(numbers.Integral): + pass + + +# Python 'type' object is not subscriptable +# Tuple[int, List, dict] -> valid +# tuple[int, list, dict] -> invalid +# Map Python 'type' to abstract base class +TYPE2ABC = { + bool: Boolean, + int: Integer, + float: numbers.Real, + complex: numbers.Complex, + dict: Dict, + list: List, + set: Set, + tuple: Tuple, + None: type(None), +} + + +def issubtype(left, right, recursive=True): + r""" + Check if the left-side type is a subtype of the right-side type. + + If any of type is a composite type like `Union` and `TypeVar` with + bounds, it would be expanded into a list of types and check all + of left-side types are subtypes of either one from right-side types. + """ + left = TYPE2ABC.get(left, left) + right = TYPE2ABC.get(right, right) + + if right is Any or left == right: + return True + + if isinstance(right, _GenericAlias): + if getattr(right, '__origin__', None) is Generic: + return True + + if right == type(None): + return False + + # Right-side type + constraints = _decompose_type(right) + + if len(constraints) == 0 or Any in constraints: + return True + + if left is Any: + return False + + # Left-side type + variants = _decompose_type(left) + + # all() will return True for empty variants + if len(variants) == 0: + return False + + return all(_issubtype_with_constraints(variant, constraints, recursive) for variant in variants) + + +def _decompose_type(t, to_list=True): + if isinstance(t, TypeVar): + if t.__bound__ is not None: + ts = [t.__bound__] + else: + # For T_co, __constraints__ is () + ts = list(t.__constraints__) + elif hasattr(t, '__origin__') and t.__origin__ == Union: + ts = t.__args__ + else: + if not to_list: + return None + ts = [t] + # Ignored: Generator has incompatible item type "object"; expected "Type[Any]" + ts = 
[TYPE2ABC.get(_t, _t) for _t in ts] # type: ignore[misc] + return ts + + +def _issubtype_with_constraints(variant, constraints, recursive=True): + r""" + Check if the variant is a subtype of either one from constraints. + + For composite types like `Union` and `TypeVar` with bounds, they + would be expanded for testing. + """ + if variant in constraints: + return True + + # [Note: Subtype for Union and TypeVar] + # Python typing is able to flatten Union[Union[...]] or Union[TypeVar]. + # But it couldn't flatten the following scenarios: + # - Union[int, TypeVar[Union[...]]] + # - TypeVar[TypeVar[...]] + # So, variant and each constraint may be a TypeVar or a Union. + # In these cases, all of inner types from the variant are required to be + # extraced and verified as a subtype of any constraint. And, all of + # inner types from any constraint being a TypeVar or a Union are + # also required to be extracted and verified if the variant belongs to + # any of them. + + # Variant + vs = _decompose_type(variant, to_list=False) + + # Variant is TypeVar or Union + if vs is not None: + return all(_issubtype_with_constraints(v, constraints, recursive) for v in vs) + + # Variant is not TypeVar or Union + if hasattr(variant, '__origin__') and variant.__origin__ is not None: + v_origin = variant.__origin__ + # In Python-3.9 typing library untyped generics do not have args + v_args = getattr(variant, "__args__", None) + else: + v_origin = variant + v_args = None + + # Constraints + for constraint in constraints: + cs = _decompose_type(constraint, to_list=False) + + # Constraint is TypeVar or Union + if cs is not None: + if _issubtype_with_constraints(variant, cs, recursive): + return True + # Constraint is not TypeVar or Union + else: + # __origin__ can be None for plain list, tuple, ... 
in Python 3.6 + if hasattr(constraint, '__origin__') and constraint.__origin__ is not None: + c_origin = constraint.__origin__ + if v_origin == c_origin: + if not recursive: + return True + # In Python-3.9 typing library untyped generics do not have args + c_args = getattr(constraint, "__args__", None) + if c_args is None or len(c_args) == 0: + return True + if v_args is not None and len(v_args) == len(c_args) and \ + all(issubtype(v_arg, c_arg) for v_arg, c_arg in zip(v_args, c_args)): + return True + # Tuple[int] -> Tuple + else: + if v_origin == constraint: + return True + + return False + + +def issubinstance(data, data_type): + if not issubtype(type(data), data_type, recursive=False): + return False + + # In Python-3.9 typing library __args__ attribute is not defined for untyped generics + dt_args = getattr(data_type, "__args__", None) + if isinstance(data, tuple): + if dt_args is None or len(dt_args) == 0: + return True + if len(dt_args) != len(data): + return False + return all(issubinstance(d, t) for d, t in zip(data, dt_args)) + elif isinstance(data, (list, set)): + if dt_args is None or len(dt_args) == 0: + return True + t = dt_args[0] + return all(issubinstance(d, t) for d in data) + elif isinstance(data, dict): + if dt_args is None or len(dt_args) == 0: + return True + kt, vt = dt_args + return all(issubinstance(k, kt) and issubinstance(v, vt) for k, v in data.items()) + + return True + + +# [Note: TypeMeta and TypeAlias] +# In order to keep compatibility for Python 3.6, use Meta for the typing. +# TODO: When PyTorch drops the support for Python 3.6, it can be converted +# into the Alias system and using `__class_getitem__` for DataPipe. 
The +# typing system will gain benefit of performance and resolving metaclass +# conflicts as elaborated in https://www.python.org/dev/peps/pep-0560/ + + +class _DataPipeType: + r"""Save type annotation in `param`.""" + + def __init__(self, param): + self.param = param + + def __repr__(self): + return _type_repr(self.param) + + def __eq__(self, other): + if isinstance(other, _DataPipeType): + return self.param == other.param + return NotImplemented + + def __hash__(self): + return hash(self.param) + + def issubtype(self, other): + if isinstance(other.param, _GenericAlias): + if getattr(other.param, '__origin__', None) is Generic: + return True + if isinstance(other, _DataPipeType): + return issubtype(self.param, other.param) + if isinstance(other, type): + return issubtype(self.param, other) + raise TypeError(f"Expected '_DataPipeType' or 'type', but found {type(other)}") + + def issubtype_of_instance(self, other): + return issubinstance(other, self.param) + + +# Default type for DataPipe without annotation +T_co = TypeVar('T_co', covariant=True) +_DEFAULT_TYPE = _DataPipeType(Generic[T_co]) + + +class _DataPipeMeta(GenericMeta): + r""" + Metaclass for `DataPipe`. + + Add `type` attribute and `__init_subclass__` based on the type, and validate the return hint of `__iter__`. + + Note that there is subclass `_IterDataPipeMeta` specifically for `IterDataPipe`. + """ + + type: _DataPipeType + + def __new__(cls, name, bases, namespace, **kwargs): + return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload] + + # TODO: the statements below are not reachable by design as there is a bug and typing is low priority for now. 
+ cls.__origin__ = None + if 'type' in namespace: + return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload] + + namespace['__type_class__'] = False + # For plain derived class without annotation + for base in bases: + if isinstance(base, _DataPipeMeta): + return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload] + + namespace.update({'type': _DEFAULT_TYPE, + '__init_subclass__': _dp_init_subclass}) + return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload] + + def __init__(self, name, bases, namespace, **kwargs): + super().__init__(name, bases, namespace, **kwargs) # type: ignore[call-overload] + + # TODO: Fix isinstance bug + @_tp_cache + def _getitem_(self, params): + if params is None: + raise TypeError(f'{self.__name__}[t]: t can not be None') + if isinstance(params, str): + params = ForwardRef(params) + if not isinstance(params, tuple): + params = (params, ) + + msg = f"{self.__name__}[t]: t must be a type" + params = tuple(_type_check(p, msg) for p in params) + + if isinstance(self.type.param, _GenericAlias): + orig = getattr(self.type.param, '__origin__', None) + if isinstance(orig, type) and orig is not Generic: + p = self.type.param[params] # type: ignore[index] + t = _DataPipeType(p) + l = len(str(self.type)) + 2 + name = self.__name__[:-l] + name = name + '[' + str(t) + ']' + bases = (self,) + self.__bases__ + return self.__class__(name, bases, + {'__init_subclass__': _dp_init_subclass, + 'type': t, + '__type_class__': True}) + + if len(params) > 1: + raise TypeError(f'Too many parameters for {self} actual {len(params)}, expected 1') + + t = _DataPipeType(params[0]) + + if not t.issubtype(self.type): + raise TypeError(f'Can not subclass a DataPipe[{t}] from DataPipe[{self.type}]') + + # Types are equal, fast path for inheritance + if self.type == t: + return self + + name = self.__name__ + '[' + str(t) + ']' + bases = (self,) + self.__bases__ + + 
return self.__class__(name, bases, + {'__init_subclass__': _dp_init_subclass, + '__type_class__': True, + 'type': t}) + + # TODO: Fix isinstance bug + def _eq_(self, other): + if not isinstance(other, _DataPipeMeta): + return NotImplemented + if self.__origin__ is None or other.__origin__ is None: # type: ignore[has-type] + return self is other + return (self.__origin__ == other.__origin__ # type: ignore[has-type] + and self.type == other.type) + + # TODO: Fix isinstance bug + def _hash_(self): + return hash((self.__name__, self.type)) + + +class _IterDataPipeMeta(_DataPipeMeta): + r""" + Metaclass for `IterDataPipe` and inherits from `_DataPipeMeta`. + + Add various functions for behaviors specific to `IterDataPipe`. + """ + + def __new__(cls, name, bases, namespace, **kwargs): + + if 'reset' in namespace: + reset_func = namespace['reset'] + + @functools.wraps(reset_func) + def conditional_reset(*args, **kwargs): + r""" + Only execute DataPipe's `reset()` method if `_SnapshotState` is `Iterating` or `NotStarted`. + + This allows recently restored DataPipe to preserve its restored state during the initial `__iter__` call. + """ + datapipe = args[0] + if datapipe._snapshot_state in (_SnapshotState.Iterating, _SnapshotState.NotStarted): + # Reset `NotStarted` is necessary because the `source_datapipe` of a DataPipe might have + # already begun iterating. 
                    datapipe._number_of_samples_yielded = 0
                    datapipe._fast_forward_iterator = None
                    reset_func(*args, **kwargs)
                # Regardless of whether reset ran, the pipe is now iterating.
                datapipe._snapshot_state = _SnapshotState.Iterating

            namespace['reset'] = conditional_reset

        if '__iter__' in namespace:
            # Instrument '__iter__' with the profiler/snapshot hooks.
            hook_iterator(namespace)
        return super().__new__(cls, name, bases, namespace, **kwargs)  # type: ignore[call-overload]


def _dp_init_subclass(sub_cls, *args, **kwargs):
    """__init_subclass__ hook: attach ``reinforce_type`` and validate the
    subclass's ``__iter__`` return annotation against its declared type."""
    # Add function for datapipe instance to reinforce the type
    sub_cls.reinforce_type = reinforce_type

    # TODO:
    # - add global switch for type checking at compile-time

    # Ignore internal type class
    if getattr(sub_cls, '__type_class__', False):
        return

    # Check if the string type is valid
    if isinstance(sub_cls.type.param, ForwardRef):
        # Resolve the forward reference in the defining module's globals.
        base_globals = sys.modules[sub_cls.__module__].__dict__
        try:
            param = _eval_type(sub_cls.type.param, base_globals, locals())
            sub_cls.type.param = param
        except TypeError as e:
            raise TypeError(f"{sub_cls.type.param.__forward_arg__} is not supported by Python typing") from e

    if '__iter__' in sub_cls.__dict__:
        iter_fn = sub_cls.__dict__['__iter__']
        hints = get_type_hints(iter_fn)
        if 'return' in hints:
            return_hint = hints['return']
            # Plain Return Hint for Python 3.6
            if return_hint == Iterator:
                return
            # The annotation must be Iterator[...] (typing or collections.abc).
            if not (hasattr(return_hint, '__origin__') and
                    (return_hint.__origin__ == Iterator or
                     return_hint.__origin__ == collections.abc.Iterator)):
                raise TypeError("Expected 'Iterator' as the return annotation for `__iter__` of {}"
                                ", but found {}".format(sub_cls.__name__, _type_repr(hints['return'])))
            # The yielded element type must be a subtype of the declared type.
            data_type = return_hint.__args__[0]
            if not issubtype(data_type, sub_cls.type.param):
                raise TypeError("Expected return type of '__iter__' as a subtype of {}, but found {}"
                                " for {}".format(sub_cls.type, _type_repr(data_type), sub_cls.__name__))


def reinforce_type(self, expected_type):
    r"""
    Reinforce the type for DataPipe instance.

    And the 'expected_type' is required to be a subtype of the original type
    hint to restrict the type requirement of DataPipe instance.
    """
    if isinstance(expected_type, tuple):
        # A bare tuple of types is treated as Tuple[...].
        expected_type = Tuple[expected_type]
    _type_check(expected_type, msg="'expected_type' must be a type")

    if not issubtype(expected_type, self.type.param):
        raise TypeError(f"Expected 'expected_type' as subtype of {self.type}, but found {_type_repr(expected_type)}")

    self.type = _DataPipeType(expected_type)
    return self
diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/dataframes.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/dataframes.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..29df13de74b1ff66eb20f0548a73807393d8ec50
Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/dataframes.cpython-310.pyc differ
diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/structures.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/structures.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..da9fe0347e4418e2ac0167dfc553ea2e2ece3a7a
Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/structures.cpython-310.pyc differ
diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframe_wrapper.py b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframe_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..3596cc171e5da567417535cedc4a174cd417cae1
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframe_wrapper.py
@@ -0,0 +1,125 @@
from typing
import Any, Optional

# Lazily-imported pandas module and cached availability flag.
_pandas: Any = None
_WITH_PANDAS: Optional[bool] = None


def _try_import_pandas() -> bool:
    """Attempt to import pandas once; cache the module in ``_pandas``."""
    try:
        import pandas  # type: ignore[import]
        global _pandas
        _pandas = pandas
        return True
    except ImportError:
        return False


# pandas used only for prototyping, will be shortly replaced with TorchArrow
def _with_pandas() -> bool:
    """Return whether pandas is importable; the result is cached."""
    global _WITH_PANDAS
    if _WITH_PANDAS is None:
        _WITH_PANDAS = _try_import_pandas()
    return _WITH_PANDAS


class PandasWrapper:
    """Default DataFrame backend: delegates every operation to pandas.

    Every method raises (or returns False for the ``is_*`` predicates) when
    pandas is not installed.
    """

    @classmethod
    def create_dataframe(cls, data, columns):
        # Build a DataFrame from row data and column names.
        if not _with_pandas():
            raise Exception("DataFrames prototype requires pandas to function")
        return _pandas.DataFrame(data, columns=columns)  # type: ignore[union-attr]

    @classmethod
    def is_dataframe(cls, data):
        if not _with_pandas():
            return False
        return isinstance(data, _pandas.core.frame.DataFrame)  # type: ignore[union-attr]

    @classmethod
    def is_column(cls, data):
        # A "column" is a pandas Series.
        if not _with_pandas():
            return False
        return isinstance(data, _pandas.core.series.Series)  # type: ignore[union-attr]

    @classmethod
    def iterate(cls, data):
        # Yield rows as (index-free) namedtuples.
        if not _with_pandas():
            raise Exception("DataFrames prototype requires pandas to function")
        yield from data.itertuples(index=False)

    @classmethod
    def concat(cls, buffer):
        # Concatenate a list of DataFrames row-wise.
        if not _with_pandas():
            raise Exception("DataFrames prototype requires pandas to function")
        return _pandas.concat(buffer)  # type: ignore[union-attr]

    @classmethod
    def get_item(cls, data, idx):
        # Return row ``idx`` as a one-row DataFrame (slice, not scalar).
        if not _with_pandas():
            raise Exception("DataFrames prototype requires pandas to function")
        return data[idx: idx + 1]

    @classmethod
    def get_len(cls, df):
        if not _with_pandas():
            raise Exception("DataFrames prototype requires pandas to function")
        return len(df.index)

    @classmethod
    def get_columns(cls, df):
        if not _with_pandas():
            raise Exception("DataFrames prototype requires pandas to function")
        return list(df.columns.values.tolist())


# When you build own implementation just override it with dataframe_wrapper.set_df_wrapper(new_wrapper_class)
default_wrapper = PandasWrapper


def get_df_wrapper():
    """Return the currently active DataFrame backend class."""
    return default_wrapper


def set_df_wrapper(wrapper):
    """Replace the active DataFrame backend (e.g. with a TorchArrow wrapper)."""
    global default_wrapper
    default_wrapper = wrapper


# Module-level convenience functions: each delegates to the active wrapper.

def create_dataframe(data, columns=None):
    wrapper = get_df_wrapper()
    return wrapper.create_dataframe(data, columns)


def is_dataframe(data):
    wrapper = get_df_wrapper()
    return wrapper.is_dataframe(data)


def get_columns(data):
    wrapper = get_df_wrapper()
    return wrapper.get_columns(data)


def is_column(data):
    wrapper = get_df_wrapper()
    return wrapper.is_column(data)


def concat(buffer):
    wrapper = get_df_wrapper()
    return wrapper.concat(buffer)


def iterate(data):
    wrapper = get_df_wrapper()
    return wrapper.iterate(data)


def get_item(data, idx):
    wrapper = get_df_wrapper()
    return wrapper.get_item(data, idx)


def get_len(df):
    wrapper = get_df_wrapper()
    return wrapper.get_len(df)
diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframes.py b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframes.py
new file mode 100644
index 0000000000000000000000000000000000000000..69a14e06fcbf7db40fe415fc70cf9c28cec3fc73
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframes.py
@@ -0,0 +1,433 @@
from typing import Any, Dict, List, Optional

from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import DFIterDataPipe, IterDataPipe

from torch.utils.data.datapipes.dataframe.structures import DataChunkDF

# TODO(VitalyFedyunin): Add error when two different traces get combined

__all__ = [
    "Capture",
    "CaptureA",
    "CaptureAdd",
    "CaptureCall",
    "CaptureControl",
    "CaptureDataFrame",
    "CaptureDataFrameWithDataPipeOps",
    "CaptureF",
    "CaptureGetAttr",
    "CaptureGetItem",
    "CaptureInitial",
    "CaptureLikeMock",
    "CaptureMul",
    "CaptureSetItem",
    "CaptureSub",
    "CaptureVariable",
    "CaptureVariableAssign",
    "DataFrameTracer",
    "DataFrameTracedOps",
    "disable_capture",
    "get_val",
]


def disable_capture():
    """Globally disable trace capture (CaptureVariable creation then raises)."""
    CaptureControl.disabled = True


class CaptureControl:
    # Global on/off switch checked by CaptureVariable.__init__.
    disabled = False


class DataFrameTracedOps(DFIterDataPipe):
    """DataPipe that replays a captured operation trace on each incoming item."""

    def __init__(self, source_datapipe, output_var):
        self.source_datapipe = source_datapipe
        self.output_var = output_var

    def __iter__(self):
        for item in self.source_datapipe:
            # Re-execute the recorded ops with `item` as the trace input.
            yield self.output_var.apply_ops(item)


# TODO(VitalyFedyunin): Extract this list from the DFIterDataPipe registred functions
DATAPIPES_OPS = ['_dataframes_as_tuples', 'groupby', '_dataframes_filter', 'map', 'to_datapipe',
                 'shuffle', 'concat', 'batch', '_dataframes_per_row', '_dataframes_concat', '_dataframes_shuffle']

UNIMPLEMENTED_ATTR = ['__deepcopy__', '__setstate__', 'is_shardable', 'apply_sharding']


class Capture:
    """Base class for trace capture: records operations (getattr/getitem/
    arithmetic/calls) into a shared ``ctx`` dict instead of executing them.

    ``ctx`` holds 'operations' (ordered op list), 'variables' (created
    CaptureVariables), and 'schema_df' (sample frame used to infer columns).
    """
    # TODO: All operations are shared across entire InitialCapture, need to figure out what if we join two captures

    def __init__(self, schema_df=None):
        self.ctx = {'operations': [], 'variables': [], 'schema_df': schema_df}

    def __str__(self):
        return self._ops_str()

    def _ops_str(self):
        # One recorded operation per line.
        res = ""
        for op in self.ctx['operations']:
            if len(res) > 0:
                res += "\n"
            res += str(op)
        return res

    def __getstate__(self):
        # TODO(VitalyFedyunin): Currently can't pickle (why?)
        # NOTE(review): this mutates the live object (drops schema_df and all
        # cached values) as a side effect of pickling — confirm intended.
        self.ctx['schema_df'] = None
        for var in self.ctx['variables']:
            var.calculated_value = None
        state = {}
        for item in self.__dict__:
            state[item] = getattr(self, item)
        return state

    def __setstate__(self, state):
        for k, v in state.items():
            setattr(self, k, v)

    def __getattr__(self, attrname):
        if attrname == 'kwarg' or attrname == 'kwargs':
            raise Exception('no kwargs!')
        if attrname in ['__deepcopy__']:
            raise AttributeError()
        # Record the attribute access lazily instead of resolving it.
        result = CaptureGetAttr(self, attrname, ctx=self.ctx)
        return result

    def __getitem__(self, key):
        return CaptureGetItem(self, key, ctx=self.ctx)

    def __setitem__(self, key, value):
        # Item assignment is a statement, so it goes straight into the op list.
        self.ctx['operations'].append(
            CaptureSetItem(self, key, value, ctx=self.ctx))

    def __add__(self, add_val):
        # Record `self + add_val` and return a fresh variable holding the result.
        res = CaptureAdd(self, add_val, ctx=self.ctx)
        var = CaptureVariable(res, ctx=self.ctx)
        self.ctx['operations'].append(
            CaptureVariableAssign(variable=var, value=res, ctx=self.ctx))
        return var

    def __sub__(self, add_val):
        res = CaptureSub(self, add_val, ctx=self.ctx)
        var = CaptureVariable(res, ctx=self.ctx)
        self.ctx['operations'].append(
            CaptureVariableAssign(variable=var, value=res, ctx=self.ctx))
        return var

    def __mul__(self, add_val):
        res = CaptureMul(self, add_val, ctx=self.ctx)
        var = CaptureVariable(res, ctx=self.ctx)
        t = CaptureVariableAssign(variable=var, value=res, ctx=self.ctx)
        self.ctx['operations'].append(t)
        return var

    def _is_context_empty(self):
        return len(self.ctx['operations']) == 0 and len(self.ctx['variables']) == 0

    def apply_ops_2(self, dataframe):
        # TODO(VitalyFedyunin): Make this calculation thread safe (as currently it updates pointer)
        # Seed the trace input, then replay every recorded op (no return value;
        # results land in each variable's calculated_value).
        self.ctx['variables'][0].calculated_value = dataframe
        for op in self.ctx['operations']:
            op.execute()

    @property
    def columns(self):
        # Infer columns by replaying the trace against the schema sample frame.
        self.apply_ops_2(self.ctx['schema_df'])
        value = self.execute()
        return value.columns

    # TODO(VitalyFedyunin): Add tests
    # TODO(VitalyFedyunin): Need to join context if one of them are empty because we used capture

    def __call__(self, *args, **kwargs):
        # TODO: Check if args or kwargs have more than one different context
        if self._is_context_empty():
            # TODO: Allow CaptureA to take context from mock
            # Adopt the first non-empty context found among the arguments.
            for arg in args:
                if isinstance(arg, Capture) and not arg._is_context_empty():
                    self.ctx = arg.ctx
                    break
            if self._is_context_empty():
                for k, v in kwargs.items():
                    if isinstance(k, Capture) and not k._is_context_empty():
                        self.ctx = k.ctx
                        break
                    if isinstance(v, Capture) and not v._is_context_empty():
                        self.ctx = v.ctx
                        break

        # Record the call and bind its (future) result to a new variable.
        res = CaptureCall(self, ctx=self.ctx, args=args, kwargs=kwargs)
        var = CaptureVariable(None, ctx=self.ctx)
        t = CaptureVariableAssign(ctx=self.ctx, variable=var, value=res)
        self.ctx['operations'].append(t)
        return var


class CaptureF(Capture):
    """Capture node configured purely through keyword arguments (``self.kwargs``)."""

    def __init__(self, ctx=None, **kwargs):
        if ctx is None:
            self.ctx = {'operations': [], 'variables': []}
        else:
            self.ctx = ctx
        self.kwargs = kwargs


class CaptureA(CaptureF):
    """Captured attribute placeholder: renders as its name, executes to the
    real attribute stashed in ``kwargs['real_attribute']``."""

    def __str__(self):
        return f"{self.kwargs['name']}"

    def execute(self):
        value = self.kwargs['real_attribute']
        return value


class CaptureLikeMock:
    """Context manager that temporarily replaces a dotted-name target with a
    CaptureA placeholder (mock.patch-style), restoring it on exit."""

    def __init__(self, name):
        import unittest.mock as mock
        # TODO(VitalyFedyunin): Do not use private function here, copy own implementation instead.
        # mock._get_target resolves 'pkg.mod.attr' into (target-getter, attr name).
        get_target, attribute = mock._get_target(name)  # type: ignore[attr-defined]
        self.get_target = get_target
        self.attribute = attribute
        self.name = name

    def __enter__(self):
        # Save the real attribute and swap in a capture placeholder.
        self.save = getattr(self.get_target(), self.attribute)
        capt = CaptureA(name=self.name, real_attribute=self.save)
        setattr(self.get_target(), self.attribute, capt)

    def __exit__(self, *exc_info):
        # Restore the original attribute unconditionally.
        setattr(self.get_target(), self.attribute, self.save)


class CaptureCall(Capture):
    """Captured function/method invocation; executes by calling the resolved
    callable with executed positional args and raw keyword args."""

    def __init__(self, callable, ctx=None, **kwargs):
        if ctx is None:
            self.ctx = {'operations': [], 'variables': []}
        else:
            self.ctx = ctx
        self.kwargs = kwargs
        self.callable = callable

    def __str__(self):
        return "{callable}({args},{kwargs})".format(callable=self.callable, **self.kwargs)

    def execute(self):

        # TODO: VitalyFedyunin execute kwargs and maybe nested structures
        # Positional args that are themselves captures are executed first;
        # keyword args are passed through as-is (see TODO above).
        executed_args = []
        for arg in self.kwargs['args']:
            if isinstance(arg, Capture):
                executed_args.append(arg.execute())
            else:
                executed_args.append(arg)
        left = get_val(self.callable)
        return left(*executed_args, **self.kwargs['kwargs'])


class CaptureVariableAssign(CaptureF):
    """Captured ``variable = value`` statement; executing stores the evaluated
    value into the variable's ``calculated_value``."""

    def __str__(self):
        variable = self.kwargs['variable']
        value = self.kwargs['value']
        return f"{variable} = {value}"

    def execute(self):
        self.kwargs['variable'].calculated_value = self.kwargs['value'].execute()


class CaptureVariable(Capture):
    """Named slot in the trace; ``execute`` returns the value most recently
    assigned by a CaptureVariableAssign replay."""
    # TODO(VitalyFedyunin): This should be atomic and thread safe
    # Class-level counter used to generate unique variable names.
    names_idx = 0

    def __init__(self, value, ctx):
        if CaptureControl.disabled:
            raise Exception('Attempting to create capture variable with capture off')
        self.ctx = ctx
        self.value = value
        self.name = f'var_{CaptureVariable.names_idx}'
        CaptureVariable.names_idx += 1
        self.ctx['variables'].append(self)

    def __str__(self):
        return self.name

    def execute(self):
        return self.calculated_value

    def apply_ops(self, dataframe):
        # TODO(VitalyFedyunin): Make this calculation thread safe (as currently it updates pointer)
        # Seed the first trace variable with the input frame, replay all ops,
        # and return this variable's computed value.
        self.ctx['variables'][0].calculated_value = dataframe
        for op in self.ctx['operations']:
            op.execute()
        return self.calculated_value


class CaptureGetItem(Capture):
    """Captured ``left[key]`` expression."""

    def __init__(self, left, key, ctx):
        self.ctx = ctx
        self.left = left
        self.key = key

    def __str__(self):
        return f"{self.left}[{get_val(self.key)}]"

    def execute(self):
        left = self.left.execute()
        return left[self.key]


class CaptureSetItem(Capture):
    """Captured ``left[key] = value`` statement."""

    def __init__(self, left, key, value, ctx):
        self.ctx = ctx
        self.left = left
        self.key = key
        self.value = value

    def __str__(self):
        return f"{self.left}[{get_val(self.key)}] = {self.value}"

    def execute(self):
        left = self.left.execute()
        value = self.value.execute()
        left[self.key] = value


class CaptureAdd(Capture):
    """Captured ``left + right`` expression."""

    def __init__(self, left, right, ctx):
        self.ctx = ctx
        self.left = left
        self.right = right

    def __str__(self):
        return f"{self.left} + {self.right}"

    def execute(self):
        return get_val(self.left) + get_val(self.right)


class CaptureMul(Capture):
    """Captured ``left * right`` expression."""

    def __init__(self, left, right, ctx):
        self.ctx = ctx
        self.left = left
        self.right = right

    def __str__(self):
        return f"{self.left} * {self.right}"

    def execute(self):
        return get_val(self.left) * get_val(self.right)


class CaptureSub(Capture):
    """Captured ``left - right`` expression."""

    def __init__(self, left, right, ctx):
        self.ctx = ctx
        self.left = left
        self.right = right

    def __str__(self):
        return f"{self.left} - {self.right}"

    def execute(self):
        return get_val(self.left) - get_val(self.right)


class CaptureGetAttr(Capture):
    """Captured ``src.name`` attribute access."""

    def __init__(self, src, name, ctx):
        self.ctx = ctx
        self.src = src
        self.name = name

    def __str__(self):
        return f"{self.src}.{self.name}"

    def execute(self):
        val = get_val(self.src)
        return getattr(val, self.name)


def get_val(capture):
    # Evaluate a capture node; quote plain strings (used when rendering ops);
    # pass every other value through unchanged.
    if isinstance(capture, Capture):
        return capture.execute()
    elif isinstance(capture, str):
        return f'"{capture}"'
    else:
        return capture


class CaptureInitial(CaptureVariable):
    """Root of a trace: creates a fresh context and names itself ``input_var_N``."""

    def __init__(self, schema_df=None):
        new_ctx: Dict[str, List[Any]] = {'operations': [], 'variables': [], 'schema_df': schema_df}
        super().__init__(None, new_ctx)
        self.name = f'input_{self.name}'


class CaptureDataFrame(CaptureInitial):
    # Alias kept for readability; behavior identical to CaptureInitial.
    pass


class CaptureDataFrameWithDataPipeOps(CaptureDataFrame):
    """CaptureDataFrame augmented with DataPipe-style operations that convert
    the trace back into real DataPipes."""

    def as_datapipe(self):
        # Wrap the trace into a DataPipe that replays it per incoming frame.
        return DataFrameTracedOps(
            self.ctx['variables'][0].source_datapipe, self)

    def raw_iterator(self):
        return self.as_datapipe().__iter__()

    def __iter__(self):
        # Default iteration yields rows as tuples, not frames.
        return iter(self._dataframes_as_tuples())

    def batch(self, batch_size=10, drop_last: bool = False, wrapper_class=DataChunkDF):
        # Re-batch row-wise, concat into frames of `batch_size`, then wrap each
        # frame as a single-element batch.
        dp = self._dataframes_per_row()._dataframes_concat(batch_size)
        dp = dp.as_datapipe().batch(1, drop_last=drop_last, wrapper_class=wrapper_class)
        dp._dp_contains_dataframe = True
        return dp

    def groupby(self,
                group_key_fn,
                *,
                buffer_size=10000,
                group_size=None,
                guaranteed_group_size=None,
                drop_remaining=False):
        # Group per-row so the key function sees individual rows.
        dp = self._dataframes_per_row()
        dp = dp.as_datapipe().groupby(group_key_fn, buffer_size=buffer_size, group_size=group_size,
                                      guaranteed_group_size=guaranteed_group_size, drop_remaining=drop_remaining)
        return dp

    def shuffle(self, *args, **kwargs):
        return self._dataframes_shuffle(*args, **kwargs)

    def filter(self, *args, **kwargs):
        return self._dataframes_filter(*args, **kwargs)

    def collate(self, *args, **kwargs):
        raise Exception("Can't collate unbatched DataFrames stream")

    def __getattr__(self, attrname):  # ?
        if attrname in UNIMPLEMENTED_ATTR:
            raise AttributeError('Attempting to get ', attrname)
        if attrname in DATAPIPES_OPS:
            # DataPipe ops are served by the materialized DataPipe view.
            return (self.as_datapipe()).__getattr__(attrname)
        # Everything else falls back to Capture's lazy attribute recording.
        return super().__getattr__(attrname)


@functional_datapipe('trace_as_dataframe')
class DataFrameTracer(CaptureDataFrameWithDataPipeOps, IterDataPipe):  # type: ignore[misc]
    """Entry point: wraps a source DataPipe of DataFrames and starts a capture
    trace, using the first frame as the schema sample when none is given."""

    source_datapipe: Optional[Any] = None

    # TODO(VitalyFedyunin): Must implement all special functions of datapipes

    def set_shuffle_settings(self, *args, **kwargs):
        # Shuffle settings are a no-op for the tracer itself.
        pass

    def is_shardable(self):
        return False

    def __init__(self, source_datapipe, schema_df=None):
        self.source_datapipe = source_datapipe
        if schema_df is None:
            # NOTE(review): consumes the first element of the source to infer
            # the schema — confirm the source is re-iterable.
            schema_df = next(iter(self.source_datapipe))
        super().__init__(schema_df=schema_df)
diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/datapipes.py b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/datapipes.py
new file mode 100644
index 0000000000000000000000000000000000000000..a75cc5c7a7c210d67cbc6291dcf892576669eb2a
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/datapipes.py
@@ -0,0 +1,131 @@
import random

from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import DFIterDataPipe, IterDataPipe

from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper

__all__ = [
    "ConcatDataFramesPipe",
    "DataFramesAsTuplesPipe",
    "ExampleAggregateAsDataFrames",
    "FilterDataFramesPipe",
    "PerRowDataFramesPipe",
    "ShuffleDataFramesPipe",
]


@functional_datapipe('_dataframes_as_tuples')
class DataFramesAsTuplesPipe(IterDataPipe):
    """Flatten a stream of DataFrames into a stream of per-row tuples."""

    def __init__(self, source_datapipe):
        self.source_datapipe = source_datapipe

    def __iter__(self):
        for df in self.source_datapipe:
            # for record in df.to_records(index=False):
            yield from df_wrapper.iterate(df)
@functional_datapipe('_dataframes_per_row', enable_df_api_tracing=True)
class PerRowDataFramesPipe(DFIterDataPipe):
    """Split each incoming DataFrame into one-row DataFrames."""

    def __init__(self, source_datapipe):
        self.source_datapipe = source_datapipe

    def __iter__(self):
        for df in self.source_datapipe:
            # TODO(VitalyFedyunin): Replacing with TorchArrow only API, as we are dropping pandas as followup
            for i in range(len(df)):
                # Slice keeps each row as a (1-row) DataFrame, not a Series.
                yield df[i:i + 1]


@functional_datapipe('_dataframes_concat', enable_df_api_tracing=True)
class ConcatDataFramesPipe(DFIterDataPipe):
    """Concatenate every ``batch`` incoming DataFrames into one larger frame;
    a final partial group is still emitted."""

    def __init__(self, source_datapipe, batch=3):
        self.source_datapipe = source_datapipe
        self.n_batch = batch

    def __iter__(self):
        buffer = []
        for df in self.source_datapipe:
            buffer.append(df)
            if len(buffer) == self.n_batch:
                yield df_wrapper.concat(buffer)
                buffer = []
        # Flush the remainder (partial batch) at end of stream.
        if len(buffer):
            yield df_wrapper.concat(buffer)


@functional_datapipe('_dataframes_shuffle', enable_df_api_tracing=True)
class ShuffleDataFramesPipe(DFIterDataPipe):
    """Shuffle rows across the whole stream, re-emitting frames whose size
    matches the first incoming frame. Buffers the entire stream in memory.

    NOTE(review): uses the global ``random`` state — no per-pipe seed control.
    """

    def __init__(self, source_datapipe):
        self.source_datapipe = source_datapipe

    def __iter__(self):
        size = None
        all_buffer = []
        for df in self.source_datapipe:
            if size is None:
                # First frame's length defines the output frame size.
                size = df_wrapper.get_len(df)
            for i in range(df_wrapper.get_len(df)):
                all_buffer.append(df_wrapper.get_item(df, i))
        random.shuffle(all_buffer)
        buffer = []
        for df in all_buffer:
            buffer.append(df)
            if len(buffer) == size:
                yield df_wrapper.concat(buffer)
                buffer = []
        if len(buffer):
            yield df_wrapper.concat(buffer)


@functional_datapipe('_dataframes_filter', enable_df_api_tracing=True)
class FilterDataFramesPipe(DFIterDataPipe):
    """Filter rows with ``filter_fn`` (applied per pandas row), re-emitting
    frames of the first incoming frame's size. Buffers the entire stream.

    NOTE(review): uses ``df.index``/``df.iloc`` directly rather than the
    df_wrapper abstraction, so this pipe is pandas-only — confirm intended.
    """

    def __init__(self, source_datapipe, filter_fn):
        self.source_datapipe = source_datapipe
        self.filter_fn = filter_fn

    def __iter__(self):
        size = None
        all_buffer = []
        filter_res = []
        for df in self.source_datapipe:
            if size is None:
                size = len(df.index)
            for i in range(len(df.index)):
                all_buffer.append(df[i:i + 1])
                filter_res.append(self.filter_fn(df.iloc[i]))

        buffer = []
        for df, res in zip(all_buffer, filter_res):
            if res:
                buffer.append(df)
                if len(buffer) == size:
                    yield df_wrapper.concat(buffer)
                    buffer = []
        if len(buffer):
            yield df_wrapper.concat(buffer)


@functional_datapipe('_to_dataframes_pipe', enable_df_api_tracing=True)
class ExampleAggregateAsDataFrames(DFIterDataPipe):
    """Aggregate arbitrary items into DataFrames of ``dataframe_size`` rows."""

    def __init__(self, source_datapipe, dataframe_size=10, columns=None):
        self.source_datapipe = source_datapipe
        self.columns = columns
        self.dataframe_size = dataframe_size

    def _as_list(self, item):
        # Coerce an item to a row (list); non-iterables become single-cell rows.
        try:
            return list(item)
        except Exception:  # TODO(VitalyFedyunin): Replace with better iterable exception
            return [item]

    def __iter__(self):
        aggregate = []
        for item in self.source_datapipe:
            aggregate.append(self._as_list(item))
            if len(aggregate) == self.dataframe_size:
                yield df_wrapper.create_dataframe(aggregate, columns=self.columns)
                aggregate = []
        # Emit any remaining rows as a final, smaller frame.
        if len(aggregate) > 0:
            yield df_wrapper.create_dataframe(aggregate, columns=self.columns)
diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/datapipe.pyi b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/datapipe.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..34e80bcb95f5e487c8fd7c9e8dcb01db56307bc5
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/datapipe.pyi
@@ -0,0 +1,689 @@
# This base template ("datapipe.pyi.in") is generated from mypy stubgen with minimal editing for code injection
# The output file will be "datapipe.pyi". This is executed as part of torch/CMakeLists.txt
# Note that, for mypy, .pyi file takes precedent over .py file, such that we must define the interface for other
# classes/objects here, even though we are not injecting extra code into them at the moment.
+ +from typing import Any, Callable, Dict, Generic, Iterator, List, Literal, Optional, TypeVar, Union + +from torch.utils.data import Dataset, default_collate, IterableDataset +from torch.utils.data.datapipes._hook_iterator import _SnapshotState +from torch.utils.data.datapipes._typing import _DataPipeMeta, _IterDataPipeMeta + +T_co = TypeVar("T_co", covariant=True) +T = TypeVar("T") +UNTRACABLE_DATAFRAME_PIPES: Any + +class MapDataPipe(Dataset[T_co], metaclass=_DataPipeMeta): + functions: Dict[str, Callable] = ... + reduce_ex_hook: Optional[Callable] = ... + getstate_hook: Optional[Callable] = ... + str_hook: Optional[Callable] = ... + repr_hook: Optional[Callable] = ... + def __getattr__(self, attribute_name: Any): ... + @classmethod + def register_function(cls, function_name: Any, function: Any) -> None: ... + @classmethod + def register_datapipe_as_function( + cls, + function_name: Any, + cls_to_register: Any, + ): ... + def __getstate__(self): ... + def __reduce_ex__(self, *args: Any, **kwargs: Any): ... + @classmethod + def set_getstate_hook(cls, hook_fn: Any) -> None: ... + @classmethod + def set_reduce_ex_hook(cls, hook_fn: Any) -> None: ... + # Functional form of 'BatcherMapDataPipe' + def batch(self, batch_size: int, drop_last: bool = False, wrapper_class=DataChunk) -> MapDataPipe: + r""" + Create mini-batches of data (functional name: ``batch``). + + An outer dimension will be added as ``batch_size`` if ``drop_last`` is set to ``True``, + or ``length % batch_size`` for the last batch if ``drop_last`` is set to ``False``. 
+ + Args: + datapipe: Iterable DataPipe being batched + batch_size: The size of each batch + drop_last: Option to drop the last batch if it's not full + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.map import SequenceWrapper + >>> dp = SequenceWrapper(range(10)) + >>> batch_dp = dp.batch(batch_size=2) + >>> list(batch_dp) + [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]] + """ + + # Functional form of 'ConcaterMapDataPipe' + def concat(self, *datapipes: MapDataPipe) -> MapDataPipe: + r""" + Concatenate multiple Map DataPipes (functional name: ``concat``). + + The new index of is the cumulative sum of source DataPipes. + For example, if there are 2 source DataPipes both with length 5, + index 0 to 4 of the resulting `ConcatMapDataPipe` would refer to + elements of the first DataPipe, and 5 to 9 would refer to elements + of the second DataPipe. + + Args: + datapipes: Map DataPipes being concatenated + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.map import SequenceWrapper + >>> dp1 = SequenceWrapper(range(3)) + >>> dp2 = SequenceWrapper(range(3)) + >>> concat_dp = dp1.concat(dp2) + >>> list(concat_dp) + [0, 1, 2, 0, 1, 2] + """ + + # Functional form of 'MapperMapDataPipe' + def map(self, fn: Callable= ...) -> MapDataPipe: + r""" + Apply the input function over each item from the source DataPipe (functional name: ``map``). + + The function can be any regular Python function or partial object. Lambda + function is not recommended as it is not supported by pickle. + + Args: + datapipe: Source MapDataPipe + fn: Function being applied to each item + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.map import SequenceWrapper, Mapper + >>> def add_one(x): + ... 
return x + 1 + >>> dp = SequenceWrapper(range(10)) + >>> map_dp_1 = dp.map(add_one) + >>> list(map_dp_1) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + >>> map_dp_2 = Mapper(dp, lambda x: x + 1) + >>> list(map_dp_2) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + """ + + # Functional form of 'ShufflerIterDataPipe' + def shuffle(self, *, indices: Optional[List] = None) -> IterDataPipe: + r""" + Shuffle the input MapDataPipe via its indices (functional name: ``shuffle``). + + When it is used with :class:`~torch.utils.data.DataLoader`, the methods to + set up random seed are different based on :attr:`num_workers`. + + For single-process mode (:attr:`num_workers == 0`), the random seed is set before + the :class:`~torch.utils.data.DataLoader` in the main process. For multi-process + mode (:attr:`num_worker > 0`), ``worker_init_fn`` is used to set up a random seed + for each worker process. + + Args: + datapipe: MapDataPipe being shuffled + indices: a list of indices of the MapDataPipe. If not provided, we assume it uses 0-based indexing + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.map import SequenceWrapper + >>> dp = SequenceWrapper(range(10)) + >>> shuffle_dp = dp.shuffle().set_seed(0) + >>> list(shuffle_dp) + [7, 8, 1, 5, 3, 4, 2, 0, 9, 6] + >>> list(shuffle_dp) + [6, 1, 9, 5, 2, 4, 7, 3, 8, 0] + >>> # Reset seed for Shuffler + >>> shuffle_dp = shuffle_dp.set_seed(0) + >>> list(shuffle_dp) + [7, 8, 1, 5, 3, 4, 2, 0, 9, 6] + + Note: + Even thought this ``shuffle`` operation takes a ``MapDataPipe`` as the input, it would return an + ``IterDataPipe`` rather than a ``MapDataPipe``, because ``MapDataPipe`` should be non-sensitive to + the order of data order for the sake of random reads, but ``IterDataPipe`` depends on the order + of data during data-processing. 
+ """ + + # Functional form of 'ZipperMapDataPipe' + def zip(self, *datapipes: MapDataPipe[T_co]) -> MapDataPipe: + r""" + Aggregates elements into a tuple from each of the input DataPipes (functional name: ``zip``). + + This MataPipe is out of bound as soon as the shortest input DataPipe is exhausted. + + Args: + *datapipes: Map DataPipes being aggregated + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.map import SequenceWrapper + >>> dp1 = SequenceWrapper(range(3)) + >>> dp2 = SequenceWrapper(range(10, 13)) + >>> zip_dp = dp1.zip(dp2) + >>> list(zip_dp) + [(0, 10), (1, 11), (2, 12)] + """ + + +class IterDataPipe(IterableDataset[T_co], metaclass=_IterDataPipeMeta): + functions: Dict[str, Callable] = ... + reduce_ex_hook: Optional[Callable] = ... + getstate_hook: Optional[Callable] = ... + str_hook: Optional[Callable] = ... + repr_hook: Optional[Callable] = ... + _number_of_samples_yielded: int = ... + _snapshot_state: _SnapshotState = _SnapshotState.Iterating + _fast_forward_iterator: Optional[Iterator] = ... + def __getattr__(self, attribute_name: Any): ... + @classmethod + def register_function(cls, function_name: Any, function: Any) -> None: ... + @classmethod + def register_datapipe_as_function( + cls, + function_name: Any, + cls_to_register: Any, + enable_df_api_tracing: bool = ..., + ): ... + def __getstate__(self): ... + def __reduce_ex__(self, *args: Any, **kwargs: Any): ... + @classmethod + def set_getstate_hook(cls, hook_fn: Any) -> None: ... + @classmethod + def set_reduce_ex_hook(cls, hook_fn: Any) -> None: ... + # Functional form of 'BatcherIterDataPipe' + def batch(self, batch_size: int, drop_last: bool = False, wrapper_class=DataChunk) -> IterDataPipe: + r""" + Creates mini-batches of data (functional name: ``batch``). + + An outer dimension will be added as ``batch_size`` if ``drop_last`` is set to ``True``, or ``length % batch_size`` for the + last batch if ``drop_last`` is set to ``False``. 
+ + Args: + datapipe: Iterable DataPipe being batched + batch_size: The size of each batch + drop_last: Option to drop the last batch if it's not full + wrapper_class: wrapper to apply onto each batch (type ``List``) before yielding, + defaults to ``DataChunk`` + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper + >>> dp = IterableWrapper(range(10)) + >>> dp = dp.batch(batch_size=3, drop_last=True) + >>> list(dp) + [[0, 1, 2], [3, 4, 5], [6, 7, 8]] + """ + + # Functional form of 'CollatorIterDataPipe' + def collate(self, conversion: Optional[Union[Callable[..., Any],Dict[Union[str, Any], Union[Callable, Any]],]] = default_collate, collate_fn: Optional[Callable] = None) -> IterDataPipe: + r""" + Collates samples from DataPipe to Tensor(s) by a custom collate function (functional name: ``collate``). + + By default, it uses :func:`torch.utils.data.default_collate`. + + .. note:: + While writing a custom collate function, you can import :func:`torch.utils.data.default_collate` for the + default behavior and `functools.partial` to specify any additional arguments. + + Args: + datapipe: Iterable DataPipe being collated + collate_fn: Customized collate function to collect and combine data or a batch of data. + Default function collates to Tensor(s) based on data type. + + Example: + >>> # xdoctest: +SKIP + >>> # Convert integer data to float Tensor + >>> class MyIterDataPipe(torch.utils.data.IterDataPipe): + ... def __init__(self, start, end): + ... super(MyIterDataPipe).__init__() + ... assert end > start, "this example code only works with end >= start" + ... self.start = start + ... self.end = end + ... + ... def __iter__(self): + ... return iter(range(self.start, self.end)) + ... + ... def __len__(self): + ... return self.end - self.start + ... + >>> ds = MyIterDataPipe(start=3, end=7) + >>> print(list(ds)) + [3, 4, 5, 6] + >>> def collate_fn(batch): + ... return torch.tensor(batch, dtype=torch.float) + ... 
+ >>> collated_ds = CollateIterDataPipe(ds, collate_fn=collate_fn) + >>> print(list(collated_ds)) + [tensor(3.), tensor(4.), tensor(5.), tensor(6.)] + """ + + # Functional form of 'ConcaterIterDataPipe' + def concat(self, *datapipes: IterDataPipe) -> IterDataPipe: + r""" + Concatenates multiple Iterable DataPipes (functional name: ``concat``). + + The resulting DataPipe will yield all the elements from the first input DataPipe, before yielding from the subsequent ones. + + Args: + datapipes: Iterable DataPipes being concatenated + + Example: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> import random + >>> from torchdata.datapipes.iter import IterableWrapper + >>> dp1 = IterableWrapper(range(3)) + >>> dp2 = IterableWrapper(range(5)) + >>> list(dp1.concat(dp2)) + [0, 1, 2, 0, 1, 2, 3, 4] + """ + + # Functional form of 'DemultiplexerIterDataPipe' + def demux(self, num_instances: int, classifier_fn: Callable[[T_co], Optional[int]], drop_none: bool = False, buffer_size: int = 1000) -> List[IterDataPipe]: + r""" + Splits the input DataPipe into multiple child DataPipes, using the given classification function (functional name: ``demux``). + + A list of the child DataPipes is returned from this operation. + + Args: + datapipe: Iterable DataPipe being filtered + num_instances: number of instances of the DataPipe to create + classifier_fn: a function that maps values to an integer within the range ``[0, num_instances - 1]`` or ``None`` + drop_none: defaults to ``False``, if ``True``, the function will skip over elements classified as ``None`` + buffer_size: this defines the maximum number of inputs that the buffer can hold across all child + DataPipes while waiting for their values to be yielded. + Defaults to ``1000``. Use ``-1`` for the unlimited buffer. + + Examples: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> from torchdata.datapipes.iter import IterableWrapper + >>> def odd_or_even(n): + ... 
return n % 2 + >>> source_dp = IterableWrapper(range(5)) + >>> dp1, dp2 = source_dp.demux(num_instances=2, classifier_fn=odd_or_even) + >>> list(dp1) + [0, 2, 4] + >>> list(dp2) + [1, 3] + >>> # It can also filter out any element that gets `None` from the `classifier_fn` + >>> def odd_or_even_no_zero(n): + ... return n % 2 if n != 0 else None + >>> dp1, dp2 = source_dp.demux(num_instances=2, classifier_fn=odd_or_even_no_zero, drop_none=True) + >>> list(dp1) + [2, 4] + >>> list(dp2) + [1, 3] + """ + + # Functional form of 'FilterIterDataPipe' + def filter(self, filter_fn: Callable, input_col=None) -> IterDataPipe: + r""" + Filters out elements from the source datapipe according to input ``filter_fn`` (functional name: ``filter``). + + Args: + datapipe: Iterable DataPipe being filtered + filter_fn: Customized function mapping an element to a boolean. + input_col: Index or indices of data which ``filter_fn`` is applied, such as: + + - ``None`` as default to apply ``filter_fn`` to the data directly. + - Integer(s) is used for list/tuple. + - Key(s) is used for dict. + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper + >>> def is_even(n): + ... return n % 2 == 0 + >>> dp = IterableWrapper(range(5)) + >>> filter_dp = dp.filter(filter_fn=is_even) + >>> list(filter_dp) + [0, 2, 4] + """ + + # Functional form of 'ForkerIterDataPipe' + def fork(self, num_instances: int, buffer_size: int = 1000, copy: Optional[Literal["shallow", "deep"]] = None) -> List[IterDataPipe]: + r""" + Creates multiple instances of the same Iterable DataPipe (functional name: ``fork``). + + Args: + datapipe: Iterable DataPipe being copied + num_instances: number of instances of the datapipe to create + buffer_size: this restricts how far ahead the leading child DataPipe + can read relative to the slowest child DataPipe. + Defaults to ``1000``. Use ``-1`` for the unlimited buffer. + copy: copy strategy to use for items yielded by each branch. 
Supported + options are ``None`` for no copying, ``"shallow"`` for shallow object + copies, and ``"deep"`` for deep object copies. Defaults to ``None``. + + Note: + All branches of the forked pipeline return the identical object unless + the copy parameter is supplied. If the object is mutable or contains + mutable objects, changing them in one branch will affect all others. + + Example: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> from torchdata.datapipes.iter import IterableWrapper + >>> source_dp = IterableWrapper(range(5)) + >>> dp1, dp2 = source_dp.fork(num_instances=2) + >>> list(dp1) + [0, 1, 2, 3, 4] + >>> list(dp2) + [0, 1, 2, 3, 4] + """ + + # Functional form of 'GrouperIterDataPipe' + def groupby(self, group_key_fn: Callable[[T_co], Any], *, keep_key: bool = False, buffer_size: int = 10000, group_size: Optional[int] = None, guaranteed_group_size: Optional[int] = None, drop_remaining: bool = False) -> IterDataPipe: + r""" + Groups data from IterDataPipe by keys from ``group_key_fn``, yielding a ``DataChunk`` with batch size up to ``group_size``. + + (functional name: ``groupby``). + + The samples are read sequentially from the source ``datapipe``, and a batch of samples belonging to the same group + will be yielded as soon as the size of the batch reaches ``group_size``. When the buffer is full, + the DataPipe will yield the largest batch with the same key, provided that its size is larger + than ``guaranteed_group_size``. If its size is smaller, it will be dropped if ``drop_remaining=True``. + + After iterating through the entirety of source ``datapipe``, everything not dropped due to the buffer capacity + will be yielded from the buffer, even if the group sizes are smaller than ``guaranteed_group_size``. 
+ + Args: + datapipe: Iterable datapipe to be grouped + group_key_fn: Function used to generate group key from the data of the source datapipe + keep_key: Option to yield the matching key along with the items in a tuple, + resulting in `(key, [items])` otherwise returning [items] + buffer_size: The size of buffer for ungrouped data + group_size: The max size of each group, a batch is yielded as soon as it reaches this size + guaranteed_group_size: The guaranteed minimum group size to be yielded in case the buffer is full + drop_remaining: Specifies if the group smaller than ``guaranteed_group_size`` will be dropped from buffer + when the buffer is full + + Example: + >>> import os + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper + >>> def group_fn(file): + ... return os.path.basename(file).split(".")[0] + >>> source_dp = IterableWrapper(["a.png", "b.png", "a.json", "b.json", "a.jpg", "c.json"]) + >>> dp0 = source_dp.groupby(group_key_fn=group_fn) + >>> list(dp0) + [['a.png', 'a.json', 'a.jpg'], ['b.png', 'b.json'], ['c.json']] + >>> # A group is yielded as soon as its size equals to `group_size` + >>> dp1 = source_dp.groupby(group_key_fn=group_fn, group_size=2) + >>> list(dp1) + [['a.png', 'a.json'], ['b.png', 'b.json'], ['a.jpg'], ['c.json']] + >>> # Scenario where `buffer` is full, and group 'a' needs to be yielded since its size > `guaranteed_group_size` + >>> dp2 = source_dp.groupby(group_key_fn=group_fn, buffer_size=3, group_size=3, guaranteed_group_size=2) + >>> list(dp2) + [['a.png', 'a.json'], ['b.png', 'b.json'], ['a.jpg'], ['c.json']] + """ + + # Functional form of 'FileListerIterDataPipe' + def list_files(self, masks: Union[str, List[str]] = '', *, recursive: bool = False, abspath: bool = False, non_deterministic: bool = False, length: int = -1) -> IterDataPipe: + r""" + Given path(s) to the root directory, yields file pathname(s) (path + filename) of files within the root directory. 
+ + Multiple root directories can be provided (functional name: ``list_files``). + + Args: + root: Root directory or a sequence of root directories + masks: Unix style filter string or string list for filtering file name(s) + recursive: Whether to return pathname from nested directories or not + abspath: Whether to return relative pathname or absolute pathname + non_deterministic: Whether to return pathname in sorted order or not. + If ``False``, the results yielded from each root directory will be sorted + length: Nominal length of the datapipe + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import FileLister + >>> dp = FileLister(root=".", recursive=True) + >>> list(dp) + ['example.py', './data/data.tar'] + """ + + # Functional form of 'MapperIterDataPipe' + def map(self, fn: Callable, input_col=None, output_col=None) -> IterDataPipe: + r""" + Applies a function over each item from the source DataPipe (functional name: ``map``). + + The function can be any regular Python function or partial object. Lambda + function is not recommended as it is not supported by pickle. + + Args: + datapipe: Source Iterable DataPipe + fn: Function being applied over each item + input_col: Index or indices of data which ``fn`` is applied, such as: + + - ``None`` as default to apply ``fn`` to the data directly. + - Integer(s) is used for list/tuple. + - Key(s) is used for dict. + + output_col: Index of data where result of ``fn`` is placed. ``output_col`` can be specified + only when ``input_col`` is not ``None`` + + - ``None`` as default to replace the index that ``input_col`` specified; For ``input_col`` with + multiple indices, the left-most one is used, and other indices will be removed. + - Integer is used for list/tuple. ``-1`` represents to append result at the end. + - Key is used for dict. New key is acceptable. + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper, Mapper + >>> def add_one(x): + ... 
return x + 1 + >>> dp = IterableWrapper(range(10)) + >>> map_dp_1 = dp.map(add_one) # Invocation via functional form is preferred + >>> list(map_dp_1) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + >>> # We discourage the usage of `lambda` functions as they are not serializable with `pickle` + >>> # Use `functools.partial` or explicitly define the function instead + >>> map_dp_2 = Mapper(dp, lambda x: x + 1) + >>> list(map_dp_2) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + """ + + # Functional form of 'MultiplexerIterDataPipe' + def mux(self, *datapipes) -> IterDataPipe: + r""" + Yields one element at a time from each of the input Iterable DataPipes (functional name: ``mux``). + + As in, one element from the 1st input DataPipe, then one element from the 2nd DataPipe in the next iteration, + and so on. It ends when the shortest input DataPipe is exhausted. + + Args: + datapipes: Iterable DataPipes that will take turn to yield their elements, until the shortest DataPipe is exhausted + + Example: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> from torchdata.datapipes.iter import IterableWrapper + >>> dp1, dp2, dp3 = IterableWrapper(range(3)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25)) + >>> list(dp1.mux(dp2, dp3)) + [0, 10, 20, 1, 11, 21, 2, 12, 22] + """ + + # Functional form of 'FileOpenerIterDataPipe' + def open_files(self, mode: str = 'r', encoding: Optional[str] = None, length: int = -1) -> IterDataPipe: + r""" + Given pathnames, opens files and yield pathname and file stream in a tuple (functional name: ``open_files``). + + Args: + datapipe: Iterable datapipe that provides pathnames + mode: An optional string that specifies the mode in which + the file is opened by ``open()``. It defaults to ``r``, other options are + ``b`` for reading in binary mode and ``t`` for text mode. + encoding: An optional string that specifies the encoding of the + underlying file. It defaults to ``None`` to match the default encoding of ``open``. 
+ length: Nominal length of the datapipe + + Note: + The opened file handles will be closed by Python's GC periodically. Users can choose + to close them explicitly. + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import FileLister, FileOpener, StreamReader + >>> dp = FileLister(root=".").filter(lambda fname: fname.endswith('.txt')) + >>> dp = FileOpener(dp) + >>> dp = StreamReader(dp) + >>> list(dp) + [('./abc.txt', 'abc')] + """ + + # Functional form of 'StreamReaderIterDataPipe' + def read_from_stream(self, chunk=None) -> IterDataPipe: + r""" + Given IO streams and their label names, yield bytes with label name as tuple. + + (functional name: ``read_from_stream``). + + Args: + datapipe: Iterable DataPipe provides label/URL and byte stream + chunk: Number of bytes to be read from stream per iteration. + If ``None``, all bytes will be read until the EOF. + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper, StreamReader + >>> from io import StringIO + >>> dp = IterableWrapper([("alphabet", StringIO("abcde"))]) + >>> list(StreamReader(dp, chunk=1)) + [('alphabet', 'a'), ('alphabet', 'b'), ('alphabet', 'c'), ('alphabet', 'd'), ('alphabet', 'e')] + """ + + # Functional form of 'RoutedDecoderIterDataPipe' + def routed_decode(self, *handlers: Callable, key_fn: Callable= ...) -> IterDataPipe: + r""" + Decodes binary streams from input DataPipe, yields pathname and decoded data in a tuple. + + (functional name: ``routed_decode``) + + Args: + datapipe: Iterable datapipe that provides pathname and binary stream in tuples + handlers: Optional user defined decoder handlers. If ``None``, basic and image decoder + handlers will be set as default. If multiple handles are provided, the priority + order follows the order of handlers (the first handler has the top priority) + key_fn: Function for decoder to extract key from pathname to dispatch handlers. 
+ Default is set to extract file extension from pathname + + Note: + When ``key_fn`` is specified returning anything other than extension, the default + handler will not work and users need to specify custom handler. Custom handler + could use regex to determine the eligibility to handle data. + """ + + # Functional form of 'ShardingFilterIterDataPipe' + def sharding_filter(self, sharding_group_filter=None) -> IterDataPipe: + r""" + Wrapper that allows DataPipe to be sharded (functional name: ``sharding_filter``). + + After ``apply_sharding`` is called, each instance of the DataPipe (on different workers) will have every `n`-th element of the + original DataPipe, where `n` equals to the number of instances. + + Args: + source_datapipe: Iterable DataPipe that will be sharded + """ + + # Functional form of 'ShufflerIterDataPipe' + def shuffle(self, *, buffer_size: int = 10000, unbatch_level: int = 0) -> IterDataPipe: + r""" + Shuffle the input DataPipe with a buffer (functional name: ``shuffle``). + + The buffer with ``buffer_size`` is filled with elements from the datapipe first. Then, + each item will be yielded from the buffer by reservoir sampling via iterator. + + ``buffer_size`` is required to be larger than ``0``. For ``buffer_size == 1``, the + datapipe is not shuffled. In order to fully shuffle all elements from datapipe, + ``buffer_size`` is required to be greater than or equal to the size of datapipe. + + When it is used with :class:`torch.utils.data.DataLoader`, the methods to + set up random seed are different based on :attr:`num_workers`. + + For single-process mode (:attr:`num_workers == 0`), the random seed is set before + the :class:`~torch.utils.data.DataLoader` in the main process. For multi-process + mode (:attr:`num_worker > 0`), `worker_init_fn` is used to set up a random seed + for each worker process. 
+ + Args: + datapipe: The IterDataPipe being shuffled + buffer_size: The buffer size for shuffling (default to ``10000``) + unbatch_level: Specifies if it is necessary to unbatch source data before + applying the shuffle + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper + >>> dp = IterableWrapper(range(10)) + >>> shuffle_dp = dp.shuffle() + >>> list(shuffle_dp) + [0, 4, 1, 6, 3, 2, 9, 5, 7, 8] + """ + + # Functional form of 'UnBatcherIterDataPipe' + def unbatch(self, unbatch_level: int = 1) -> IterDataPipe: + r""" + Undos batching of data (functional name: ``unbatch``). + + In other words, it flattens the data up to the specified level within a batched DataPipe. + + Args: + datapipe: Iterable DataPipe being un-batched + unbatch_level: Defaults to ``1`` (only flattening the top level). If set to ``2``, + it will flatten the top two levels, and ``-1`` will flatten the entire DataPipe. + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper + >>> source_dp = IterableWrapper([[[0, 1], [2]], [[3, 4], [5]], [[6]]]) + >>> dp1 = source_dp.unbatch() + >>> list(dp1) + [[0, 1], [2], [3, 4], [5], [6]] + >>> dp2 = source_dp.unbatch(unbatch_level=2) + >>> list(dp2) + [0, 1, 2, 3, 4, 5, 6] + """ + + # Functional form of 'ZipperIterDataPipe' + def zip(self, *datapipes: IterDataPipe) -> IterDataPipe: + r""" + Aggregates elements into a tuple from each of the input DataPipes (functional name: ``zip``). + + The output is stopped as soon as the shortest input DataPipe is exhausted. 
+ + Args: + *datapipes: Iterable DataPipes being aggregated + + Example: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> from torchdata.datapipes.iter import IterableWrapper + >>> dp1, dp2, dp3 = IterableWrapper(range(5)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25)) + >>> list(dp1.zip(dp2, dp3)) + [(0, 10, 20), (1, 11, 21), (2, 12, 22), (3, 13, 23), (4, 14, 24)] + """ + + +class DFIterDataPipe(IterDataPipe): + def _is_dfpipe(self): ... + def __iter__(self): ... + +class _DataPipeSerializationWrapper: + def __init__(self, datapipe): ... + def __getstate__(self): ... + def __setstate__(self, state): ... + def __len__(self): ... + +class _IterDataPipeSerializationWrapper(_DataPipeSerializationWrapper, IterDataPipe): + def __iter__(self): ... + +class _MapDataPipeSerializationWrapper(_DataPipeSerializationWrapper, MapDataPipe): + def __getitem__(self, idx): ... + +class DataChunk(list, Generic[T]): + def __init__(self, items): + super().__init__(items) + self.items = items + def as_str(self, indent: str = "") -> str: + res = indent + "[" + ", ".join(str(i) for i in iter(self)) + "]" + return res + def __iter__(self) -> Iterator[T]: + yield from super().__iter__() + def raw_iterator(self) -> T: # type: ignore[misc] + yield from self.items diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__init__.py b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a775f0be8753677f8255e1201dc8d70649172baf --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__init__.py @@ -0,0 +1,64 @@ +from torch.utils.data.datapipes.iter.utils import ( + IterableWrapperIterDataPipe as IterableWrapper, +) +from torch.utils.data.datapipes.iter.callable import ( + CollatorIterDataPipe as Collator, + MapperIterDataPipe as Mapper, +) +from torch.utils.data.datapipes.iter.combinatorics import ( + 
SamplerIterDataPipe as Sampler, + ShufflerIterDataPipe as Shuffler, +) +from torch.utils.data.datapipes.iter.combining import ( + ConcaterIterDataPipe as Concater, + DemultiplexerIterDataPipe as Demultiplexer, + ForkerIterDataPipe as Forker, + MultiplexerIterDataPipe as Multiplexer, + ZipperIterDataPipe as Zipper, +) +from torch.utils.data.datapipes.iter.filelister import ( + FileListerIterDataPipe as FileLister, +) +from torch.utils.data.datapipes.iter.fileopener import ( + FileOpenerIterDataPipe as FileOpener, +) +from torch.utils.data.datapipes.iter.grouping import ( + BatcherIterDataPipe as Batcher, + GrouperIterDataPipe as Grouper, + UnBatcherIterDataPipe as UnBatcher, +) +from torch.utils.data.datapipes.iter.sharding import ( + ShardingFilterIterDataPipe as ShardingFilter, +) +from torch.utils.data.datapipes.iter.routeddecoder import ( + RoutedDecoderIterDataPipe as RoutedDecoder, +) +from torch.utils.data.datapipes.iter.selecting import ( + FilterIterDataPipe as Filter, +) +from torch.utils.data.datapipes.iter.streamreader import ( + StreamReaderIterDataPipe as StreamReader, +) + +__all__ = ['Batcher', + 'Collator', + 'Concater', + 'Demultiplexer', + 'FileLister', + 'FileOpener', + 'Filter', + 'Forker', + 'Grouper', + 'IterableWrapper', + 'Mapper', + 'Multiplexer', + 'RoutedDecoder', + 'Sampler', + 'ShardingFilter', + 'Shuffler', + 'StreamReader', + 'UnBatcher', + 'Zipper'] + +# Please keep this list sorted +assert __all__ == sorted(__all__) diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/callable.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/callable.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ec3133d3896c7f50dfc53569192c6ff48d2d8b0 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/callable.cpython-310.pyc differ diff --git 
a/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/selecting.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/selecting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ecccab77fc65bde7b353967deec70b6bc4b18fe Binary files /dev/null and b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/selecting.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/callable.py b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/callable.py new file mode 100644 index 0000000000000000000000000000000000000000..48875e40a68d111042e21d086cf895b24b6e0474 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/callable.py @@ -0,0 +1,237 @@ +import functools +from collections import namedtuple + +from typing import Callable, Iterator, Sized, TypeVar, Optional, Union, Any, Dict, List + +from torch.utils.data.datapipes._decorator import functional_datapipe +from torch.utils.data._utils.collate import default_collate +from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper +from torch.utils.data.datapipes.datapipe import IterDataPipe +from torch.utils.data.datapipes.utils.common import (_check_unpickable_fn, + validate_input_col) + +__all__ = [ + "CollatorIterDataPipe", + "MapperIterDataPipe", +] + +T_co = TypeVar("T_co", covariant=True) + + +@functional_datapipe("map") +class MapperIterDataPipe(IterDataPipe[T_co]): + r""" + Applies a function over each item from the source DataPipe (functional name: ``map``). + + The function can be any regular Python function or partial object. Lambda + function is not recommended as it is not supported by pickle. 
+ + Args: + datapipe: Source Iterable DataPipe + fn: Function being applied over each item + input_col: Index or indices of data which ``fn`` is applied, such as: + + - ``None`` as default to apply ``fn`` to the data directly. + - Integer(s) is used for list/tuple. + - Key(s) is used for dict. + + output_col: Index of data where result of ``fn`` is placed. ``output_col`` can be specified + only when ``input_col`` is not ``None`` + + - ``None`` as default to replace the index that ``input_col`` specified; For ``input_col`` with + multiple indices, the left-most one is used, and other indices will be removed. + - Integer is used for list/tuple. ``-1`` represents to append result at the end. + - Key is used for dict. New key is acceptable. + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper, Mapper + >>> def add_one(x): + ... return x + 1 + >>> dp = IterableWrapper(range(10)) + >>> map_dp_1 = dp.map(add_one) # Invocation via functional form is preferred + >>> list(map_dp_1) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + >>> # We discourage the usage of `lambda` functions as they are not serializable with `pickle` + >>> # Use `functools.partial` or explicitly define the function instead + >>> map_dp_2 = Mapper(dp, lambda x: x + 1) + >>> list(map_dp_2) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + """ + + datapipe: IterDataPipe + fn: Callable + + def __init__( + self, + datapipe: IterDataPipe, + fn: Callable, + input_col=None, + output_col=None, + ) -> None: + super().__init__() + self.datapipe = datapipe + + _check_unpickable_fn(fn) + self.fn = fn # type: ignore[assignment] + + self.input_col = input_col + if input_col is None and output_col is not None: + raise ValueError("`output_col` must be None when `input_col` is None.") + if isinstance(output_col, (list, tuple)): + if len(output_col) > 1: + raise ValueError("`output_col` must be a single-element list or tuple") + output_col = output_col[0] + self.output_col = output_col + 
validate_input_col(fn, input_col) + + def _apply_fn(self, data): + if self.input_col is None and self.output_col is None: + return self.fn(data) + + if self.input_col is None: + res = self.fn(data) + elif isinstance(self.input_col, (list, tuple)): + args = tuple(data[col] for col in self.input_col) + res = self.fn(*args) + else: + res = self.fn(data[self.input_col]) + + # Copy tuple to list and run in-place modification because tuple is immutable. + if isinstance(data, tuple): + t_flag = True + data = list(data) + else: + t_flag = False + + if self.output_col is None: + if isinstance(self.input_col, (list, tuple)): + data[self.input_col[0]] = res + for idx in sorted(self.input_col[1:], reverse=True): + del data[idx] + else: + data[self.input_col] = res + else: + if self.output_col == -1: + data.append(res) + else: + data[self.output_col] = res + + # Convert list back to tuple + return tuple(data) if t_flag else data + + def __iter__(self) -> Iterator[T_co]: + for data in self.datapipe: + yield self._apply_fn(data) + + def __len__(self) -> int: + if isinstance(self.datapipe, Sized): + return len(self.datapipe) + raise TypeError( + f"{type(self).__name__} instance doesn't have valid length" + ) + + +def _collate_helper(conversion, item): + # TODO(VitalyFedyunin): Verify that item is any sort of batch + if len(item.items) > 1: + # TODO(VitalyFedyunin): Compact all batch dataframes into one + raise Exception("Only supports one DataFrame per batch") + df = item[0] + columns_name = df_wrapper.get_columns(df) + tuple_names: List = [] + tuple_values: List = [] + + for name in conversion.keys(): + if name not in columns_name: + raise Exception("Conversion keys missmatch") + + for name in columns_name: + if name in conversion: + if not callable(conversion[name]): + raise Exception('Collate (DF)DataPipe requires callable as dict values') + collation_fn = conversion[name] + else: + # TODO(VitalyFedyunin): Add default collation into df_wrapper + try: + import torcharrow.pytorch 
as tap # type: ignore[import] + collation_fn = tap.rec.Default() + except Exception as e: + raise Exception("unable to import default collation function from the TorchArrow") from e + + tuple_names.append(str(name)) + value = collation_fn(df[name]) + tuple_values.append(value) + + # TODO(VitalyFedyunin): We can dynamically extract types from the tuple_values here + # TODO(VitalyFedyunin): Instead of ignoring mypy error, make sure tuple_names is not empty + tpl_cls = namedtuple("CollateResult", tuple_names) # type: ignore[misc] + tuple = tpl_cls(*tuple_values) + return tuple + + +@functional_datapipe("collate") +class CollatorIterDataPipe(MapperIterDataPipe): + r""" + Collates samples from DataPipe to Tensor(s) by a custom collate function (functional name: ``collate``). + + By default, it uses :func:`torch.utils.data.default_collate`. + + .. note:: + While writing a custom collate function, you can import :func:`torch.utils.data.default_collate` for the + default behavior and `functools.partial` to specify any additional arguments. + + Args: + datapipe: Iterable DataPipe being collated + collate_fn: Customized collate function to collect and combine data or a batch of data. + Default function collates to Tensor(s) based on data type. + + Example: + >>> # xdoctest: +SKIP + >>> # Convert integer data to float Tensor + >>> class MyIterDataPipe(torch.utils.data.IterDataPipe): + ... def __init__(self, start, end): + ... super(MyIterDataPipe).__init__() + ... assert end > start, "this example code only works with end >= start" + ... self.start = start + ... self.end = end + ... + ... def __iter__(self): + ... return iter(range(self.start, self.end)) + ... + ... def __len__(self): + ... return self.end - self.start + ... + >>> ds = MyIterDataPipe(start=3, end=7) + >>> print(list(ds)) + [3, 4, 5, 6] + >>> def collate_fn(batch): + ... return torch.tensor(batch, dtype=torch.float) + ... 
+ >>> collated_ds = CollateIterDataPipe(ds, collate_fn=collate_fn) + >>> print(list(collated_ds)) + [tensor(3.), tensor(4.), tensor(5.), tensor(6.)] + """ + + def __init__( + self, + datapipe: IterDataPipe, + conversion: Optional[ + Union[ + Callable[..., Any], + Dict[Union[str, Any], Union[Callable, Any]], + ] + ] = default_collate, + collate_fn: Optional[Callable] = None, + ) -> None: + # TODO(VitalyFedyunin): Replace `Callable[..., Any]` with `Callable[[IColumn], Any]` + # TODO(VitalyFedyunin): Replace with `Dict[Union[str, IColumn], Union[Callable, Enum]]` + if collate_fn is not None: + super().__init__(datapipe, fn=collate_fn) + else: + if callable(conversion): + super().__init__(datapipe, fn=conversion) + else: + # TODO(VitalyFedyunin): Validate passed dictionary + collate_fn = functools.partial(_collate_helper, conversion) + super().__init__(datapipe, fn=collate_fn) diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/combining.py b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/combining.py new file mode 100644 index 0000000000000000000000000000000000000000..9a4365516a33f3f9bc3a3877db09a78bc72a6289 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/combining.py @@ -0,0 +1,639 @@ +import warnings + +from abc import ABC, abstractmethod +from collections import deque +import copy as copymodule +from typing import Any, Callable, Iterator, List, Literal, Optional, Sized, Tuple, TypeVar, Deque + +from torch.utils.data.datapipes._decorator import functional_datapipe +from torch.utils.data.datapipes._hook_iterator import _SnapshotState +from torch.utils.data.datapipes.datapipe import IterDataPipe +from torch.utils.data.datapipes.utils.common import StreamWrapper, _check_unpickable_fn + +__all__ = [ + "ConcaterIterDataPipe", + "DemultiplexerIterDataPipe", + "ForkerIterDataPipe", + "MultiplexerIterDataPipe", + "ZipperIterDataPipe", +] + +T_co = TypeVar('T_co', 
covariant=True) + + +@functional_datapipe('concat') +class ConcaterIterDataPipe(IterDataPipe): + r""" + Concatenates multiple Iterable DataPipes (functional name: ``concat``). + + The resulting DataPipe will yield all the elements from the first input DataPipe, before yielding from the subsequent ones. + + Args: + datapipes: Iterable DataPipes being concatenated + + Example: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> import random + >>> from torchdata.datapipes.iter import IterableWrapper + >>> dp1 = IterableWrapper(range(3)) + >>> dp2 = IterableWrapper(range(5)) + >>> list(dp1.concat(dp2)) + [0, 1, 2, 0, 1, 2, 3, 4] + """ + + datapipes: Tuple[IterDataPipe] + + def __init__(self, *datapipes: IterDataPipe): + if len(datapipes) == 0: + raise ValueError("Expected at least one DataPipe, but got nothing") + if not all(isinstance(dp, IterDataPipe) for dp in datapipes): + raise TypeError("Expected all inputs to be `IterDataPipe`") + self.datapipes = datapipes # type: ignore[assignment] + + def __iter__(self) -> Iterator: + for dp in self.datapipes: + yield from dp + + def __len__(self) -> int: + if all(isinstance(dp, Sized) for dp in self.datapipes): + return sum(len(dp) for dp in self.datapipes) + else: + raise TypeError(f"{type(self).__name__} instance doesn't have valid length") + + +@functional_datapipe('fork') +class ForkerIterDataPipe(IterDataPipe): + r""" + Creates multiple instances of the same Iterable DataPipe (functional name: ``fork``). + + Args: + datapipe: Iterable DataPipe being copied + num_instances: number of instances of the datapipe to create + buffer_size: this restricts how far ahead the leading child DataPipe + can read relative to the slowest child DataPipe. + Defaults to ``1000``. Use ``-1`` for the unlimited buffer. + copy: copy strategy to use for items yielded by each branch. Supported + options are ``None`` for no copying, ``"shallow"`` for shallow object + copies, and ``"deep"`` for deep object copies. Defaults to ``None``. 
+ + Note: + All branches of the forked pipeline return the identical object unless + the copy parameter is supplied. If the object is mutable or contains + mutable objects, changing them in one branch will affect all others. + + Example: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> from torchdata.datapipes.iter import IterableWrapper + >>> source_dp = IterableWrapper(range(5)) + >>> dp1, dp2 = source_dp.fork(num_instances=2) + >>> list(dp1) + [0, 1, 2, 3, 4] + >>> list(dp2) + [0, 1, 2, 3, 4] + """ + + def __new__( + cls, + datapipe: IterDataPipe, + num_instances: int, + buffer_size: int = 1000, + copy: Optional[Literal["shallow", "deep"]] = None + ): + if num_instances < 1: + raise ValueError(f"Expected `num_instances` larger than 0, but {num_instances} is found") + if num_instances == 1: + return datapipe + container = _ForkerIterDataPipe(datapipe, num_instances, buffer_size, copy) # type: ignore[abstract] + return [_ChildDataPipe(container, i) for i in range(num_instances)] + + +class _ContainerTemplate(ABC): + r"""Abstract class for container ``DataPipes``. The followings are three required methods.""" + + @abstractmethod + def get_next_element_by_instance(self, instance_id: int): + ... + + @abstractmethod + def is_every_instance_exhausted(self) -> bool: + ... + + @abstractmethod + def reset(self) -> None: + ... + + @abstractmethod + def get_length_by_instance(self, instance_id: int): + r"""Raise TypeError if it's not supposed to be implemented to support `list(datapipe)`.""" + + +def _no_op(x): + return x + + +class _ForkerIterDataPipe(IterDataPipe, _ContainerTemplate): + r""" + Container to hold instance-specific information on behalf of ForkerIterDataPipe. + + It tracks the state of its child DataPipes, maintains the buffer, and yields the next value + as requested by the child DataPipes. 
+ """ + + def __init__( + self, + datapipe: IterDataPipe, + num_instances: int, + buffer_size: int = 1000, + copy: Optional[Literal["shallow", "deep"]] = None + ): + self.main_datapipe = datapipe + self._datapipe_iterator: Optional[Iterator[Any]] = None + self.num_instances = num_instances + self.buffer: Deque = deque() + self.buffer_size = buffer_size + if self.buffer_size < 0: + warnings.warn( + "Unlimited buffer size is set for `fork`, " + "please be aware of OOM at random places", + UserWarning + ) + if copy is None: + self.copy_fn = _no_op + elif copy == "shallow": + self.copy_fn = copymodule.copy + elif copy == "deep": + self.copy_fn = copymodule.deepcopy + else: + raise ValueError(f"Unknown copy method `{copy}` requested, choose one of None, `shallow` or `deep`.") + + self.child_pointers: List[int] = [0] * num_instances # Indicate the indices of the next element to get + self.slowest_ptr = 0 # The index to read by the slowest child + self.leading_ptr = 0 # The index to read by the fastest child + self.end_ptr: Optional[int] = None # The index to stop child + self._child_stop: List[bool] = [True for _ in range(num_instances)] + + def __len__(self): + return len(self.main_datapipe) + + def get_next_element_by_instance(self, instance_id: int): + if self._datapipe_iterator is None and self._child_stop[instance_id]: + self._datapipe_iterator = iter(self.main_datapipe) + self._snapshot_state = _SnapshotState.Iterating + for i in range(self.num_instances): + self._child_stop[i] = False + try: + while not self._child_stop[instance_id]: + self.child_pointers[instance_id] += 1 + if self.end_ptr is not None and self.child_pointers[instance_id] == self.end_ptr: + self._child_stop[instance_id] = True + break + # Use buffer + if self.buffer and self.child_pointers[instance_id] <= self.leading_ptr: + idx = self.child_pointers[instance_id] - self.slowest_ptr - 1 + return_val = self.buffer[idx] + else: # Retrieve one element from main datapipe + self.leading_ptr = 
self.child_pointers[instance_id] + try: + return_val = next(self._datapipe_iterator) # type: ignore[arg-type] + self.buffer.append(return_val) + except StopIteration: + self._child_stop[instance_id] = True + self._datapipe_iterator = None + self.end_ptr = self.leading_ptr + continue + if self.child_pointers[instance_id] == self.slowest_ptr + 1: + new_min = min(self.child_pointers) # Can optimize by avoiding the call to min() + if self.slowest_ptr < new_min: + self.slowest_ptr = new_min + self.buffer.popleft() + if self.buffer_size >= 0 and self.leading_ptr > self.buffer_size + self.slowest_ptr: + raise BufferError("ForkerIterDataPipe buffer overflow," + + f"buffer size {self.buffer_size} is insufficient.") + + yield self.copy_fn(return_val) # type: ignore[possibly-undefined] + finally: + self._child_stop[instance_id] = True + # Cleanup _datapipe_iterator for the case that fork exits earlier + if all(self._child_stop): + self._datapipe_iterator = None + self._cleanup() + + def is_every_instance_exhausted(self) -> bool: + return self.end_ptr is not None and all(self._child_stop) + + def get_length_by_instance(self, instance_id: int) -> int: + return len(self.main_datapipe) + + def reset(self) -> None: + self._datapipe_iterator = None + self.buffer = deque() + self.child_pointers = [0] * self.num_instances + self.slowest_ptr = 0 + self.leading_ptr = 0 + self.end_ptr = None + self._child_stop = [True for _ in range(self.num_instances)] + + def __getstate__(self): + state = ( + self.main_datapipe, + self.num_instances, + self.buffer_size, + self.copy_fn, + self._valid_iterator_id, + self._number_of_samples_yielded, + ) + if IterDataPipe.getstate_hook is not None: + return IterDataPipe.getstate_hook(state) + return state + + def __setstate__(self, state): + ( + self.main_datapipe, + self.num_instances, + self.buffer_size, + self.copy_fn, + self._valid_iterator_id, + self._number_of_samples_yielded, + ) = state + self._datapipe_iterator = None + self.buffer = deque() + 
self.child_pointers = [0] * self.num_instances + self.slowest_ptr = 0 + self.leading_ptr = 0 + self.end_ptr = None + self._child_stop = [True for _ in range(self.num_instances)] + + def _cleanup(self): + while self.buffer: + d = self.buffer.popleft() + StreamWrapper.close_streams(d) + + def __del__(self): + self._cleanup() + + +class _ChildDataPipe(IterDataPipe): + r""" + Iterable Datapipe that is a child of a main DataPipe. + + The instance of this class will pass its instance_id to get the next value from its main DataPipe. + + Note: + ChildDataPipe, like all other IterDataPipe, follows the single iterator per IterDataPipe constraint. + Since ChildDataPipes share a common buffer, when an iterator is created for one of the ChildDataPipes, + the previous iterators for all ChildDataPipes must be invalidated, with the exception when a ChildDataPipe + hasn't had an iterator created from it since the last invalidation. See the example below. + + Example: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> # Singler Iterator per IteraDataPipe Invalidation + >>> from torchdata.datapipes.iter import IterableWrapper + >>> source_dp = IterableWrapper(range(10)) + >>> cdp1, cdp2 = source_dp.fork(num_instances=2) + >>> it1, it2 = iter(cdp1), iter(cdp2) + >>> it3 = iter(cdp1) + >>> # The line above invalidates `it1` and `it2`, and resets `ForkerIterDataPipe`. + >>> it4 = iter(cdp2) + >>> # The line above doesn't invalidate `it3`, because an iterator for `cdp2` hasn't been created since + >>> # the last invalidation. 
+ + Args: + main_datapipe: Main DataPipe with a method 'get_next_element_by_instance(instance_id)' + instance_id: integer identifier of this instance + """ + + _is_child_datapipe: bool = True + + def __init__(self, main_datapipe: IterDataPipe, instance_id: int): + assert isinstance(main_datapipe, _ContainerTemplate) + + self.main_datapipe: IterDataPipe = main_datapipe + self.instance_id = instance_id + + def __iter__(self): + # Note that the logic behind setting iterator ID and `reset` are handled within `hook_iterator` + # We want to separate the code for reset and yield, so that 'reset' executes before __next__ is called + return self.main_datapipe.get_next_element_by_instance(self.instance_id) + + def __len__(self): + return self.main_datapipe.get_length_by_instance(self.instance_id) + + # This method is called by `hook_iterator` in `_typing.py`. + def _set_main_datapipe_valid_iterator_id(self) -> int: + r""" + Update the valid iterator ID for both this DataPipe object and `main_datapipe`. + + `main_datapipe.reset()` is called when the ID is incremented to a new generation. + """ + # 1. First time any child iterator is created + if self.main_datapipe._valid_iterator_id is None: + self.main_datapipe._valid_iterator_id = 0 # type: ignore[attr-defined] + # 2. This instance was already in the same generation as `main_datapipe`, + # we need to increment the ID further by 1 + elif self.main_datapipe._valid_iterator_id == self._valid_iterator_id: # type: ignore[has-type] + self.main_datapipe._valid_iterator_id += 1 # type: ignore[attr-defined] + # Whenever a new generation of iterator is created, the `main_datapipe` must reset + if not self.main_datapipe.is_every_instance_exhausted(): + warnings.warn("Some child DataPipes are not exhausted when __iter__ is called. We are resetting " + "the buffer and each child DataPipe will read from the start again.", UserWarning) + self.main_datapipe.reset() + # 3. 
Otherwise, the iterator is behind the others, so it will just need to catch up by setting + # the instance's iterator to match that of `main_datapipe` + self._valid_iterator_id = self.main_datapipe._valid_iterator_id + return self._valid_iterator_id + + # This method is called by `hook_iterator` in `_typing.py`. + def _check_valid_iterator_id(self, iterator_id) -> bool: + r"""Check the valid iterator ID against that of DataPipe object and that of `main_datapipe`.""" + return iterator_id == self._valid_iterator_id and iterator_id == self.main_datapipe._valid_iterator_id + + +@functional_datapipe('demux') +class DemultiplexerIterDataPipe(IterDataPipe): + r""" + Splits the input DataPipe into multiple child DataPipes, using the given classification function (functional name: ``demux``). + + A list of the child DataPipes is returned from this operation. + + Args: + datapipe: Iterable DataPipe being filtered + num_instances: number of instances of the DataPipe to create + classifier_fn: a function that maps values to an integer within the range ``[0, num_instances - 1]`` or ``None`` + drop_none: defaults to ``False``, if ``True``, the function will skip over elements classified as ``None`` + buffer_size: this defines the maximum number of inputs that the buffer can hold across all child + DataPipes while waiting for their values to be yielded. + Defaults to ``1000``. Use ``-1`` for the unlimited buffer. + + Examples: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> from torchdata.datapipes.iter import IterableWrapper + >>> def odd_or_even(n): + ... return n % 2 + >>> source_dp = IterableWrapper(range(5)) + >>> dp1, dp2 = source_dp.demux(num_instances=2, classifier_fn=odd_or_even) + >>> list(dp1) + [0, 2, 4] + >>> list(dp2) + [1, 3] + >>> # It can also filter out any element that gets `None` from the `classifier_fn` + >>> def odd_or_even_no_zero(n): + ... 
return n % 2 if n != 0 else None + >>> dp1, dp2 = source_dp.demux(num_instances=2, classifier_fn=odd_or_even_no_zero, drop_none=True) + >>> list(dp1) + [2, 4] + >>> list(dp2) + [1, 3] + """ + + def __new__(cls, datapipe: IterDataPipe, num_instances: int, + classifier_fn: Callable[[T_co], Optional[int]], drop_none: bool = False, buffer_size: int = 1000): + if num_instances < 1: + raise ValueError(f"Expected `num_instances` larger than 0, but {num_instances} is found") + + _check_unpickable_fn(classifier_fn) + + # When num_instances == 1, demux can be replaced by filter, + # but keep it as Demultiplexer for the sake of consistency + # like throwing Error when classification result is out of o range + container = _DemultiplexerIterDataPipe(datapipe, num_instances, classifier_fn, drop_none, buffer_size) # type: ignore[abstract] + return [_ChildDataPipe(container, i) for i in range(num_instances)] + + +class _DemultiplexerIterDataPipe(IterDataPipe, _ContainerTemplate): + r""" + Container to hold instance-specific information on behalf of DemultiplexerIterDataPipe. + + It tracks the state of its child DataPipes, maintains the buffer, classifies and yields the next correct value + as requested by the child DataPipes. 
+ """ + + def __init__(self, datapipe: IterDataPipe[T_co], num_instances: int, + classifier_fn: Callable[[T_co], Optional[int]], drop_none: bool, buffer_size: int): + self.main_datapipe = datapipe + self._datapipe_iterator: Optional[Iterator[Any]] = None + self.num_instances = num_instances + self.buffer_size = buffer_size + if self.buffer_size < 0: + warnings.warn( + "Unlimited buffer size is set for `demux`, " + "please be aware of OOM at random places", + UserWarning + ) + self.current_buffer_usage = 0 + self.child_buffers: List[Deque[T_co]] = [deque() for _ in range(num_instances)] + self.classifier_fn = classifier_fn + self.drop_none = drop_none + self.main_datapipe_exhausted = False + self._child_stop: List[bool] = [True for _ in range(num_instances)] + + def _find_next(self, instance_id: int) -> T_co: # type: ignore[type-var] + while True: + if self.main_datapipe_exhausted or self._child_stop[instance_id]: + raise StopIteration + if self._datapipe_iterator is None: + raise ValueError( + "_datapipe_iterator has not been set, likely because this private method is called directly " + "without invoking get_next_element_by_instance() first.") + value = next(self._datapipe_iterator) + classification = self.classifier_fn(value) + if classification is None and self.drop_none: + StreamWrapper.close_streams(value) + continue + if classification is None or classification >= self.num_instances or classification < 0: + raise ValueError(f"Output of the classification fn should be between 0 and {self.num_instances - 1}. 
" + + f"{classification} is returned.") + if classification == instance_id: + return value + self.child_buffers[classification].append(value) + self.current_buffer_usage += 1 + if self.buffer_size >= 0 and self.current_buffer_usage > self.buffer_size: + raise BufferError( + f"DemultiplexerIterDataPipe buffer overflow, buffer size {self.buffer_size} is insufficient.") + + def get_next_element_by_instance(self, instance_id: int): + if self._datapipe_iterator is None and self._child_stop[instance_id]: + self._datapipe_iterator = iter(self.main_datapipe) + self._snapshot_state = _SnapshotState.Iterating # This is necessary for the DataPipe to reset properly. + self.main_datapipe_exhausted = False + for i in range(self.num_instances): + self._child_stop[i] = False + + try: + while not self._child_stop[instance_id]: + if self.child_buffers[instance_id]: + self.current_buffer_usage -= 1 + yield self.child_buffers[instance_id].popleft() + else: + try: + yield self._find_next(instance_id) + except StopIteration: + self._child_stop[instance_id] = True + self.main_datapipe_exhausted = True + self._datapipe_iterator = None + finally: + self._child_stop[instance_id] = True + # Cleanup _datapipe_iterator for the case that demux exits earlier + if all(self._child_stop): + self._datapipe_iterator = None + if self.child_buffers[instance_id]: + self._cleanup(instance_id) + + def is_every_instance_exhausted(self) -> bool: + return self.main_datapipe_exhausted and all(self._child_stop) + + def get_length_by_instance(self, instance_id: int) -> int: + raise TypeError + + def reset(self) -> None: + self._datapipe_iterator = None + self.current_buffer_usage = 0 + self.child_buffers = [deque() for _ in range(self.num_instances)] + self._child_stop = [True for _ in range(self.num_instances)] + self.main_datapipe_exhausted = False + + def __getstate__(self): + state = ( + self.main_datapipe, + self.num_instances, + self.buffer_size, + self.classifier_fn, + self.drop_none, + 
self._valid_iterator_id, + self._number_of_samples_yielded, + ) + if IterDataPipe.getstate_hook is not None: + return IterDataPipe.getstate_hook(state) + return state + + def __setstate__(self, state): + ( + self.main_datapipe, + self.num_instances, + self.buffer_size, + self.classifier_fn, + self.drop_none, + self._valid_iterator_id, + self._number_of_samples_yielded, + ) = state + self._datapipe_iterator = None + self.current_buffer_usage = 0 + self.child_buffers = [deque() for _ in range(self.num_instances)] + self._child_stop = [True for _ in range(self.num_instances)] + self.main_datapipe_exhausted = False + + def _cleanup(self, instance_id: Optional[int] = None): + ids = range(self.num_instances) if instance_id is None else [instance_id, ] + for i in ids: + q = self.child_buffers[i] + while q: + d = q.popleft() + StreamWrapper.close_streams(d) + + + def __del__(self): + self._cleanup() + + +@functional_datapipe('mux') +class MultiplexerIterDataPipe(IterDataPipe): + r""" + Yields one element at a time from each of the input Iterable DataPipes (functional name: ``mux``). + + As in, one element from the 1st input DataPipe, then one element from the 2nd DataPipe in the next iteration, + and so on. It ends when the shortest input DataPipe is exhausted. 
+ + Args: + datapipes: Iterable DataPipes that will take turn to yield their elements, until the shortest DataPipe is exhausted + + Example: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> from torchdata.datapipes.iter import IterableWrapper + >>> dp1, dp2, dp3 = IterableWrapper(range(3)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25)) + >>> list(dp1.mux(dp2, dp3)) + [0, 10, 20, 1, 11, 21, 2, 12, 22] + """ + + def __init__(self, *datapipes): + self.datapipes = datapipes + self.buffer: List = [] # Store values to be yielded only when every iterator provides one + + def __iter__(self): + iterators = [iter(x) for x in self.datapipes] + while len(iterators): + for it in iterators: + try: + value = next(it) + self.buffer.append(value) + except StopIteration: + self.buffer.clear() + return + yield from self.buffer + self.buffer.clear() + + def __len__(self): + if all(isinstance(dp, Sized) for dp in self.datapipes): + return min(len(dp) for dp in self.datapipes) * len(self.datapipes) + else: + raise TypeError(f"{type(self).__name__} instance doesn't have valid length") + + def reset(self) -> None: + self.buffer = [] + + def __getstate__(self): + state = ( + self.datapipes, + self._valid_iterator_id, + self._number_of_samples_yielded, + ) + if IterDataPipe.getstate_hook is not None: + return IterDataPipe.getstate_hook(state) + return state + + def __setstate__(self, state): + ( + self.datapipes, + self._valid_iterator_id, + self._number_of_samples_yielded, + ) = state + self.buffer = [] + + def __del__(self): + self.buffer.clear() + + +@functional_datapipe('zip') +class ZipperIterDataPipe(IterDataPipe[Tuple[T_co]]): + r""" + Aggregates elements into a tuple from each of the input DataPipes (functional name: ``zip``). + + The output is stopped as soon as the shortest input DataPipe is exhausted. 
+ + Args: + *datapipes: Iterable DataPipes being aggregated + + Example: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> from torchdata.datapipes.iter import IterableWrapper + >>> dp1, dp2, dp3 = IterableWrapper(range(5)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25)) + >>> list(dp1.zip(dp2, dp3)) + [(0, 10, 20), (1, 11, 21), (2, 12, 22), (3, 13, 23), (4, 14, 24)] + """ + + datapipes: Tuple[IterDataPipe] + + def __init__(self, *datapipes: IterDataPipe): + if not all(isinstance(dp, IterDataPipe) for dp in datapipes): + raise TypeError("All inputs are required to be `IterDataPipe` " + "for `ZipIterDataPipe`.") + super().__init__() + self.datapipes = datapipes # type: ignore[assignment] + + def __iter__(self) -> Iterator[Tuple[T_co]]: + iterators = [iter(datapipe) for datapipe in self.datapipes] + yield from zip(*iterators) + + def __len__(self) -> int: + if all(isinstance(dp, Sized) for dp in self.datapipes): + return min(len(dp) for dp in self.datapipes) + else: + raise TypeError(f"{type(self).__name__} instance doesn't have valid length") diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/fileopener.py b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/fileopener.py new file mode 100644 index 0000000000000000000000000000000000000000..67e9797fe3356f9d0756de492eee3ca618f43fd3 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/fileopener.py @@ -0,0 +1,71 @@ +from io import IOBase +from typing import Iterable, Tuple, Optional + +from torch.utils.data.datapipes._decorator import functional_datapipe +from torch.utils.data.datapipes.datapipe import IterDataPipe +from torch.utils.data.datapipes.utils.common import get_file_binaries_from_pathnames + +__all__ = [ + "FileOpenerIterDataPipe", +] + + +@functional_datapipe("open_files") +class FileOpenerIterDataPipe(IterDataPipe[Tuple[str, IOBase]]): + r""" + Given pathnames, opens files and yield pathname and file 
stream in a tuple (functional name: ``open_files``). + + Args: + datapipe: Iterable datapipe that provides pathnames + mode: An optional string that specifies the mode in which + the file is opened by ``open()``. It defaults to ``r``, other options are + ``b`` for reading in binary mode and ``t`` for text mode. + encoding: An optional string that specifies the encoding of the + underlying file. It defaults to ``None`` to match the default encoding of ``open``. + length: Nominal length of the datapipe + + Note: + The opened file handles will be closed by Python's GC periodically. Users can choose + to close them explicitly. + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import FileLister, FileOpener, StreamReader + >>> dp = FileLister(root=".").filter(lambda fname: fname.endswith('.txt')) + >>> dp = FileOpener(dp) + >>> dp = StreamReader(dp) + >>> list(dp) + [('./abc.txt', 'abc')] + """ + + def __init__( + self, + datapipe: Iterable[str], + mode: str = 'r', + encoding: Optional[str] = None, + length: int = -1): + super().__init__() + self.datapipe: Iterable = datapipe + self.mode: str = mode + self.encoding: Optional[str] = encoding + + if self.mode not in ('b', 't', 'rb', 'rt', 'r'): + raise ValueError(f"Invalid mode {mode}") + # TODO: enforce typing for each instance based on mode, otherwise + # `argument_validation` with this DataPipe may be potentially broken + + if 'b' in mode and encoding is not None: + raise ValueError("binary mode doesn't take an encoding argument") + + self.length: int = length + + # Remove annotation due to 'IOBase' is a general type and true type + # is determined at runtime based on mode. Some `DataPipe` requiring + # a subtype would cause mypy error. 
+ def __iter__(self): + yield from get_file_binaries_from_pathnames(self.datapipe, self.mode, self.encoding) + + def __len__(self): + if self.length == -1: + raise TypeError(f"{type(self).__name__} instance doesn't have valid length") + return self.length diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/routeddecoder.py b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/routeddecoder.py new file mode 100644 index 0000000000000000000000000000000000000000..f5f1878365538362d8f870e4119798601d0d1173 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/routeddecoder.py @@ -0,0 +1,66 @@ +from io import BufferedIOBase +from typing import Any, Callable, Iterable, Iterator, Sized, Tuple + +from torch.utils.data.datapipes._decorator import functional_datapipe +from torch.utils.data.datapipes.datapipe import IterDataPipe +from torch.utils.data.datapipes.utils.common import _deprecation_warning +from torch.utils.data.datapipes.utils.decoder import ( + Decoder, + basichandlers as decoder_basichandlers, + imagehandler as decoder_imagehandler, + extension_extract_fn +) + +__all__ = ["RoutedDecoderIterDataPipe", ] + + +@functional_datapipe('routed_decode') +class RoutedDecoderIterDataPipe(IterDataPipe[Tuple[str, Any]]): + r""" + Decodes binary streams from input DataPipe, yields pathname and decoded data in a tuple. + + (functional name: ``routed_decode``) + + Args: + datapipe: Iterable datapipe that provides pathname and binary stream in tuples + handlers: Optional user defined decoder handlers. If ``None``, basic and image decoder + handlers will be set as default. If multiple handles are provided, the priority + order follows the order of handlers (the first handler has the top priority) + key_fn: Function for decoder to extract key from pathname to dispatch handlers. 
+ Default is set to extract file extension from pathname + + Note: + When ``key_fn`` is specified returning anything other than extension, the default + handler will not work and users need to specify custom handler. Custom handler + could use regex to determine the eligibility to handle data. + """ + + def __init__(self, + datapipe: Iterable[Tuple[str, BufferedIOBase]], + *handlers: Callable, + key_fn: Callable = extension_extract_fn) -> None: + super().__init__() + self.datapipe: Iterable[Tuple[str, BufferedIOBase]] = datapipe + if not handlers: + handlers = (decoder_basichandlers, decoder_imagehandler('torch')) + self.decoder = Decoder(*handlers, key_fn=key_fn) + _deprecation_warning( + type(self).__name__, + deprecation_version="1.12", + removal_version="1.13", + old_functional_name="routed_decode", + ) + + def add_handler(self, *handler: Callable) -> None: + self.decoder.add_handler(*handler) + + def __iter__(self) -> Iterator[Tuple[str, Any]]: + for data in self.datapipe: + pathname = data[0] + result = self.decoder(data) + yield (pathname, result[pathname]) + + def __len__(self) -> int: + if isinstance(self.datapipe, Sized): + return len(self.datapipe) + raise TypeError(f"{type(self).__name__} instance doesn't have valid length") diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/selecting.py b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/selecting.py new file mode 100644 index 0000000000000000000000000000000000000000..fee74582e61bd613a60bf5eac7c7f5c3f60ca91f --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/selecting.py @@ -0,0 +1,96 @@ +from typing import Callable, Iterator, Tuple, TypeVar + +from torch.utils.data.datapipes._decorator import functional_datapipe +from torch.utils.data.datapipes.datapipe import IterDataPipe +from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper +from torch.utils.data.datapipes.utils.common import ( + 
_check_unpickable_fn, + StreamWrapper, + validate_input_col +) + + +__all__ = ["FilterIterDataPipe", ] + +T = TypeVar('T') +T_co = TypeVar('T_co', covariant=True) + + +@functional_datapipe('filter') +class FilterIterDataPipe(IterDataPipe[T_co]): + r""" + Filters out elements from the source datapipe according to input ``filter_fn`` (functional name: ``filter``). + + Args: + datapipe: Iterable DataPipe being filtered + filter_fn: Customized function mapping an element to a boolean. + input_col: Index or indices of data which ``filter_fn`` is applied, such as: + + - ``None`` as default to apply ``filter_fn`` to the data directly. + - Integer(s) is used for list/tuple. + - Key(s) is used for dict. + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper + >>> def is_even(n): + ... return n % 2 == 0 + >>> dp = IterableWrapper(range(5)) + >>> filter_dp = dp.filter(filter_fn=is_even) + >>> list(filter_dp) + [0, 2, 4] + """ + + datapipe: IterDataPipe[T_co] + filter_fn: Callable + + def __init__( + self, + datapipe: IterDataPipe[T_co], + filter_fn: Callable, + input_col=None, + ) -> None: + super().__init__() + self.datapipe = datapipe + + _check_unpickable_fn(filter_fn) + self.filter_fn = filter_fn # type: ignore[assignment] + + self.input_col = input_col + validate_input_col(filter_fn, input_col) + + def _apply_filter_fn(self, data) -> bool: + if self.input_col is None: + return self.filter_fn(data) + elif isinstance(self.input_col, (list, tuple)): + args = tuple(data[col] for col in self.input_col) + return self.filter_fn(*args) + else: + return self.filter_fn(data[self.input_col]) + + def __iter__(self) -> Iterator[T_co]: + for data in self.datapipe: + condition, filtered = self._returnIfTrue(data) + if condition: + yield filtered + else: + StreamWrapper.close_streams(data) + + def _returnIfTrue(self, data: T) -> Tuple[bool, T]: + condition = self._apply_filter_fn(data) + + if df_wrapper.is_column(condition): + # We are operating 
from typing import (
    Dict,
    Sized,
    Tuple,
)

from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import IterDataPipe
from enum import IntEnum

__all__ = [
    "SHARDING_PRIORITIES",
    "ShardingFilterIterDataPipe",
]


class SHARDING_PRIORITIES(IntEnum):
    # Priority keys under which sharding layouts can be registered.
    DEFAULT = 1
    DISTRIBUTED = 2
    MULTIPROCESSING = 3


class _ShardingIterDataPipe(IterDataPipe):
    # Marker base class: DataPipes that understand ``apply_sharding``.
    def apply_sharding(self, num_of_instances: int, instance_id: int, sharding_group: SHARDING_PRIORITIES):
        raise NotImplementedError


@functional_datapipe('sharding_filter')
class ShardingFilterIterDataPipe(_ShardingIterDataPipe):
    r"""
    Wrapper that allows DataPipe to be sharded (functional name: ``sharding_filter``).

    Once ``apply_sharding`` has been called, each instance of this DataPipe
    (e.g. on different workers) yields every `n`-th element of the source
    DataPipe, where `n` equals the total number of instances.

    Args:
        source_datapipe: Iterable DataPipe that will be sharded
        sharding_group_filter: if given, only registrations for this sharding
            group contribute to the computed shard layout
    """

    def __init__(self, source_datapipe: IterDataPipe, sharding_group_filter=None):
        self.source_datapipe = source_datapipe
        self.sharding_group_filter = sharding_group_filter
        # sharding group -> (num_of_instances, instance_id)
        self.groups: Dict[int, Tuple[int, int]] = {}
        self.num_of_instances = 1
        self.instance_id = 0
        self._update_num_of_instances()

    def apply_sharding(self, num_of_instances, instance_id, sharding_group=SHARDING_PRIORITIES.DEFAULT):
        if instance_id >= num_of_instances:
            raise ValueError(f"instance_id({instance_id}) should be smaller than num_of_instances({num_of_instances})")
        # DEFAULT cannot be combined with named groups (nor the reverse).
        if sharding_group == SHARDING_PRIORITIES.DEFAULT:
            if len(self.groups) and SHARDING_PRIORITIES.DEFAULT not in self.groups:
                raise Exception('ShardingFilter cannot mix DEFAULT and non DEFAULT groups')
        else:
            if SHARDING_PRIORITIES.DEFAULT in self.groups:
                raise Exception('ShardingFilter cannot mix DEFAULT and non DEFAULT groups')
        self.groups[sharding_group] = (num_of_instances, instance_id)
        self._update_num_of_instances()

    def _update_num_of_instances(self):
        # Keep only the groups that pass the filter, ordered by priority key.
        selected = [
            self.groups[key]
            for key in sorted(self.groups)
            if self.sharding_group_filter is None or key == self.sharding_group_filter
        ]

        # Fold the groups, highest priority first, into a single
        # (num_of_instances, instance_id) pair (mixed-radix combination).
        self.num_of_instances = 1
        self.instance_id = 0
        for group_instances, group_id in reversed(selected):
            self.instance_id += self.num_of_instances * group_id
            self.num_of_instances *= group_instances

    def __iter__(self):
        for position, element in enumerate(self.source_datapipe):
            if position % self.num_of_instances == self.instance_id:
                yield element

    def __len__(self):
        if isinstance(self.source_datapipe, Sized):
            total = len(self.source_datapipe)
            extra = 1 if (self.instance_id < total % self.num_of_instances) else 0
            return total // self.num_of_instances + extra
        raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
+ ) + yield from source_data + + def __len__(self): + return len(self.iterable) diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/map/combinatorics.py b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/map/combinatorics.py new file mode 100644 index 0000000000000000000000000000000000000000..c21d532d4925d59296d2f111c55a6755b4ae9101 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/map/combinatorics.py @@ -0,0 +1,126 @@ +import random + +import torch +from torch.utils.data.datapipes.datapipe import IterDataPipe, MapDataPipe +from typing import Iterator, List, Optional, TypeVar + +__all__ = ["ShufflerIterDataPipe", ] + + +T_co = TypeVar('T_co', covariant=True) + + +# @functional_datapipe('shuffle') +class ShufflerIterDataPipe(IterDataPipe[T_co]): + r""" + Shuffle the input MapDataPipe via its indices (functional name: ``shuffle``). + + When it is used with :class:`~torch.utils.data.DataLoader`, the methods to + set up random seed are different based on :attr:`num_workers`. + + For single-process mode (:attr:`num_workers == 0`), the random seed is set before + the :class:`~torch.utils.data.DataLoader` in the main process. For multi-process + mode (:attr:`num_worker > 0`), ``worker_init_fn`` is used to set up a random seed + for each worker process. + + Args: + datapipe: MapDataPipe being shuffled + indices: a list of indices of the MapDataPipe. 
import random

import torch
from torch.utils.data.datapipes.datapipe import IterDataPipe, MapDataPipe
from typing import Iterator, List, Optional, TypeVar

__all__ = ["ShufflerIterDataPipe", ]


T_co = TypeVar('T_co', covariant=True)


# @functional_datapipe('shuffle')
class ShufflerIterDataPipe(IterDataPipe[T_co]):
    r"""
    Shuffle the input MapDataPipe via its indices (functional name: ``shuffle``).

    With :class:`~torch.utils.data.DataLoader`, seed the shuffle before the
    DataLoader in the main process when :attr:`num_workers == 0`, or via
    ``worker_init_fn`` when :attr:`num_workers > 0`.

    Args:
        datapipe: MapDataPipe being shuffled
        indices: a list of indices of the MapDataPipe. If not provided,
            0-based indexing is assumed.

    Note:
        Although the input is a ``MapDataPipe``, the result is an
        ``IterDataPipe``: a ``MapDataPipe`` is insensitive to element order
        (random reads), while an ``IterDataPipe`` depends on the order of
        data during data-processing.
    """

    datapipe: MapDataPipe[T_co]
    _enabled: bool
    _seed: Optional[int]
    _rng: random.Random

    def __init__(self,
                 datapipe: MapDataPipe[T_co],
                 *,
                 indices: Optional[List] = None,
                 ) -> None:
        super().__init__()
        self.datapipe = datapipe
        self.indices = list(range(len(datapipe))) if indices is None else indices
        self._enabled = True
        self._seed = None
        self._rng = random.Random()
        # Unshuffled until the first `reset` replaces it with a permutation.
        self._shuffled_indices: List = self.indices

    def set_shuffle(self, shuffle=True):
        self._enabled = shuffle
        return self

    def set_seed(self, seed: int):
        self._seed = seed
        return self

    def __iter__(self) -> Iterator[T_co]:
        if not self._enabled:
            for index in self.indices:
                yield self.datapipe[index]
        else:
            # Drain from the tail so a partially consumed iterator resumes
            # where it left off.
            while self._shuffled_indices:
                index = self._shuffled_indices.pop()
                yield self.datapipe[index]

    def reset(self) -> None:
        if self._enabled and self._seed is None:
            # Draw a fresh seed from torch's RNG so workers diverge correctly.
            self._seed = int(torch.empty((), dtype=torch.int64).random_().item())
        self._rng.seed(self._seed)
        self._seed = None
        self._shuffled_indices = self._rng.sample(self.indices, len(self.indices))

    def __len__(self) -> int:
        return len(self.datapipe)

    def __getstate__(self):
        # NOTE: tuple order is the serialization format — do not reorder.
        state = (
            self.datapipe,
            self.indices,
            self._enabled,
            self._seed,
            self._rng.getstate(),
            self._shuffled_indices,
            self._valid_iterator_id,
            self._number_of_samples_yielded,
        )
        if IterDataPipe.getstate_hook is not None:
            return IterDataPipe.getstate_hook(state)
        return state

    def __setstate__(self, state):
        (
            self.datapipe,
            self.indices,
            self._enabled,
            self._seed,
            rng_state,
            self._shuffled_indices,
            self._valid_iterator_id,
            self._number_of_samples_yielded,
        ) = state
        self._rng = random.Random()
        self._rng.setstate(rng_state)


MapDataPipe.register_datapipe_as_function("shuffle", ShufflerIterDataPipe)
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import MapDataPipe
from typing import Sized, Tuple, TypeVar

__all__ = ["ConcaterMapDataPipe", "ZipperMapDataPipe"]

T_co = TypeVar('T_co', covariant=True)


@functional_datapipe('concat')
class ConcaterMapDataPipe(MapDataPipe):
    r"""
    Concatenate multiple Map DataPipes (functional name: ``concat``).

    Indices accumulate across the sources: with two source DataPipes of
    length 5 each, indices 0 to 4 read from the first DataPipe and indices
    5 to 9 read from the second.

    Args:
        datapipes: Map DataPipes being concatenated

    Example:
        >>> # xdoctest: +SKIP
        >>> from torchdata.datapipes.map import SequenceWrapper
        >>> dp1 = SequenceWrapper(range(3))
        >>> dp2 = SequenceWrapper(range(3))
        >>> concat_dp = dp1.concat(dp2)
        >>> list(concat_dp)
        [0, 1, 2, 0, 1, 2]
    """

    datapipes: Tuple[MapDataPipe]

    def __init__(self, *datapipes: MapDataPipe):
        if len(datapipes) == 0:
            raise ValueError("Expected at least one DataPipe, but got nothing")
        if not all(isinstance(dp, MapDataPipe) for dp in datapipes):
            raise TypeError("Expected all inputs to be `MapDataPipe`")
        if not all(isinstance(dp, Sized) for dp in datapipes):
            raise TypeError("Expected all inputs to be `Sized`")
        self.datapipes = datapipes  # type: ignore[assignment]

    def __getitem__(self, index) -> T_co:  # type: ignore[type-var]
        # Walk the sources, shifting the index down by each consumed length.
        remaining = index
        for source in self.datapipes:
            if remaining < len(source):
                return source[remaining]
            remaining -= len(source)
        raise IndexError(f"Index {index} is out of range.")

    def __len__(self) -> int:
        return sum(len(dp) for dp in self.datapipes)


@functional_datapipe('zip')
class ZipperMapDataPipe(MapDataPipe[Tuple[T_co, ...]]):
    r"""
    Aggregates elements into a tuple from each of the input DataPipes (functional name: ``zip``).

    A lookup fails as soon as ``index`` is out of bounds for any input, so
    the effective length is that of the shortest input DataPipe.

    Args:
        *datapipes: Map DataPipes being aggregated

    Example:
        >>> # xdoctest: +SKIP
        >>> from torchdata.datapipes.map import SequenceWrapper
        >>> dp1 = SequenceWrapper(range(3))
        >>> dp2 = SequenceWrapper(range(10, 13))
        >>> zip_dp = dp1.zip(dp2)
        >>> list(zip_dp)
        [(0, 10), (1, 11), (2, 12)]
    """

    datapipes: Tuple[MapDataPipe[T_co], ...]

    def __init__(self, *datapipes: MapDataPipe[T_co]) -> None:
        if len(datapipes) == 0:
            raise ValueError("Expected at least one DataPipe, but got nothing")
        if not all(isinstance(dp, MapDataPipe) for dp in datapipes):
            raise TypeError("Expected all inputs to be `MapDataPipe`")
        if not all(isinstance(dp, Sized) for dp in datapipes):
            raise TypeError("Expected all inputs to be `Sized`")
        self.datapipes = datapipes

    def __getitem__(self, index) -> Tuple[T_co, ...]:
        values = []
        for dp in self.datapipes:
            try:
                values.append(dp[index])
            except IndexError as e:
                raise IndexError(f"Index {index} is out of range for one of the input MapDataPipes {dp}.") from e
        return tuple(values)

    def __len__(self) -> int:
        return min(len(dp) for dp in self.datapipes)
b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/decoder.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/decoder.py b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..0211a8fe4ba462a768d41e95f2a00c4084aec7df --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/decoder.py @@ -0,0 +1,330 @@ +# This file takes partial of the implementation from NVIDIA's webdataset at here: +# https://github.com/tmbdev/webdataset/blob/master/webdataset/autodecode.py + +import io +import json +import os.path +import pickle +import tempfile + +import torch +from torch.utils.data.datapipes.utils.common import StreamWrapper + + +__all__ = [ + "Decoder", + "ImageHandler", + "MatHandler", + "audiohandler", + "basichandlers", + "extension_extract_fn", + "handle_extension", + "imagehandler", + "mathandler", + "videohandler", +] + + +################################################################ +# handle basic datatypes +################################################################ +def basichandlers(extension, data): + + if extension in "txt text transcript": + return data.decode("utf-8") + + if extension in "cls cls2 class count index inx id".split(): + try: + return int(data) + except ValueError: + return None + + if extension in "json jsn": + return json.loads(data) + + if extension in "pyd pickle".split(): + return pickle.loads(data) + + if extension in "pt".split(): + stream = io.BytesIO(data) + return torch.load(stream) + + # if extension in "ten tb".split(): + # from . 
import tenbin + # return tenbin.decode_buffer(data) + + # if extension in "mp msgpack msg".split(): + # import msgpack + # return msgpack.unpackb(data) + + return None + + +################################################################ +# handle images +################################################################ +imagespecs = { + "l8": ("numpy", "uint8", "l"), + "rgb8": ("numpy", "uint8", "rgb"), + "rgba8": ("numpy", "uint8", "rgba"), + "l": ("numpy", "float", "l"), + "rgb": ("numpy", "float", "rgb"), + "rgba": ("numpy", "float", "rgba"), + "torchl8": ("torch", "uint8", "l"), + "torchrgb8": ("torch", "uint8", "rgb"), + "torchrgba8": ("torch", "uint8", "rgba"), + "torchl": ("torch", "float", "l"), + "torchrgb": ("torch", "float", "rgb"), + "torch": ("torch", "float", "rgb"), + "torchrgba": ("torch", "float", "rgba"), + "pill": ("pil", None, "l"), + "pil": ("pil", None, "rgb"), + "pilrgb": ("pil", None, "rgb"), + "pilrgba": ("pil", None, "rgba"), +} + +def handle_extension(extensions, f): + """ + Return a decoder handler function for the list of extensions. + + Extensions can be a space separated list of extensions. + Extensions can contain dots, in which case the corresponding number + of extension components must be present in the key given to f. + Comparisons are case insensitive. + Examples: + handle_extension("jpg jpeg", my_decode_jpg) # invoked for any file.jpg + handle_extension("seg.jpg", special_case_jpg) # invoked only for file.seg.jpg + """ + extensions = extensions.lower().split() + + def g(key, data): + extension = key.lower().split(".") + + for target in extensions: + target = target.split(".") + if len(target) > len(extension): + continue + + if extension[-len(target):] == target: + return f(data) + return None + return g + + +class ImageHandler: + """ + Decode image data using the given `imagespec`. 
class ImageHandler:
    """
    Decode image data using the given `imagespec`.

    The `imagespec` selects the output container (numpy / torch / pil),
    the element type (uint8 / float), and the color mode (l / rgb / rgba),
    as listed in the ``imagespecs`` table (e.g. ``"rgb8"`` -> numpy uint8 rgb,
    ``"torchrgb"`` -> torch float rgb, ``"pil"`` -> PIL rgb image).
    """

    def __init__(self, imagespec):
        assert imagespec in list(imagespecs.keys()), f"unknown image specification: {imagespec}"
        self.imagespec = imagespec.lower()

    def __call__(self, extension, data):
        # Only raster image extensions are handled; everything else is
        # passed over so another handler can claim it.
        if extension.lower() not in "jpg jpeg png ppm pgm pbm pnm".split():
            return None

        try:
            import numpy as np
        except ImportError as e:
            raise ModuleNotFoundError("Package `numpy` is required to be installed for default image decoder."
                                      "Please use `pip install numpy` to install the package") from e

        try:
            import PIL.Image
        except ImportError as e:
            raise ModuleNotFoundError("Package `PIL` is required to be installed for default image decoder."
                                      "Please use `pip install Pillow` to install the package") from e

        atype, etype, mode = imagespecs[self.imagespec]

        with io.BytesIO(data) as stream:
            img = PIL.Image.open(stream)
            img.load()
            img = img.convert(mode.upper())

            if atype == "pil":
                return img

            if atype == "numpy":
                arr = np.asarray(img)
                assert arr.dtype == np.uint8, f"numpy image array should be type uint8, but got {arr.dtype}"
                if etype == "uint8":
                    return arr
                return arr.astype("f") / 255.0

            if atype == "torch":
                arr = np.asarray(img)
                assert arr.dtype == np.uint8, f"numpy image array should be type uint8, but got {arr.dtype}"
                # HWC -> CHW for torch consumers.
                chw = np.array(arr.transpose(2, 0, 1))
                if etype == "uint8":
                    return torch.tensor(chw)
                return torch.tensor(chw) / 255.0

            return None

def imagehandler(imagespec):
    """Factory returning an ImageHandler for the given imagespec."""
    return ImageHandler(imagespec)
+ "Please use `pip install torchvision` or `conda install torchvision -c pytorch`" + "to install the package") from e + + with tempfile.TemporaryDirectory() as dirname: + fname = os.path.join(dirname, f"file.{extension}") + with open(fname, "wb") as stream: + stream.write(data) + return torchvision.io.read_video(fname) + + +################################################################ +# torchaudio +################################################################ +def audiohandler(extension, data): + if extension not in ["flac", "mp3", "sox", "wav", "m4a", "ogg", "wma"]: + return None + + try: + import torchaudio # type: ignore[import] + except ImportError as e: + raise ModuleNotFoundError("Package `torchaudio` is required to be installed for default audio file loader." + "Please use `pip install torchaudio` or `conda install torchaudio -c pytorch`" + "to install the package") from e + + with tempfile.TemporaryDirectory() as dirname: + fname = os.path.join(dirname, f"file.{extension}") + with open(fname, "wb") as stream: + stream.write(data) + return torchaudio.load(fname) + + +################################################################ +# mat +################################################################ +class MatHandler: + def __init__(self, **loadmat_kwargs) -> None: + try: + import scipy.io as sio + except ImportError as e: + raise ModuleNotFoundError("Package `scipy` is required to be installed for mat file." 
################################################################
# mat
################################################################
class MatHandler:
    """Decode ``.mat`` payloads through ``scipy.io.loadmat``."""

    def __init__(self, **loadmat_kwargs) -> None:
        # scipy is imported lazily so the rest of the decoders work without it.
        try:
            import scipy.io as sio
        except ImportError as e:
            raise ModuleNotFoundError("Package `scipy` is required to be installed for mat file."
                                      "Please use `pip install scipy` or `conda install scipy`"
                                      "to install the package") from e
        self.sio = sio
        self.loadmat_kwargs = loadmat_kwargs

    def __call__(self, extension, data):
        if extension != 'mat':
            return None
        with io.BytesIO(data) as stream:
            return self.sio.loadmat(stream, **self.loadmat_kwargs)

def mathandler(**loadmat_kwargs):
    """Factory returning a MatHandler with the given loadmat options."""
    return MatHandler(**loadmat_kwargs)


################################################################
# a sample decoder
################################################################
def extension_extract_fn(pathname):
    """Return the final extension of ``pathname`` without the leading dot ('' if none)."""
    ext = os.path.splitext(pathname)[1]
    return ext[1:] if ext else ext


class Decoder:
    """
    Decode key/data sets using a list of handlers.

    For each key/data item, the handlers are tried in order until one of
    them returns something other than None.
    """

    def __init__(self, *handler, key_fn=extension_extract_fn):
        self.handlers = list(handler) if handler else []
        self.key_fn = key_fn

    def add_handler(self, *handler):
        """Prepend handlers so the newly added ones take highest priority."""
        if not handler:
            return
        self.handlers = list(handler) + self.handlers

    @staticmethod
    def _is_stream_handle(data):
        obj_to_check = data.file_obj if isinstance(data, StreamWrapper) else data
        return isinstance(obj_to_check, (io.BufferedIOBase, io.RawIOBase))

    def decode1(self, key, data):
        """Decode one item; returns the raw data when no handler matches (or data is falsy)."""
        if not data:
            return data

        # Stream handles must be fully read before decoding; ``join`` is used
        # because ``.read`` behaves differently across streams (e.g. HTTPResponse).
        if Decoder._is_stream_handle(data):
            ds = data
            data = b"".join(data)
            ds.close()

        for handler in self.handlers:
            decoded = handler(key, data)
            if decoded is not None:
                return decoded
        return data

    def decode(self, data):
        """Decode a (pathname, data) tuple or an iterable of them into a dict."""
        result = {}
        # single data tuple(pathname, data stream)
        if isinstance(data, tuple):
            data = [data]

        if data is not None:
            for k, v in data:
                # TODO: xinyu, figure out why Nvidia do this?
                if k[0] == "_":
                    # Metadata keys: keep as text, never run handlers.
                    if isinstance(v, bytes):
                        v = v.decode("utf-8")
                    result[k] = v
                    continue
                result[k] = self.decode1(self.key_fn(k), v)
        return result

    def __call__(self, data):
        return self.decode(data)
+ + A DataPipe cannot be restored twice in a row unless there is an iteration started between the restoration + attempts. + + Note: + This is the simplest but least efficient way to fast-forward a DataPipe. Usage of other fast-forwarding + methods (custom ones if necessary) are recommended. + + Args: + datapipe: IterDataPipe to be fast-forwarded + n_iterations: number of iterations to fast-forward + rng: ``Optional[torch.Generator]``. If not ``None``, this RNG will be used for shuffling. The generator + should be in its `initial` state as it was first passed into ``DataLoader`` or ``ReadingService``. + """ + if datapipe._snapshot_state == _SnapshotState.Restored: + raise RuntimeError( + "Snapshot restoration cannot be applied. You can only restore simple snapshot to the graph " + "if your graph has not been restored.") + + # For this snapshot restoration function, we want the DataPipe to be at its initial state prior to + # simple fast-forwarding. Therefore, we need to call `reset` twice, because if `SnapshotState` is `Restored`, + # the first reset will not actually reset. + datapipe.reset() # This ensures `SnapshotState` is `Iterating` by this point, even if it was `Restored`. + apply_random_seed(datapipe, rng) + + remainder = n_iterations + it = iter(datapipe) # This always reset the DataPipe if it hasn't already. + while remainder > 0: + try: + next(it) + remainder -= 1 + except StopIteration as e: + raise RuntimeError(f"Fast-forward {datapipe} by {n_iterations} iterations " + "exceeds the number of samples available.") from e + datapipe._fast_forward_iterator = it + # While the DataPipe has `_fast_forward_iterator`, `next()` will get result from there instead of elsewhere. 
+ + # This will prevent the DataPipe from resetting in the `iter()` call + # If another DataPipe is consuming it, it won't have to start over again + datapipe._snapshot_state = _SnapshotState.Restored diff --git a/moondream/lib/python3.10/site-packages/torch/utils/data/dataset.py b/moondream/lib/python3.10/site-packages/torch/utils/data/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..554bf90d108bdd4e76e1e0e001be960dc9b41255 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/utils/data/dataset.py @@ -0,0 +1,488 @@ +import bisect +import itertools +import math +import warnings +from typing import ( + cast, + Dict, + Generic, + Iterable, + List, + Optional, + Sequence, + Tuple, + TypeVar, + Union, +) + +# No 'default_generator' in torch/__init__.pyi +from torch import default_generator, randperm + +from ... import Generator, Tensor + +__all__ = [ + "Dataset", + "IterableDataset", + "TensorDataset", + "StackDataset", + "ConcatDataset", + "ChainDataset", + "Subset", + "random_split", +] + +T_co = TypeVar("T_co", covariant=True) +T = TypeVar("T") +T_dict = Dict[str, T_co] +T_tuple = Tuple[T_co, ...] +T_stack = TypeVar("T_stack", T_tuple, T_dict) + + +class Dataset(Generic[T_co]): + r"""An abstract class representing a :class:`Dataset`. + + All datasets that represent a map from keys to data samples should subclass + it. All subclasses should overwrite :meth:`__getitem__`, supporting fetching a + data sample for a given key. Subclasses could also optionally overwrite + :meth:`__len__`, which is expected to return the size of the dataset by many + :class:`~torch.utils.data.Sampler` implementations and the default options + of :class:`~torch.utils.data.DataLoader`. Subclasses could also + optionally implement :meth:`__getitems__`, for speedup batched samples + loading. This method accepts list of indices of samples of batch and returns + list of samples. + + .. 
note:: + :class:`~torch.utils.data.DataLoader` by default constructs an index + sampler that yields integral indices. To make it work with a map-style + dataset with non-integral indices/keys, a custom sampler must be provided. + """ + + def __getitem__(self, index) -> T_co: + raise NotImplementedError("Subclasses of Dataset should implement __getitem__.") + + # def __getitems__(self, indices: List) -> List[T_co]: + # Not implemented to prevent false-positives in fetcher check in + # torch.utils.data._utils.fetch._MapDatasetFetcher + + def __add__(self, other: "Dataset[T_co]") -> "ConcatDataset[T_co]": + return ConcatDataset([self, other]) + + # No `def __len__(self)` default? + # See NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ] + # in pytorch/torch/utils/data/sampler.py + + +class IterableDataset(Dataset[T_co], Iterable[T_co]): + r"""An iterable Dataset. + + All datasets that represent an iterable of data samples should subclass it. + Such form of datasets is particularly useful when data come from a stream. + + All subclasses should overwrite :meth:`__iter__`, which would return an + iterator of samples in this dataset. + + When a subclass is used with :class:`~torch.utils.data.DataLoader`, each + item in the dataset will be yielded from the :class:`~torch.utils.data.DataLoader` + iterator. When :attr:`num_workers > 0`, each worker process will have a + different copy of the dataset object, so it is often desired to configure + each copy independently to avoid having duplicate data returned from the + workers. :func:`~torch.utils.data.get_worker_info`, when called in a worker + process, returns information about the worker. It can be used in either the + dataset's :meth:`__iter__` method or the :class:`~torch.utils.data.DataLoader` 's + :attr:`worker_init_fn` option to modify each copy's behavior. 
class IterableDataset(Dataset[T_co], Iterable[T_co]):
    r"""An iterable Dataset.

    All datasets that represent an iterable of data samples should subclass
    it — particularly useful when data come from a stream. Subclasses should
    overwrite :meth:`__iter__`, returning an iterator of samples.

    When used with :class:`~torch.utils.data.DataLoader`, each item of the
    dataset is yielded from the DataLoader iterator. With
    :attr:`num_workers > 0`, every worker process holds its own copy of the
    dataset object, so each copy should be configured independently to avoid
    returning duplicate data. :func:`~torch.utils.data.get_worker_info`,
    called from a worker process, describes that worker; use it either inside
    :meth:`__iter__` or in the DataLoader's :attr:`worker_init_fn` to split
    the workload — e.g. give worker ``i`` the ``i``-th slice of the range by
    computing ``per_worker = ceil((end - start) / num_workers)`` and
    restricting iteration to ``[start + id * per_worker,
    min(start + (id + 1) * per_worker, end))``. Without such splitting,
    multi-process loading yields every sample once per worker.
    """

    def __add__(self, other: Dataset[T_co]):
        return ChainDataset([self, other])

    # No `def __len__(self)` default? Subclasses raise `TypeError` when needed.
    # See NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ]


class TensorDataset(Dataset[Tuple[Tensor, ...]]):
    r"""Dataset wrapping tensors.

    Each sample will be retrieved by indexing tensors along the first dimension.

    Args:
        *tensors (Tensor): tensors that have the same size of the first dimension.
    """

    tensors: Tuple[Tensor, ...]

    def __init__(self, *tensors: Tensor) -> None:
        # Every tensor must share the leading (sample) dimension.
        assert all(
            tensors[0].size(0) == tensor.size(0) for tensor in tensors
        ), "Size mismatch between tensors"
        self.tensors = tensors

    def __getitem__(self, index):
        return tuple(tensor[index] for tensor in self.tensors)

    def __len__(self):
        return self.tensors[0].size(0)
+ """ + + datasets: Union[tuple, dict] + + def __init__(self, *args: Dataset[T_co], **kwargs: Dataset[T_co]) -> None: + if args: + if kwargs: + raise ValueError( + "Supported either ``tuple``- (via ``args``) or" + "``dict``- (via ``kwargs``) like input/output, but both types are given." + ) + self._length = len(args[0]) # type: ignore[arg-type] + if any(self._length != len(dataset) for dataset in args): # type: ignore[arg-type] + raise ValueError("Size mismatch between datasets") + self.datasets = args + elif kwargs: + tmp = list(kwargs.values()) + self._length = len(tmp[0]) # type: ignore[arg-type] + if any(self._length != len(dataset) for dataset in tmp): # type: ignore[arg-type] + raise ValueError("Size mismatch between datasets") + self.datasets = kwargs + else: + raise ValueError("At least one dataset should be passed") + + def __getitem__(self, index): + if isinstance(self.datasets, dict): + return {k: dataset[index] for k, dataset in self.datasets.items()} + return tuple(dataset[index] for dataset in self.datasets) + + def __getitems__(self, indices: list): + # add batched sampling support when parent datasets supports it. + if isinstance(self.datasets, dict): + dict_batch: List[T_dict] = [{} for _ in indices] + for k, dataset in self.datasets.items(): + if callable(getattr(dataset, "__getitems__", None)): + items = dataset.__getitems__(indices) # type: ignore[attr-defined] + if len(items) != len(indices): + raise ValueError( + "Nested dataset's output size mismatch." 
+ f" Expected {len(indices)}, got {len(items)}" + ) + for data, d_sample in zip(items, dict_batch): + d_sample[k] = data + else: + for idx, d_sample in zip(indices, dict_batch): + d_sample[k] = dataset[idx] + return dict_batch + + # tuple data + list_batch: List[list] = [[] for _ in indices] + for dataset in self.datasets: + if callable(getattr(dataset, "__getitems__", None)): + items = dataset.__getitems__(indices) # type: ignore[attr-defined] + if len(items) != len(indices): + raise ValueError( + "Nested dataset's output size mismatch." + f" Expected {len(indices)}, got {len(items)}" + ) + for data, t_sample in zip(items, list_batch): + t_sample.append(data) + else: + for idx, t_sample in zip(indices, list_batch): + t_sample.append(dataset[idx]) + tuple_batch: List[T_tuple] = [tuple(sample) for sample in list_batch] + return tuple_batch + + def __len__(self): + return self._length + + +class ConcatDataset(Dataset[T_co]): + r"""Dataset as a concatenation of multiple datasets. + + This class is useful to assemble different existing datasets. 
class ConcatDataset(Dataset[T_co]):
    r"""Dataset formed by concatenating several datasets end to end.

    Useful for assembling multiple existing map-style datasets into one.

    Args:
        datasets (sequence): List of datasets to be concatenated
    """

    datasets: List[Dataset[T_co]]
    cumulative_sizes: List[int]

    @staticmethod
    def cumsum(sequence):
        # Running totals of the element lengths, e.g. lengths [3, 2] -> [3, 5].
        totals, running = [], 0
        for item in sequence:
            running += len(item)
            totals.append(running)
        return totals

    def __init__(self, datasets: Iterable[Dataset]) -> None:
        super().__init__()
        self.datasets = list(datasets)
        assert len(self.datasets) > 0, "datasets should not be an empty iterable"  # type: ignore[arg-type]
        for d in self.datasets:
            assert not isinstance(
                d, IterableDataset
            ), "ConcatDataset does not support IterableDataset"
        self.cumulative_sizes = self.cumsum(self.datasets)

    def __len__(self):
        return self.cumulative_sizes[-1]

    def __getitem__(self, idx):
        # Translate a global index into (dataset number, local index).
        if idx < 0:
            if -idx > len(self):
                raise ValueError(
                    "absolute value of index should not exceed dataset length"
                )
            idx = len(self) + idx
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        return self.datasets[dataset_idx][sample_idx]

    @property
    def cummulative_sizes(self):
        # Deprecated misspelled alias kept for backward compatibility.
        warnings.warn(
            "cummulative_sizes attribute is renamed to cumulative_sizes",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.cumulative_sizes
class Subset(Dataset[T_co]):
    r"""
    View over a dataset restricted to the given indices.

    Args:
        dataset (Dataset): The whole Dataset
        indices (sequence): Indices in the whole set selected for subset
    """

    dataset: Dataset[T_co]
    indices: Sequence[int]

    def __init__(self, dataset: Dataset[T_co], indices: Sequence[int]) -> None:
        self.dataset = dataset
        self.indices = indices

    def __getitem__(self, idx):
        # A list index is translated element-wise and forwarded in one call.
        if isinstance(idx, list):
            translated = [self.indices[i] for i in idx]
            return self.dataset[translated]
        return self.dataset[self.indices[idx]]

    def __getitems__(self, indices: List[int]) -> List[T_co]:
        # Batched fetch: delegate to the parent's ``__getitems__`` when it
        # exists (see torch.utils.data._utils.fetch._MapDatasetFetcher),
        # otherwise fall back to one lookup per index.
        translated = [self.indices[idx] for idx in indices]
        parent_getter = getattr(self.dataset, "__getitems__", None)
        if callable(parent_getter):
            return parent_getter(translated)  # type: ignore[attr-defined]
        return [self.dataset[i] for i in translated]

    def __len__(self):
        return len(self.indices)
def random_split(
    dataset: Dataset[T],
    lengths: Sequence[Union[int, float]],
    generator: Optional[Generator] = default_generator,
) -> List[Subset[T]]:
    r"""
    Randomly split a dataset into non-overlapping new datasets of given lengths.

    If a list of fractions that sum up to 1 is given,
    the lengths will be computed automatically as
    floor(frac * len(dataset)) for each fraction provided.

    After computing the lengths, if there are any remainders, 1 count will be
    distributed in round-robin fashion to the lengths
    until there are no remainders left.

    Optionally fix the generator for reproducible results, e.g.:

    Example:
        >>> # xdoctest: +SKIP
        >>> generator1 = torch.Generator().manual_seed(42)
        >>> generator2 = torch.Generator().manual_seed(42)
        >>> random_split(range(10), [3, 7], generator=generator1)
        >>> random_split(range(30), [0.3, 0.3, 0.4], generator=generator2)

    Args:
        dataset (Dataset): Dataset to be split
        lengths (sequence): lengths or fractions of splits to be produced
        generator (Generator): Generator used for the random permutation.

    Returns:
        List[Subset]: the non-overlapping splits, in the order of ``lengths``.

    Raises:
        ValueError: if a fraction is outside [0, 1], or if the (computed)
            integer lengths do not sum to ``len(dataset)``.
    """
    # Sum once instead of recomputing it for each side of the condition.
    total = sum(lengths)
    # Treat ``lengths`` as fractions when they sum to ~1 (from below).
    if math.isclose(total, 1) and total <= 1:
        subset_lengths: List[int] = []
        for i, frac in enumerate(lengths):
            if frac < 0 or frac > 1:
                raise ValueError(f"Fraction at index {i} is not between 0 and 1")
            n_items_in_split = int(
                math.floor(len(dataset) * frac)  # type: ignore[arg-type]
            )
            subset_lengths.append(n_items_in_split)
        remainder = len(dataset) - sum(subset_lengths)  # type: ignore[arg-type]
        # Add 1 to the lengths in round-robin fashion until the remainder is 0.
        for i in range(remainder):
            idx_to_add_at = i % len(subset_lengths)
            subset_lengths[idx_to_add_at] += 1
        lengths = subset_lengths
        for i, length in enumerate(lengths):
            if length == 0:
                warnings.warn(
                    f"Length of split at index {i} is 0. "
                    f"This might result in an empty dataset."
                )

    # Cannot verify that dataset is Sized
    if sum(lengths) != len(dataset):  # type: ignore[arg-type]
        raise ValueError(
            "Sum of input lengths does not equal the length of the input dataset!"
        )

    # One permutation of all indices, then slice it per split.
    indices = randperm(sum(lengths), generator=generator).tolist()  # type: ignore[arg-type, call-overload]
    lengths = cast(Sequence[int], lengths)
    return [
        Subset(dataset, indices[offset - length : offset])
        for offset, length in zip(itertools.accumulate(lengths), lengths)
    ]
``sharding_filter`` DataPipe that has a method ``apply_sharding``. + + RuntimeError will be raised when multiple ``sharding_filter`` are presented in the same branch. + """ + graph = traverse_dps(datapipe) + + def _helper(graph, prev_applied=None): + for (dp, sub_graph) in graph.values(): + applied = None + if _is_sharding_datapipe(dp): + if prev_applied is not None: + raise RuntimeError("Sharding twice on a single pipeline is likely unintended and will cause data loss. " + f"Sharding already applied to {prev_applied} while trying to apply to {dp}") + # For BC, only provide sharding_group if accepted + sig = inspect.signature(dp.apply_sharding) + if len(sig.parameters) < 3: + dp.apply_sharding(num_of_instances, instance_id) + else: + dp.apply_sharding(num_of_instances, instance_id, sharding_group=sharding_group) + applied = dp + if applied is None: + applied = prev_applied + _helper(sub_graph, applied) + + _helper(graph) + + return datapipe + + +def _is_shuffle_datapipe(datapipe: DataPipe) -> bool: + if not hasattr(datapipe, "set_shuffle") or not hasattr(datapipe, "set_seed"): + return False + if not inspect.ismethod(datapipe.set_shuffle) or not inspect.ismethod(datapipe.set_seed): + return False + return True + + +def apply_shuffle_settings(datapipe: DataPipe, shuffle: Optional[bool] = None) -> DataPipe: + r""" + Traverse the graph of ``DataPipes`` to find and set shuffle attribute. + + Apply the method to each `DataPipe` that has APIs of ``set_shuffle`` + and ``set_seed``. + + Args: + datapipe: DataPipe that needs to set shuffle attribute + shuffle: Shuffle option (default: ``None`` and no-op to the graph) + """ + if shuffle is None: + return datapipe + + graph = traverse_dps(datapipe) + all_pipes = get_all_graph_pipes(graph) + shufflers = [pipe for pipe in all_pipes if _is_shuffle_datapipe(pipe)] + if not shufflers and shuffle: + warnings.warn( + "`shuffle=True` was set, but the datapipe does not contain a `Shuffler`. Adding one at the end. 
" + "Be aware that the default buffer size might not be sufficient for your task." + ) + datapipe = datapipe.shuffle() + shufflers = [datapipe, ] # type: ignore[list-item] + + for shuffler in shufflers: + shuffler.set_shuffle(shuffle) + + return datapipe + + +def apply_shuffle_seed(datapipe: DataPipe, rng: Any) -> DataPipe: + warnings.warn( + "`apply_shuffle_seed` is deprecated since 1.12 and will be removed in the future releases." + "\nPlease use `apply_random_seed` instead." + ) + return apply_random_seed(datapipe, rng) + + +def _is_random_datapipe(datapipe: DataPipe) -> bool: + if hasattr(datapipe, "set_seed") and inspect.ismethod(datapipe.set_seed): + return True + return False + + +def apply_random_seed(datapipe: DataPipe, rng: torch.Generator) -> DataPipe: + r""" + Traverse the graph of ``DataPipes`` to find random ``DataPipe`` with an API of ``set_seed``. + + Then set the random seed based on the provided RNG to those ``DataPipe``. + + Args: + datapipe: DataPipe that needs to set randomness + rng: Random number generator to generate random seeds + """ + graph = traverse_dps(datapipe) + all_pipes = get_all_graph_pipes(graph) + # Using a set to track id of DataPipe to prevent setting randomness per DataPipe more than once. + # And, `id` is used in case of unhashable DataPipe + cache = set() + random_datapipes = [] + for pipe in all_pipes: + if id(pipe) in cache: + continue + if _is_random_datapipe(pipe): + random_datapipes.append(pipe) + cache.add(id(pipe)) + + for pipe in random_datapipes: + random_seed = int(torch.empty((), dtype=torch.int64).random_(generator=rng).item()) + pipe.set_seed(random_seed) + + return datapipe