diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_autocast_to_reduced_precision_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_autocast_to_reduced_precision_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..bb14e5b7deb4df6cc93d24772e3cb20579b2e808 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_autocast_to_reduced_precision_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor _autocast_to_reduced_precision(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Float.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Float.h new file mode 100644 index 0000000000000000000000000000000000000000..7e47953e8db9858de743b27649def4e79109e1ed --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Float.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor +inline at::Tensor _cast_Float(const at::Tensor & self, bool non_blocking=false) { + return at::_ops::_cast_Float::call(self, non_blocking); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_rowwise_prune_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_rowwise_prune_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b9996b919a40c75266c9bdfe0f1c637bb4a24e47 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_rowwise_prune_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _rowwise_prune { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, at::ScalarType); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_rowwise_prune") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & weight, const at::Tensor & mask, at::ScalarType compressed_indices_dtype); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & mask, at::ScalarType compressed_indices_dtype); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_softmax_backward_data_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_softmax_backward_data_native.h new file mode 100644 index 0000000000000000000000000000000000000000..bc1b1c947e40ea5f2cbd025c284f3f34f08fe5d0 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_softmax_backward_data_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & _sparse_softmax_backward_data_out(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor softmax_backward_sparse_cpu(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self); +TORCH_API at::Tensor softmax_backward_sparse_cuda(const at::Tensor & grad_output, const at::Tensor & output, 
int64_t dim, const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_use_cudnn_ctc_loss_cuda_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_use_cudnn_ctc_loss_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ad6d5abfc71a8e55a98d0cd4c1da6ce999413a71 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_use_cudnn_ctc_loss_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API bool _use_cudnn_ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank); +TORCH_API bool _use_cudnn_ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank); + +} // namespace cuda +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_csr_tensor_args_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_csr_tensor_args_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..320b39a8b2d056588333ea56375a24b8b2fecd25 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_csr_tensor_args_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _validate_sparse_csr_tensor_args { + using schema = void (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_validate_sparse_csr_tensor_args") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()") + static void call(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size); + static void redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/atan2_meta_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/atan2_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d2a1fb871bdae62272ac22c2fd7e9b4e0216a1ce --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/atan2_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor atan2(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & atan2_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & atan2_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & atan2_(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/col_indices_copy_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/col_indices_copy_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8797991c170d1a666bcb66ee0b0f4d9303d8da0e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/col_indices_copy_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & col_indices_copy_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & col_indices_copy_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/exponential_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/exponential_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..25c0cbb161eaf72a68e0a1dbf8abc45e1ca8f08a --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/exponential_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API exponential_ { + using schema = at::Tensor & (at::Tensor &, double, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::exponential_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "exponential_(Tensor(a!) self, float lambd=1, *, Generator? 
generator=None) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, double lambd, c10::optional generator); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double lambd, c10::optional generator); +}; + +struct TORCH_API exponential_out { + using schema = at::Tensor & (const at::Tensor &, double, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::exponential") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "exponential.out(Tensor self, float lambd=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, double lambd, c10::optional generator, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double lambd, c10::optional generator, at::Tensor & out); +}; + +struct TORCH_API exponential { + using schema = at::Tensor (const at::Tensor &, double, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::exponential") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "exponential(Tensor self, float lambd=1, *, Generator? 
generator=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, double lambd, c10::optional generator); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double lambd, c10::optional generator); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/int_repr_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/int_repr_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7404a372626b2999e9a273ed241a1cea2521a2d9 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/int_repr_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & int_repr_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & int_repr_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_meta_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8f89b0a6447d9db4dda2150924abe066f5d073dd --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor log1p(const at::Tensor & self); +TORCH_API at::Tensor & log1p_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & log1p_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & log1p_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/logdet_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/logdet_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8f3a25b4e6312d845972eddd2ec851cb0ffbf84f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/logdet_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor logdet(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..04ea12298a75b2eef1df0122bbbdd74cb3dbdf69 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple max_pool2d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +TORCH_API ::std::tuple max_pool2d_with_indices_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +TORCH_API ::std::tuple max_pool2d_with_indices_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices); + +} // namespace cpu +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/neg_meta_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/neg_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..690d24220b768a96ea9c14035037407b5bcbb617 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/neg_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor neg(const at::Tensor & self); +TORCH_API at::Tensor & neg_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & neg_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & neg_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/norm_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/norm_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8b09858283e169c2f4e86d63565572a38ed939ab --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/norm_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor norm(const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype); +TORCH_API at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype); +TORCH_API at::Tensor & norm_outf(const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype, at::Tensor & out); +TORCH_API at::Tensor norm(const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim=false); +TORCH_API at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim=false); +TORCH_API at::Tensor & norm_outf(const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/norm_except_dim_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/norm_except_dim_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8918e5c22c1e63b7ee57c56d3e5163a7c672252e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/norm_except_dim_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor norm_except_dim(const at::Tensor & v, int64_t pow=2, int64_t dim=0); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices_ops.h new file mode 100644 index 
0000000000000000000000000000000000000000..1b1d2310a47ed5d3fedacc15a24e772f0c8835d3 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API row_indices { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::row_indices") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "row_indices(Tensor(a) self) -> Tensor(a)") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_k1_compositeexplicitautogradnonfunctional_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_k1_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a895095e9956a6ad9c8a215f21cc07a8fc98f31f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_k1_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include 
+#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor special_modified_bessel_k1(const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_t_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_t_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..658d8fe795ab7d4315aa0bc0033cc3e6bf3798f1 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_t_compositeexplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor special_shifted_chebyshev_polynomial_t(const at::Scalar & x, const at::Tensor & n); +TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_t_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n); +TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_t_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out); +TORCH_API at::Tensor special_shifted_chebyshev_polynomial_t(const at::Tensor & x, const at::Scalar & n); +TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_t_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n); +TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_t_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..143eb40dab7b71c54373b601aa3155bfe307ee9d Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/__pycache__/_validators.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/__pycache__/_validators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..604358787dabbb2d0e1368e2ed8739cc79708ba3 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/__pycache__/_validators.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/__pycache__/artifact_file_cache.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/__pycache__/artifact_file_cache.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..b6e067f7fbdcd2aa09b86f80feb31a37cb537233 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/__pycache__/artifact_file_cache.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/__pycache__/artifact_instance_cache.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/__pycache__/artifact_instance_cache.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eba8b7d7c76eebbda6e4396c09db50829abaca31 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/__pycache__/artifact_instance_cache.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/__pycache__/artifact_manifest_entry.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/__pycache__/artifact_manifest_entry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6d1ab1a571b6412ade73dabd166ccea30beb36f Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/__pycache__/artifact_manifest_entry.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/__pycache__/storage_handler.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/__pycache__/storage_handler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a9b3fcf439b6e2233eb44bf8f6c888ec99362b8 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/__pycache__/storage_handler.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/_validators.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/_validators.py new file mode 100644 index 0000000000000000000000000000000000000000..643f57587a8c4d2daa75b32dc448a7323a202ca5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/_validators.py @@ -0,0 
"""Internal validation utilities that are specific to artifacts."""

from __future__ import annotations

import re
from functools import wraps
from typing import TYPE_CHECKING, Any, Callable, TypeVar, cast, overload

from wandb.sdk.artifacts.exceptions import (
    ArtifactFinalizedError,
    ArtifactNotLoggedError,
)

if TYPE_CHECKING:
    from typing import Collection, Final, Iterable, Union

    from wandb.sdk.artifacts.artifact import Artifact

    ArtifactT = TypeVar("ArtifactT", bound=Artifact)
    T = TypeVar("T")
    ClassInfo = Union[type[T], tuple[type[T], ...]]


REGISTRY_PREFIX: Final[str] = "wandb-registry-"


# Overloads so type checkers keep the element type of the returned list.
@overload
def always_list(obj: Iterable[T], base_type: ClassInfo = ...) -> list[T]: ...
@overload
def always_list(obj: T, base_type: ClassInfo = ...) -> list[T]: ...


def always_list(obj: Any, base_type: Any = (str, bytes)) -> list[T]:
    """Coerce a single object OR an iterable of objects into a list.

    By default, string-like objects (`str`/`bytes`) count as single items rather
    than iterables of characters.

    Adapted from `more_itertools.always_iterable`, but simplified for internal use. See:
    https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.always_iterable
    """
    if isinstance(obj, base_type):
        return [obj]
    return list(obj)


def validate_aliases(aliases: Collection[str] | str) -> list[str]:
    """Check artifact aliases for forbidden characters and return them as a list.

    Raises:
        ValueError: If any of the aliases contain invalid characters.
    """
    checked = always_list(aliases)

    invalid_chars = ("/", ":")
    for alias in checked:
        if any(char in alias for char in invalid_chars):
            raise ValueError(
                f"Aliases must not contain any of the following characters: {', '.join(invalid_chars)}"
            )
    return checked


# A tag is one or more `[-\w]` "words" separated by single-or-multiple spaces;
# leading/trailing whitespace is not allowed.
_VALID_TAG_PATTERN: re.Pattern[str] = re.compile(r"^[-\w]+( +[-\w]+)*$")


def validate_tags(tags: Collection[str] | str) -> list[str]:
    """Check artifact tag names and return them as a deduplicated list.

    Keeps only the first occurrence of each duplicate tag, preserving the
    order of appearance.

    Raises:
        ValueError: If any of the tags contain invalid characters.
    """
    checked = always_list(tags)

    if not all(_VALID_TAG_PATTERN.match(tag) for tag in checked):
        raise ValueError(
            "Invalid tag(s). "
            "Tags must only contain alphanumeric characters separated by hyphens, underscores, and/or spaces."
        )

    # dict.fromkeys dedupes while preserving insertion order.
    return list(dict.fromkeys(checked))


DecoratedF = TypeVar("DecoratedF", bound=Callable[..., Any])
"""Type hint for a decorated function that'll preserve its signature (e.g. for arg autocompletion in IDEs)."""


def ensure_logged(method: DecoratedF) -> DecoratedF:
    """Decorator restricting an Artifact method to logged (non-draft) artifacts.

    Calling the wrapped method on a draft artifact raises `ArtifactNotLoggedError`.
    """
    # Report the qualified (full) name of the method in the error, for clarity.
    fullname = method.__qualname__

    @wraps(method)
    def checked(self: ArtifactT, *args: Any, **kwargs: Any) -> Any:
        if self.is_draft():
            raise ArtifactNotLoggedError(fullname=fullname, obj=self)
        return method(self, *args, **kwargs)

    return cast(DecoratedF, checked)


def ensure_not_finalized(method: DecoratedF) -> DecoratedF:
    """Decorator restricting an `Artifact` method to artifacts that aren't finalized.

    Calling the wrapped method on a finalized artifact raises `ArtifactFinalizedError`.
    """
    # Report the qualified (full) name of the method in the error, for clarity.
    fullname = method.__qualname__

    @wraps(method)
    def checked(self: ArtifactT, *args: Any, **kwargs: Any) -> Any:
        if self._final:
            raise ArtifactFinalizedError(fullname=fullname, obj=self)
        return method(self, *args, **kwargs)

    return cast(DecoratedF, checked)


def is_artifact_registry_project(project: str) -> bool:
    """Return True if `project` is a W&B registry-backed project."""
    return project.startswith(REGISTRY_PREFIX)
wandb.sdk.artifacts.artifact_instance_cache import artifact_instance_cache +from wandb.sdk.artifacts.artifact_manifest import ArtifactManifest +from wandb.sdk.artifacts.artifact_manifest_entry import ArtifactManifestEntry +from wandb.sdk.artifacts.artifact_manifests.artifact_manifest_v1 import ( + ArtifactManifestV1, +) +from wandb.sdk.artifacts.artifact_state import ArtifactState +from wandb.sdk.artifacts.artifact_ttl import ArtifactTTL +from wandb.sdk.artifacts.exceptions import ArtifactNotLoggedError, WaitTimeoutError +from wandb.sdk.artifacts.staging import get_staging_dir +from wandb.sdk.artifacts.storage_handlers.gcs_handler import _GCSIsADirectoryError +from wandb.sdk.artifacts.storage_layout import StorageLayout +from wandb.sdk.artifacts.storage_policies import WANDB_STORAGE_POLICY +from wandb.sdk.artifacts.storage_policy import StoragePolicy +from wandb.sdk.data_types._dtypes import Type as WBType +from wandb.sdk.data_types._dtypes import TypeRegistry +from wandb.sdk.internal.internal_api import Api as InternalApi +from wandb.sdk.internal.thread_local_settings import _thread_local_api_settings +from wandb.sdk.lib import filesystem, retry, runid, telemetry +from wandb.sdk.lib.deprecate import Deprecated, deprecate +from wandb.sdk.lib.hashutil import B64MD5, b64_to_hex_id, md5_file_b64 +from wandb.sdk.lib.mailbox import Mailbox +from wandb.sdk.lib.paths import FilePathStr, LogicalPath, StrPath, URIStr +from wandb.sdk.lib.runid import generate_id + +reset_path = util.vendor_setup() + +from wandb_gql import gql # noqa: E402 + +reset_path() + +logger = logging.getLogger(__name__) + +if TYPE_CHECKING: + from wandb.sdk.interface.message_future import MessageFuture + + +class Artifact: + """Flexible and lightweight building block for dataset and model versioning. + + Construct an empty W&B Artifact. Populate an artifacts contents with methods that + begin with `add`. Once the artifact has all the desired files, you can call + `wandb.log_artifact()` to log it. 
+ + Args: + name: A human-readable name for the artifact. Use the name to identify + a specific artifact in the W&B App UI or programmatically. You can + interactively reference an artifact with the `use_artifact` Public API. + A name can contain letters, numbers, underscores, hyphens, and dots. + The name must be unique across a project. + type: The artifact's type. Use the type of an artifact to both organize + and differentiate artifacts. You can use any string that contains letters, + numbers, underscores, hyphens, and dots. Common types include `dataset` or `model`. + Include `model` within your type string if you want to link the artifact + to the W&B Model Registry. + description: A description of the artifact. For Model or Dataset Artifacts, + add documentation for your standardized team model or dataset card. View + an artifact's description programmatically with the `Artifact.description` + attribute or programmatically with the W&B App UI. W&B renders the + description as markdown in the W&B App. + metadata: Additional information about an artifact. Specify metadata as a + dictionary of key-value pairs. You can specify no more than 100 total keys. + + Returns: + An `Artifact` object. + """ + + _TMP_DIR = tempfile.TemporaryDirectory("wandb-artifacts") + atexit.register(_TMP_DIR.cleanup) + + def __init__( + self, + name: str, + type: str, + description: str | None = None, + metadata: dict[str, Any] | None = None, + incremental: bool = False, + use_as: str | None = None, + ) -> None: + if not re.match(r"^[a-zA-Z0-9_\-.]+$", name): + raise ValueError( + f"Artifact name may only contain alphanumeric characters, dashes, " + f"underscores, and dots. Invalid name: {name}" + ) + if type == "job" or type.startswith("wandb-"): + raise ValueError( + "Artifact types 'job' and 'wandb-*' are reserved for internal use. " + "Please use a different type." + ) + if incremental: + termwarn("Using experimental arg `incremental`") + + # Internal. 
+ self._client: RetryingClient | None = None + + storage_policy_cls = StoragePolicy.lookup_by_name(WANDB_STORAGE_POLICY) + layout = StorageLayout.V1 if env.get_use_v1_artifacts() else StorageLayout.V2 + policy_config = {"storageLayout": layout} + self._storage_policy = storage_policy_cls.from_config(config=policy_config) + + self._tmp_dir: tempfile.TemporaryDirectory | None = None + self._added_objs: dict[int, tuple[WBValue, ArtifactManifestEntry]] = {} + self._added_local_paths: dict[str, ArtifactManifestEntry] = {} + self._save_future: MessageFuture | None = None + self._download_roots: set[str] = set() + # Set by new_draft(), otherwise the latest artifact will be used as the base. + self._base_id: str | None = None + # Properties. + self._id: str | None = None + self._client_id: str = runid.generate_id(128) + self._sequence_client_id: str = runid.generate_id(128) + self._entity: str | None = None + self._project: str | None = None + self._name: str = name # includes version after saving + self._version: str | None = None + self._source_entity: str | None = None + self._source_project: str | None = None + self._source_name: str = name # includes version after saving + self._source_version: str | None = None + self._type: str = type + self._description: str | None = description + self._metadata: dict = self._normalize_metadata(metadata) + self._ttl_duration_seconds: int | None = None + self._ttl_is_inherited: bool = True + self._ttl_changed: bool = False + self._aliases: list[str] = [] + self._saved_aliases: list[str] = [] + self._tags: list[str] = [] + self._saved_tags: list[str] = [] + self._distributed_id: str | None = None + self._incremental: bool = incremental + self._use_as: str | None = use_as + self._state: ArtifactState = ArtifactState.PENDING + self._manifest: ArtifactManifest | None = ArtifactManifestV1( + self._storage_policy + ) + self._commit_hash: str | None = None + self._file_count: int | None = None + self._created_at: str | None = None + 
self._updated_at: str | None = None + self._final: bool = False + + # Cache. + artifact_instance_cache[self._client_id] = self + + def __repr__(self) -> str: + return f"" + + @classmethod + def _from_id(cls, artifact_id: str, client: RetryingClient) -> Artifact | None: + artifact = artifact_instance_cache.get(artifact_id) + if artifact is not None: + return artifact + + query = gql( + """ + query ArtifactByID($id: ID!) { + artifact(id: $id) { + ...ArtifactFragment + } + } + """ + + _gql_artifact_fragment() + ) + response = client.execute( + query, + variable_values={"id": artifact_id}, + ) + attrs = response.get("artifact") + if attrs is None: + return None + + src_collection = attrs["artifactSequence"] + src_project = src_collection["project"] + + entity_name = src_project["entityName"] if src_project else "" + project_name = src_project["name"] if src_project else "" + + name = "{}:v{}".format(src_collection["name"], attrs["versionIndex"]) + return cls._from_attrs(entity_name, project_name, name, attrs, client) + + @classmethod + def _from_name( + cls, + *, + entity: str, + project: str, + name: str, + client: RetryingClient, + enable_tracking: bool = False, + ) -> Artifact: + server_supports_enabling_artifact_usage_tracking = ( + InternalApi().server_project_type_introspection() + ) + query_vars = ["$entityName: String!", "$projectName: String!", "$name: String!"] + query_args = ["name: $name"] + if server_supports_enabling_artifact_usage_tracking: + query_vars.append("$enableTracking: Boolean") + query_args.append("enableTracking: $enableTracking") + + vars_str = ", ".join(query_vars) + args_str = ", ".join(query_args) + + query = gql( + f""" + query ArtifactByName({vars_str}) {{ + project(name: $projectName, entityName: $entityName) {{ + artifact({args_str}) {{ + ...ArtifactFragment + }} + }} + }} + {_gql_artifact_fragment()} + """ + ) + query_variable_values: dict[str, Any] = { + "entityName": entity, + "projectName": project, + "name": name, + } + if 
server_supports_enabling_artifact_usage_tracking: + query_variable_values["enableTracking"] = enable_tracking + + response = client.execute( + query, + variable_values=query_variable_values, + ) + project_attrs = response.get("project") + if not project_attrs: + raise ValueError(f"project '{project}' not found under entity '{entity}'") + attrs = project_attrs.get("artifact") + if not attrs: + raise ValueError(f"artifact '{name}' not found in '{entity}/{project}'") + return cls._from_attrs(entity, project, name, attrs, client) + + @classmethod + def _from_attrs( + cls, + entity: str, + project: str, + name: str, + attrs: dict[str, Any], + client: RetryingClient, + ) -> Artifact: + # Placeholder is required to skip validation. + artifact = cls("placeholder", type="placeholder") + artifact._client = client + artifact._entity = entity + artifact._project = project + artifact._name = name + artifact._assign_attrs(attrs) + + artifact.finalize() + + # Cache. + assert artifact.id is not None + artifact_instance_cache[artifact.id] = artifact + return artifact + + def _assign_attrs(self, attrs: dict[str, Any]) -> None: + """Update this Artifact's attributes using the server response.""" + self._id = attrs["id"] + + src_version = f"v{attrs['versionIndex']}" + src_collection = attrs["artifactSequence"] + src_project = src_collection["project"] + + self._source_entity = src_project["entityName"] if src_project else "" + self._source_project = src_project["name"] if src_project else "" + self._source_name = f"{src_collection['name']}:{src_version}" + self._source_version = src_version + + if self._entity is None: + self._entity = self._source_entity + if self._project is None: + self._project = self._source_project + + if self._name is None: + self._name = self._source_name + + self._type = attrs["artifactType"]["name"] + self._description = attrs["description"] + + entity = self._entity + project = self._project + collection, *_ = self._name.split(":") + aliases = [ + 
obj["alias"] + for obj in attrs["aliases"] + if obj["artifactCollection"] + and obj["artifactCollection"]["project"] + and obj["artifactCollection"]["project"]["entityName"] == entity + and obj["artifactCollection"]["project"]["name"] == project + and obj["artifactCollection"]["name"] == collection + ] + + version_aliases = [ + alias for alias in aliases if util.alias_is_version_index(alias) + ] + other_aliases = [ + alias for alias in aliases if not util.alias_is_version_index(alias) + ] + if version_aliases: + try: + [version] = version_aliases + except ValueError: + raise ValueError( + f"Expected at most one version alias, got {len(version_aliases)}: {version_aliases!r}" + ) + else: + version = src_version + + self._version = version + + if ":" not in self._name: + self._name = f"{self._name}:{version}" + + self._aliases = other_aliases + self._saved_aliases = copy(other_aliases) + + tags = [obj["name"] for obj in attrs.get("tags", [])] + self._tags = tags + self._saved_tags = copy(tags) + + metadata_str = attrs["metadata"] + self.metadata = self._normalize_metadata( + json.loads(metadata_str) if metadata_str else {} + ) + + self._ttl_duration_seconds = _ttl_duration_seconds_from_gql( + attrs.get("ttlDurationSeconds") + ) + self._ttl_is_inherited = ( + True if (attrs.get("ttlIsInherited") is None) else attrs["ttlIsInherited"] + ) + + self._state = ArtifactState(attrs["state"]) + + try: + manifest_url = attrs["currentManifest"]["file"]["directUrl"] + except (LookupError, TypeError): + self._manifest = None + else: + self._manifest = self._load_manifest(manifest_url) + + self._commit_hash = attrs["commitHash"] + self._file_count = attrs["fileCount"] + self._created_at = attrs["createdAt"] + self._updated_at = attrs["updatedAt"] + + @ensure_logged + def new_draft(self) -> Artifact: + """Create a new draft artifact with the same content as this committed artifact. + + The artifact returned can be extended or modified and logged as a new version. 
+ + Returns: + An `Artifact` object. + + Raises: + ArtifactNotLoggedError: If the artifact is not logged. + """ + # Name, _entity and _project are set to the *source* name/entity/project: + # if this artifact is saved it must be saved to the source sequence. + artifact = Artifact(self.source_name.split(":")[0], self.type) + artifact._entity = self._source_entity + artifact._project = self._source_project + artifact._source_entity = self._source_entity + artifact._source_project = self._source_project + + # This artifact's parent is the one we are making a draft from. + artifact._base_id = self.id + + # We can reuse the client, and copy over all the attributes that aren't + # version-dependent and don't depend on having been logged. + artifact._client = self._client + artifact._description = self.description + artifact._metadata = self.metadata + artifact._manifest = ArtifactManifest.from_manifest_json( + self.manifest.to_manifest_json() + ) + return artifact + + # Properties (Python Class managed attributes). + + @property + def id(self) -> str | None: + """The artifact's ID.""" + if self.is_draft(): + return None + assert self._id is not None + return self._id + + @property + @ensure_logged + def entity(self) -> str: + """The name of the entity of the secondary (portfolio) artifact collection.""" + assert self._entity is not None + return self._entity + + @property + @ensure_logged + def project(self) -> str: + """The name of the project of the secondary (portfolio) artifact collection.""" + assert self._project is not None + return self._project + + @property + def name(self) -> str: + """The artifact name and version in its secondary (portfolio) collection. + + A string with the format `{collection}:{alias}`. Before the artifact is saved, + contains only the name since the version is not yet known. 
+ """ + return self._name + + @property + def qualified_name(self) -> str: + """The entity/project/name of the secondary (portfolio) collection.""" + return f"{self.entity}/{self.project}/{self.name}" + + @property + @ensure_logged + def version(self) -> str: + """The artifact's version in its secondary (portfolio) collection.""" + assert self._version is not None + return self._version + + @property + @ensure_logged + def collection(self) -> ArtifactCollection: + """The collection this artifact was retrieved from. + + A collection is an ordered group of artifact versions. + If this artifact was retrieved from a portfolio / linked collection, that + collection will be returned rather than the collection + that an artifact version originated from. The collection + that an artifact originates from is known as the source sequence. + """ + base_name = self.name.split(":")[0] + return ArtifactCollection( + self._client, self.entity, self.project, base_name, self.type + ) + + @property + @ensure_logged + def source_entity(self) -> str: + """The name of the entity of the primary (sequence) artifact collection.""" + assert self._source_entity is not None + return self._source_entity + + @property + @ensure_logged + def source_project(self) -> str: + """The name of the project of the primary (sequence) artifact collection.""" + assert self._source_project is not None + return self._source_project + + @property + def source_name(self) -> str: + """The artifact name and version in its primary (sequence) collection. + + A string with the format `{collection}:{alias}`. Before the artifact is saved, + contains only the name since the version is not yet known. 
+ """ + return self._source_name + + @property + def source_qualified_name(self) -> str: + """The entity/project/name of the primary (sequence) collection.""" + return f"{self.source_entity}/{self.source_project}/{self.source_name}" + + @property + @ensure_logged + def source_version(self) -> str: + """The artifact's version in its primary (sequence) collection. + + A string with the format `v{number}`. + """ + assert self._source_version is not None + return self._source_version + + @property + @ensure_logged + def source_collection(self) -> ArtifactCollection: + """The artifact's primary (sequence) collection.""" + base_name = self.source_name.split(":")[0] + return ArtifactCollection( + self._client, self.source_entity, self.source_project, base_name, self.type + ) + + @property + def type(self) -> str: + """The artifact's type. Common types include `dataset` or `model`.""" + return self._type + + @property + def description(self) -> str | None: + """A description of the artifact.""" + return self._description + + @description.setter + def description(self, description: str | None) -> None: + """Set the description of the artifact. + + For model or dataset Artifacts, add documentation for your + standardized team model or dataset card. In the W&B UI the + description is rendered as markdown. + + Args: + description: Free text that offers a description of the artifact. + """ + self._description = description + + @property + def metadata(self) -> dict: + """User-defined artifact metadata. + + Structured data associated with the artifact. + """ + return self._metadata + + @metadata.setter + def metadata(self, metadata: dict) -> None: + """User-defined artifact metadata. + + Metadata set this way will eventually be queryable and plottable in the UI; e.g. + the class distribution of a dataset. + + Note: There is currently a limit of 100 total keys. + + Args: + metadata: Structured data associated with the artifact. 
+ """ + self._metadata = self._normalize_metadata(metadata) + + @property + def ttl(self) -> timedelta | None: + """The time-to-live (TTL) policy of an artifact. + + Artifacts are deleted shortly after a TTL policy's duration passes. + If set to `None`, the artifact deactivates TTL policies and will be not + scheduled for deletion, even if there is a team default TTL. + An artifact inherits a TTL policy from + the team default if the team administrator defines a default + TTL and there is no custom policy set on an artifact. + + Raises: + ArtifactNotLoggedError: Unable to fetch inherited TTL if the artifact has not been logged or saved + """ + if self._ttl_is_inherited and (self.is_draft() or self._ttl_changed): + raise ArtifactNotLoggedError(f"{type(self).__name__}.ttl", self) + if self._ttl_duration_seconds is None: + return None + return timedelta(seconds=self._ttl_duration_seconds) + + @ttl.setter + def ttl(self, ttl: timedelta | ArtifactTTL | None) -> None: + """The time-to-live (TTL) policy of an artifact. + + Artifacts are deleted shortly after a TTL policy's duration passes. + If set to `None`, the artifact has no TTL policy set and it is not + scheduled for deletion. An artifact inherits a TTL policy from + the team default if the team administrator defines a default + TTL and there is no custom policy set on an artifact. + + Args: + ttl: The duration as a positive Python `datetime.timedelta` Type + that represents how long the artifact will remain active from its creation. 
+ + """ + if self.type == "wandb-history": + raise ValueError("Cannot set artifact TTL for type wandb-history") + + self._ttl_changed = True + if isinstance(ttl, ArtifactTTL): + if ttl == ArtifactTTL.INHERIT: + self._ttl_is_inherited = True + else: + raise ValueError(f"Unhandled ArtifactTTL enum {ttl}") + else: + self._ttl_is_inherited = False + if ttl is None: + self._ttl_duration_seconds = None + else: + if ttl.total_seconds() <= 0: + raise ValueError( + f"Artifact TTL Duration has to be positive. ttl: {ttl.total_seconds()}" + ) + self._ttl_duration_seconds = int(ttl.total_seconds()) + + @property + @ensure_logged + def aliases(self) -> list[str]: + """List of one or more semantically-friendly references or identifying "nicknames" assigned to an artifact version. + + Aliases are mutable references that you can programmatically reference. + Change an artifact's alias with the W&B App UI or programmatically. + See [Create new artifact versions](https://docs.wandb.ai/guides/artifacts/create-a-new-artifact-version) + for more information. 
    @property
    def manifest(self) -> ArtifactManifest:
        """The artifact's manifest.

        The manifest lists all of its contents, and can't be changed once the artifact
        has been logged.

        Lazily fetched: if the manifest wasn't populated when this artifact was
        loaded (e.g. the original server response lacked a manifest URL), it is
        fetched from the backend on first access and cached on the instance.
        """
        if self._manifest is None:
            query = gql(
                """
                query ArtifactManifest(
                    $entityName: String!,
                    $projectName: String!,
                    $name: String!
                ) {
                    project(entityName: $entityName, name: $projectName) {
                        artifact(name: $name) {
                            currentManifest {
                                file {
                                    directUrl
                                }
                            }
                        }
                    }
                }
                """
            )
            # A client must exist here: only logged artifacts reach this path.
            assert self._client is not None
            response = self._client.execute(
                query,
                variable_values={
                    "entityName": self._entity,
                    "projectName": self._project,
                    "name": self._name,
                },
            )
            attrs = response["project"]["artifact"]
            # Download and parse the manifest file from its direct URL.
            self._manifest = self._load_manifest(
                attrs["currentManifest"]["file"]["directUrl"]
            )
        return self._manifest
+ + The digest is the checksum of the artifact's contents. If an artifact has the + same digest as the current `latest` version, then `log_artifact` is a no-op. + """ + return self.manifest.digest() + + @property + def size(self) -> int: + """The total size of the artifact in bytes. + + Includes any references tracked by this artifact. + """ + total_size: int = 0 + for entry in self.manifest.entries.values(): + if entry.size is not None: + total_size += entry.size + return total_size + + @property + @ensure_logged + def commit_hash(self) -> str: + """The hash returned when this artifact was committed.""" + assert self._commit_hash is not None + return self._commit_hash + + @property + @ensure_logged + def file_count(self) -> int: + """The number of files (including references).""" + assert self._file_count is not None + return self._file_count + + @property + @ensure_logged + def created_at(self) -> str: + """Timestamp when the artifact was created.""" + assert self._created_at is not None + return self._created_at + + @property + @ensure_logged + def updated_at(self) -> str: + """The time when the artifact was last updated.""" + assert self._created_at is not None + return self._updated_at or self._created_at + + # State management. + + def finalize(self) -> None: + """Finalize the artifact version. + + You cannot modify an artifact version once it is finalized because the artifact + is logged as a specific artifact version. Create a new artifact version + to log more data to an artifact. An artifact is automatically finalized + when you log the artifact with `log_artifact`. + """ + self._final = True + + def is_draft(self) -> bool: + """Check if artifact is not saved. + + Returns: Boolean. `False` if artifact is saved. `True` if artifact is not saved. 
    def save(
        self,
        project: str | None = None,
        settings: wandb.Settings | None = None,
    ) -> None:
        """Persist any changes made to the artifact.

        If currently in a run, that run will log this artifact. If not currently in a
        run, a run of type "auto" is created to track this artifact.

        Args:
            project: A project to use for the artifact in the case that a run is not
                already in context.
            settings: A settings object to use when initializing an automatic run. Most
                commonly used in testing harness.
        """
        # Already-saved artifacts only need their mutable fields synced;
        # _update() returns None, so this also ends the method.
        if self._state != ArtifactState.PENDING:
            return self._update()

        if self._incremental:
            with telemetry.context() as tel:
                tel.feature.artifact_incremental = True

        if wandb.run is None:
            if settings is None:
                # Suppress console output for the throwaway auto-run.
                settings = wandb.Settings(silent="true")
            with wandb.init(  # type: ignore
                entity=self._source_entity,
                project=project or self._source_project,
                job_type="auto",
                settings=settings,
            ) as run:
                # redoing this here because in this branch we know we didn't
                # have the run at the beginning of the method
                if self._incremental:
                    with telemetry.context(run=run) as tel:
                        tel.feature.artifact_incremental = True
                run.log_artifact(self)
        else:
            wandb.run.log_artifact(self)
+ """ + if self.is_draft(): + if self._save_future is None: + raise ArtifactNotLoggedError(type(self).wait.__qualname__, self) + result = self._save_future.get(timeout) + if not result: + raise WaitTimeoutError( + "Artifact upload wait timed out, failed to fetch Artifact response" + ) + response = result.response.log_artifact_response + if response.error_message: + raise ValueError(response.error_message) + self._populate_after_save(response.artifact_id) + return self + + def _populate_after_save(self, artifact_id: str) -> None: + query_template = """ + query ArtifactByIDShort($id: ID!) { + artifact(id: $id) { + ...ArtifactFragment + } + } + """ + _gql_artifact_fragment() + + query = gql(query_template) + + assert self._client is not None + response = self._client.execute( + query, + variable_values={"id": artifact_id}, + ) + + try: + attrs = response["artifact"] + except LookupError: + raise ValueError(f"Unable to fetch artifact with id: {artifact_id!r}") + else: + self._assign_attrs(attrs) + + @normalize_exceptions + def _update(self) -> None: + """Persists artifact changes to the wandb backend.""" + aliases = None + introspect_query = gql( + """ + query ProbeServerAddAliasesInput { + AddAliasesInputInfoType: __type(name: "AddAliasesInput") { + name + inputFields { + name + } + } + } + """ + ) + assert self._client is not None + response = self._client.execute(introspect_query) + if response.get("AddAliasesInputInfoType"): # wandb backend version >= 0.13.0 + aliases_to_add = set(self._aliases) - set(self._saved_aliases) + aliases_to_delete = set(self._saved_aliases) - set(self._aliases) + if aliases_to_add: + add_mutation = gql( + """ + mutation addAliases( + $artifactID: ID!, + $aliases: [ArtifactCollectionAliasInput!]!, + ) { + addAliases( + input: {artifactID: $artifactID, aliases: $aliases} + ) { + success + } + } + """ + ) + assert self._client is not None + self._client.execute( + add_mutation, + variable_values={ + "artifactID": self.id, + "aliases": [ + { 
+ "entityName": self._entity, + "projectName": self._project, + "artifactCollectionName": self._name.split(":")[0], + "alias": alias, + } + for alias in aliases_to_add + ], + }, + ) + if aliases_to_delete: + delete_mutation = gql( + """ + mutation deleteAliases( + $artifactID: ID!, + $aliases: [ArtifactCollectionAliasInput!]!, + ) { + deleteAliases( + input: {artifactID: $artifactID, aliases: $aliases} + ) { + success + } + } + """ + ) + assert self._client is not None + self._client.execute( + delete_mutation, + variable_values={ + "artifactID": self.id, + "aliases": [ + { + "entityName": self._entity, + "projectName": self._project, + "artifactCollectionName": self._name.split(":")[0], + "alias": alias, + } + for alias in aliases_to_delete + ], + }, + ) + self._saved_aliases = copy(self._aliases) + else: # wandb backend version < 0.13.0 + aliases = [ + { + "artifactCollectionName": self._name.split(":")[0], + "alias": alias, + } + for alias in self._aliases + ] + + mutation_template = """ + mutation updateArtifact( + $artifactID: ID! + $description: String + $metadata: JSONString + _TTL_DURATION_SECONDS_TYPE_ + _TAGS_TO_ADD_TYPE_ + _TAGS_TO_DELETE_TYPE_ + $aliases: [ArtifactAliasInput!] 
+ ) { + updateArtifact( + input: { + artifactID: $artifactID, + description: $description, + metadata: $metadata, + _TTL_DURATION_SECONDS_VALUE_ + _TAGS_TO_ADD_VALUE_ + _TAGS_TO_DELETE_VALUE_ + aliases: $aliases + } + ) { + artifact { + ...ArtifactFragment + } + } + } + """ + _gql_artifact_fragment() + + fields = InternalApi().server_artifact_introspection() + if "ttlIsInherited" in fields: + mutation_template = ( + mutation_template.replace( + "_TTL_DURATION_SECONDS_TYPE_", + "$ttlDurationSeconds: Int64", + ) + .replace( + "_TTL_DURATION_SECONDS_VALUE_", + "ttlDurationSeconds: $ttlDurationSeconds", + ) + .replace( + "_TTL_DURATION_SECONDS_FIELDS_", + "ttlDurationSeconds ttlIsInherited", + ) + ) + else: + if self._ttl_changed: + termwarn( + "Server not compatible with setting Artifact TTLs, please upgrade the server to use Artifact TTL" + ) + mutation_template = ( + mutation_template.replace("_TTL_DURATION_SECONDS_TYPE_", "") + .replace("_TTL_DURATION_SECONDS_VALUE_", "") + .replace("_TTL_DURATION_SECONDS_FIELDS_", "") + ) + + tags_to_add = validate_tags(set(self._tags) - set(self._saved_tags)) + tags_to_delete = validate_tags(set(self._saved_tags) - set(self._tags)) + if "tags" in fields: + mutation_template = ( + mutation_template.replace( + "_TAGS_TO_ADD_TYPE_", "$tagsToAdd: [TagInput!]" + ) + .replace("_TAGS_TO_DELETE_TYPE_", "$tagsToDelete: [TagInput!]") + .replace("_TAGS_TO_ADD_VALUE_", "tagsToAdd: $tagsToAdd") + .replace("_TAGS_TO_DELETE_VALUE_", "tagsToDelete: $tagsToDelete") + ) + else: + if tags_to_add or tags_to_delete: + termwarn( + "Server not compatible with Artifact tags. " + "To use Artifact tags, please upgrade the server to v0.85 or higher." 
+ ) + mutation_template = ( + mutation_template.replace("_TAGS_TO_ADD_TYPE_", "") + .replace("_TAGS_TO_DELETE_TYPE_", "") + .replace("_TAGS_TO_ADD_VALUE_", "") + .replace("_TAGS_TO_DELETE_VALUE_", "") + ) + + mutation = gql(mutation_template) + assert self._client is not None + + ttl_duration_input = self._ttl_duration_seconds_to_gql() + response = self._client.execute( + mutation, + variable_values={ + "artifactID": self.id, + "description": self.description, + "metadata": util.json_dumps_safer(self.metadata), + "ttlDurationSeconds": ttl_duration_input, + "aliases": aliases, + "tagsToAdd": [{"tagName": tag_name} for tag_name in tags_to_add], + "tagsToDelete": [{"tagName": tag_name} for tag_name in tags_to_delete], + }, + ) + attrs = response["updateArtifact"]["artifact"] + self._assign_attrs(attrs) + + self._ttl_changed = False # Reset after updating artifact + + # Adding, removing, getting entries. + + def __getitem__(self, name: str) -> WBValue | None: + """Get the WBValue object located at the artifact relative `name`. + + Args: + name: The artifact relative name to get. + + Returns: + W&B object that can be logged with `wandb.log()` and visualized in the W&B UI. + + Raises: + ArtifactNotLoggedError: If the artifact isn't logged or the run is offline. + """ + return self.get(name) + + def __setitem__(self, name: str, item: WBValue) -> ArtifactManifestEntry: + """Add `item` to the artifact at path `name`. + + Args: + name: The path within the artifact to add the object. + item: The object to add. + + Returns: + The added manifest entry + + Raises: + ArtifactFinalizedError: You cannot make changes to the current artifact + version because it is finalized. Log a new artifact version instead. + """ + return self.add(item, name) + + @contextlib.contextmanager + @ensure_not_finalized + def new_file( + self, name: str, mode: str = "x", encoding: str | None = None + ) -> Iterator[IO]: + """Open a new temporary file and add it to the artifact. 
+ + Args: + name: The name of the new file to add to the artifact. + mode: The file access mode to use to open the new file. + encoding: The encoding used to open the new file. + + Returns: + A new file object that can be written to. Upon closing, the file will be + automatically added to the artifact. + + Raises: + ArtifactFinalizedError: You cannot make changes to the current artifact + version because it is finalized. Log a new artifact version instead. + """ + overwrite: bool = "x" not in mode + + if self._tmp_dir is None: + self._tmp_dir = tempfile.TemporaryDirectory() + path = os.path.join(self._tmp_dir.name, name.lstrip("/")) + + filesystem.mkdir_exists_ok(os.path.dirname(path)) + try: + with util.fsync_open(path, mode, encoding) as f: + yield f + except FileExistsError: + raise ValueError(f"File with name {name!r} already exists at {path!r}") + except UnicodeEncodeError as e: + termerror( + f"Failed to open the provided file ({type(e).__name__}: {e}). Please " + f"provide the proper encoding." + ) + raise e + + self.add_file( + path, name=name, policy="immutable", skip_cache=True, overwrite=overwrite + ) + + @ensure_not_finalized + def add_file( + self, + local_path: str, + name: str | None = None, + is_tmp: bool | None = False, + skip_cache: bool | None = False, + policy: Literal["mutable", "immutable"] | None = "mutable", + overwrite: bool = False, + ) -> ArtifactManifestEntry: + """Add a local file to the artifact. + + Args: + local_path: The path to the file being added. + name: The path within the artifact to use for the file being added. Defaults + to the basename of the file. + is_tmp: If true, then the file is renamed deterministically to avoid + collisions. + skip_cache: If `True`, W&B will not copy files to the cache after uploading. + policy: By default, set to "mutable". If set to "mutable", create a temporary copy of the + file to prevent corruption during upload. 
If set to "immutable", disable + protection and rely on the user not to delete or change the file. + overwrite: If `True`, overwrite the file if it already exists. + + Returns: + The added manifest entry. + + Raises: + ArtifactFinalizedError: You cannot make changes to the current artifact + version because it is finalized. Log a new artifact version instead. + ValueError: Policy must be "mutable" or "immutable" + """ + if not os.path.isfile(local_path): + raise ValueError(f"Path is not a file: {local_path!r}") + + name = LogicalPath(name or os.path.basename(local_path)) + digest = md5_file_b64(local_path) + + if is_tmp: + file_path, file_name = os.path.split(name) + file_name_parts = file_name.split(".") + file_name_parts[0] = b64_to_hex_id(digest)[:20] + name = os.path.join(file_path, ".".join(file_name_parts)) + + return self._add_local_file( + name, + local_path, + digest=digest, + skip_cache=skip_cache, + policy=policy, + overwrite=overwrite, + ) + + @ensure_not_finalized + def add_dir( + self, + local_path: str, + name: str | None = None, + skip_cache: bool | None = False, + policy: Literal["mutable", "immutable"] | None = "mutable", + ) -> None: + """Add a local directory to the artifact. + + Args: + local_path: The path of the local directory. + name: The subdirectory name within an artifact. The name you specify appears + in the W&B App UI nested by artifact's `type`. + Defaults to the root of the artifact. + skip_cache: If set to `True`, W&B will not copy/move files to the cache while uploading + policy: "mutable" | "immutable". By default, "mutable" + "mutable": Create a temporary copy of the file to prevent corruption during upload. + "immutable": Disable protection, rely on the user not to delete or change the file. + + Raises: + ArtifactFinalizedError: You cannot make changes to the current artifact + version because it is finalized. Log a new artifact version instead. 
+ ValueError: Policy must be "mutable" or "immutable" + """ + if not os.path.isdir(local_path): + raise ValueError("Path is not a directory: {}".format(local_path)) + + termlog( + "Adding directory to artifact ({})... ".format( + os.path.join(".", os.path.normpath(local_path)) + ), + newline=False, + ) + start_time = time.time() + + paths = [] + for dirpath, _, filenames in os.walk(local_path, followlinks=True): + for fname in filenames: + physical_path = os.path.join(dirpath, fname) + logical_path = os.path.relpath(physical_path, start=local_path) + if name is not None: + logical_path = os.path.join(name, logical_path) + paths.append((logical_path, physical_path)) + + def add_manifest_file(log_phy_path: tuple[str, str]) -> None: + logical_path, physical_path = log_phy_path + self._add_local_file( + name=logical_path, + path=physical_path, + skip_cache=skip_cache, + policy=policy, + ) + + num_threads = 8 + pool = multiprocessing.dummy.Pool(num_threads) + pool.map(add_manifest_file, paths) + pool.close() + pool.join() + + termlog("Done. %.1fs" % (time.time() - start_time), prefix=False) + + @ensure_not_finalized + def add_reference( + self, + uri: ArtifactManifestEntry | str, + name: StrPath | None = None, + checksum: bool = True, + max_objects: int | None = None, + ) -> Sequence[ArtifactManifestEntry]: + """Add a reference denoted by a URI to the artifact. + + Unlike files or directories that you add to an artifact, references are not + uploaded to W&B. For more information, + see [Track external files](https://docs.wandb.ai/guides/artifacts/track-external-files). + + By default, the following schemes are supported: + + - http(s): The size and digest of the file will be inferred by the + `Content-Length` and the `ETag` response headers returned by the server. + - s3: The checksum and size are pulled from the object metadata. If bucket + versioning is enabled, then the version ID is also tracked. + - gs: The checksum and size are pulled from the object metadata. 
If bucket + versioning is enabled, then the version ID is also tracked. + - https, domain matching `*.blob.core.windows.net` (Azure): The checksum and size + are be pulled from the blob metadata. If storage account versioning is + enabled, then the version ID is also tracked. + - file: The checksum and size are pulled from the file system. This scheme + is useful if you have an NFS share or other externally mounted volume + containing files you wish to track but not necessarily upload. + + For any other scheme, the digest is just a hash of the URI and the size is left + blank. + + Args: + uri: The URI path of the reference to add. The URI path can be an object + returned from `Artifact.get_entry` to store a reference to another + artifact's entry. + name: The path within the artifact to place the contents of this reference. + checksum: Whether or not to checksum the resource(s) located at the + reference URI. Checksumming is strongly recommended as it enables + automatic integrity validation. Disabling checksumming will speed up + artifact creation but reference directories will not iterated through so the + objects in the directory will not be saved to the artifact. We recommend + setting `checksum=False` when adding reference objects, in which case + a new version will only be created if the reference URI changes. + max_objects: The maximum number of objects to consider when adding a + reference that points to directory or bucket store prefix. By default, + the maximum number of objects allowed for Amazon S3, + GCS, Azure, and local files is 10,000,000. Other URI schemas do not have a maximum. + + Returns: + The added manifest entries. + + Raises: + ArtifactFinalizedError: You cannot make changes to the current artifact + version because it is finalized. Log a new artifact version instead. + """ + if name is not None: + name = LogicalPath(name) + + # This is a bit of a hack, we want to check if the uri is a of the type + # ArtifactManifestEntry. 
If so, then recover the reference URL. + if isinstance(uri, ArtifactManifestEntry): + uri_str = uri.ref_url() + elif isinstance(uri, str): + uri_str = uri + url = urlparse(str(uri_str)) + if not url.scheme: + raise ValueError( + "References must be URIs. To reference a local file, use file://" + ) + + manifest_entries = self._storage_policy.store_reference( + self, + URIStr(uri_str), + name=name, + checksum=checksum, + max_objects=max_objects, + ) + for entry in manifest_entries: + self.manifest.add_entry(entry) + + return manifest_entries + + @ensure_not_finalized + def add( + self, obj: WBValue, name: StrPath, overwrite: bool = False + ) -> ArtifactManifestEntry: + """Add wandb.WBValue `obj` to the artifact. + + Args: + obj: The object to add. Currently support one of Bokeh, JoinedTable, + PartitionedTable, Table, Classes, ImageMask, BoundingBoxes2D, Audio, + Image, Video, Html, Object3D + name: The path within the artifact to add the object. + overwrite: If True, overwrite existing objects with the same file path (if applicable). + + Returns: + The added manifest entry + + Raises: + ArtifactFinalizedError: You cannot make changes to the current artifact + version because it is finalized. Log a new artifact version instead. + """ + name = LogicalPath(name) + + # This is a "hack" to automatically rename tables added to + # the wandb /media/tables directory to their sha-based name. + # TODO: figure out a more appropriate convention. 
+ is_tmp_name = name.startswith("media/tables") + + # Validate that the object is one of the correct wandb.Media types + # TODO: move this to checking subclass of wandb.Media once all are + # generally supported + allowed_types = ( + data_types.Bokeh, + data_types.JoinedTable, + data_types.PartitionedTable, + data_types.Table, + data_types.Classes, + data_types.ImageMask, + data_types.BoundingBoxes2D, + data_types.Audio, + data_types.Image, + data_types.Video, + data_types.Html, + data_types.Object3D, + data_types.Molecule, + data_types._SavedModel, + ) + if not isinstance(obj, allowed_types): + raise ValueError( + f"Found object of type {obj.__class__}, expected one of: {allowed_types}" + ) + + obj_id = id(obj) + if obj_id in self._added_objs: + return self._added_objs[obj_id][1] + + # If the object is coming from another artifact, save it as a reference + ref_path = obj._get_artifact_entry_ref_url() + if ref_path is not None: + return self.add_reference(ref_path, type(obj).with_suffix(name))[0] + + val = obj.to_json(self) + name = obj.with_suffix(name) + entry = self.manifest.get_entry_by_path(name) + if (not overwrite) and (entry is not None): + return entry + + if is_tmp_name: + file_path = os.path.join(self._TMP_DIR.name, str(id(self)), name) + folder_path, _ = os.path.split(file_path) + os.makedirs(folder_path, exist_ok=True) + with open(file_path, "w", encoding="utf-8") as tmp_f: + json.dump(val, tmp_f, sort_keys=True) + else: + filemode = "w" if overwrite else "x" + with self.new_file(name, mode=filemode, encoding="utf-8") as f: + json.dump(val, f, sort_keys=True) + file_path = f.name + + # Note, we add the file from our temp directory. + # It will be added again later on finalize, but succeed since + # the checksum should match + entry = self.add_file(file_path, name, is_tmp_name) + # We store a reference to the obj so that its id doesn't get reused. 
+ self._added_objs[obj_id] = (obj, entry) + if obj._artifact_target is None: + obj._set_artifact_target(self, entry.path) + + if is_tmp_name: + with contextlib.suppress(FileNotFoundError): + os.remove(file_path) + + return entry + + def _add_local_file( + self, + name: StrPath, + path: StrPath, + digest: B64MD5 | None = None, + skip_cache: bool | None = False, + policy: Literal["mutable", "immutable"] | None = "mutable", + overwrite: bool = False, + ) -> ArtifactManifestEntry: + policy = policy or "mutable" + if policy not in ["mutable", "immutable"]: + raise ValueError( + f"Invalid policy {policy!r}. Policy may only be `mutable` or `immutable`." + ) + upload_path = path + if policy == "mutable": + with tempfile.NamedTemporaryFile(dir=get_staging_dir(), delete=False) as f: + staging_path = f.name + shutil.copyfile(path, staging_path) + # Set as read-only to prevent changes to the file during upload process + os.chmod(staging_path, stat.S_IRUSR) + upload_path = staging_path + + entry = ArtifactManifestEntry( + path=name, + digest=digest or md5_file_b64(upload_path), + size=os.path.getsize(upload_path), + local_path=upload_path, + skip_cache=skip_cache, + ) + self.manifest.add_entry(entry, overwrite=overwrite) + self._added_local_paths[os.fspath(path)] = entry + return entry + + @ensure_not_finalized + def remove(self, item: StrPath | ArtifactManifestEntry) -> None: + """Remove an item from the artifact. + + Args: + item: The item to remove. Can be a specific manifest entry or the name of an + artifact-relative path. If the item matches a directory all items in + that directory will be removed. + + Raises: + ArtifactFinalizedError: You cannot make changes to the current artifact + version because it is finalized. Log a new artifact version instead. + FileNotFoundError: If the item isn't found in the artifact. 
+ """ + if isinstance(item, ArtifactManifestEntry): + self.manifest.remove_entry(item) + return + + path = str(PurePosixPath(item)) + entry = self.manifest.get_entry_by_path(path) + if entry: + self.manifest.remove_entry(entry) + return + + entries = self.manifest.get_entries_in_directory(path) + if not entries: + raise FileNotFoundError(f"No such file or directory: {path}") + for entry in entries: + self.manifest.remove_entry(entry) + + def get_path(self, name: StrPath) -> ArtifactManifestEntry: + """Deprecated. Use `get_entry(name)`.""" + deprecate( + field_name=Deprecated.artifact__get_path, + warning_message="Artifact.get_path(name) is deprecated, use Artifact.get_entry(name) instead.", + ) + return self.get_entry(name) + + @ensure_logged + def get_entry(self, name: StrPath) -> ArtifactManifestEntry: + """Get the entry with the given name. + + Args: + name: The artifact relative name to get + + Returns: + A `W&B` object. + + Raises: + ArtifactNotLoggedError: if the artifact isn't logged or the run is offline. + KeyError: if the artifact doesn't contain an entry with the given name. + """ + name = LogicalPath(name) + entry = self.manifest.entries.get(name) or self._get_obj_entry(name)[0] + if entry is None: + raise KeyError("Path not contained in artifact: {}".format(name)) + entry._parent_artifact = self + return entry + + @ensure_logged + def get(self, name: str) -> WBValue | None: + """Get the WBValue object located at the artifact relative `name`. + + Args: + name: The artifact relative name to retrieve. + + Returns: + W&B object that can be logged with `wandb.log()` and visualized in the W&B UI. + + Raises: + ArtifactNotLoggedError: if the artifact isn't logged or the run is offline + """ + entry, wb_class = self._get_obj_entry(name) + if entry is None or wb_class is None: + return None + + # If the entry is a reference from another artifact, then get it directly from + # that artifact. 
+ referenced_id = entry._referenced_artifact_id() + if referenced_id: + assert self._client is not None + artifact = self._from_id(referenced_id, client=self._client) + assert artifact is not None + return artifact.get(util.uri_from_path(entry.ref)) + + # Special case for wandb.Table. This is intended to be a short term + # optimization. Since tables are likely to download many other assets in + # artifact(s), we eagerly download the artifact using the parallelized + # `artifact.download`. In the future, we should refactor the deserialization + # pattern such that this special case is not needed. + if wb_class == wandb.Table: + self.download() + + # Get the ArtifactManifestEntry + item = self.get_entry(entry.path) + item_path = item.download() + + # Load the object from the JSON blob + result = None + json_obj = {} + with open(item_path) as file: + json_obj = json.load(file) + result = wb_class.from_json(json_obj, self) + result._set_artifact_source(self, name) + return result + + def get_added_local_path_name(self, local_path: str) -> str | None: + """Get the artifact relative name of a file added by a local filesystem path. + + Args: + local_path: The local path to resolve into an artifact relative name. + + Returns: + The artifact relative name. + """ + entry = self._added_local_paths.get(local_path, None) + if entry is None: + return None + return entry.path + + def _get_obj_entry( + self, name: str + ) -> tuple[ArtifactManifestEntry, Type[WBValue]] | tuple[None, None]: # noqa: UP006 # `type` shadows `Artifact.type` + """Return an object entry by name, handling any type suffixes. + + When objects are added with `.add(obj, name)`, the name is typically changed to + include the suffix of the object type when serializing to JSON. So we need to be + able to resolve a name, without tasking the user with appending .THING.json. + This method returns an entry if it exists by a suffixed name. 
+ + Args: + name: name used when adding + """ + for wb_class in WBValue.type_mapping().values(): + wandb_file_name = wb_class.with_suffix(name) + entry = self.manifest.entries.get(wandb_file_name) + if entry is not None: + return entry, wb_class + return None, None + + # Downloading. + + @ensure_logged + def download( + self, + root: StrPath | None = None, + allow_missing_references: bool = False, + skip_cache: bool | None = None, + path_prefix: StrPath | None = None, + ) -> FilePathStr: + """Download the contents of the artifact to the specified root directory. + + Existing files located within `root` are not modified. Explicitly delete `root` + before you call `download` if you want the contents of `root` to exactly match + the artifact. + + Args: + root: The directory W&B stores the artifact's files. + allow_missing_references: If set to `True`, any invalid reference paths + will be ignored while downloading referenced files. + skip_cache: If set to `True`, the artifact cache will be skipped when + downloading and W&B will download each file into the default root or + specified download directory. + path_prefix: If specified, only files with a path that starts with the given + prefix will be downloaded. Uses unix format (forward slashes). + + Returns: + The path to the downloaded contents. + + Raises: + ArtifactNotLoggedError: If the artifact is not logged. + RuntimeError: If the artifact is attempted to be downloaded in offline mode. 
+ """ + root = FilePathStr(str(root or self._default_root())) + self._add_download_root(root) + + # TODO: we need a better way to check for offline mode across the app, as this is an anti-pattern + if env.is_offline() or util._is_offline(): + raise RuntimeError("Cannot download artifacts in offline mode.") + + # TODO: download artifacts using core when implemented + # if is_require_core(): + # return self._download_using_core( + # root=root, + # allow_missing_references=allow_missing_references, + # skip_cache=bool(skip_cache), + # path_prefix=path_prefix, + # ) + return self._download( + root=root, + allow_missing_references=allow_missing_references, + skip_cache=skip_cache, + path_prefix=path_prefix, + ) + + def _download_using_core( + self, + root: str, + allow_missing_references: bool = False, + skip_cache: bool = False, + path_prefix: StrPath | None = None, + ) -> FilePathStr: + import pathlib + + from wandb.sdk.backend.backend import Backend + + if wandb.run is None: + wl = wandb.setup() + + stream_id = generate_id() + + settings = wl.settings.to_proto() + # TODO: remove this + tmp_dir = pathlib.Path(tempfile.mkdtemp()) + + settings.sync_dir.value = str(tmp_dir) + settings.sync_file.value = str(tmp_dir / f"{stream_id}.wandb") + settings.files_dir.value = str(tmp_dir / "files") + settings.run_id.value = stream_id + + service = wl.ensure_service() + service.inform_init(settings=settings, run_id=stream_id) + + mailbox = Mailbox() + backend = Backend( + settings=wl.settings, + service=service, + mailbox=mailbox, + ) + backend.ensure_launched() + + assert backend.interface + backend.interface._stream_id = stream_id # type: ignore + + mailbox.enable_keepalive() + else: + assert wandb.run._backend + backend = wandb.run._backend + + assert backend.interface + handle = backend.interface.deliver_download_artifact( + self.id, # type: ignore + root, + allow_missing_references, + skip_cache, + path_prefix, # type: ignore + ) + # TODO: Start the download process in the 
user process too, to handle reference downloads + self._download( + root=root, + allow_missing_references=allow_missing_references, + skip_cache=skip_cache, + path_prefix=path_prefix, + ) + result = handle.wait(timeout=-1) + + if result is None: + handle.abandon() + assert result is not None + response = result.response.download_artifact_response + if response.error_message: + raise ValueError(f"Error downloading artifact: {response.error_message}") + + return FilePathStr(root) + + def _download( + self, + root: str, + allow_missing_references: bool = False, + skip_cache: bool | None = None, + path_prefix: StrPath | None = None, + ) -> FilePathStr: + nfiles = len(self.manifest.entries) + size = sum(e.size or 0 for e in self.manifest.entries.values()) + log = False + if nfiles > 5000 or size > 50 * 1024 * 1024: + log = True + termlog( + "Downloading large artifact {}, {:.2f}MB. {} files... ".format( + self.name, size / (1024 * 1024), nfiles + ), + ) + start_time = datetime.now() + download_logger = ArtifactDownloadLogger(nfiles=nfiles) + + def _download_entry( + entry: ArtifactManifestEntry, + api_key: str | None, + cookies: dict | None, + headers: dict | None, + ) -> None: + _thread_local_api_settings.api_key = api_key + _thread_local_api_settings.cookies = cookies + _thread_local_api_settings.headers = headers + + try: + entry.download(root, skip_cache=skip_cache) + except FileNotFoundError as e: + if allow_missing_references: + wandb.termwarn(str(e)) + return + raise + except _GCSIsADirectoryError as e: + logger.debug(str(e)) + return + download_logger.notify_downloaded() + + download_entry = partial( + _download_entry, + api_key=_thread_local_api_settings.api_key, + cookies=_thread_local_api_settings.cookies, + headers=_thread_local_api_settings.headers, + ) + + with concurrent.futures.ThreadPoolExecutor(64) as executor: + active_futures = set() + has_next_page = True + cursor = None + while has_next_page: + fetch_url_batch_size = 
env.get_artifact_fetch_file_url_batch_size() + attrs = self._fetch_file_urls(cursor, fetch_url_batch_size) + has_next_page = attrs["pageInfo"]["hasNextPage"] + cursor = attrs["pageInfo"]["endCursor"] + for edge in attrs["edges"]: + entry = self.get_entry(edge["node"]["name"]) + # TODO: uncomment once artifact downloads are supported in core + # if require_core and entry.ref is None: + # # Handled by core + # continue + entry._download_url = edge["node"]["directUrl"] + if (not path_prefix) or entry.path.startswith(str(path_prefix)): + active_futures.add(executor.submit(download_entry, entry)) + # Wait for download threads to catch up. + max_backlog = fetch_url_batch_size + if len(active_futures) > max_backlog: + for future in concurrent.futures.as_completed(active_futures): + future.result() # check for errors + active_futures.remove(future) + if len(active_futures) <= max_backlog: + break + # Check for errors. + for future in concurrent.futures.as_completed(active_futures): + future.result() + + if log: + now = datetime.now() + delta = abs((now - start_time).total_seconds()) + hours = int(delta // 3600) + minutes = int((delta - hours * 3600) // 60) + seconds = delta - hours * 3600 - minutes * 60 + termlog( + f"Done. 
{hours}:{minutes}:{seconds:.1f}", + prefix=False, + ) + return FilePathStr(root) + + @retry.retriable( + retry_timedelta=timedelta(minutes=3), + retryable_exceptions=(requests.RequestException), + ) + def _fetch_file_urls(self, cursor: str | None, per_page: int | None = 5000) -> Any: + query = gql( + """ + query ArtifactFileURLs($id: ID!, $cursor: String, $perPage: Int) { + artifact(id: $id) { + files(after: $cursor, first: $perPage) { + pageInfo { + hasNextPage + endCursor + } + edges { + node { + name + directUrl + } + } + } + } + } + """ + ) + assert self._client is not None + response = self._client.execute( + query, + variable_values={"id": self.id, "cursor": cursor, "perPage": per_page}, + timeout=60, + ) + return response["artifact"]["files"] + + @ensure_logged + def checkout(self, root: str | None = None) -> str: + """Replace the specified root directory with the contents of the artifact. + + WARNING: This will delete all files in `root` that are not included in the + artifact. + + Args: + root: The directory to replace with this artifact's files. + + Returns: + The path of the checked out contents. + + Raises: + ArtifactNotLoggedError: If the artifact is not logged. + """ + root = root or self._default_root(include_version=False) + + for dirpath, _, files in os.walk(root): + for file in files: + full_path = os.path.join(dirpath, file) + artifact_path = os.path.relpath(full_path, start=root) + try: + self.get_entry(artifact_path) + except KeyError: + # File is not part of the artifact, remove it. + os.remove(full_path) + + return self.download(root=root) + + @ensure_logged + def verify(self, root: str | None = None) -> None: + """Verify that the contents of an artifact match the manifest. + + All files in the directory are checksummed and the checksums are then + cross-referenced against the artifact's manifest. References are not verified. + + Args: + root: The directory to verify. 
If None artifact will be downloaded to + './artifacts/self.name/' + + Raises: + ArtifactNotLoggedError: If the artifact is not logged. + ValueError: If the verification fails. + """ + root = root or self._default_root() + + for dirpath, _, files in os.walk(root): + for file in files: + full_path = os.path.join(dirpath, file) + artifact_path = os.path.relpath(full_path, start=root) + try: + self.get_entry(artifact_path) + except KeyError: + raise ValueError( + "Found file {} which is not a member of artifact {}".format( + full_path, self.name + ) + ) + + ref_count = 0 + for entry in self.manifest.entries.values(): + if entry.ref is None: + if md5_file_b64(os.path.join(root, entry.path)) != entry.digest: + raise ValueError("Digest mismatch for file: {}".format(entry.path)) + else: + ref_count += 1 + if ref_count > 0: + termwarn(f"skipped verification of {ref_count} refs") + + @ensure_logged + def file(self, root: str | None = None) -> StrPath: + """Download a single file artifact to the directory you specify with `root`. + + Args: + root: The root directory to store the file. Defaults to + './artifacts/self.name/'. + + Returns: + The full path of the downloaded file. + + Raises: + ArtifactNotLoggedError: If the artifact is not logged. + ValueError: If the artifact contains more than one file. + """ + if root is None: + root = os.path.join(".", "artifacts", self.name) + + if len(self.manifest.entries) > 1: + raise ValueError( + "This artifact contains more than one file, call `.download()` to get " + 'all files or call .get_entry("filename").download()' + ) + + return self.get_entry(list(self.manifest.entries)[0]).download(root) + + @ensure_logged + def files( + self, names: list[str] | None = None, per_page: int = 50 + ) -> ArtifactFiles: + """Iterate over all files stored in this artifact. + + Args: + names: The filename paths relative to the root of the artifact you wish to + list. + per_page: The number of files to return per request. 
+ + Returns: + An iterator containing `File` objects. + + Raises: + ArtifactNotLoggedError: If the artifact is not logged. + """ + return ArtifactFiles(self._client, self, names, per_page) + + def _default_root(self, include_version: bool = True) -> FilePathStr: + name = self.source_name if include_version else self.source_name.split(":")[0] + root = os.path.join(env.get_artifact_dir(), name) + # In case we're on a system where the artifact dir has a name corresponding to + # an unexpected filesystem, we'll check for alternate roots. If one exists we'll + # use that, otherwise we'll fall back to the system-preferred path. + path = filesystem.check_exists(root) or filesystem.system_preferred_path(root) + return FilePathStr(str(path)) + + def _add_download_root(self, dir_path: str) -> None: + self._download_roots.add(os.path.abspath(dir_path)) + + def _local_path_to_name(self, file_path: str) -> str | None: + """Convert a local file path to a path entry in the artifact.""" + abs_file_path = os.path.abspath(file_path) + abs_file_parts = abs_file_path.split(os.sep) + for i in range(len(abs_file_parts) + 1): + if os.path.join(os.sep, *abs_file_parts[:i]) in self._download_roots: + return os.path.join(*abs_file_parts[i:]) + return None + + # Others. + + @ensure_logged + def delete(self, delete_aliases: bool = False) -> None: + """Delete an artifact and its files. + + If called on a linked artifact (i.e. a member of a portfolio collection): only the link is deleted, and the + source artifact is unaffected. + + Args: + delete_aliases: If set to `True`, deletes all aliases associated with the artifact. + Otherwise, this raises an exception if the artifact has existing + aliases. + This parameter is ignored if the artifact is linked (i.e. a member of a portfolio collection). + + Raises: + ArtifactNotLoggedError: If the artifact is not logged. 
+ """ + if self.collection.is_sequence(): + self._delete(delete_aliases) + else: + self._unlink() + + @normalize_exceptions + def _delete(self, delete_aliases: bool = False) -> None: + mutation = gql( + """ + mutation DeleteArtifact($artifactID: ID!, $deleteAliases: Boolean) { + deleteArtifact(input: { + artifactID: $artifactID + deleteAliases: $deleteAliases + }) { + artifact { + id + } + } + } + """ + ) + assert self._client is not None + self._client.execute( + mutation, + variable_values={ + "artifactID": self.id, + "deleteAliases": delete_aliases, + }, + ) + + @normalize_exceptions + def link(self, target_path: str, aliases: list[str] | None = None) -> None: + """Link this artifact to a portfolio (a promoted collection of artifacts). + + Args: + target_path: The path to the portfolio inside a project. + The target path must adhere to one of the following + schemas `{portfolio}`, `{project}/{portfolio}` or + `{entity}/{project}/{portfolio}`. + To link the artifact to the Model Registry, rather than to a generic + portfolio inside a project, set `target_path` to the following + schema `{"model-registry"}/{Registered Model Name}` or + `{entity}/{"model-registry"}/{Registered Model Name}`. + aliases: A list of strings that uniquely identifies the artifact inside the + specified portfolio. + + Raises: + ArtifactNotLoggedError: If the artifact is not logged. + """ + if wandb.run is None: + with wandb.init( # type: ignore + entity=self._source_entity, + project=self._source_project, + job_type="auto", + settings=wandb.Settings(silent="true"), + ) as run: + run.link_artifact(self, target_path, aliases) + else: + wandb.run.link_artifact(self, target_path, aliases) + + @ensure_logged + def unlink(self) -> None: + """Unlink this artifact if it is currently a member of a portfolio (a promoted collection of artifacts). + + Raises: + ArtifactNotLoggedError: If the artifact is not logged. + ValueError: If the artifact is not linked, i.e. 
it is not a member of a portfolio collection. + """ + # Fail early if this isn't a linked artifact to begin with + if self.collection.is_sequence(): + raise ValueError( + f"Artifact {self.qualified_name!r} is not a linked artifact and cannot be unlinked. " + f"To delete it, use {self.delete.__qualname__!r} instead." + ) + + self._unlink() + + @normalize_exceptions + def _unlink(self) -> None: + mutation = gql( + """ + mutation UnlinkArtifact($artifactID: ID!, $artifactPortfolioID: ID!) { + unlinkArtifact( + input: { artifactID: $artifactID, artifactPortfolioID: $artifactPortfolioID } + ) { + artifactID + success + clientMutationId + } + } + """ + ) + assert self._client is not None + self._client.execute( + mutation, + variable_values={ + "artifactID": self.id, + "artifactPortfolioID": self.collection.id, + }, + ) + + @ensure_logged + def used_by(self) -> list[Run]: + """Get a list of the runs that have used this artifact. + + Returns: + A list of `Run` objects. + + Raises: + ArtifactNotLoggedError: If the artifact is not logged. + """ + query = gql( + """ + query ArtifactUsedBy( + $id: ID!, + ) { + artifact(id: $id) { + usedBy { + edges { + node { + name + project { + name + entityName + } + } + } + } + } + } + """ + ) + assert self._client is not None + response = self._client.execute( + query, + variable_values={"id": self.id}, + ) + return [ + Run( + self._client, + edge["node"]["project"]["entityName"], + edge["node"]["project"]["name"], + edge["node"]["name"], + ) + for edge in response.get("artifact", {}).get("usedBy", {}).get("edges", []) + ] + + @ensure_logged + def logged_by(self) -> Run | None: + """Get the W&B run that originally logged the artifact. + + Returns: + The name of the W&B run that originally logged the artifact. + + Raises: + ArtifactNotLoggedError: If the artifact is not logged. + """ + query = gql( + """ + query ArtifactCreatedBy( + $id: ID! + ) { + artifact(id: $id) { + createdBy { + ... 
on Run { + name + project { + name + entityName + } + } + } + } + } + """ + ) + assert self._client is not None + response = self._client.execute( + query, + variable_values={"id": self.id}, + ) + creator = response.get("artifact", {}).get("createdBy", {}) + if creator.get("name") is None: + return None + return Run( + self._client, + creator["project"]["entityName"], + creator["project"]["name"], + creator["name"], + ) + + @ensure_logged + def json_encode(self) -> dict[str, Any]: + """Returns the artifact encoded to the JSON format. + + Returns: + A `dict` with `string` keys representing attributes of the artifact. + """ + return util.artifact_to_json(self) + + @staticmethod + def _expected_type( + entity_name: str, project_name: str, name: str, client: RetryingClient + ) -> str | None: + """Returns the expected type for a given artifact name and project.""" + query = gql( + """ + query ArtifactType( + $entityName: String, + $projectName: String, + $name: String! + ) { + project(name: $projectName, entityName: $entityName) { + artifact(name: $name) { + artifactType { + name + } + } + } + } + """ + ) + if ":" not in name: + name += ":latest" + response = client.execute( + query, + variable_values={ + "entityName": entity_name, + "projectName": project_name, + "name": name, + }, + ) + return ( + ((response.get("project") or {}).get("artifact") or {}).get("artifactType") + or {} + ).get("name") + + @staticmethod + def _normalize_metadata(metadata: dict[str, Any] | None) -> dict[str, Any]: + if metadata is None: + return {} + if not isinstance(metadata, dict): + raise TypeError(f"metadata must be dict, not {type(metadata)}") + return cast( + Dict[str, Any], json.loads(json.dumps(util.json_friendly_val(metadata))) + ) + + def _load_manifest(self, url: str) -> ArtifactManifest: + with requests.get(url) as response: + response.raise_for_status() + return ArtifactManifest.from_manifest_json(response.json()) + + def _ttl_duration_seconds_to_gql(self) -> int | None: + # Set 
artifact ttl value to ttl_duration_seconds if the user set a value + # otherwise use ttl_status to indicate the backend INHERIT(-1) or DISABLED(-2) when the TTL is None + # When ttl_change = None its a no op since nothing changed + INHERIT = -1 # noqa: N806 + DISABLED = -2 # noqa: N806 + + if not self._ttl_changed: + return None + if self._ttl_is_inherited: + return INHERIT + return self._ttl_duration_seconds or DISABLED + + +def _ttl_duration_seconds_from_gql(gql_ttl_duration_seconds: int | None) -> int | None: + # If gql_ttl_duration_seconds is not positive, its indicating that TTL is DISABLED(-2) + # gql_ttl_duration_seconds only returns None if the server is not compatible with setting Artifact TTLs + if gql_ttl_duration_seconds and gql_ttl_duration_seconds > 0: + return gql_ttl_duration_seconds + return None + + +def _gql_artifact_fragment() -> str: + """Return a GraphQL query fragment with all parseable Artifact attributes.""" + allowed_fields = set(InternalApi().server_artifact_introspection()) + + supports_ttl = "ttlIsInherited" in allowed_fields + supports_tags = "tags" in allowed_fields + + ttl_duration_seconds = "ttlDurationSeconds" if supports_ttl else "" + ttl_is_inherited = "ttlIsInherited" if supports_ttl else "" + + tags = "tags {name}" if supports_tags else "" + + return f""" + fragment ArtifactFragment on Artifact {{ + id + artifactSequence {{ + project {{ + entityName + name + }} + name + }} + versionIndex + artifactType {{ + name + }} + description + metadata + {ttl_duration_seconds} + {ttl_is_inherited} + aliases {{ + artifactCollection {{ + project {{ + entityName + name + }} + name + }} + alias + }} + {tags} + state + currentManifest {{ + file {{ + directUrl + }} + }} + commitHash + fileCount + createdAt + updatedAt + }} + """ + + +class _ArtifactVersionType(WBType): + name = "artifactVersion" + types = [Artifact] + + +TypeRegistry.add(_ArtifactVersionType) diff --git 
a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_instance_cache.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_instance_cache.py new file mode 100644 index 0000000000000000000000000000000000000000..e6fd6e9676d048a0a7fc0715bb7ae107ae01e7cc --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_instance_cache.py @@ -0,0 +1,17 @@ +"""Recent Artifact storage. + +Artifacts are registered in the cache to ensure they won't be immediately garbage +collected and can be retrieved by their ID. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from wandb.sdk.lib.capped_dict import CappedDict + +if TYPE_CHECKING: + from wandb.sdk.artifacts.artifact import Artifact + +# There is nothing special about the artifact cache, it's just a global capped dict. +artifact_instance_cache: dict[str, Artifact] = CappedDict(100) diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_manifest.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_manifest.py new file mode 100644 index 0000000000000000000000000000000000000000..74df51a7fd697374859f6eec3bed1d9f3b636c61 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_manifest.py @@ -0,0 +1,75 @@ +"""Artifact manifest.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Mapping + +from wandb.sdk.internal.internal_api import Api as InternalApi +from wandb.sdk.lib.hashutil import HexMD5 + +if TYPE_CHECKING: + from wandb.sdk.artifacts.artifact_manifest_entry import ArtifactManifestEntry + from wandb.sdk.artifacts.storage_policy import StoragePolicy + + +class ArtifactManifest: + entries: dict[str, ArtifactManifestEntry] + + @classmethod + def from_manifest_json( + cls, manifest_json: dict, api: InternalApi | None = None + ) -> ArtifactManifest: + if "version" not in manifest_json: + raise ValueError("Invalid manifest format. 
Must contain version field.") + version = manifest_json["version"] + for sub in cls.__subclasses__(): + if sub.version() == version: + return sub.from_manifest_json(manifest_json, api=api) + raise ValueError("Invalid manifest version.") + + @classmethod + def version(cls) -> int: + raise NotImplementedError + + def __init__( + self, + storage_policy: StoragePolicy, + entries: Mapping[str, ArtifactManifestEntry] | None = None, + ) -> None: + self.storage_policy = storage_policy + self.entries = dict(entries) if entries else {} + + def __len__(self) -> int: + return len(self.entries) + + def to_manifest_json(self) -> dict: + raise NotImplementedError + + def digest(self) -> HexMD5: + raise NotImplementedError + + def add_entry(self, entry: ArtifactManifestEntry, overwrite: bool = False) -> None: + path = entry.path + if not overwrite: + prev_entry = self.entries.get(path) + if prev_entry and (entry.digest != prev_entry.digest): + raise ValueError(f"Cannot add the same path twice: {path!r}") + self.entries[path] = entry + + def remove_entry(self, entry: ArtifactManifestEntry) -> None: + try: + del self.entries[entry.path] + except LookupError: + raise FileNotFoundError(f"Cannot remove missing entry: '{entry.path}'") + + def get_entry_by_path(self, path: str) -> ArtifactManifestEntry | None: + return self.entries.get(path) + + def get_entries_in_directory(self, directory: str) -> list[ArtifactManifestEntry]: + return [ + self.entries[entry_key] + for entry_key in self.entries + if entry_key.startswith( + directory + "/" + ) # entries use forward slash even for windows + ] diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_manifest_entry.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_manifest_entry.py new file mode 100644 index 0000000000000000000000000000000000000000..5c5ca53b1cb82f1bf0c00688fb3f1bbd21f4c751 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_manifest_entry.py @@ -0,0 +1,249 
@@ +"""Artifact manifest entry.""" + +from __future__ import annotations + +import json +import logging +import os +from pathlib import Path +from typing import TYPE_CHECKING +from urllib.parse import urlparse + +from wandb.sdk.lib import filesystem +from wandb.sdk.lib.deprecate import Deprecated, deprecate +from wandb.sdk.lib.hashutil import ( + B64MD5, + ETag, + b64_to_hex_id, + hex_to_b64_id, + md5_file_b64, +) +from wandb.sdk.lib.paths import FilePathStr, LogicalPath, StrPath, URIStr + +logger = logging.getLogger(__name__) + +if TYPE_CHECKING: + from typing_extensions import TypedDict + + from wandb.sdk.artifacts.artifact import Artifact + + class ArtifactManifestEntryDict(TypedDict, total=False): + path: str + digest: str + skip_cache: bool + ref: str + birthArtifactID: str + size: int + extra: dict + local_path: str + + +class ArtifactManifestEntry: + """A single entry in an artifact manifest.""" + + path: LogicalPath + digest: B64MD5 | URIStr | FilePathStr | ETag + skip_cache: bool + ref: FilePathStr | URIStr | None + birth_artifact_id: str | None + size: int | None + extra: dict + local_path: str | None + + _parent_artifact: Artifact | None = None + _download_url: str | None = None + + def __init__( + self, + path: StrPath, + digest: B64MD5 | URIStr | FilePathStr | ETag, + skip_cache: bool | None = False, + ref: FilePathStr | URIStr | None = None, + birth_artifact_id: str | None = None, + size: int | None = None, + extra: dict | None = None, + local_path: StrPath | None = None, + ) -> None: + self.path = LogicalPath(path) + self.digest = digest + self.ref = ref + self.birth_artifact_id = birth_artifact_id + self.size = size + self.extra = extra or {} + self.local_path = str(local_path) if local_path else None + if self.local_path and self.size is None: + self.size = Path(self.local_path).stat().st_size + self.skip_cache = skip_cache or False + + def __repr__(self) -> str: + cls = self.__class__.__name__ + ref = f", ref={self.ref!r}" if self.ref is not None 
else "" + birth_artifact_id = ( + f", birth_artifact_id={self.birth_artifact_id!r}" + if self.birth_artifact_id is not None + else "" + ) + size = f", size={self.size}" if self.size is not None else "" + extra = f", extra={json.dumps(self.extra)}" if self.extra else "" + local_path = f", local_path={self.local_path!r}" if self.local_path else "" + skip_cache = f", skip_cache={self.skip_cache}" + others = ref + birth_artifact_id + size + extra + local_path + skip_cache + return f"{cls}(path={self.path!r}, digest={self.digest!r}{others})" + + def __eq__(self, other: object) -> bool: + """Strict equality, comparing all public fields. + + ArtifactManifestEntries for the same file may not compare equal if they were + added in different ways or created for different parent artifacts. + """ + if not isinstance(other, ArtifactManifestEntry): + return False + return ( + self.path == other.path + and self.digest == other.digest + and self.ref == other.ref + and self.birth_artifact_id == other.birth_artifact_id + and self.size == other.size + and self.extra == other.extra + and self.local_path == other.local_path + and self.skip_cache == other.skip_cache + ) + + @property + def name(self) -> LogicalPath: + """Deprecated; use `path` instead.""" + deprecate( + field_name=Deprecated.artifactmanifestentry__name, + warning_message="ArtifactManifestEntry.name is deprecated, use .path instead.", + ) + return self.path + + def parent_artifact(self) -> Artifact: + """Get the artifact to which this artifact entry belongs. + + Returns: + (PublicArtifact): The parent artifact + """ + if self._parent_artifact is None: + raise NotImplementedError + return self._parent_artifact + + def download( + self, root: str | None = None, skip_cache: bool | None = None + ) -> FilePathStr: + """Download this artifact entry to the specified root path. + + Args: + root: (str, optional) The root path in which to download this + artifact entry. Defaults to the artifact's root. 
+ + Returns: + (str): The path of the downloaded artifact entry. + """ + if self._parent_artifact is None: + raise NotImplementedError + + root = root or self._parent_artifact._default_root() + self._parent_artifact._add_download_root(root) + path = str(Path(self.path)) + dest_path = os.path.join(root, path) + + if skip_cache: + override_cache_path = dest_path + else: + override_cache_path = None + + # Skip checking the cache (and possibly downloading) if the file already exists + # and has the digest we're expecting. + try: + md5_hash = md5_file_b64(dest_path) + except (FileNotFoundError, IsADirectoryError): + logger.debug(f"unable to find {dest_path}, skip searching for file") + else: + if self.digest == md5_hash: + return FilePathStr(dest_path) + + if self.ref is not None: + cache_path = self._parent_artifact.manifest.storage_policy.load_reference( + self, local=True, dest_path=override_cache_path + ) + else: + cache_path = self._parent_artifact.manifest.storage_policy.load_file( + self._parent_artifact, self, dest_path=override_cache_path + ) + + if skip_cache: + return FilePathStr(dest_path) + else: + return FilePathStr( + str(filesystem.copy_or_overwrite_changed(cache_path, dest_path)) + ) + + def ref_target(self) -> FilePathStr | URIStr: + """Get the reference URL that is targeted by this artifact entry. + + Returns: + (str): The reference URL of this artifact entry. + + Raises: + ValueError: If this artifact entry was not a reference. + """ + if self.ref is None: + raise ValueError("Only reference entries support ref_target().") + if self._parent_artifact is None: + return self.ref + return self._parent_artifact.manifest.storage_policy.load_reference( + self._parent_artifact.manifest.entries[self.path], local=False + ) + + def ref_url(self) -> str: + """Get a URL to this artifact entry. + + These URLs can be referenced by another artifact. + + Returns: + (str): A URL representing this artifact entry. 
+ + Examples: + Basic usage + ``` + ref_url = source_artifact.get_entry("file.txt").ref_url() + derived_artifact.add_reference(ref_url) + ``` + """ + if self._parent_artifact is None: + raise NotImplementedError + assert self._parent_artifact.id is not None + return ( + "wandb-artifact://" + + b64_to_hex_id(B64MD5(self._parent_artifact.id)) + + "/" + + self.path + ) + + def to_json(self) -> ArtifactManifestEntryDict: + contents: ArtifactManifestEntryDict = { + "path": self.path, + "digest": self.digest, + } + if self.size is not None: + contents["size"] = self.size + if self.ref: + contents["ref"] = self.ref + if self.birth_artifact_id: + contents["birthArtifactID"] = self.birth_artifact_id + if self.local_path: + contents["local_path"] = self.local_path + if self.skip_cache: + contents["skip_cache"] = self.skip_cache + if self.extra: + contents["extra"] = self.extra + return contents + + def _is_artifact_reference(self) -> bool: + return self.ref is not None and urlparse(self.ref).scheme == "wandb-artifact" + + def _referenced_artifact_id(self) -> str | None: + if not self._is_artifact_reference(): + return None + return hex_to_b64_id(urlparse(self.ref).netloc) diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_manifests/__init__.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_manifests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_manifests/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_manifests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..074f2ebad04b0a57306d6024f6e7c3102e5fce6d Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_manifests/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_manifests/__pycache__/artifact_manifest_v1.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_manifests/__pycache__/artifact_manifest_v1.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fe9b43e025856c938c3df9ceedd53c7523fcf0e Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_manifests/__pycache__/artifact_manifest_v1.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_manifests/artifact_manifest_v1.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_manifests/artifact_manifest_v1.py new file mode 100644 index 0000000000000000000000000000000000000000..613871ff6c137385ad026b7f397a00904637453e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_manifests/artifact_manifest_v1.py @@ -0,0 +1,92 @@ +"""Artifact manifest v1.""" + +from __future__ import annotations + +from typing import Any, Mapping + +from wandb.sdk.artifacts.artifact_manifest import ArtifactManifest +from wandb.sdk.artifacts.artifact_manifest_entry import ArtifactManifestEntry +from wandb.sdk.artifacts.storage_policy import StoragePolicy +from wandb.sdk.internal.internal_api import Api as InternalApi +from wandb.sdk.lib.hashutil import HexMD5, _md5 + + +class ArtifactManifestV1(ArtifactManifest): + @classmethod + def version(cls) -> int: + return 1 + + @classmethod + def from_manifest_json( + cls, manifest_json: dict, api: InternalApi | None = None + ) -> ArtifactManifestV1: + if manifest_json["version"] != cls.version(): + raise ValueError( + "Expected manifest version 1, got {}".format(manifest_json["version"]) + ) + + storage_policy_name = manifest_json["storagePolicy"] + storage_policy_config = manifest_json.get("storagePolicyConfig", {}) + storage_policy_cls = StoragePolicy.lookup_by_name(storage_policy_name) + + entries: Mapping[str, 
ArtifactManifestEntry] + entries = { + name: ArtifactManifestEntry( + path=name, + digest=val["digest"], + birth_artifact_id=val.get("birthArtifactID"), + ref=val.get("ref"), + size=val.get("size"), + extra=val.get("extra"), + local_path=val.get("local_path"), + skip_cache=val.get("skip_cache"), + ) + for name, val in manifest_json["contents"].items() + } + + return cls( + storage_policy_cls.from_config(storage_policy_config, api=api), entries + ) + + def __init__( + self, + storage_policy: StoragePolicy, + entries: Mapping[str, ArtifactManifestEntry] | None = None, + ) -> None: + super().__init__(storage_policy, entries=entries) + + def to_manifest_json(self) -> dict: + """This is the JSON that's stored in wandb_manifest.json. + + If include_local is True we also include the local paths to files. This is + used to represent an artifact that's waiting to be saved on the current + system. We don't need to include the local paths in the artifact manifest + contents. + """ + contents = {} + for entry in sorted(self.entries.values(), key=lambda k: k.path): + json_entry: dict[str, Any] = { + "digest": entry.digest, + } + if entry.birth_artifact_id: + json_entry["birthArtifactID"] = entry.birth_artifact_id + if entry.ref: + json_entry["ref"] = entry.ref + if entry.extra: + json_entry["extra"] = entry.extra + if entry.size is not None: + json_entry["size"] = entry.size + contents[entry.path] = json_entry + return { + "version": self.__class__.version(), + "storagePolicy": self.storage_policy.name(), + "storagePolicyConfig": self.storage_policy.config() or {}, + "contents": contents, + } + + def digest(self) -> HexMD5: + hasher = _md5() + hasher.update(b"wandb-artifact-manifest-v1\n") + for name, entry in sorted(self.entries.items(), key=lambda kv: kv[0]): + hasher.update(f"{name}:{entry.digest}\n".encode()) + return HexMD5(hasher.hexdigest()) diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_saver.py 
b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_saver.py new file mode 100644 index 0000000000000000000000000000000000000000..05b6d9c145eb359d57c554f24aab727690694523 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_saver.py @@ -0,0 +1,265 @@ +"""Artifact saver.""" + +from __future__ import annotations + +import concurrent.futures +import json +import os +import tempfile +from typing import TYPE_CHECKING, Awaitable, Sequence + +import wandb +import wandb.filesync.step_prepare +from wandb import util +from wandb.sdk.artifacts.artifact_manifest import ArtifactManifest +from wandb.sdk.lib.hashutil import B64MD5, b64_to_hex_id, md5_file_b64 +from wandb.sdk.lib.paths import URIStr + +if TYPE_CHECKING: + from typing import Protocol + + from wandb.sdk.artifacts.artifact_manifest_entry import ArtifactManifestEntry + from wandb.sdk.internal.file_pusher import FilePusher + from wandb.sdk.internal.internal_api import Api as InternalApi + from wandb.sdk.internal.progress import ProgressFn + + class SaveFn(Protocol): + def __call__( + self, entry: ArtifactManifestEntry, progress_callback: ProgressFn + ) -> bool: + pass + + class SaveFnAsync(Protocol): + def __call__( + self, entry: ArtifactManifestEntry, progress_callback: ProgressFn + ) -> Awaitable[bool]: + pass + + +class ArtifactSaver: + _server_artifact: dict | None # TODO better define this dict + + def __init__( + self, + api: InternalApi, + digest: str, + manifest_json: dict, + file_pusher: FilePusher, + is_user_created: bool = False, + ) -> None: + self._api = api + self._file_pusher = file_pusher + self._digest = digest + self._manifest = ArtifactManifest.from_manifest_json( + manifest_json, + api=self._api, + ) + self._is_user_created = is_user_created + self._server_artifact = None + + def save( + self, + type: str, + name: str, + client_id: str, + sequence_client_id: str, + distributed_id: str | None = None, + finalize: bool = True, + metadata: dict | None = None, 
+ ttl_duration_seconds: int | None = None, + description: str | None = None, + aliases: Sequence[str] | None = None, + tags: Sequence[str] | None = None, + use_after_commit: bool = False, + incremental: bool = False, + history_step: int | None = None, + base_id: str | None = None, + ) -> dict | None: + return self._save_internal( + type, + name, + client_id, + sequence_client_id, + distributed_id, + finalize, + metadata, + ttl_duration_seconds, + description, + aliases, + tags, + use_after_commit, + incremental, + history_step, + base_id, + ) + + def _save_internal( + self, + type: str, + name: str, + client_id: str, + sequence_client_id: str, + distributed_id: str | None = None, + finalize: bool = True, + metadata: dict | None = None, + ttl_duration_seconds: int | None = None, + description: str | None = None, + aliases: Sequence[str] | None = None, + tags: Sequence[str] | None = None, + use_after_commit: bool = False, + incremental: bool = False, + history_step: int | None = None, + base_id: str | None = None, + ) -> dict | None: + alias_specs = [] + for alias in aliases or []: + alias_specs.append({"artifactCollectionName": name, "alias": alias}) + + tag_specs = [{"tagName": tag} for tag in tags or []] + + """Returns the server artifact.""" + self._server_artifact, latest = self._api.create_artifact( + type, + name, + self._digest, + metadata=metadata, + ttl_duration_seconds=ttl_duration_seconds, + aliases=alias_specs, + tags=tag_specs, + description=description, + is_user_created=self._is_user_created, + distributed_id=distributed_id, + client_id=client_id, + sequence_client_id=sequence_client_id, + history_step=history_step, + ) + + assert self._server_artifact is not None # mypy optionality unwrapper + artifact_id = self._server_artifact["id"] + if base_id is None and latest: + base_id = latest["id"] + if self._server_artifact["state"] == "COMMITTED": + if use_after_commit: + self._api.use_artifact(artifact_id) + return self._server_artifact + if ( + 
self._server_artifact["state"] != "PENDING" + # For old servers, see https://github.com/wandb/wandb/pull/6190 + and self._server_artifact["state"] != "DELETED" + ): + raise Exception( + 'Unknown artifact state "{}"'.format(self._server_artifact["state"]) + ) + + manifest_type = "FULL" + manifest_filename = "wandb_manifest.json" + if incremental: + manifest_type = "INCREMENTAL" + manifest_filename = "wandb_manifest.incremental.json" + elif distributed_id: + manifest_type = "PATCH" + manifest_filename = "wandb_manifest.patch.json" + artifact_manifest_id, _ = self._api.create_artifact_manifest( + manifest_filename, + "", + artifact_id, + base_artifact_id=base_id, + include_upload=False, + type=manifest_type, + ) + + step_prepare = wandb.filesync.step_prepare.StepPrepare( + self._api, 0.1, 0.01, 1000 + ) # TODO: params + step_prepare.start() + + # Upload Artifact "L1" files, the actual artifact contents + self._file_pusher.store_manifest_files( + self._manifest, + artifact_id, + lambda entry, progress_callback: self._manifest.storage_policy.store_file( + artifact_id, + artifact_manifest_id, + entry, + step_prepare, + progress_callback=progress_callback, + ), + ) + + def before_commit() -> None: + self._resolve_client_id_manifest_references() + with tempfile.NamedTemporaryFile("w+", suffix=".json", delete=False) as fp: + path = os.path.abspath(fp.name) + json.dump(self._manifest.to_manifest_json(), fp, indent=4) + digest = md5_file_b64(path) + if distributed_id or incremental: + # If we're in the distributed flow, we want to update the + # patch manifest we created with our finalized digest. + _, resp = self._api.update_artifact_manifest( + artifact_manifest_id, + digest=digest, + ) + else: + # In the regular flow, we can recreate the full manifest with the + # updated digest. + # + # NOTE: We do this for backwards compatibility with older backends + # that don't support the 'updateArtifactManifest' API. 
+ _, resp = self._api.create_artifact_manifest( + manifest_filename, + digest, + artifact_id, + base_artifact_id=base_id, + ) + + # We're duplicating the file upload logic a little, which isn't great. + upload_url = resp["uploadUrl"] + upload_headers = resp["uploadHeaders"] + extra_headers = {} + for upload_header in upload_headers: + key, val = upload_header.split(":", 1) + extra_headers[key] = val + with open(path, "rb") as fp2: + self._api.upload_file_retry( + upload_url, + fp2, + extra_headers=extra_headers, + ) + + commit_result: concurrent.futures.Future[None] = concurrent.futures.Future() + + # This will queue the commit. It will only happen after all the file uploads are done + self._file_pusher.commit_artifact( + artifact_id, + finalize=finalize, + before_commit=before_commit, + result_future=commit_result, + ) + + # Block until all artifact files are uploaded and the + # artifact is committed. + try: + commit_result.result() + finally: + step_prepare.shutdown() + + if finalize and use_after_commit: + self._api.use_artifact(artifact_id) + + return self._server_artifact + + def _resolve_client_id_manifest_references(self) -> None: + for entry_path in self._manifest.entries: + entry = self._manifest.entries[entry_path] + if entry.ref is not None: + if entry.ref.startswith("wandb-client-artifact:"): + client_id = util.host_from_path(entry.ref) + artifact_file_path = util.uri_from_path(entry.ref) + artifact_id = self._api._resolve_client_id(client_id) + if artifact_id is None: + raise RuntimeError(f"Could not resolve client id {client_id}") + entry.ref = URIStr( + "wandb-artifact://{}/{}".format( + b64_to_hex_id(B64MD5(artifact_id)), artifact_file_path + ) + ) diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_state.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_state.py new file mode 100644 index 0000000000000000000000000000000000000000..cdb4a0f887a6bf1e80a7a287bcda5418f19ffa9a --- /dev/null +++ 
b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_state.py @@ -0,0 +1,11 @@ +"""Artifact state.""" + +from enum import Enum + + +class ArtifactState(Enum): + PENDING = "PENDING" + COMMITTED = "COMMITTED" + DELETED = "DELETED" + GARBAGE_COLLECTED = "GARBAGE_COLLECTED" + PENDING_DELETION = "PENDING_DELETION" diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_ttl.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_ttl.py new file mode 100644 index 0000000000000000000000000000000000000000..cde7599badef7449da3b43e5a38ee022037e4ae8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/artifact_ttl.py @@ -0,0 +1,7 @@ +"""Artifact TTL.""" + +from enum import Enum + + +class ArtifactTTL(Enum): + INHERIT = 0 diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/exceptions.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..06ab6578212e26a09c2d90f180599535083949a5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/exceptions.py @@ -0,0 +1,57 @@ +"""Artifact exceptions.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, TypeVar + +from wandb import errors + +if TYPE_CHECKING: + from wandb.sdk.artifacts.artifact import Artifact + + ArtifactT = TypeVar("ArtifactT", bound=Artifact) + + +class ArtifactStatusError(AttributeError): + """Raised when an artifact is in an invalid state for the requested operation.""" + + def __init__( + self, + msg: str = "Artifact is in an invalid state for the requested operation.", + name: str | None = None, + obj: ArtifactT | None = None, + ): + # Follow the same pattern as AttributeError in python 3.10+ by `name/obj` attributes + # See: https://docs.python.org/3/library/exceptions.html#AttributeError + try: + super().__init__(msg, name=name, obj=obj) + except TypeError: + # The `name`/`obj` keyword args and attributes were 
only added in python >= 3.10 + super().__init__(msg) + self.name = name or "" + self.obj = obj + + +class ArtifactNotLoggedError(ArtifactStatusError): + """Raised for Artifact methods or attributes only available after logging.""" + + def __init__(self, fullname: str, obj: ArtifactT): + *_, name = fullname.split(".") + msg = ( + f"{fullname!r} used prior to logging artifact or while in offline mode. " + f"Call {type(obj).wait.__qualname__}() before accessing logged artifact properties." + ) + super().__init__(msg=msg, name=name, obj=obj) + + +class ArtifactFinalizedError(ArtifactStatusError): + """Raised for Artifact methods or attributes that can't be changed after logging.""" + + def __init__(self, fullname: str, obj: ArtifactT): + *_, name = fullname.split(".") + msg = f"{fullname!r} used on logged artifact. Can't modify finalized artifact." + super().__init__(msg=msg, name=name, obj=obj) + + +class WaitTimeoutError(errors.Error): + """Raised when wait() timeout occurs before process is finished.""" diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/staging.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/staging.py new file mode 100644 index 0000000000000000000000000000000000000000..d970d71b02f381fe7985e1cc8aed9b9d551ec890 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/staging.py @@ -0,0 +1,25 @@ +"""Manages artifact file staging. + +Artifact files are copied to the staging area as soon as they are added to an artifact +in order to avoid file changes corrupting the artifact. Once the upload is complete, the +file should be moved to the artifact cache. 
+""" + +import os + +from wandb import env +from wandb.sdk.lib.filesystem import mkdir_exists_ok +from wandb.sdk.lib.paths import FilePathStr + + +def get_staging_dir() -> FilePathStr: + path = os.path.join(env.get_data_dir(), "artifacts", "staging") + try: + mkdir_exists_ok(path) + except OSError as e: + raise PermissionError( + f"Unable to write staging files to {path}. To fix this problem, please set " + f"{env.DATA_DIR} to a directory where you have the necessary write access." + ) from e + + return FilePathStr(os.path.abspath(os.path.expanduser(path))) diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handler.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..0f3abd02c93dd87497fbbfc413dbc9695a259e06 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handler.py @@ -0,0 +1,62 @@ +"""Storage handler.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Sequence + +from wandb.sdk.lib.paths import FilePathStr, URIStr + +if TYPE_CHECKING: + from urllib.parse import ParseResult + + from wandb.sdk.artifacts.artifact import Artifact + from wandb.sdk.artifacts.artifact_manifest_entry import ArtifactManifestEntry + +DEFAULT_MAX_OBJECTS = 10**7 + + +class StorageHandler: + def can_handle(self, parsed_url: ParseResult) -> bool: + """Checks whether this handler can handle the given url. + + Returns: + Whether this handler can handle the given url. + """ + raise NotImplementedError + + def load_path( + self, + manifest_entry: ArtifactManifestEntry, + local: bool = False, + ) -> URIStr | FilePathStr: + """Load a file or directory given the corresponding index entry. 
+ + Args: + manifest_entry: The index entry to load + local: Whether to load the file locally or not + + Returns: + A path to the file represented by `index_entry` + """ + raise NotImplementedError + + def store_path( + self, + artifact: Artifact, + path: URIStr | FilePathStr, + name: str | None = None, + checksum: bool = True, + max_objects: int | None = None, + ) -> Sequence[ArtifactManifestEntry]: + """Store the file or directory at the given path to the specified artifact. + + Args: + path: The path to store + name: If specified, the logical name that should map to `path` + checksum: Whether to compute the checksum of the file + max_objects: The maximum number of objects to store + + Returns: + A list of manifest entries to store within the artifact + """ + raise NotImplementedError diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__init__.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7969d0d74daf1327287a2c796b8c477f9f4a6e73 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/azure_handler.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/azure_handler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..273dd00b0fdd29722dbb211c0ca8cea0d65f9e6f Binary files /dev/null and 
b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/azure_handler.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/gcs_handler.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/gcs_handler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0412171bd1f73ff4e5a734553f66f807048a8ef0 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/gcs_handler.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/http_handler.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/http_handler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75997eab4865dc093df4e2fe91b4beeef6e92ac3 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/http_handler.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/local_file_handler.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/local_file_handler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..159dece7b2d1d2257e39d9818a8c78240d0f5509 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/local_file_handler.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/multi_handler.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/multi_handler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea9ad85a0ea7459c6bebc86a049490101a416b00 Binary files 
/dev/null and b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/multi_handler.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/s3_handler.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/s3_handler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3d2983efe3c640b2a8b695f6be64eba99a987ed Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/s3_handler.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/tracking_handler.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/tracking_handler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..141de2842283c53c42bfe96da24856a140f3c9e0 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/tracking_handler.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/wb_artifact_handler.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/wb_artifact_handler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2352133968bf144c361f07f50ea898a3b124e15b Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/wb_artifact_handler.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/wb_local_artifact_handler.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/wb_local_artifact_handler.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..b74fb4e0c52383270850961227039566ee4ff899 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/__pycache__/wb_local_artifact_handler.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/azure_handler.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/azure_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..0106f9159644f1c6e9e222b82788fdebc4110c60 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/azure_handler.py @@ -0,0 +1,213 @@ +"""Azure storage handler.""" + +from __future__ import annotations + +from pathlib import PurePosixPath +from types import ModuleType +from typing import TYPE_CHECKING, Sequence +from urllib.parse import ParseResult, parse_qsl, urlparse + +import wandb +from wandb import util +from wandb.sdk.artifacts.artifact_file_cache import get_artifact_file_cache +from wandb.sdk.artifacts.artifact_manifest_entry import ArtifactManifestEntry +from wandb.sdk.artifacts.storage_handler import DEFAULT_MAX_OBJECTS, StorageHandler +from wandb.sdk.lib.hashutil import ETag +from wandb.sdk.lib.paths import FilePathStr, LogicalPath, StrPath, URIStr + +if TYPE_CHECKING: + import azure.identity # type: ignore + import azure.storage.blob # type: ignore + + from wandb.sdk.artifacts.artifact import Artifact + + +class AzureHandler(StorageHandler): + def can_handle(self, parsed_url: ParseResult) -> bool: + return parsed_url.scheme == "https" and parsed_url.netloc.endswith( + ".blob.core.windows.net" + ) + + def __init__(self, scheme: str | None = None) -> None: + self._cache = get_artifact_file_cache() + + def load_path( + self, + manifest_entry: ArtifactManifestEntry, + local: bool = False, + ) -> URIStr | FilePathStr: + assert manifest_entry.ref is not None + if not local: + return manifest_entry.ref + + path, hit, cache_open = 
self._cache.check_etag_obj_path( + URIStr(manifest_entry.ref), + ETag(manifest_entry.digest), + manifest_entry.size or 0, + ) + if hit: + return path + + account_url, container_name, blob_name, query = self._parse_uri( + manifest_entry.ref + ) + version_id = manifest_entry.extra.get("versionID") + blob_service_client = self._get_module("azure.storage.blob").BlobServiceClient( + account_url, credential=self._get_credential(account_url) + ) + blob_client = blob_service_client.get_blob_client( + container=container_name, blob=blob_name + ) + if version_id is None: + # Try current version, then all versions. + try: + downloader = blob_client.download_blob( + etag=manifest_entry.digest, + match_condition=self._get_module( + "azure.core" + ).MatchConditions.IfNotModified, + ) + except self._get_module("azure.core.exceptions").ResourceModifiedError: + container_client = blob_service_client.get_container_client( + container_name + ) + for blob_properties in container_client.walk_blobs( + name_starts_with=blob_name, include=["versions"] + ): + if ( + blob_properties.name == blob_name + and blob_properties.etag == manifest_entry.digest + and blob_properties.version_id is not None + ): + downloader = blob_client.download_blob( + version_id=blob_properties.version_id + ) + break + else: # didn't break + raise ValueError( + f"Couldn't find blob version for {manifest_entry.ref} matching " + f"etag {manifest_entry.digest}." 
+ ) + else: + downloader = blob_client.download_blob(version_id=version_id) + with cache_open(mode="wb") as f: + downloader.readinto(f) + return path + + def store_path( + self, + artifact: Artifact, + path: URIStr | FilePathStr, + name: StrPath | None = None, + checksum: bool = True, + max_objects: int | None = None, + ) -> Sequence[ArtifactManifestEntry]: + account_url, container_name, blob_name, query = self._parse_uri(path) + path = URIStr(f"{account_url}/{container_name}/{blob_name}") + + if not checksum: + return [ + ArtifactManifestEntry(path=name or blob_name, digest=path, ref=path) + ] + + blob_service_client = self._get_module("azure.storage.blob").BlobServiceClient( + account_url, credential=self._get_credential(account_url) + ) + blob_client = blob_service_client.get_blob_client( + container=container_name, blob=blob_name + ) + if blob_client.exists(version_id=query.get("versionId")): + blob_properties = blob_client.get_blob_properties( + version_id=query.get("versionId") + ) + + if not self._is_directory_stub(blob_properties): + return [ + self._create_entry( + blob_properties, + path=name or PurePosixPath(blob_name).name, + ref=URIStr( + f"{account_url}/{container_name}/{blob_properties.name}" + ), + ) + ] + + entries: list[ArtifactManifestEntry] = [] + container_client = blob_service_client.get_container_client(container_name) + max_objects = max_objects or DEFAULT_MAX_OBJECTS + for blob_properties in container_client.list_blobs( + name_starts_with=f"{blob_name}/" + ): + if len(entries) >= max_objects: + wandb.termwarn( + f"Found more than {max_objects} objects under path, limiting upload " + f"to {max_objects} objects. 
Increase max_objects to upload more" + ) + break + if not self._is_directory_stub(blob_properties): + suffix = PurePosixPath(blob_properties.name).relative_to(blob_name) + entries.append( + self._create_entry( + blob_properties, + path=LogicalPath(name) / suffix if name else suffix, + ref=URIStr( + f"{account_url}/{container_name}/{blob_properties.name}" + ), + ) + ) + + return entries + + def _get_module(self, name: str) -> ModuleType: + module = util.get_module( + name, + lazy=False, + required="Azure references require the azure library, run " + "pip install wandb[azure]", + ) + assert isinstance(module, ModuleType) + return module + + def _get_credential( + self, account_url: str + ) -> azure.identity.DefaultAzureCredential | str: + if ( + wandb.run + and wandb.run.settings.azure_account_url_to_access_key is not None + and account_url in wandb.run.settings.azure_account_url_to_access_key + ): + return wandb.run.settings.azure_account_url_to_access_key[account_url] + return self._get_module("azure.identity").DefaultAzureCredential() + + def _parse_uri(self, uri: str) -> tuple[str, str, str, dict[str, str]]: + parsed_url = urlparse(uri) + query = dict(parse_qsl(parsed_url.query)) + account_url = f"{parsed_url.scheme}://{parsed_url.netloc}" + _, container_name, blob_name = parsed_url.path.split("/", 2) + return account_url, container_name, blob_name, query + + def _create_entry( + self, + blob_properties: azure.storage.blob.BlobProperties, + path: StrPath, + ref: URIStr, + ) -> ArtifactManifestEntry: + extra = {"etag": blob_properties.etag.strip('"')} + if blob_properties.version_id: + extra["versionID"] = blob_properties.version_id + return ArtifactManifestEntry( + path=path, + ref=ref, + digest=blob_properties.etag.strip('"'), + size=blob_properties.size, + extra=extra, + ) + + def _is_directory_stub( + self, blob_properties: azure.storage.blob.BlobProperties + ) -> bool: + return ( + blob_properties.has_key("metadata") + and "hdi_isfolder" in 
blob_properties.metadata + and blob_properties.metadata["hdi_isfolder"] == "true" + ) diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/gcs_handler.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/gcs_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..b0600bb6a2f8277e48224525099c02aecb01ab8a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/gcs_handler.py @@ -0,0 +1,224 @@ +"""GCS storage handler.""" + +from __future__ import annotations + +import time +from pathlib import PurePosixPath +from typing import TYPE_CHECKING, Sequence +from urllib.parse import ParseResult, urlparse + +from wandb import util +from wandb.errors.term import termlog +from wandb.sdk.artifacts.artifact_file_cache import get_artifact_file_cache +from wandb.sdk.artifacts.artifact_manifest_entry import ArtifactManifestEntry +from wandb.sdk.artifacts.storage_handler import DEFAULT_MAX_OBJECTS, StorageHandler +from wandb.sdk.lib.hashutil import ETag +from wandb.sdk.lib.paths import FilePathStr, StrPath, URIStr + +if TYPE_CHECKING: + import google.cloud.storage as gcs_module # type: ignore + + from wandb.sdk.artifacts.artifact import Artifact + + +class _GCSIsADirectoryError(Exception): + """Raised when we try to download a GCS folder.""" + + +class GCSHandler(StorageHandler): + _client: gcs_module.client.Client | None + + def __init__(self, scheme: str | None = None) -> None: + self._scheme = scheme or "gs" + self._client = None + self._cache = get_artifact_file_cache() + + def can_handle(self, parsed_url: ParseResult) -> bool: + return parsed_url.scheme == self._scheme + + def init_gcs(self) -> gcs_module.client.Client: + if self._client is not None: + return self._client + storage = util.get_module( + "google.cloud.storage", + required="gs:// references requires the google-cloud-storage library, run pip install wandb[gcp]", + ) + self._client = storage.Client() + 
return self._client + + def _parse_uri(self, uri: str) -> tuple[str, str, str | None]: + url = urlparse(uri) + bucket = url.netloc + key = url.path[1:] + version = url.fragment if url.fragment else None + return bucket, key, version + + def load_path( + self, + manifest_entry: ArtifactManifestEntry, + local: bool = False, + ) -> URIStr | FilePathStr: + assert manifest_entry.ref is not None + if not local: + return manifest_entry.ref + + path, hit, cache_open = self._cache.check_etag_obj_path( + url=URIStr(manifest_entry.ref), + etag=ETag(manifest_entry.digest), + size=manifest_entry.size if manifest_entry.size is not None else 0, + ) + if hit: + return path + + self.init_gcs() + assert self._client is not None # mypy: unwraps optionality + assert manifest_entry.ref is not None + bucket, key, _ = self._parse_uri(manifest_entry.ref) + version = manifest_entry.extra.get("versionID") + + if self._is_dir(manifest_entry): + raise _GCSIsADirectoryError( + f"Unable to download GCS folder {manifest_entry.ref!r}, skipping" + ) + + obj = None + # First attempt to get the generation specified, this will return None if versioning is not enabled + if version is not None: + obj = self._client.bucket(bucket).get_blob(key, generation=version) + + if obj is None: + # Object versioning is disabled on the bucket, so just get + # the latest version and make sure the MD5 matches. 
+ obj = self._client.bucket(bucket).get_blob(key) + if obj is None: + raise ValueError( + f"Unable to download object {manifest_entry.ref} with generation {version}" + ) + if obj.etag != manifest_entry.digest: + raise ValueError( + f"Digest mismatch for object {manifest_entry.ref}: " + f"expected {manifest_entry.digest} but found {obj.etag}" + ) + + with cache_open(mode="wb") as f: + obj.download_to_file(f) + return path + + def store_path( + self, + artifact: Artifact, + path: URIStr | FilePathStr, + name: StrPath | None = None, + checksum: bool = True, + max_objects: int | None = None, + ) -> Sequence[ArtifactManifestEntry]: + self.init_gcs() + assert self._client is not None # mypy: unwraps optionality + + # After parsing any query params / fragments for additional context, + # such as version identifiers, pare down the path to just the bucket + # and key. + bucket, key, version = self._parse_uri(path) + path = URIStr(f"{self._scheme}://{bucket}/{key}") + max_objects = max_objects or DEFAULT_MAX_OBJECTS + + if not checksum: + return [ArtifactManifestEntry(path=name or key, ref=path, digest=path)] + + start_time = None + obj = self._client.bucket(bucket).get_blob(key, generation=version) + if obj is None and version is not None: + raise ValueError(f"Object does not exist: {path}#{version}") + multi = obj is None + if multi: + start_time = time.time() + termlog( + f'Generating checksum for up to {max_objects} objects with prefix "{key}"... ', + newline=False, + ) + objects = self._client.bucket(bucket).list_blobs( + prefix=key, max_results=max_objects + ) + else: + objects = [obj] + + entries = [ + self._entry_from_obj(obj, path, name, prefix=key, multi=multi) + for obj in objects + if not obj.name.endswith("/") + ] + if start_time is not None: + termlog("Done. 
%.1fs" % (time.time() - start_time), prefix=False) + if len(entries) > max_objects: + raise ValueError( + f"Exceeded {max_objects} objects tracked, pass max_objects to add_reference" + ) + return entries + + def _entry_from_obj( + self, + obj: gcs_module.blob.Blob, + path: str, + name: StrPath | None = None, + prefix: str = "", + multi: bool = False, + ) -> ArtifactManifestEntry: + """Create an ArtifactManifestEntry from a GCS object. + + Args: + obj: The GCS object + path: The GCS-style path (e.g.: "gs://bucket/file.txt") + name: The user assigned name, or None if not specified + prefix: The prefix to add (will be the same as `path` for directories) + multi: Whether or not this is a multi-object add. + """ + bucket, key, _ = self._parse_uri(path) + + # Always use posix paths, since that's what S3 uses. + posix_key = PurePosixPath(obj.name) # the bucket key + posix_path = PurePosixPath(bucket) / PurePosixPath( + key + ) # the path, with the scheme stripped + posix_prefix = PurePosixPath(prefix) # the prefix, if adding a prefix + posix_name = PurePosixPath(name or "") + posix_ref = posix_path + + if name is None: + # We're adding a directory (prefix), so calculate a relative path. + if str(posix_prefix) in str(posix_key) and posix_prefix != posix_key: + posix_name = posix_key.relative_to(posix_prefix) + posix_ref = posix_path / posix_name + else: + posix_name = PurePosixPath(posix_key.name) + posix_ref = posix_path + elif multi: + # We're adding a directory with a name override. 
+ relpath = posix_key.relative_to(posix_prefix) + posix_name = posix_name / relpath + posix_ref = posix_path / relpath + return ArtifactManifestEntry( + path=posix_name, + ref=URIStr(f"{self._scheme}://{str(posix_ref)}"), + digest=obj.etag, + size=obj.size, + extra={"versionID": obj.generation}, + ) + + def _is_dir( + self, + manifest_entry: ArtifactManifestEntry, + ) -> bool: + assert self._client is not None + assert manifest_entry.ref is not None + bucket, key, _ = self._parse_uri(manifest_entry.ref) + bucket_obj = self._client.bucket(bucket) + # A gcs bucket key should end with a forward slash on gcloud, but + # we save these refs without the forward slash in the manifest entry + # so we check the size and extension, make sure its not referring to + # an actual file with this reference, and that the ref with the slash + # exists on gcloud + return key.endswith("/") or ( + not (manifest_entry.size or PurePosixPath(key).suffix) + and bucket_obj.get_blob(key) is None + and bucket_obj.get_blob(f"{key}/") is not None + ) diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/http_handler.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/http_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..a06af2683439925ded44e0ebf4b4bcd0dca1a018 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/http_handler.py @@ -0,0 +1,114 @@ +"""HTTP storage handler.""" + +from __future__ import annotations + +import os +from typing import TYPE_CHECKING, Sequence +from urllib.parse import ParseResult + +from wandb.sdk.artifacts.artifact_file_cache import get_artifact_file_cache +from wandb.sdk.artifacts.artifact_manifest_entry import ArtifactManifestEntry +from wandb.sdk.artifacts.storage_handler import StorageHandler +from wandb.sdk.internal.thread_local_settings import _thread_local_api_settings +from wandb.sdk.lib.hashutil import ETag +from wandb.sdk.lib.paths import 
FilePathStr, StrPath, URIStr + +if TYPE_CHECKING: + import requests + from requests.structures import CaseInsensitiveDict + + from wandb.sdk.artifacts.artifact import Artifact + + +class HTTPHandler(StorageHandler): + def __init__(self, session: requests.Session, scheme: str | None = None) -> None: + self._scheme = scheme or "http" + self._cache = get_artifact_file_cache() + self._session = session + + def can_handle(self, parsed_url: ParseResult) -> bool: + return parsed_url.scheme == self._scheme + + def load_path( + self, + manifest_entry: ArtifactManifestEntry, + local: bool = False, + ) -> URIStr | FilePathStr: + if not local: + assert manifest_entry.ref is not None + return manifest_entry.ref + + assert manifest_entry.ref is not None + + path, hit, cache_open = self._cache.check_etag_obj_path( + URIStr(manifest_entry.ref), + ETag(manifest_entry.digest), # TODO(spencerpearson): unsafe cast + manifest_entry.size if manifest_entry.size is not None else 0, + ) + if hit: + return path + + response = self._session.get( + manifest_entry.ref, + stream=True, + cookies=_thread_local_api_settings.cookies, + headers=_thread_local_api_settings.headers, + ) + response.raise_for_status() + + digest: ETag | FilePathStr | URIStr | None + digest, size, extra = self._entry_from_headers(response.headers) + digest = digest or manifest_entry.ref + if manifest_entry.digest != digest: + raise ValueError( + f"Digest mismatch for url {manifest_entry.ref}: expected {manifest_entry.digest} but found {digest}" + ) + + with cache_open(mode="wb") as file: + for data in response.iter_content(chunk_size=16 * 1024): + file.write(data) + return path + + def store_path( + self, + artifact: Artifact, + path: URIStr | FilePathStr, + name: StrPath | None = None, + checksum: bool = True, + max_objects: int | None = None, + ) -> Sequence[ArtifactManifestEntry]: + name = name or os.path.basename(path) + if not checksum: + return [ArtifactManifestEntry(path=name, ref=path, digest=path)] + + with 
self._session.get( + path, + stream=True, + cookies=_thread_local_api_settings.cookies, + headers=_thread_local_api_settings.headers, + ) as response: + response.raise_for_status() + digest: ETag | FilePathStr | URIStr | None + digest, size, extra = self._entry_from_headers(response.headers) + digest = digest or path + return [ + ArtifactManifestEntry( + path=name, ref=path, digest=digest, size=size, extra=extra + ) + ] + + def _entry_from_headers( + self, headers: CaseInsensitiveDict + ) -> tuple[ETag | None, int | None, dict[str, str]]: + response_headers = {k.lower(): v for k, v in headers.items()} + size = None + if response_headers.get("content-length", None): + size = int(response_headers["content-length"]) + + digest = response_headers.get("etag", None) + extra = {} + if digest: + extra["etag"] = digest + if digest and digest[:1] == '"' and digest[-1:] == '"': + digest = digest[1:-1] # trim leading and trailing quotes around etag + return digest, size, extra diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/local_file_handler.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/local_file_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..21c1b6fa0367e7a9d9a93c8b3169a99d6fe3ed08 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/local_file_handler.py @@ -0,0 +1,139 @@ +"""Local file storage handler.""" + +from __future__ import annotations + +import os +import shutil +import time +from typing import TYPE_CHECKING, Sequence +from urllib.parse import ParseResult + +from wandb import util +from wandb.errors.term import termlog +from wandb.sdk.artifacts.artifact_file_cache import get_artifact_file_cache +from wandb.sdk.artifacts.artifact_manifest_entry import ArtifactManifestEntry +from wandb.sdk.artifacts.storage_handler import DEFAULT_MAX_OBJECTS, StorageHandler +from wandb.sdk.lib import filesystem +from wandb.sdk.lib.hashutil import 
B64MD5, md5_file_b64, md5_string +from wandb.sdk.lib.paths import FilePathStr, StrPath, URIStr + +if TYPE_CHECKING: + from wandb.sdk.artifacts.artifact import Artifact + + +class LocalFileHandler(StorageHandler): + """Handles file:// references.""" + + def __init__(self, scheme: str | None = None) -> None: + """Track files or directories on a local filesystem. + + Expand directories to create an entry for each file contained. + """ + self._scheme = scheme or "file" + self._cache = get_artifact_file_cache() + + def can_handle(self, parsed_url: ParseResult) -> bool: + return parsed_url.scheme == self._scheme + + def load_path( + self, + manifest_entry: ArtifactManifestEntry, + local: bool = False, + ) -> URIStr | FilePathStr: + if manifest_entry.ref is None: + raise ValueError(f"Cannot add path with no ref: {manifest_entry.path}") + local_path = util.local_file_uri_to_path(str(manifest_entry.ref)) + if not os.path.exists(local_path): + raise ValueError( + "Local file reference: Failed to find file at path {}".format( + local_path + ) + ) + + path, hit, cache_open = self._cache.check_md5_obj_path( + B64MD5(manifest_entry.digest), # TODO(spencerpearson): unsafe cast + manifest_entry.size if manifest_entry.size is not None else 0, + ) + if hit: + return path + + md5 = md5_file_b64(local_path) + if md5 != manifest_entry.digest: + raise ValueError( + f"Local file reference: Digest mismatch for path {local_path}: expected {manifest_entry.digest} but found {md5}" + ) + + filesystem.mkdir_exists_ok(os.path.dirname(path)) + + with cache_open() as f: + shutil.copy(local_path, f.name) + return path + + def store_path( + self, + artifact: Artifact, + path: URIStr | FilePathStr, + name: StrPath | None = None, + checksum: bool = True, + max_objects: int | None = None, + ) -> Sequence[ArtifactManifestEntry]: + local_path = util.local_file_uri_to_path(path) + max_objects = max_objects or DEFAULT_MAX_OBJECTS + # We have a single file or directory + # Note, we follow symlinks for 
files contained within the directory + entries = [] + + def md5(path: str) -> B64MD5: + return ( + md5_file_b64(path) + if checksum + else md5_string(str(os.stat(path).st_size)) + ) + + if os.path.isdir(local_path): + i = 0 + start_time = time.time() + if checksum: + termlog( + f'Generating checksum for up to {max_objects} files in "{local_path}"... ', + newline=False, + ) + for root, _, files in os.walk(local_path): + for sub_path in files: + i += 1 + if i > max_objects: + raise ValueError( + f"Exceeded {max_objects} objects tracked, pass max_objects to add_reference" + ) + physical_path = os.path.join(root, sub_path) + # TODO(spencerpearson): this is not a "logical path" in the sense that + # `LogicalPath` returns a "logical path"; it's a relative path + # **on the local filesystem**. + logical_path = os.path.relpath(physical_path, start=local_path) + if name is not None: + logical_path = os.path.join(name, logical_path) + + entry = ArtifactManifestEntry( + path=logical_path, + ref=FilePathStr(os.path.join(path, logical_path)), + size=os.path.getsize(physical_path), + digest=md5(physical_path), + ) + entries.append(entry) + if checksum: + termlog("Done. %.1fs" % (time.time() - start_time), prefix=False) + elif os.path.isfile(local_path): + name = name or os.path.basename(local_path) + entry = ArtifactManifestEntry( + path=name, + ref=path, + size=os.path.getsize(local_path), + digest=md5(local_path), + ) + entries.append(entry) + else: + # TODO: update error message if we don't allow directories. 
+ raise ValueError( + 'Path "{}" must be a valid file or directory path'.format(path) + ) + return entries diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/multi_handler.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/multi_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..6b574847477365dfeecb2ad5cd5ecaf915318c9c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/multi_handler.py @@ -0,0 +1,56 @@ +"""Multi storage handler.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Sequence +from urllib.parse import urlparse + +from wandb.sdk.artifacts.storage_handler import StorageHandler +from wandb.sdk.lib.paths import FilePathStr, URIStr + +if TYPE_CHECKING: + from wandb.sdk.artifacts.artifact import Artifact + from wandb.sdk.artifacts.artifact_manifest_entry import ArtifactManifestEntry + + +class MultiHandler(StorageHandler): + _handlers: list[StorageHandler] + + def __init__( + self, + handlers: list[StorageHandler] | None = None, + default_handler: StorageHandler | None = None, + ) -> None: + self._handlers = handlers or [] + self._default_handler = default_handler + + def _get_handler(self, url: FilePathStr | URIStr) -> StorageHandler: + parsed_url = urlparse(url) + for handler in self._handlers: + if handler.can_handle(parsed_url): + return handler + if self._default_handler is not None: + return self._default_handler + raise ValueError('No storage handler registered for url "{}"'.format(str(url))) + + def load_path( + self, + manifest_entry: ArtifactManifestEntry, + local: bool = False, + ) -> URIStr | FilePathStr: + assert manifest_entry.ref is not None + handler = self._get_handler(manifest_entry.ref) + return handler.load_path(manifest_entry, local=local) + + def store_path( + self, + artifact: Artifact, + path: URIStr | FilePathStr, + name: str | None = None, + checksum: bool = True, + max_objects: int 
| None = None, + ) -> Sequence[ArtifactManifestEntry]: + handler = self._get_handler(path) + return handler.store_path( + artifact, path, name=name, checksum=checksum, max_objects=max_objects + ) diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/s3_handler.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/s3_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..2bae2232588972779f9847d177efabd421b4fc81 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/s3_handler.py @@ -0,0 +1,298 @@ +"""S3 storage handler.""" + +from __future__ import annotations + +import os +import time +from pathlib import PurePosixPath +from typing import TYPE_CHECKING, Sequence +from urllib.parse import parse_qsl, urlparse + +from wandb import util +from wandb.errors import CommError +from wandb.errors.term import termlog +from wandb.sdk.artifacts.artifact_file_cache import get_artifact_file_cache +from wandb.sdk.artifacts.artifact_manifest_entry import ArtifactManifestEntry +from wandb.sdk.artifacts.storage_handler import DEFAULT_MAX_OBJECTS, StorageHandler +from wandb.sdk.lib.hashutil import ETag +from wandb.sdk.lib.paths import FilePathStr, StrPath, URIStr + +if TYPE_CHECKING: + from urllib.parse import ParseResult + + # We could probably use https://pypi.org/project/boto3-stubs/ or something + # instead of `type:ignore`ing these boto imports, but it's nontrivial: + # for some reason, despite being actively maintained as of 2022-09-30, + # the latest release of boto3-stubs doesn't include all the features we use. 
+ import boto3 # type: ignore + import boto3.resources.base # type: ignore + import boto3.s3 # type: ignore + import boto3.session # type: ignore + + from wandb.sdk.artifacts.artifact import Artifact + + +class S3Handler(StorageHandler): + _s3: boto3.resources.base.ServiceResource | None + _scheme: str + + def __init__(self, scheme: str | None = None) -> None: + self._scheme = scheme or "s3" + self._s3 = None + self._cache = get_artifact_file_cache() + + def can_handle(self, parsed_url: ParseResult) -> bool: + return parsed_url.scheme == self._scheme + + def init_boto(self) -> boto3.resources.base.ServiceResource: + if self._s3 is not None: + return self._s3 + boto: boto3 = util.get_module( + "boto3", + required="s3:// references requires the boto3 library, run pip install wandb[aws]", + lazy=False, + ) + self._s3 = boto.session.Session().resource( + "s3", + endpoint_url=os.getenv("AWS_S3_ENDPOINT_URL"), + region_name=os.getenv("AWS_REGION"), + ) + self._botocore = util.get_module("botocore") + return self._s3 + + def _parse_uri(self, uri: str) -> tuple[str, str, str | None]: + url = urlparse(uri) + query = dict(parse_qsl(url.query)) + + bucket = url.netloc + key = url.path[1:] # strip leading slash + version = query.get("versionId") + + return bucket, key, version + + def load_path( + self, + manifest_entry: ArtifactManifestEntry, + local: bool = False, + ) -> URIStr | FilePathStr: + if not local: + assert manifest_entry.ref is not None + return manifest_entry.ref + + assert manifest_entry.ref is not None + + path, hit, cache_open = self._cache.check_etag_obj_path( + URIStr(manifest_entry.ref), + ETag(manifest_entry.digest), # TODO(spencerpearson): unsafe cast + manifest_entry.size if manifest_entry.size is not None else 0, + ) + if hit: + return path + + self.init_boto() + assert self._s3 is not None # mypy: unwraps optionality + bucket, key, _ = self._parse_uri(manifest_entry.ref) + version = manifest_entry.extra.get("versionID") + + extra_args = {} + if 
version: + obj_version = self._s3.ObjectVersion(bucket, key, version) + extra_args["VersionId"] = version + obj = obj_version.Object() + else: + obj = self._s3.Object(bucket, key) + + try: + etag = ( + obj_version.head()["ETag"][1:-1] # escape leading and trailing + if version + else self._etag_from_obj(obj) + ) + except self._botocore.exceptions.ClientError as e: + if e.response["Error"]["Code"] == "404": + raise FileNotFoundError( + f"Unable to find {manifest_entry.path} at s3://{bucket}/{key}" + ) from e + raise + + if etag != manifest_entry.digest: + # Try to match the etag with some other version. + if version: + raise ValueError( + f"Digest mismatch for object {manifest_entry.ref} with version {version}: expected {manifest_entry.digest} but found {etag}" + ) + obj = None + object_versions = self._s3.Bucket(bucket).object_versions.filter(Prefix=key) + for object_version in object_versions: + if manifest_entry.extra.get("etag") == self._etag_from_obj( + object_version + ): + obj = object_version.Object() + extra_args["VersionId"] = object_version.version_id + break + if obj is None: + raise FileNotFoundError( + "Couldn't find object version for {}/{} matching etag {}".format( + bucket, key, manifest_entry.extra.get("etag") + ) + ) + + with cache_open(mode="wb") as f: + obj.download_fileobj(f, ExtraArgs=extra_args) + return path + + def store_path( + self, + artifact: Artifact, + path: URIStr | FilePathStr, + name: StrPath | None = None, + checksum: bool = True, + max_objects: int | None = None, + ) -> Sequence[ArtifactManifestEntry]: + self.init_boto() + assert self._s3 is not None # mypy: unwraps optionality + + # The passed in path might have query string parameters. + # We only need to care about a subset, like version, when + # parsing. Once we have that, we can store the rest of the + # metadata in the artifact entry itself. 
+ bucket, key, version = self._parse_uri(path) + path = URIStr(f"{self._scheme}://{bucket}/{key}") + + max_objects = max_objects or DEFAULT_MAX_OBJECTS + if not checksum: + entry_path = name or (key if key != "" else bucket) + return [ArtifactManifestEntry(path=entry_path, ref=path, digest=path)] + + # If an explicit version is specified, use that. Otherwise, use the head version. + objs = ( + [self._s3.ObjectVersion(bucket, key, version).Object()] + if version + else [self._s3.Object(bucket, key)] + ) + start_time = None + multi = False + if key != "": + try: + objs[0].load() + # S3 doesn't have real folders, however there are cases where the folder key has a valid file which will not + # trigger a recursive upload. + # we should check the object's metadata says it is a directory and do a multi file upload if it is + if "x-directory" in objs[0].content_type: + multi = True + except self._botocore.exceptions.ClientError as e: + if e.response["Error"]["Code"] == "404": + multi = True + else: + raise CommError( + f"Unable to connect to S3 ({e.response['Error']['Code']}): " + f"{e.response['Error']['Message']}. Check that your " + "authentication credentials are valid and that your region is " + "set correctly." + ) + else: + multi = True + + if multi: + start_time = time.time() + termlog( + f'Generating checksum for up to {max_objects} objects in "{bucket}/{key}"... ', + newline=False, + ) + if key != "": + objs = ( + self._s3.Bucket(bucket) + .objects.filter(Prefix=key) + .limit(max_objects) + ) + else: + objs = self._s3.Bucket(bucket).objects.limit(max_objects) + # Weird iterator scoping makes us assign this to a local function + size = self._size_from_obj + entries = [ + self._entry_from_obj(obj, path, name, prefix=key, multi=multi) + for obj in objs + if size(obj) > 0 + ] + if start_time is not None: + termlog("Done. 
%.1fs" % (time.time() - start_time), prefix=False) + if len(entries) > max_objects: + raise ValueError( + f"Exceeded {max_objects} objects tracked, pass max_objects to add_reference" + ) + return entries + + def _size_from_obj(self, obj: boto3.s3.Object | boto3.s3.ObjectSummary) -> int: + # ObjectSummary has size, Object has content_length + size: int + if hasattr(obj, "size"): + size = obj.size + else: + size = obj.content_length + return size + + def _entry_from_obj( + self, + obj: boto3.s3.Object | boto3.s3.ObjectSummary, + path: str, + name: StrPath | None = None, + prefix: str = "", + multi: bool = False, + ) -> ArtifactManifestEntry: + """Create an ArtifactManifestEntry from an S3 object. + + Args: + obj: The S3 object + path: The S3-style path (e.g.: "s3://bucket/file.txt") + name: The user assigned name, or None if not specified + prefix: The prefix to add (will be the same as `path` for directories) + multi: Whether or not this is a multi-object add. + """ + bucket, key, _ = self._parse_uri(path) + + # Always use posix paths, since that's what S3 uses. + posix_key = PurePosixPath(obj.key) # the bucket key + posix_path = PurePosixPath(bucket) / key # the path, with the scheme stripped + posix_prefix = PurePosixPath(prefix) # the prefix, if adding a prefix + posix_name = PurePosixPath(name or "") + posix_ref = posix_path + + if name is None: + # We're adding a directory (prefix), so calculate a relative path. + if str(posix_prefix) in str(posix_key) and posix_prefix != posix_key: + posix_name = posix_key.relative_to(posix_prefix) + posix_ref = posix_path / posix_name + else: + posix_name = PurePosixPath(posix_key.name) + posix_ref = posix_path + elif multi: + # We're adding a directory with a name override. 
+ relpath = posix_key.relative_to(posix_prefix) + posix_name = posix_name / relpath + posix_ref = posix_path / relpath + return ArtifactManifestEntry( + path=posix_name, + ref=URIStr(f"{self._scheme}://{str(posix_ref)}"), + digest=ETag(self._etag_from_obj(obj)), + size=self._size_from_obj(obj), + extra=self._extra_from_obj(obj), + ) + + @staticmethod + def _etag_from_obj(obj: boto3.s3.Object | boto3.s3.ObjectSummary) -> ETag: + etag: ETag + etag = obj.e_tag[1:-1] # escape leading and trailing quote + return etag + + def _extra_from_obj( + self, obj: boto3.s3.Object | boto3.s3.ObjectSummary + ) -> dict[str, str]: + extra = { + "etag": obj.e_tag[1:-1], # escape leading and trailing quote + } + if not hasattr(obj, "version_id"): + # Convert ObjectSummary to Object to get the version_id. + obj = self._s3.Object(obj.bucket_name, obj.key) # type: ignore[union-attr] + if hasattr(obj, "version_id") and obj.version_id and obj.version_id != "null": + extra["versionID"] = obj.version_id + return extra diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/tracking_handler.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/tracking_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..3e59773341681d541fdf4bc63f7637c86e996e6d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/tracking_handler.py @@ -0,0 +1,72 @@ +"""Tracking storage handler.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Sequence +from urllib.parse import urlparse + +from wandb.errors.term import termwarn +from wandb.sdk.artifacts.artifact_manifest_entry import ArtifactManifestEntry +from wandb.sdk.artifacts.storage_handler import StorageHandler +from wandb.sdk.lib.paths import FilePathStr, StrPath, URIStr + +if TYPE_CHECKING: + from urllib.parse import ParseResult + + from wandb.sdk.artifacts.artifact import Artifact + + +class TrackingHandler(StorageHandler): + 
def __init__(self, scheme: str | None = None) -> None: + """Track paths with no modification or special processing. + + Useful when paths being tracked are on file systems mounted at a standardized + location. + + For example, if the data to track is located on an NFS share mounted on + `/data`, then it is sufficient to just track the paths. + """ + self._scheme = scheme or "" + + def can_handle(self, parsed_url: ParseResult) -> bool: + return parsed_url.scheme == self._scheme + + def load_path( + self, + manifest_entry: ArtifactManifestEntry, + local: bool = False, + ) -> URIStr | FilePathStr: + if local: + # Likely a user error. The tracking handler is + # oblivious to the underlying paths, so it has + # no way of actually loading it. + url = urlparse(manifest_entry.ref) + raise ValueError( + f"Cannot download file at path {str(manifest_entry.ref)}, scheme {str(url.scheme)} not recognized" + ) + # TODO(spencerpearson): should this go through util.to_native_slash_path + # instead of just getting typecast? + return FilePathStr(manifest_entry.path) + + def store_path( + self, + artifact: Artifact, + path: URIStr | FilePathStr, + name: StrPath | None = None, + checksum: bool = True, + max_objects: int | None = None, + ) -> Sequence[ArtifactManifestEntry]: + url = urlparse(path) + if name is None: + raise ValueError( + 'You must pass name="" when tracking references with unknown schemes. 
ref: {}'.format( + path + ) + ) + termwarn( + "Artifact references with unsupported schemes cannot be checksummed: {}".format( + path + ) + ) + name = name or url.path[1:] # strip leading slash + return [ArtifactManifestEntry(path=name, ref=path, digest=path)] diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/wb_artifact_handler.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/wb_artifact_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..e7ea1d0eb56aeba3b809b35acaf3e27ded968fa1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/wb_artifact_handler.py @@ -0,0 +1,135 @@ +"""WB artifact storage handler.""" + +from __future__ import annotations + +import os +from typing import TYPE_CHECKING, Sequence +from urllib.parse import urlparse + +import wandb +from wandb import util +from wandb.apis import PublicApi +from wandb.sdk.artifacts.artifact_file_cache import get_artifact_file_cache +from wandb.sdk.artifacts.artifact_manifest_entry import ArtifactManifestEntry +from wandb.sdk.artifacts.storage_handler import StorageHandler +from wandb.sdk.lib.hashutil import B64MD5, b64_to_hex_id, hex_to_b64_id +from wandb.sdk.lib.paths import FilePathStr, StrPath, URIStr + +if TYPE_CHECKING: + from urllib.parse import ParseResult + + from wandb.sdk.artifacts.artifact import Artifact + + +class WBArtifactHandler(StorageHandler): + """Handles loading and storing Artifact reference-type files.""" + + _client: PublicApi | None + + def __init__(self) -> None: + self._scheme = "wandb-artifact" + self._cache = get_artifact_file_cache() + self._client = None + + def can_handle(self, parsed_url: ParseResult) -> bool: + return parsed_url.scheme == self._scheme + + @property + def client(self) -> PublicApi: + if self._client is None: + self._client = PublicApi() + return self._client + + def load_path( + self, + manifest_entry: ArtifactManifestEntry, + local: bool = 
False, + ) -> URIStr | FilePathStr: + """Load the file in the specified artifact given its corresponding entry. + + Download the referenced artifact; create and return a new symlink to the caller. + + Args: + manifest_entry (ArtifactManifestEntry): The index entry to load + + Returns: + (os.PathLike): A path to the file represented by `index_entry` + """ + # We don't check for cache hits here. Since we have 0 for size (since this + # is a cross-artifact reference which and we've made the choice to store 0 + # in the size field), we can't confirm if the file is complete. So we just + # rely on the dep_artifact entry's download() method to do its own cache + # check. + + # Parse the reference path and download the artifact if needed + artifact_id = util.host_from_path(manifest_entry.ref) + artifact_file_path = util.uri_from_path(manifest_entry.ref) + + dep_artifact = wandb.Artifact._from_id( + hex_to_b64_id(artifact_id), self.client.client + ) + assert dep_artifact is not None + link_target_path: URIStr | FilePathStr + if local: + link_target_path = dep_artifact.get_entry(artifact_file_path).download() + else: + link_target_path = dep_artifact.get_entry(artifact_file_path).ref_target() + + return link_target_path + + def store_path( + self, + artifact: Artifact, + path: URIStr | FilePathStr, + name: StrPath | None = None, + checksum: bool = True, + max_objects: int | None = None, + ) -> Sequence[ArtifactManifestEntry]: + """Store the file or directory at the given path into the specified artifact. + + Recursively resolves the reference until the result is a concrete asset. 
+ + Args: + artifact: The artifact doing the storing path (str): The path to store name + (str): If specified, the logical name that should map to `path` + + Returns: + (list[ArtifactManifestEntry]): A list of manifest entries to store within + the artifact + """ + # Recursively resolve the reference until a concrete asset is found + # TODO: Consider resolving server-side for performance improvements. + iter_path: URIStr | FilePathStr | None = path + while iter_path is not None and urlparse(iter_path).scheme == self._scheme: + artifact_id = util.host_from_path(iter_path) + artifact_file_path = util.uri_from_path(iter_path) + target_artifact = wandb.Artifact._from_id( + hex_to_b64_id(artifact_id), self.client.client + ) + assert target_artifact is not None + + entry = target_artifact.manifest.get_entry_by_path(artifact_file_path) + assert entry is not None + iter_path = entry.ref + + # Create the path reference + assert target_artifact is not None + assert target_artifact.id is not None + path = URIStr( + "{}://{}/{}".format( + self._scheme, + b64_to_hex_id(B64MD5(target_artifact.id)), + artifact_file_path, + ) + ) + + # Return the new entry + assert entry is not None + return [ + ArtifactManifestEntry( + path=name or os.path.basename(path), + ref=path, + size=0, + digest=entry.digest, + ) + ] diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/wb_local_artifact_handler.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/wb_local_artifact_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..28ab27d8ec5e9d6c74ff6c669b3ca40874def842 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_handlers/wb_local_artifact_handler.py @@ -0,0 +1,74 @@ +"""WB local artifact storage handler.""" + +from __future__ import annotations + +import os +from typing import TYPE_CHECKING, Sequence + +import wandb +from wandb import util +from 
wandb.sdk.artifacts.artifact_instance_cache import artifact_instance_cache +from wandb.sdk.artifacts.artifact_manifest_entry import ArtifactManifestEntry +from wandb.sdk.artifacts.storage_handler import StorageHandler +from wandb.sdk.lib.paths import FilePathStr, StrPath, URIStr + +if TYPE_CHECKING: + from urllib.parse import ParseResult + + from wandb.sdk.artifacts.artifact import Artifact + + +class WBLocalArtifactHandler(StorageHandler): + """Handles loading and storing Artifact reference-type files.""" + + def __init__(self) -> None: + self._scheme = "wandb-client-artifact" + + def can_handle(self, parsed_url: ParseResult) -> bool: + return parsed_url.scheme == self._scheme + + def load_path( + self, + manifest_entry: ArtifactManifestEntry, + local: bool = False, + ) -> URIStr | FilePathStr: + raise NotImplementedError( + "Should not be loading a path for an artifact entry with unresolved client id." + ) + + def store_path( + self, + artifact: Artifact, + path: URIStr | FilePathStr, + name: StrPath | None = None, + checksum: bool = True, + max_objects: int | None = None, + ) -> Sequence[ArtifactManifestEntry]: + """Store the file or directory at the given path within the specified artifact. 
+ + Args: + artifact: The artifact doing the storing + path (str): The path to store + name (str): If specified, the logical name that should map to `path` + + Returns: + (list[ArtifactManifestEntry]): A list of manifest entries to store within the artifact + """ + client_id = util.host_from_path(path) + target_path = util.uri_from_path(path) + target_artifact = artifact_instance_cache.get(client_id) + if not isinstance(target_artifact, wandb.Artifact): + raise RuntimeError("Local Artifact not found - invalid reference") + target_entry = target_artifact._manifest.entries[target_path] # type: ignore + if target_entry is None: + raise RuntimeError("Local entry not found - invalid reference") + + # Return the new entry + return [ + ArtifactManifestEntry( + path=name or os.path.basename(path), + ref=path, + size=0, + digest=target_entry.digest, + ) + ] diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_layout.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_layout.py new file mode 100644 index 0000000000000000000000000000000000000000..b07cbd8e50fdc507e6f2787611514f09f653b851 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_layout.py @@ -0,0 +1,6 @@ +"""Storage layout.""" + + +class StorageLayout: + V1 = "V1" + V2 = "V2" diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_policies/__init__.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_policies/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4d409bcf913dfde9b84f6cc53d3c980a2cbe02e2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_policies/__init__.py @@ -0,0 +1,4 @@ +from wandb.sdk.artifacts.storage_policies.register import WANDB_STORAGE_POLICY +from wandb.sdk.artifacts.storage_policies.wandb_storage_policy import WandbStoragePolicy + +__all__ = ["WANDB_STORAGE_POLICY", "WandbStoragePolicy"] diff --git 
a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_policies/register.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_policies/register.py new file mode 100644 index 0000000000000000000000000000000000000000..84542d8a70fca03538985fa8aa9c6ef7ea86c2b5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_policies/register.py @@ -0,0 +1 @@ +WANDB_STORAGE_POLICY = "wandb-storage-policy-v1" diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_policies/wandb_storage_policy.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_policies/wandb_storage_policy.py new file mode 100644 index 0000000000000000000000000000000000000000..67b832d8ff3530ea92680cebca661e49371a922a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_policies/wandb_storage_policy.py @@ -0,0 +1,378 @@ +"""WandB storage policy.""" + +from __future__ import annotations + +import hashlib +import math +import os +import shutil +from typing import TYPE_CHECKING, Any, Sequence +from urllib.parse import quote + +import requests +import urllib3 + +from wandb.errors.term import termwarn +from wandb.sdk.artifacts.artifact_file_cache import ( + ArtifactFileCache, + get_artifact_file_cache, +) +from wandb.sdk.artifacts.staging import get_staging_dir +from wandb.sdk.artifacts.storage_handlers.azure_handler import AzureHandler +from wandb.sdk.artifacts.storage_handlers.gcs_handler import GCSHandler +from wandb.sdk.artifacts.storage_handlers.http_handler import HTTPHandler +from wandb.sdk.artifacts.storage_handlers.local_file_handler import LocalFileHandler +from wandb.sdk.artifacts.storage_handlers.multi_handler import MultiHandler +from wandb.sdk.artifacts.storage_handlers.s3_handler import S3Handler +from wandb.sdk.artifacts.storage_handlers.tracking_handler import TrackingHandler +from wandb.sdk.artifacts.storage_handlers.wb_artifact_handler import WBArtifactHandler +from 
wandb.sdk.artifacts.storage_handlers.wb_local_artifact_handler import ( + WBLocalArtifactHandler, +) +from wandb.sdk.artifacts.storage_layout import StorageLayout +from wandb.sdk.artifacts.storage_policies.register import WANDB_STORAGE_POLICY +from wandb.sdk.artifacts.storage_policy import StoragePolicy +from wandb.sdk.internal.internal_api import Api as InternalApi +from wandb.sdk.internal.thread_local_settings import _thread_local_api_settings +from wandb.sdk.lib.hashutil import B64MD5, b64_to_hex_id, hex_to_b64_id +from wandb.sdk.lib.paths import FilePathStr, URIStr + +if TYPE_CHECKING: + from wandb.filesync.step_prepare import StepPrepare + from wandb.sdk.artifacts.artifact import Artifact + from wandb.sdk.artifacts.artifact_manifest_entry import ArtifactManifestEntry + from wandb.sdk.internal import progress + +# Sleep length: 0, 2, 4, 8, 16, 32, 64, 120, 120, 120, 120, 120, 120, 120, 120, 120 +# seconds, i.e. a total of 20min 6s. +_REQUEST_RETRY_STRATEGY = urllib3.util.retry.Retry( + backoff_factor=1, + total=16, + status_forcelist=(308, 408, 409, 429, 500, 502, 503, 504), +) +_REQUEST_POOL_CONNECTIONS = 64 +_REQUEST_POOL_MAXSIZE = 64 + +# AWS S3 max upload parts without having to make additional requests for extra parts +S3_MAX_PART_NUMBERS = 1000 +S3_MIN_MULTI_UPLOAD_SIZE = 2 * 1024**3 +S3_MAX_MULTI_UPLOAD_SIZE = 5 * 1024**4 + + +class WandbStoragePolicy(StoragePolicy): + @classmethod + def name(cls) -> str: + return WANDB_STORAGE_POLICY + + @classmethod + def from_config( + cls, config: dict, api: InternalApi | None = None + ) -> WandbStoragePolicy: + return cls(config=config, api=api) + + def __init__( + self, + config: dict | None = None, + cache: ArtifactFileCache | None = None, + api: InternalApi | None = None, + ) -> None: + self._cache = cache or get_artifact_file_cache() + self._config = config or {} + self._session = requests.Session() + adapter = requests.adapters.HTTPAdapter( + max_retries=_REQUEST_RETRY_STRATEGY, + 
pool_connections=_REQUEST_POOL_CONNECTIONS, + pool_maxsize=_REQUEST_POOL_MAXSIZE, + ) + self._session.mount("http://", adapter) + self._session.mount("https://", adapter) + + s3 = S3Handler() + gcs = GCSHandler() + azure = AzureHandler() + http = HTTPHandler(self._session) + https = HTTPHandler(self._session, scheme="https") + artifact = WBArtifactHandler() + local_artifact = WBLocalArtifactHandler() + file_handler = LocalFileHandler() + + self._api = api or InternalApi() + self._handler = MultiHandler( + handlers=[ + s3, + gcs, + azure, + http, + https, + artifact, + local_artifact, + file_handler, + ], + default_handler=TrackingHandler(), + ) + + def config(self) -> dict: + return self._config + + def load_file( + self, + artifact: Artifact, + manifest_entry: ArtifactManifestEntry, + dest_path: str | None = None, + ) -> FilePathStr: + if dest_path is not None: + self._cache._override_cache_path = dest_path + + path, hit, cache_open = self._cache.check_md5_obj_path( + B64MD5(manifest_entry.digest), # TODO(spencerpearson): unsafe cast + manifest_entry.size if manifest_entry.size is not None else 0, + ) + if hit: + return path + + if manifest_entry._download_url is not None: + response = self._session.get(manifest_entry._download_url, stream=True) + try: + response.raise_for_status() + except Exception: + # Signed URL might have expired, fall back to fetching it one by one. 
            # (continuation of load_file — the method's opening lines are above
            # this chunk) A failed signed-URL fetch clears the cached URL so the
            # direct-download fallback below is taken.
            manifest_entry._download_url = None
        if manifest_entry._download_url is None:
            # Fall back to downloading straight from the W&B backend, using
            # whichever credentials are configured: bearer token, session
            # cookies, or HTTP basic auth with the API key.
            auth = None
            http_headers = _thread_local_api_settings.headers or {}
            if self._api.access_token is not None:
                http_headers["Authorization"] = f"Bearer {self._api.access_token}"
            elif _thread_local_api_settings.cookies is None:
                auth = ("api", self._api.api_key or "")

            response = self._session.get(
                self._file_url(self._api, artifact.entity, manifest_entry),
                auth=auth,
                cookies=_thread_local_api_settings.cookies,
                headers=http_headers,
                stream=True,
            )
            response.raise_for_status()

        # Stream the payload into the artifact cache in 16 KiB chunks, then
        # hand back the (cache) path computed earlier in the method.
        with cache_open(mode="wb") as file:
            for data in response.iter_content(chunk_size=16 * 1024):
                file.write(data)
        return path

    def store_reference(
        self,
        artifact: Artifact,
        path: URIStr | FilePathStr,
        name: str | None = None,
        checksum: bool = True,
        max_objects: int | None = None,
    ) -> Sequence[ArtifactManifestEntry]:
        # Delegates reference storage to the multi-scheme handler (s3://,
        # gs://, file://, ...). Returns the manifest entries it produced.
        return self._handler.store_path(
            artifact, path, name=name, checksum=checksum, max_objects=max_objects
        )

    def load_reference(
        self,
        manifest_entry: ArtifactManifestEntry,
        local: bool = False,
        dest_path: str | None = None,
    ) -> FilePathStr | URIStr:
        # Resolve a reference entry back to a usable path/URI. When a
        # destination is given and the scheme handler has a cache, redirect
        # that cache so the download lands at dest_path.
        assert manifest_entry.ref is not None
        used_handler = self._handler._get_handler(manifest_entry.ref)
        if hasattr(used_handler, "_cache") and (dest_path is not None):
            used_handler._cache._override_cache_path = dest_path
        return self._handler.load_path(manifest_entry, local)

    def _file_url(
        self,
        api: InternalApi,
        entity_name: str,
        manifest_entry: ArtifactManifestEntry,
    ) -> str:
        # Build the backend download URL for a manifest entry. Layout V1 keys
        # by entity + md5; layout V2 additionally scopes by storage region and
        # the artifact the entry was born in.
        storage_layout = self._config.get("storageLayout", StorageLayout.V1)
        storage_region = self._config.get("storageRegion", "default")
        md5_hex = b64_to_hex_id(B64MD5(manifest_entry.digest))

        if storage_layout == StorageLayout.V1:
            return "{}/artifacts/{}/{}".format(
                api.settings("base_url"), entity_name, md5_hex
            )
        elif storage_layout == StorageLayout.V2:
            return "{}/artifactsV2/{}/{}/{}/{}".format(
                api.settings("base_url"),
                storage_region,
                entity_name,
                quote(
                    manifest_entry.birth_artifact_id
                    if manifest_entry.birth_artifact_id is not None
                    else ""
                ),
                md5_hex,
            )
        else:
            raise Exception(f"unrecognized storage layout: {storage_layout}")

    def s3_multipart_file_upload(
        self,
        file_path: str,
        chunk_size: int,
        hex_digests: dict[int, str],
        multipart_urls: dict[int, str],
        extra_headers: dict[str, str],
    ) -> list[dict[str, Any]]:
        # Upload the file chunk-by-chunk to pre-signed S3 part URLs.
        # Assumes hex_digests holds an MD5 for every part number — store_file
        # computes them with the same chunk_size before calling this.
        # Returns the ETag list S3 needs to complete the multipart upload.
        etags = []
        part_number = 1

        with open(file_path, "rb") as f:
            while True:
                data = f.read(chunk_size)
                if not data:
                    break
                # S3 validates each part against its base64 content-md5.
                md5_b64_str = str(hex_to_b64_id(hex_digests[part_number]))
                upload_resp = self._api.upload_multipart_file_chunk_retry(
                    multipart_urls[part_number],
                    data,
                    extra_headers={
                        "content-md5": md5_b64_str,
                        "content-length": str(len(data)),
                        "content-type": extra_headers.get("Content-Type", ""),
                    },
                )
                assert upload_resp is not None
                etags.append(
                    {"partNumber": part_number, "hexMD5": upload_resp.headers["ETag"]}
                )
                part_number += 1
        return etags

    def default_file_upload(
        self,
        upload_url: str,
        file_path: str,
        extra_headers: dict[str, Any],
        progress_callback: progress.ProgressFn | None = None,
    ) -> None:
        """Upload a file to the artifact store and write to cache."""
        with open(file_path, "rb") as file:
            # This fails if we don't send the first byte before the signed URL expires.
            self._api.upload_file_retry(
                upload_url,
                file,
                progress_callback,
                extra_headers=extra_headers,
            )

    def calc_chunk_size(self, file_size: int) -> int:
        # Default to chunk size of 100MiB. S3 has cap of 10,000 upload parts.
        # If file size exceeds the default chunk size, recalculate chunk size.
        default_chunk_size = 100 * 1024**2
        if default_chunk_size * S3_MAX_PART_NUMBERS < file_size:
            return math.ceil(file_size / S3_MAX_PART_NUMBERS)
        return default_chunk_size

    def store_file(
        self,
        artifact_id: str,
        artifact_manifest_id: str,
        entry: ArtifactManifestEntry,
        preparer: StepPrepare,
        progress_callback: progress.ProgressFn | None = None,
    ) -> bool:
        """Upload a file to the artifact store.

        Returns:
            True if the file was a duplicate (did not need to be uploaded),
            False if it needed to be uploaded or was a reference (nothing to dedupe).
        """
        file_size = entry.size if entry.size is not None else 0
        chunk_size = self.calc_chunk_size(file_size)
        upload_parts = []
        hex_digests = {}
        file_path = entry.local_path if entry.local_path is not None else ""
        # Logic for AWS s3 multipart upload.
        # Only chunk files if larger than 2 GiB. Currently can only support up to 5TiB.
        if (
            file_size >= S3_MIN_MULTI_UPLOAD_SIZE
            and file_size <= S3_MAX_MULTI_UPLOAD_SIZE
        ):
            # Pre-compute per-part MD5s; the backend echoes them back as
            # pre-signed part URLs and s3_multipart_file_upload re-reads the
            # file with the same chunk_size.
            part_number = 1
            with open(file_path, "rb") as f:
                while True:
                    data = f.read(chunk_size)
                    if not data:
                        break
                    hex_digest = hashlib.md5(data).hexdigest()
                    upload_parts.append(
                        {"hexMD5": hex_digest, "partNumber": part_number}
                    )
                    hex_digests[part_number] = hex_digest
                    part_number += 1

        # Ask the backend whether/where to upload; a None upload_url means the
        # file is already stored (dedupe hit).
        resp = preparer.prepare(
            {
                "artifactID": artifact_id,
                "artifactManifestID": artifact_manifest_id,
                "name": entry.path,
                "md5": entry.digest,
                "uploadPartsInput": upload_parts,
            }
        ).get()

        entry.birth_artifact_id = resp.birth_artifact_id

        multipart_urls = resp.multipart_upload_urls
        if resp.upload_url is None:
            return True
        if entry.local_path is None:
            return False
        # NOTE(review): upload_headers is iterated for "Key:Value" strings;
        # the `or {}` fallback iterates to nothing, which only makes sense if
        # upload_headers is normally a list of strings — confirm upstream type.
        extra_headers = {
            header.split(":", 1)[0]: header.split(":", 1)[1]
            for header in (resp.upload_headers or {})
        }

        # This multipart upload isn't available, do a regular single url upload
        if multipart_urls is None and resp.upload_url:
            self.default_file_upload(
                resp.upload_url, file_path, extra_headers, progress_callback
            )
        else:
            if multipart_urls is None:
                raise ValueError(f"No multipart urls to upload for file: {file_path}")
            # Upload files using s3 multipart upload urls
            etags = self.s3_multipart_file_upload(
                file_path,
                chunk_size,
                hex_digests,
                multipart_urls,
                extra_headers,
            )
            assert resp.storage_path is not None
            self._api.complete_multipart_upload_artifact(
                artifact_id, resp.storage_path, etags, resp.upload_id
            )
        self._write_cache(entry)

        return False

    def _write_cache(self, entry: ArtifactManifestEntry) -> None:
        # Copy the just-uploaded local file into the artifact cache (unless it
        # is already there or caching is skipped), and delete staged copies
        # eagerly instead of waiting for the whole upload batch.
        if entry.local_path is None:
            return

        # Cache upon successful upload.
        _, hit, cache_open = self._cache.check_md5_obj_path(
            B64MD5(entry.digest),
            entry.size if entry.size is not None else 0,
        )

        staging_dir = get_staging_dir()
        try:
            if not entry.skip_cache and not hit:
                with cache_open("wb") as f, open(entry.local_path, "rb") as src:
                    shutil.copyfileobj(src, f)
            if entry.local_path.startswith(staging_dir):
                # Delete staged files here instead of waiting till
                # all the files are uploaded
                os.chmod(entry.local_path, 0o600)
                os.remove(entry.local_path)
        except OSError as e:
            # Best-effort: a failed cache write must not fail the upload.
            termwarn(f"Failed to cache {entry.local_path}, ignoring {e}")
diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_policy.py b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_policy.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2faa74fd85093e25887cff6a756de1d702f4af6
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/wandb/sdk/artifacts/storage_policy.py
@@ -0,0 +1,72 @@
"""Storage policy."""

from __future__ import annotations

from typing import TYPE_CHECKING, Sequence

from wandb.sdk.internal.internal_api import Api as InternalApi
from wandb.sdk.lib.paths import FilePathStr, URIStr

if TYPE_CHECKING:
    from wandb.filesync.step_prepare import StepPrepare
    from wandb.sdk.artifacts.artifact import Artifact
    from
wandb.sdk.artifacts.artifact_manifest_entry import ArtifactManifestEntry
    from wandb.sdk.internal.progress import ProgressFn


class StoragePolicy:
    # Abstract interface for artifact storage backends; concrete policies
    # (e.g. the W&B default policy) subclass this and register by name.

    @classmethod
    def lookup_by_name(cls, name: str) -> type[StoragePolicy]:
        """Return the registered StoragePolicy subclass with the given name.

        Importing wandb.sdk.artifacts.storage_policies first ensures the
        built-in subclasses exist before __subclasses__() is scanned.
        """
        import wandb.sdk.artifacts.storage_policies  # noqa: F401

        for sub in cls.__subclasses__():
            if sub.name() == name:
                return sub
        raise NotImplementedError(f"Failed to find storage policy '{name}'")

    @classmethod
    def name(cls) -> str:
        # Unique registry name for this policy; subclasses must override.
        raise NotImplementedError

    @classmethod
    def from_config(cls, config: dict, api: InternalApi | None = None) -> StoragePolicy:
        # Build a policy instance from its serialized config dict.
        raise NotImplementedError

    def config(self) -> dict:
        # Serialize this policy back to a config dict (inverse of from_config).
        raise NotImplementedError

    def load_file(
        self,
        artifact: Artifact,
        manifest_entry: ArtifactManifestEntry,
        dest_path: str | None = None,
    ) -> FilePathStr:
        # Download a manifest entry's file and return its local path.
        raise NotImplementedError

    def store_file(
        self,
        artifact_id: str,
        artifact_manifest_id: str,
        entry: ArtifactManifestEntry,
        preparer: StepPrepare,
        progress_callback: ProgressFn | None = None,
    ) -> bool:
        # Upload a file; implementations return True on a dedupe hit.
        raise NotImplementedError

    def store_reference(
        self,
        artifact: Artifact,
        path: URIStr | FilePathStr,
        name: str | None = None,
        checksum: bool = True,
        max_objects: int | None = None,
    ) -> Sequence[ArtifactManifestEntry]:
        # Record a by-reference path (s3://, file://, ...) in the manifest.
        raise NotImplementedError

    def load_reference(
        self,
        manifest_entry: ArtifactManifestEntry,
        local: bool = False,
        dest_path: str | None = None,
    ) -> FilePathStr | URIStr:
        # Resolve a reference entry to a local path (local=True) or URI.
        raise NotImplementedError
diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/service/__init__.py b/vllm/lib/python3.10/site-packages/wandb/sdk/service/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/service/__pycache__/port_file.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/sdk/service/__pycache__/port_file.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..41459cddc740c103cffe18a1cac2287017b59bb8
Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/sdk/service/__pycache__/port_file.cpython-310.pyc differ
diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/service/__pycache__/server.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/sdk/service/__pycache__/server.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ee991dcc988b0a225629a5ab26266373f6e63e5d
Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/sdk/service/__pycache__/server.cpython-310.pyc differ
diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/service/__pycache__/service.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/sdk/service/__pycache__/service.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3793410418fc3f02039f6c1827cabe8515b64a2a
Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/sdk/service/__pycache__/service.cpython-310.pyc differ
diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/service/__pycache__/streams.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/sdk/service/__pycache__/streams.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4cd2cdc22f036374ba7ab9682f908752f8a0a73c
Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/sdk/service/__pycache__/streams.cpython-310.pyc differ
diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/service/_startup_debug.py b/vllm/lib/python3.10/site-packages/wandb/sdk/service/_startup_debug.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3ad0770a385daba2f553907f031dee9a59ed9a2
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/wandb/sdk/service/_startup_debug.py
@@ -0,0 +1,22 @@
"""_startup_debug.

Temporary helper to debug issues with wandb service startup
"""

import os
import time


def is_enabled() -> bool:
    # This is very temporary to help diagnose problems seen by some
    # customers which we are having trouble reproducing. It should be
    # replaced by something more permanent in the future when we have
    # proper logging for wandb-service
    if os.environ.get("_WANDB_STARTUP_DEBUG"):
        return True
    return False


def print_message(message: str) -> None:
    # Emit a timestamped startup-debug marker straight to stdout.
    time_now = time.time()
    print("WANDB_STARTUP_DEBUG", time_now, message)  # noqa: T201
diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/service/port_file.py b/vllm/lib/python3.10/site-packages/wandb/sdk/service/port_file.py
new file mode 100644
index 0000000000000000000000000000000000000000..1980a84dd29af08d4c402a932231b7b04615337e
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/wandb/sdk/service/port_file.py
@@ -0,0 +1,53 @@
"""port_file: write/read file containing port info."""

import os
import tempfile
from typing import Optional


class PortFile:
    # Rendezvous file the service process writes (atomically) so the parent
    # can discover which socket port the service bound.
    _sock_port: Optional[int]
    _valid: bool

    SOCK_TOKEN = "sock="
    EOF_TOKEN = "EOF"

    def __init__(self, sock_port: Optional[int] = None) -> None:
        self._sock_port = sock_port
        self._valid = False

    def write(self, fname: str) -> None:
        """Atomically write the port info to fname.

        The data is written to a temp file in the same directory and then
        renamed into place, so readers never observe a partial file.
        """
        dname, bname = os.path.split(fname)
        f = tempfile.NamedTemporaryFile(prefix=bname, dir=dname, mode="w", delete=False)
        try:
            tmp_filename = f.name
            with f:
                data = []
                if self._sock_port:
                    data.append(f"{self.SOCK_TOKEN}{self._sock_port}")
                # EOF sentinel is the last line (no trailing newline) so
                # read() can detect a complete file.
                data.append(self.EOF_TOKEN)
                port_str = "\n".join(data)
                written = f.write(port_str)
                assert written == len(port_str)
            os.rename(tmp_filename, fname)
        except Exception:
            os.unlink(tmp_filename)
            raise

    def read(self, fname: str) -> None:
        """Parse fname; sets is_valid only if the EOF sentinel is present."""
        with open(fname) as f:
            lines = f.readlines()
            # Writer emits no trailing newline, so a complete file's last
            # readline is exactly "EOF". An incomplete file is silently
            # ignored (is_valid stays False) and the caller retries.
            # NOTE(review): an empty file would raise IndexError here —
            # presumably callers treat that as "not ready yet"; confirm.
            if lines[-1] != self.EOF_TOKEN:
                return
            for ln in lines:
                if ln.startswith(self.SOCK_TOKEN):
                    # int() tolerates the trailing "\n" on non-final lines.
                    self._sock_port = int(ln[len(self.SOCK_TOKEN) :])
        self._valid = True

    @property
    def sock_port(self) -> Optional[int]:
        return self._sock_port

    @property
    def is_valid(self) -> bool:
        return self._valid
diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/service/server.py b/vllm/lib/python3.10/site-packages/wandb/sdk/service/server.py
new file mode 100644
index 0000000000000000000000000000000000000000..a015164a67920bc40ebd39be3f1f26c0808f1162
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/wandb/sdk/service/server.py
@@ -0,0 +1,107 @@
"""wandb server.

Start up socket transport servers.
"""

import logging
import os
import sys
from typing import Optional

import wandb

from . import _startup_debug, port_file
from .server_sock import SocketServer
from .streams import StreamMux


class WandbServer:
    # Entry point for the wandb service process: binds the socket server,
    # advertises the bound port via a PortFile, and runs the stream mux loop.
    _pid: Optional[int]
    _sock_port: Optional[int]
    _debug: bool
    _sock_server: Optional[SocketServer]
    _startup_debug_enabled: bool

    def __init__(
        self,
        sock_port: Optional[int] = None,
        port_fname: Optional[str] = None,
        address: Optional[str] = None,
        pid: Optional[int] = None,
        debug: bool = True,
    ) -> None:
        self._sock_port = sock_port
        self._port_fname = port_fname
        self._address = address
        self._pid = pid
        self._debug = debug
        self._sock_server = None
        self._startup_debug_enabled = _startup_debug.is_enabled()

        if debug:
            logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)

    def _inform_used_ports(self, sock_port: Optional[int]) -> None:
        # Publish the bound port for the parent process; no-op when the
        # parent did not request a port file.
        if not self._port_fname:
            return
        pf = port_file.PortFile(sock_port=sock_port)
        pf.write(self._port_fname)

    def _start_sock(self, mux: StreamMux) -> int:
        """Start the socket server and return the port actually bound.

        Port 0 lets the OS pick a free port; the real port is read back
        from the server after start().
        """
        address: str = self._address or "127.0.0.1"
        port: int = self._sock_port or 0
        self._sock_server = SocketServer(mux=mux, address=address, port=port)
        try:
            self._sock_server.start()
            port = self._sock_server.port
            if self._pid:
                mux.set_pid(self._pid)
        except KeyboardInterrupt:
            mux.cleanup()
            raise
except Exception: + mux.cleanup() + raise + return port + + def _stop_servers(self) -> None: + if self._sock_server: + self._sock_server.stop() + + def _startup_debug_print(self, message: str) -> None: + if not self._startup_debug_enabled: + return + _startup_debug.print_message(message) + + def _setup_proctitle(self, sock_port: Optional[int]) -> None: + # TODO: the internal_process should have a better way to have access to + # settings. + disable_setproctitle = os.environ.get("WANDB_X_DISABLE_SETPROCTITLE") + if disable_setproctitle: + return + + setproctitle = wandb.util.get_optional_module("setproctitle") + if setproctitle: + service_ver = 2 + pid = str(self._pid or 0) + transport = "s" if sock_port else "g" + port = sock_port or 0 + # this format is similar to the service token, but it's purely informative now + # (consider unifying this in the future) + service_id = f"{service_ver}-{pid}-{transport}-{port}" + proc_title = f"wandb-service({service_id})" + self._startup_debug_print("before_setproctitle") + setproctitle.setproctitle(proc_title) + self._startup_debug_print("after_setproctitle") + + def serve(self) -> None: + mux = StreamMux() + self._startup_debug_print("before_network") + sock_port = self._start_sock(mux=mux) + self._startup_debug_print("after_network") + self._inform_used_ports(sock_port=sock_port) + self._startup_debug_print("after_inform") + self._setup_proctitle(sock_port=sock_port) + self._startup_debug_print("before_loop") + mux.loop() + self._stop_servers() diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/service/service.py b/vllm/lib/python3.10/site-packages/wandb/sdk/service/service.py new file mode 100644 index 0000000000000000000000000000000000000000..cde7856d5bc8777cdba7ecd4b49f38631b12b3dd --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/sdk/service/service.py @@ -0,0 +1,242 @@ +"""Reliably launch and connect to backend server process (wandb service). 

Backend server process can be connected to using tcp sockets transport.
"""

import datetime
import os
import pathlib
import platform
import shutil
import subprocess
import sys
import tempfile
import time
from typing import TYPE_CHECKING, Any, Dict, Optional

from wandb import _sentry, termlog
from wandb.env import core_debug, error_reporting_enabled, is_require_legacy_service
from wandb.errors import Error, WandbCoreNotAvailableError
from wandb.errors.links import url_registry
from wandb.util import get_core_path, get_module

from . import _startup_debug, port_file

if TYPE_CHECKING:
    from wandb.sdk.wandb_settings import Settings


class ServiceStartProcessError(Error):
    """Raised when a known error occurs when launching wandb service."""


class ServiceStartTimeoutError(Error):
    """Raised when service start times out."""


class ServiceStartPortError(Error):
    """Raised when service start fails to find a port."""


class _Service:
    # Client-side launcher for the wandb service subprocess; discovers the
    # socket port via a PortFile rendezvous.
    _settings: "Settings"
    _sock_port: Optional[int]
    _internal_proc: Optional[subprocess.Popen]
    _startup_debug_enabled: bool

    def __init__(
        self,
        settings: "Settings",
    ) -> None:
        self._settings = settings
        self._stub = None
        self._sock_port = None
        self._internal_proc = None
        self._startup_debug_enabled = _startup_debug.is_enabled()

        _sentry.configure_scope(tags=dict(settings), process_context="service")

    def _startup_debug_print(self, message: str) -> None:
        # Timestamped launch trace; enabled via _WANDB_STARTUP_DEBUG.
        if not self._startup_debug_enabled:
            return
        _startup_debug.print_message(message)

    def _wait_for_ports(
        self, fname: str, proc: Optional[subprocess.Popen] = None
    ) -> None:
        """Wait for the service to write the port file and then read it.

        Args:
            fname: The path to the port file.
            proc: The process to wait for.

        Raises:
            ServiceStartTimeoutError: If the service takes too long to start.
            ServiceStartPortError: If the service writes an invalid port file or unable to read it.
            ServiceStartProcessError: If the service process exits unexpectedly.

        """
        time_max = time.monotonic() + self._settings.x_service_wait
        while time.monotonic() < time_max:
            # NOTE(review): poll() returns the exit code, so a clean exit
            # (returncode 0) is falsy and NOT detected here — only nonzero
            # exits trigger ServiceStartProcessError. Confirm intended.
            if proc and proc.poll():
                # process finished
                # define these variables for sentry context grab:
                # command = proc.args
                # sys_executable = sys.executable
                # which_python = shutil.which("python3")
                # proc_out = proc.stdout.read()
                # proc_err = proc.stderr.read()
                context = dict(
                    command=proc.args,
                    sys_executable=sys.executable,
                    which_python=shutil.which("python3"),
                    proc_out=proc.stdout.read() if proc.stdout else "",
                    proc_err=proc.stderr.read() if proc.stderr else "",
                )
                raise ServiceStartProcessError(
                    f"The wandb service process exited with {proc.returncode}. "
                    "Ensure that `sys.executable` is a valid python interpreter. "
                    "You can override it with the `_executable` setting "
                    "or with the `WANDB_X_EXECUTABLE` environment variable."
                    f"\n{context}",
                    context=context,
                )
            if not os.path.isfile(fname):
                # Port file not written yet; poll every 200 ms.
                time.sleep(0.2)
                continue
            try:
                pf = port_file.PortFile()
                pf.read(fname)
                if not pf.is_valid:
                    # File exists but is incomplete (no EOF sentinel yet).
                    time.sleep(0.2)
                    continue
                self._sock_port = pf.sock_port
            except Exception as e:
                # todo: point at the docs. this could be due to a number of reasons,
                # for example, being unable to write to the port file etc.
                raise ServiceStartPortError(
                    f"Failed to allocate port for wandb service: {e}."
                )
            return
        raise ServiceStartTimeoutError(
            "Timed out waiting for wandb service to start after "
            f"{self._settings.x_service_wait} seconds. "
            "Try increasing the timeout with the `_service_wait` setting."
        )

    def _launch_server(self) -> None:
        """Launch server and set ports."""
        # References for starting processes
        # - https://github.com/wandb/wandb/blob/archive/old-cli/wandb/__init__.py
        # - https://stackoverflow.com/questions/1196074/how-to-start-a-background-process-in-python
        self._startup_debug_print("launch")

        kwargs: Dict[str, Any] = dict(close_fds=True)
        # flags to handle keyboard interrupt signal that is causing a hang
        if platform.system() == "Windows":
            kwargs.update(creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)  # type: ignore [attr-defined]
        else:
            kwargs.update(start_new_session=True)

        pid = str(os.getpid())

        with tempfile.TemporaryDirectory() as tmpdir:
            fname = os.path.join(tmpdir, f"port-{pid}.txt")

            executable = self._settings.x_executable
            exec_cmd_list = [executable, "-m"]

            service_args = []

            if not is_require_legacy_service():
                # Preferred path: launch the wandb-core binary directly
                # (exec_cmd_list is cleared; core_path is argv[0]).
                try:
                    core_path = get_core_path()
                except WandbCoreNotAvailableError as e:
                    _sentry.reraise(e)

                service_args.extend([core_path])

                if not error_reporting_enabled():
                    service_args.append("--no-observability")

                if core_debug(default="False"):
                    service_args.extend(["--log-level", "-4"])

                exec_cmd_list = []
                termlog(
                    "Using wandb-core as the SDK backend. Please refer to "
                    f"{url_registry.url('wandb-core')} for more information.",
                    repeat=False,
                )
            else:
                # Legacy path: `python -m wandb service --debug`.
                service_args.extend(["wandb", "service", "--debug"])

            service_args += [
                "--port-filename",
                fname,
                "--pid",
                pid,
            ]

            if os.environ.get("WANDB_SERVICE_PROFILE") == "memray":
                # Optional memory profiling: wrap the service in
                # `python -m memray run`, targeting the CLI shim script.
                _ = get_module(
                    "memray",
                    required=(
                        "wandb service memory profiling requires memray, "
                        "install with `pip install memray`"
                    ),
                )

                time_tag = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
                output_file = f"wandb_service.memray.{time_tag}.bin"
                cli_executable = (
                    pathlib.Path(__file__).parent.parent.parent.parent
                    / "tools"
                    / "cli.py"
                )
                exec_cmd_list = [
                    executable,
                    "-m",
                    "memray",
                    "run",
                    "-o",
                    output_file,
                ]
                service_args[0] = str(cli_executable)
                termlog(
                    f"wandb service memory profiling enabled, output file: {output_file}"
                )
                termlog(
                    f"Convert to flamegraph with: `python -m memray flamegraph {output_file}`"
                )

            try:
                internal_proc = subprocess.Popen(
                    exec_cmd_list + service_args,  # type: ignore[arg-type]
                    env=os.environ,
                    **kwargs,
                )
            except Exception as e:
                _sentry.reraise(e)

            # Block until the child advertises its port (or fails/times out);
            # must happen inside the TemporaryDirectory context since the
            # port file lives there.
            self._startup_debug_print("wait_ports")
            try:
                self._wait_for_ports(fname, proc=internal_proc)
            except Exception as e:
                _sentry.reraise(e)
            self._startup_debug_print("wait_ports_done")
            self._internal_proc = internal_proc
            self._startup_debug_print("launch_done")

    def start(self) -> None:
        self._launch_server()

    @property
    def sock_port(self) -> Optional[int]:
        return self._sock_port

    def join(self) -> int:
        # Wait for the service subprocess to exit; 0 if never started.
        ret = 0
        if self._internal_proc:
            ret = self._internal_proc.wait()
        return ret
diff --git a/vllm/lib/python3.10/site-packages/wandb/sdk/service/streams.py b/vllm/lib/python3.10/site-packages/wandb/sdk/service/streams.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3c385363ba2dd38ad8988812a915cec9da67c47
--- /dev/null
+++
b/vllm/lib/python3.10/site-packages/wandb/sdk/service/streams.py
@@ -0,0 +1,427 @@
"""streams: class that manages internal threads for each run.

StreamThread: Thread that runs internal.wandb_internal()
StreamRecord: All the external state for the internal thread (queues, etc)
StreamAction: Lightweight record for stream ops for thread safety
StreamMux: Container for dictionary of stream threads per runid
"""

from __future__ import annotations

import functools
import multiprocessing
import queue
import threading
import time
from threading import Event
from typing import Any, Callable

import psutil

import wandb
import wandb.util
from wandb.proto import wandb_internal_pb2 as pb
from wandb.sdk.internal.settings_static import SettingsStatic
from wandb.sdk.lib import printer as printerlib
from wandb.sdk.lib import progress
from wandb.sdk.lib.mailbox import (
    Mailbox,
    MailboxProbe,
    MailboxProgress,
    MailboxProgressAll,
)
from wandb.sdk.wandb_run import Run

from ..interface.interface_relay import InterfaceRelay

# from wandb.sdk.wandb_settings import Settings


class StreamThread(threading.Thread):
    """Class to running internal process as a thread."""

    def __init__(self, target: Callable, kwargs: dict[str, Any]) -> None:
        threading.Thread.__init__(self)
        self.name = "StreamThr"
        self._target = target
        self._kwargs = kwargs
        # Daemon: must not keep the service process alive on shutdown.
        self.daemon = True

    def run(self) -> None:
        # TODO: catch exceptions and report errors to scheduler
        self._target(**self._kwargs)


class StreamRecord:
    # Per-run external state: the queues, relay interface, and thread that
    # service one internal stream.
    _record_q: queue.Queue[pb.Record]
    _result_q: queue.Queue[pb.Result]
    _relay_q: queue.Queue[pb.Result]
    _iface: InterfaceRelay
    _thread: StreamThread
    _settings: SettingsStatic
    _started: bool

    def __init__(self, settings: SettingsStatic, mailbox: Mailbox) -> None:
        self._started = False
        self._mailbox = mailbox
        self._record_q = queue.Queue()
        self._result_q = queue.Queue()
        self._relay_q = queue.Queue()
        process = multiprocessing.current_process()
        self._iface = InterfaceRelay(
            record_q=self._record_q,
            result_q=self._result_q,
            relay_q=self._relay_q,
            process=process,
            process_check=False,
            mailbox=self._mailbox,
        )
        self._settings = settings

    def start_thread(self, thread: StreamThread) -> None:
        # Start the internal thread and block until it answers a status
        # request, proving it is up.
        self._thread = thread
        thread.start()
        self._wait_thread_active()

    def _wait_thread_active(self) -> None:
        # timeout=-1 means wait forever for the status response.
        self._iface.deliver_status().wait(timeout=-1)

    def join(self) -> None:
        self._iface.join()
        if self._thread:
            self._thread.join()

    def drop(self) -> None:
        # Mark the relay to discard further records (used for drop_stream).
        self._iface._drop = True

    @property
    def interface(self) -> InterfaceRelay:
        return self._iface

    def mark_started(self) -> None:
        self._started = True

    def update(self, settings: SettingsStatic) -> None:
        # Note: Currently just overriding the _settings attribute
        # once we use Settings Class we might want to properly update it
        self._settings = settings


class StreamAction:
    # One queued mux operation; carries an Event so the requesting thread
    # can block until the mux loop has processed it.
    _action: str
    _stream_id: str
    _processed: Event
    _data: Any

    def __init__(self, action: str, stream_id: str, data: Any | None = None):
        self._action = action
        self._stream_id = stream_id
        self._data = data
        self._processed = Event()

    def __repr__(self) -> str:
        return f"StreamAction({self._action},{self._stream_id})"

    def wait_handled(self) -> None:
        self._processed.wait()

    def set_handled(self) -> None:
        self._processed.set()

    @property
    def stream_id(self) -> str:
        return self._stream_id


class StreamMux:
    # Owns all StreamRecords; all mutations funnel through an action queue
    # consumed by a single loop thread, so public methods are thread-safe.
    _streams_lock: threading.Lock
    _streams: dict[str, StreamRecord]
    _port: int | None
    _pid: int | None
    _action_q: queue.Queue[StreamAction]
    _stopped: Event
    _pid_checked_ts: float | None
    _mailbox: Mailbox

    def __init__(self) -> None:
        self._streams_lock = threading.Lock()
        self._streams = dict()
        self._port = None
        self._pid = None
        self._stopped = Event()
        self._action_q = queue.Queue()
        self._pid_checked_ts = None
        self._mailbox = Mailbox()
        self._mailbox.enable_keepalive()

    def _get_stopped_event(self) -> Event:
        # TODO: clean this up, there should be a better way to abstract this
        return self._stopped

    def set_port(self, port: int) -> None:
        self._port = port

    def set_pid(self, pid: int) -> None:
        # pid of the parent process; used for orphan detection.
        self._pid = pid

    def add_stream(self, stream_id: str, settings: SettingsStatic) -> None:
        # Each public op enqueues an action and blocks until the loop thread
        # has handled it (synchronous from the caller's perspective).
        action = StreamAction(action="add", stream_id=stream_id, data=settings)
        self._action_q.put(action)
        action.wait_handled()

    def start_stream(self, stream_id: str) -> None:
        action = StreamAction(action="start", stream_id=stream_id)
        self._action_q.put(action)
        action.wait_handled()

    def update_stream(self, stream_id: str, settings: SettingsStatic) -> None:
        action = StreamAction(action="update", stream_id=stream_id, data=settings)
        self._action_q.put(action)
        action.wait_handled()

    def del_stream(self, stream_id: str) -> None:
        action = StreamAction(action="del", stream_id=stream_id)
        self._action_q.put(action)
        action.wait_handled()

    def drop_stream(self, stream_id: str) -> None:
        action = StreamAction(action="drop", stream_id=stream_id)
        self._action_q.put(action)
        action.wait_handled()

    def teardown(self, exit_code: int) -> None:
        # stream_id is unused for teardown; "na" is a placeholder.
        action = StreamAction(action="teardown", stream_id="na", data=exit_code)
        self._action_q.put(action)
        action.wait_handled()

    def stream_names(self) -> list[str]:
        with self._streams_lock:
            names = list(self._streams.keys())
            return names

    def has_stream(self, stream_id: str) -> bool:
        with self._streams_lock:
            return stream_id in self._streams

    def get_stream(self, stream_id: str) -> StreamRecord:
        with self._streams_lock:
            stream = self._streams[stream_id]
            return stream

    def _process_add(self, action: StreamAction) -> None:
        # Create the stream record and spin up its internal thread before
        # registering it, so a registered stream is always live.
        stream = StreamRecord(action._data, mailbox=self._mailbox)
        # run_id = action.stream_id  # will want to fix if a streamid != runid
        settings = action._data
        thread = StreamThread(
            target=wandb.wandb_sdk.internal.internal.wandb_internal,  # type: ignore
            kwargs=dict(
                settings=settings,
                record_q=stream._record_q,
                result_q=stream._result_q,
                port=self._port,
                user_pid=self._pid,
            ),
        )
        stream.start_thread(thread)
        with self._streams_lock:
            self._streams[action._stream_id] = stream

    def _process_start(self, action: StreamAction) -> None:
        with self._streams_lock:
            self._streams[action._stream_id].mark_started()

    def _process_update(self, action: StreamAction) -> None:
        with self._streams_lock:
            self._streams[action._stream_id].update(action._data)

    def _process_del(self, action: StreamAction) -> None:
        with self._streams_lock:
            stream = self._streams.pop(action._stream_id)
            stream.join()
        # TODO: we assume stream has already been shutdown.  should we verify?

    def _process_drop(self, action: StreamAction) -> None:
        # Drop discards pending records (via stream.drop()) before joining.
        with self._streams_lock:
            if action._stream_id in self._streams:
                stream = self._streams.pop(action._stream_id)
                stream.drop()
                stream.join()

    def _on_probe_exit(self, probe_handle: MailboxProbe, stream: StreamRecord) -> None:
        # Mailbox probe callback: harvest the previous poll_exit result (if
        # ready) and immediately dispatch the next poll_exit request.
        handle = probe_handle.get_mailbox_handle()
        if handle:
            result = handle.wait(timeout=0, release=False)
            if not result:
                return
            probe_handle.set_probe_result(result)
        handle = stream.interface.deliver_poll_exit()
        probe_handle.set_mailbox_handle(handle)

    def _on_progress_exit(self, progress_handle: MailboxProgress) -> None:
        pass

    def _on_progress_exit_all(
        self,
        progress_printer: progress.ProgressPrinter,
        progress_all_handle: MailboxProgressAll,
    ) -> None:
        # Aggregate poll_exit progress across all exiting streams and feed it
        # to the shared progress printer; also abort if the parent is gone.
        probe_handles: list[MailboxProbe] = []
        progress_handles = progress_all_handle.get_progress_handles()
        for progress_handle in progress_handles:
            probe_handles.extend(progress_handle.get_probe_handles())

        assert probe_handles

        if self._check_orphaned():
            self._stopped.set()

        poll_exit_responses: list[pb.PollExitResponse] = []
        for probe_handle in probe_handles:
            result = probe_handle.get_probe_result()
            if result:
                poll_exit_responses.append(result.response.poll_exit_response)

        progress_printer.update(poll_exit_responses)

    def _finish_all(self, streams: dict[str, StreamRecord], exit_code: int) -> None:
        # Finish every stream: deliver exit, wait (with progress display),
        # fetch final summaries/histories, print run footers, then join.
        if not streams:
            return

        printer = printerlib.new_printer()

        # fixme: for now we have a single printer for all streams,
        # and jupyter is disabled if at least single stream's setting set `_jupyter` to false
        exit_handles = []

        # only finish started streams, non started streams failed early
        started_streams: dict[str, StreamRecord] = {}
        not_started_streams: dict[str, StreamRecord] = {}
        for stream_id, stream in streams.items():
            d = started_streams if stream._started else not_started_streams
            d[stream_id] = stream

        for stream in started_streams.values():
            handle = stream.interface.deliver_exit(exit_code)
            handle.add_progress(self._on_progress_exit)
            handle.add_probe(functools.partial(self._on_probe_exit, stream=stream))
            exit_handles.append(handle)

            # this message is confusing, we should remove it
            # Run._footer_exit_status_info(
            #     exit_code, settings=stream._settings, printer=printer  # type: ignore
            # )

        with progress.progress_printer(printer) as progress_printer:
            # todo: should we wait for the max timeout (?) of all exit handles or just wait forever?
            # timeout = max(stream._settings._exit_timeout for stream in streams.values())
            got_result = self._mailbox.wait_all(
                handles=exit_handles,
                timeout=-1,
                on_progress_all=functools.partial(
                    self._on_progress_exit_all,
                    progress_printer,
                ),
            )
            assert got_result

        # These could be done in parallel in the future
        for _sid, stream in started_streams.items():
            # dispatch all our final requests
            poll_exit_handle = stream.interface.deliver_poll_exit()
            final_summary_handle = stream.interface.deliver_get_summary()
            sampled_history_handle = stream.interface.deliver_request_sampled_history()
            internal_messages_handle = stream.interface.deliver_internal_messages()

            result = internal_messages_handle.wait(timeout=-1)
            assert result
            internal_messages_response = result.response.internal_messages_response

            result = poll_exit_handle.wait(timeout=-1)
            assert result
            poll_exit_response = result.response.poll_exit_response

            result = sampled_history_handle.wait(timeout=-1)
            assert result
            sampled_history = result.response.sampled_history_response

            result = final_summary_handle.wait(timeout=-1)
            assert result
            final_summary = result.response.get_summary_response

            Run._footer(
                sampled_history=sampled_history,
                final_summary=final_summary,
                poll_exit_response=poll_exit_response,
                internal_messages_response=internal_messages_response,
                settings=stream._settings,  # type: ignore
                printer=printer,
            )
            stream.join()

        # not started streams need to be cleaned up
        for stream in not_started_streams.values():
            stream.join()

    def _process_teardown(self, action: StreamAction) -> None:
        # Finish all streams outside the lock (the finish sequence is long),
        # then clear the registry and stop the loop.
        exit_code: int = action._data
        with self._streams_lock:
            # TODO: mark streams to prevent new modifications?
            streams_copy = self._streams.copy()
        self._finish_all(streams_copy, exit_code)
        with self._streams_lock:
            self._streams = dict()
        self._stopped.set()

    def _process_action(self, action: StreamAction) -> None:
        # Single dispatch point for all queued actions.
        if action._action == "add":
            self._process_add(action)
            return
        if action._action == "update":
            self._process_update(action)
            return
        if action._action == "start":
            self._process_start(action)
            return
        if action._action == "del":
            self._process_del(action)
            return
        if action._action == "drop":
            self._process_drop(action)
            return
        if action._action == "teardown":
            self._process_teardown(action)
            return
        raise AssertionError(f"Unsupported action: {action._action}")

    def _check_orphaned(self) -> bool:
        # True when the parent process no longer exists; throttled to at
        # most one psutil check every 2 seconds.
        if not self._pid:
            return False
        time_now = time.time()
        # if we have checked already and it was less than 2 seconds ago
        if self._pid_checked_ts and time_now < self._pid_checked_ts + 2:
            return False
        self._pid_checked_ts = time_now
        return not psutil.pid_exists(self._pid)

    def _loop(self) -> None:
        # Main service loop: drain actions until stopped (by teardown or by
        # detecting that the parent process died).
        while not self._stopped.is_set():
            if self._check_orphaned():
                # parent process is gone, let other threads know we need to shut down
                self._stopped.set()
            try:
                action = self._action_q.get(timeout=1)
            except queue.Empty:
                continue
            self._process_action(action)
            action.set_handled()
            self._action_q.task_done()
        # Drain bookkeeping: wait for task_done() accounting to settle.
        self._action_q.join()

    def loop(self) -> None:
        try:
            self._loop()
        except Exception as e:
            raise e

    def cleanup(self) -> None:
        # Intentionally a no-op today; hook for socket-server error paths.
        pass