diff --git a/.gitattributes b/.gitattributes index 1a6228e6e12ae732e6d05a697f5f60daa06c9481..5007c91ec8f7c42ad8dde69dd035d81be4e9b854 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1491,3 +1491,4 @@ parrot/lib/python3.10/site-packages/scipy/stats/_biasedurn.cpython-310-x86_64-li parrot/lib/python3.10/site-packages/scipy/stats/_rcont/rcont.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/ray/data/__pycache__/dataset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/torchvision/image.so filter=lfs diff=lfs merge=lfs -text +vllm/lib/python3.10/site-packages/torchvision/_C.so filter=lfs diff=lfs merge=lfs -text diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Long_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Long_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b005340c2b5cbabb3bd0b73f4761f58988257a8a --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Long_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _cast_Long { + using schema = at::Tensor (const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cast_Long") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cast_Long(Tensor self, bool non_blocking=False) -> Tensor") + static at::Tensor call(const at::Tensor & self, bool non_blocking); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh.h new file mode 100644 index 0000000000000000000000000000000000000000..67272e5ac6dc7f3c31b60826fe3a5d622c2d346b --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_foreach_sinh(Tensor[] self) -> Tensor[] +inline ::std::vector _foreach_sinh(at::TensorList self) { + return at::_ops::_foreach_sinh::call(self); +} + +// aten::_foreach_sinh_(Tensor(a!)[] self) -> () +inline void _foreach_sinh_(at::TensorList self) { + return at::_ops::_foreach_sinh_::call(self); +} + +// aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> () +inline void _foreach_sinh_out(at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_sinh_out::call(self, out); +} +// aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> () +inline void _foreach_sinh_outf(at::TensorList self, at::TensorList out) { + return 
at::_ops::_foreach_sinh_out::call(self, out); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_trunc.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_trunc.h new file mode 100644 index 0000000000000000000000000000000000000000..48fcbeb0ca9ba1d9ab20817def4d1241a454d1f7 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_trunc.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_foreach_trunc(Tensor[] self) -> Tensor[] +inline ::std::vector _foreach_trunc(at::TensorList self) { + return at::_ops::_foreach_trunc::call(self); +} + +// aten::_foreach_trunc_(Tensor(a!)[] self) -> () +inline void _foreach_trunc_(at::TensorList self) { + return at::_ops::_foreach_trunc_::call(self); +} + +// aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> () +inline void _foreach_trunc_out(at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_trunc_out::call(self, out); +} +// aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> () +inline void _foreach_trunc_outf(at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_trunc_out::call(self, out); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_make_per_tensor_quantized_tensor_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_make_per_tensor_quantized_tensor_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f9fd0f9ba350eef414b477217538093f52167c82 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_make_per_tensor_quantized_tensor_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + 
+// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor _make_per_tensor_quantized_tensor(const at::Tensor & self, double scale, int64_t zero_point); + +} // namespace cpu +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_softmax.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_softmax.h new file mode 100644 index 0000000000000000000000000000000000000000..db194f99a9e2d912e5ef7f3f4e80848e9950bf03 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_softmax.h @@ -0,0 +1,49 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor +inline at::Tensor _sparse_softmax(const at::Tensor & self, int64_t dim, c10::optional dtype=c10::nullopt) { + return at::_ops::_sparse_softmax_int::call(self, dim, dtype); +} + +// aten::_sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? 
dtype=None) -> Tensor +inline at::Tensor _sparse_softmax(const at::Tensor & self, at::Dimname dim, c10::optional dtype=c10::nullopt) { + return at::_ops::_sparse_softmax_Dimname::call(self, dim, dtype); +} + +// aten::_sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor +inline at::Tensor _sparse_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) { + return at::_ops::_sparse_softmax::call(self, dim, half_to_float); +} + +// aten::_sparse_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _sparse_softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float) { + return at::_ops::_sparse_softmax_out::call(self, dim, half_to_float, out); +} +// aten::_sparse_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _sparse_softmax_outf(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) { + return at::_ops::_sparse_softmax_out::call(self, dim, half_to_float, out); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_meta.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..09f8298a4f0b8bdcdcfe31cf7cb2e6091129744f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_bitwise_or_Tensor : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace native +} // namespace at diff --git 
a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cat_compositeexplicitautogradnonfunctional_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cat_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e504a5eaf81ddf258dd7682f86be0f21731d5c2e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cat_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor cat(const at::ITensorListRef & tensors, int64_t dim=0); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cummaxmin_backward.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cummaxmin_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..3150bf035b19ac5042c0fb1eba6642fdb191fea9 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cummaxmin_backward.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor +inline at::Tensor cummaxmin_backward(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim) { + return at::_ops::cummaxmin_backward::call(grad, input, indices, dim); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..502520a7bb66020d631daf8397bd4a4e42db55dd --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft_compositeimplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator 
signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor fft_hfft(const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt); +TORCH_API at::Tensor fft_hfft_symint(const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt); +TORCH_API at::Tensor & fft_hfft_out(at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt); +TORCH_API at::Tensor & fft_hfft_outf(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out); +TORCH_API at::Tensor & fft_hfft_symint_out(at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt); +TORCH_API at::Tensor & fft_hfft_symint_outf(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_irfft_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_irfft_native.h new file mode 100644 index 0000000000000000000000000000000000000000..cb5fbe2fd42160a74fd6a7c28925c3b2f0fa3fae --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_irfft_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor fft_irfft_symint(const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt); +TORCH_API at::Tensor & 
fft_irfft_symint_out(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft_native.h new file mode 100644 index 0000000000000000000000000000000000000000..0bae4be03f2045d0d49c37c675cc669a1f7276f2 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor fft_rfft_symint(const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt); +TORCH_API at::Tensor & fft_rfft_symint_out(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/floor_divide_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/floor_divide_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7090885b67804b4a1aab60661260a7bf6c4ddba4 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/floor_divide_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor floor_divide(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & floor_divide_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & floor_divide_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & floor_divide_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..724242bd5929ba52fe380f361cbdbe6cb32b9a6a --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API indices_copy { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::indices_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "indices_copy(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API indices_copy_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::indices_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "indices_copy.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/isclose_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/isclose_native.h new file mode 100644 index 0000000000000000000000000000000000000000..e9b83024877bd1a3c895d79d5da61d991ff86f06 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/isclose_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor isclose(const at::Tensor & self, const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/isreal_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/isreal_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a4e7e3a6ae918f437a65a7efa32ebca8ad052dd9 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/isreal_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor isreal(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..52225f1698ab5f577980a103947dc9577c883cc0 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_compositeexplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor masked_scatter(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source); +TORCH_API at::Tensor & masked_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source); +TORCH_API at::Tensor & masked_scatter_outf(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_cuda_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9a65e13304eee5a0cc875109a15a61596338787a --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor max_pool3d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); +TORCH_API at::Tensor & max_pool3d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); +TORCH_API at::Tensor & max_pool3d_with_indices_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2c82029f0839c00fd1c2be32092ab83307a1a434 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API ::std::tuple mkldnn_linear_backward_weights_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined); +TORCH_API ::std::tuple mkldnn_linear_backward_weights_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined, at::Tensor & out0, at::Tensor & out1); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/nanmean.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/nanmean.h new file mode 100644 index 0000000000000000000000000000000000000000..e029be3a8c10b22e78d053c687f1c59261f2bc41 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/nanmean.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor nanmean(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::nanmean::call(self, dim, keepdim, dtype); +} + +// aten::nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & nanmean_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::nanmean_out::call(self, dim, keepdim, dtype, out); +} +// aten::nanmean.out(Tensor self, int[1]? 
dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & nanmean_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, at::Tensor & out) { + return at::_ops::nanmean_out::call(self, dim, keepdim, dtype, out); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_batch_norm_backward_cuda_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_batch_norm_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b1623adcf06e4f7d9b3be3d08270f0dc2c994781 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_batch_norm_backward_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple native_batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_invstd, bool train, double eps, ::std::array output_mask); + +} // namespace cuda +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sign_meta.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sign_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..6939122aec61a6209d10a976933c1756b29188f9 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sign_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_sign : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1.h new file mode 100644 index 0000000000000000000000000000000000000000..7e873bcccf5b3bc3a6c730dbb0be8c8987560bbd --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::special_bessel_y1(Tensor self) -> Tensor +inline at::Tensor special_bessel_y1(const at::Tensor & self) { + return at::_ops::special_bessel_y1::call(self); +} + 
+// aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_bessel_y1_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_bessel_y1_out::call(self, out); +} +// aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_bessel_y1_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_bessel_y1_out::call(self, out); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_expm1_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_expm1_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c3c3b83389343c1198cb84baddcba7fd9dc8d71e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_expm1_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API special_expm1 { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_expm1") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_expm1(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API special_expm1_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_expm1") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_expm1.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_logit_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_logit_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d0870996dcb34312e8ca882f368f6d804da25523 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_logit_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor special_logit(const at::Tensor & self, c10::optional eps=c10::nullopt); +TORCH_API at::Tensor & special_logit_out(const at::Tensor & self, c10::optional eps, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_softmax_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_softmax_native.h new file mode 100644 index 0000000000000000000000000000000000000000..88f4499b3d97b453dd604c2082827793aaccf3b3 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_softmax_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor special_softmax(const at::Tensor & self, int64_t dim, c10::optional dtype=c10::nullopt); +} // namespace native +} // namespace at diff --git 
a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_backward_meta_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2e260aaf393dbde5eb16acebd676da1255cc9ca1 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_backward_meta_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor upsample_nearest3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt); +TORCH_API at::Tensor upsample_nearest3d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt); +TORCH_API at::Tensor & upsample_nearest3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt); +TORCH_API at::Tensor & upsample_nearest3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales_d, 
c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input); +TORCH_API at::Tensor & upsample_nearest3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt); +TORCH_API at::Tensor & upsample_nearest3d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input); + +} // namespace meta +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/zero_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/zero_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..533113c43f0fdae65703d1221af15b4e30aa8f57 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/zero_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & zero_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/INSTALLER b/vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/METADATA b/vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..e994a6cae11696feccf0779a4c4308d0325dd8a8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/METADATA @@ -0,0 +1,246 @@ +Metadata-Version: 2.4 +Name: attrs +Version: 24.3.0 +Summary: Classes Without Boilerplate +Project-URL: Documentation, https://www.attrs.org/ +Project-URL: Changelog, https://www.attrs.org/en/stable/changelog.html +Project-URL: GitHub, https://github.com/python-attrs/attrs +Project-URL: Funding, https://github.com/sponsors/hynek +Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-attrs?utm_source=pypi-attrs&utm_medium=pypi +Author-email: Hynek Schlawack +License-Expression: MIT +License-File: LICENSE +Keywords: attribute,boilerplate,class +Classifier: Development Status :: 5 - Production/Stable +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Typing :: Typed +Requires-Python: >=3.8 
+Provides-Extra: benchmark +Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'benchmark' +Requires-Dist: hypothesis; extra == 'benchmark' +Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'benchmark' +Requires-Dist: pympler; extra == 'benchmark' +Requires-Dist: pytest-codspeed; extra == 'benchmark' +Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'benchmark' +Requires-Dist: pytest-xdist[psutil]; extra == 'benchmark' +Requires-Dist: pytest>=4.3.0; extra == 'benchmark' +Provides-Extra: cov +Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'cov' +Requires-Dist: coverage[toml]>=5.3; extra == 'cov' +Requires-Dist: hypothesis; extra == 'cov' +Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'cov' +Requires-Dist: pympler; extra == 'cov' +Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'cov' +Requires-Dist: pytest-xdist[psutil]; extra == 'cov' +Requires-Dist: pytest>=4.3.0; extra == 'cov' +Provides-Extra: dev +Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'dev' +Requires-Dist: hypothesis; extra == 'dev' +Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'dev' +Requires-Dist: pre-commit-uv; extra == 'dev' +Requires-Dist: pympler; extra == 'dev' +Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'dev' +Requires-Dist: pytest-xdist[psutil]; extra == 'dev' +Requires-Dist: pytest>=4.3.0; extra == 'dev' +Provides-Extra: docs +Requires-Dist: cogapp; extra == 'docs' +Requires-Dist: furo; extra == 'docs' +Requires-Dist: myst-parser; extra == 
'docs' +Requires-Dist: sphinx; extra == 'docs' +Requires-Dist: sphinx-notfound-page; extra == 'docs' +Requires-Dist: sphinxcontrib-towncrier; extra == 'docs' +Requires-Dist: towncrier<24.7; extra == 'docs' +Provides-Extra: tests +Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'tests' +Requires-Dist: hypothesis; extra == 'tests' +Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'tests' +Requires-Dist: pympler; extra == 'tests' +Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'tests' +Requires-Dist: pytest-xdist[psutil]; extra == 'tests' +Requires-Dist: pytest>=4.3.0; extra == 'tests' +Provides-Extra: tests-mypy +Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'tests-mypy' +Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'tests-mypy' +Description-Content-Type: text/markdown + +

+ + attrs + +

+ + +*attrs* is the Python package that will bring back the **joy** of **writing classes** by relieving you from the drudgery of implementing object protocols (aka [dunder methods](https://www.attrs.org/en/latest/glossary.html#term-dunder-methods)). +[Trusted by NASA](https://docs.github.com/en/account-and-profile/setting-up-and-managing-your-github-profile/customizing-your-profile/personalizing-your-profile#list-of-qualifying-repositories-for-mars-2020-helicopter-contributor-achievement) for Mars missions since 2020! + +Its main goal is to help you to write **concise** and **correct** software without slowing down your code. + + +## Sponsors + +*attrs* would not be possible without our [amazing sponsors](https://github.com/sponsors/hynek). +Especially those generously supporting us at the *The Organization* tier and higher: + + + +

+ + + + + + + + + + +

+ + + +

+ Please consider joining them to help make attrs’s maintenance more sustainable! +

+ + + +## Example + +*attrs* gives you a class decorator and a way to declaratively define the attributes on that class: + + + +```pycon +>>> from attrs import asdict, define, make_class, Factory + +>>> @define +... class SomeClass: +... a_number: int = 42 +... list_of_numbers: list[int] = Factory(list) +... +... def hard_math(self, another_number): +... return self.a_number + sum(self.list_of_numbers) * another_number + + +>>> sc = SomeClass(1, [1, 2, 3]) +>>> sc +SomeClass(a_number=1, list_of_numbers=[1, 2, 3]) + +>>> sc.hard_math(3) +19 +>>> sc == SomeClass(1, [1, 2, 3]) +True +>>> sc != SomeClass(2, [3, 2, 1]) +True + +>>> asdict(sc) +{'a_number': 1, 'list_of_numbers': [1, 2, 3]} + +>>> SomeClass() +SomeClass(a_number=42, list_of_numbers=[]) + +>>> C = make_class("C", ["a", "b"]) +>>> C("foo", "bar") +C(a='foo', b='bar') +``` + +After *declaring* your attributes, *attrs* gives you: + +- a concise and explicit overview of the class's attributes, +- a nice human-readable `__repr__`, +- equality-checking methods, +- an initializer, +- and much more, + +*without* writing dull boilerplate code again and again and *without* runtime performance penalties. + +--- + +This example uses *attrs*'s modern APIs that have been introduced in version 20.1.0, and the *attrs* package import name that has been added in version 21.3.0. +The classic APIs (`@attr.s`, `attr.ib`, plus their serious-business aliases) and the `attr` package import name will remain **indefinitely**. + +Check out [*On The Core API Names*](https://www.attrs.org/en/latest/names.html) for an in-depth explanation! + + +### Hate Type Annotations!? + +No problem! +Types are entirely **optional** with *attrs*. 
+Simply assign `attrs.field()` to the attributes instead of annotating them with types: + +```python +from attrs import define, field + +@define +class SomeClass: + a_number = field(default=42) + list_of_numbers = field(factory=list) +``` + + +## Data Classes + +On the tin, *attrs* might remind you of `dataclasses` (and indeed, `dataclasses` [are a descendant](https://hynek.me/articles/import-attrs/) of *attrs*). +In practice it does a lot more and is more flexible. +For instance, it allows you to define [special handling of NumPy arrays for equality checks](https://www.attrs.org/en/stable/comparison.html#customization), allows more ways to [plug into the initialization process](https://www.attrs.org/en/stable/init.html#hooking-yourself-into-initialization), has a replacement for `__init_subclass__`, and allows for stepping through the generated methods using a debugger. + +For more details, please refer to our [comparison page](https://www.attrs.org/en/stable/why.html#data-classes), but generally speaking, we are more likely to commit crimes against nature to make things work that one would expect to work, but that are quite complicated in practice. + + +## Project Information + +- [**Changelog**](https://www.attrs.org/en/stable/changelog.html) +- [**Documentation**](https://www.attrs.org/) +- [**PyPI**](https://pypi.org/project/attrs/) +- [**Source Code**](https://github.com/python-attrs/attrs) +- [**Contributing**](https://github.com/python-attrs/attrs/blob/main/.github/CONTRIBUTING.md) +- [**Third-party Extensions**](https://github.com/python-attrs/attrs/wiki/Extensions-to-attrs) +- **Get Help**: use the `python-attrs` tag on [Stack Overflow](https://stackoverflow.com/questions/tagged/python-attrs) + + +### *attrs* for Enterprise + +Available as part of the [Tidelift Subscription](https://tidelift.com/?utm_source=lifter&utm_medium=referral&utm_campaign=hynek). 
+ +The maintainers of *attrs* and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source packages you use to build your applications. +Save time, reduce risk, and improve code health, while paying the maintainers of the exact packages you use. + +## Release Information + +### Backwards-incompatible Changes + +- Python 3.7 has been dropped. + [#1340](https://github.com/python-attrs/attrs/issues/1340) + + +### Changes + +- Introduce `attrs.NothingType`, for annotating types consistent with `attrs.NOTHING`. + [#1358](https://github.com/python-attrs/attrs/issues/1358) +- Allow mutating `__suppress_context__` and `__notes__` on frozen exceptions. + [#1365](https://github.com/python-attrs/attrs/issues/1365) +- `attrs.converters.optional()` works again when taking `attrs.converters.pipe()` or another Converter as its argument. + [#1372](https://github.com/python-attrs/attrs/issues/1372) +- *attrs* instances now support [`copy.replace()`](https://docs.python.org/3/library/copy.html#copy.replace). + [#1383](https://github.com/python-attrs/attrs/issues/1383) +- `attrs.validators.instance_of()`'s type hints now allow for union types. 
+ For example: `instance_of(str | int)` + [#1385](https://github.com/python-attrs/attrs/issues/1385) + + + +--- + +[Full changelog →](https://www.attrs.org/en/stable/changelog.html) diff --git a/vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/RECORD b/vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..55bab74f9a2e5489a5f49b1fb6afd1e91acac075 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/RECORD @@ -0,0 +1,56 @@ +attr/__init__.py,sha256=fOYIvt1eGSqQre4uCS3sJWKZ0mwAuC8UD6qba5OS9_U,2057 +attr/__init__.pyi,sha256=QIXnnHPoucmDWkbpNsWTP-cgJ1bn8le7DjyRa_wYdew,11281 +attr/__pycache__/__init__.cpython-310.pyc,, +attr/__pycache__/_cmp.cpython-310.pyc,, +attr/__pycache__/_compat.cpython-310.pyc,, +attr/__pycache__/_config.cpython-310.pyc,, +attr/__pycache__/_funcs.cpython-310.pyc,, +attr/__pycache__/_make.cpython-310.pyc,, +attr/__pycache__/_next_gen.cpython-310.pyc,, +attr/__pycache__/_version_info.cpython-310.pyc,, +attr/__pycache__/converters.cpython-310.pyc,, +attr/__pycache__/exceptions.cpython-310.pyc,, +attr/__pycache__/filters.cpython-310.pyc,, +attr/__pycache__/setters.cpython-310.pyc,, +attr/__pycache__/validators.cpython-310.pyc,, +attr/_cmp.py,sha256=3umHiBtgsEYtvNP_8XrQwTCdFoZIX4DEur76N-2a3X8,4123 +attr/_cmp.pyi,sha256=U-_RU_UZOyPUEQzXE6RMYQQcjkZRY25wTH99sN0s7MM,368 +attr/_compat.py,sha256=4hlXbWhdDjQCDK6FKF1EgnZ3POiHgtpp54qE0nxaGHg,2704 +attr/_config.py,sha256=dGq3xR6fgZEF6UBt_L0T-eUHIB4i43kRmH0P28sJVw8,843 +attr/_funcs.py,sha256=5-tUKJtp3h5El55EcDl6GWXFp68fT8D8U7uCRN6497I,15854 +attr/_make.py,sha256=orKSf6C-B1eZfpat4lbAtxvmSyE_yxlG8zY9115ufWk,94157 +attr/_next_gen.py,sha256=7FRkbtl_N017SuBhf_Vw3mw2c2pGZhtCGOzadgz7tp4,24395 +attr/_typing_compat.pyi,sha256=XDP54TUn-ZKhD62TOQebmzrwFyomhUCoGRpclb6alRA,469 +attr/_version_info.py,sha256=exSqb3b5E-fMSsgZAlEw9XcLpEgobPORCZpcaEglAM4,2121 
+attr/_version_info.pyi,sha256=x_M3L3WuB7r_ULXAWjx959udKQ4HLB8l-hsc1FDGNvk,209 +attr/converters.py,sha256=GlDeOzPeTFgeBBLbj9G57Ez5lAk68uhSALRYJ_exe84,3861 +attr/converters.pyi,sha256=orU2bff-VjQa2kMDyvnMQV73oJT2WRyQuw4ZR1ym1bE,643 +attr/exceptions.py,sha256=HRFq4iybmv7-DcZwyjl6M1euM2YeJVK_hFxuaBGAngI,1977 +attr/exceptions.pyi,sha256=zZq8bCUnKAy9mDtBEw42ZhPhAUIHoTKedDQInJD883M,539 +attr/filters.py,sha256=ZBiKWLp3R0LfCZsq7X11pn9WX8NslS2wXM4jsnLOGc8,1795 +attr/filters.pyi,sha256=3J5BG-dTxltBk1_-RuNRUHrv2qu1v8v4aDNAQ7_mifA,208 +attr/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +attr/setters.py,sha256=5-dcT63GQK35ONEzSgfXCkbB7pPkaR-qv15mm4PVSzQ,1617 +attr/setters.pyi,sha256=NnVkaFU1BB4JB8E4JuXyrzTUgvtMpj8p3wBdJY7uix4,584 +attr/validators.py,sha256=WaB1HLAHHqRHWsrv_K9H-sJ7ESil3H3Cmv2d8TtVZx4,20046 +attr/validators.pyi,sha256=s2WhKPqskxbsckJfKk8zOuuB088GfgpyxcCYSNFLqNU,2603 +attrs-24.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +attrs-24.3.0.dist-info/METADATA,sha256=f9hhECeAUyS7iewHPRuMLDy1tpJ6vyy8R_TKUnCmiA8,11654 +attrs-24.3.0.dist-info/RECORD,, +attrs-24.3.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +attrs-24.3.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87 +attrs-24.3.0.dist-info/licenses/LICENSE,sha256=iCEVyV38KvHutnFPjsbVy8q_Znyv-HKfQkINpj9xTp8,1109 +attrs/__init__.py,sha256=qeQJZ4O08yczSn840v9bYOaZyRE81WsVi-QCrY3krCU,1107 +attrs/__init__.pyi,sha256=nZmInocjM7tHV4AQw0vxO_fo6oJjL_PonlV9zKKW8DY,7931 +attrs/__pycache__/__init__.cpython-310.pyc,, +attrs/__pycache__/converters.cpython-310.pyc,, +attrs/__pycache__/exceptions.cpython-310.pyc,, +attrs/__pycache__/filters.cpython-310.pyc,, +attrs/__pycache__/setters.cpython-310.pyc,, +attrs/__pycache__/validators.cpython-310.pyc,, +attrs/converters.py,sha256=8kQljrVwfSTRu8INwEk8SI0eGrzmWftsT7rM0EqyohM,76 +attrs/exceptions.py,sha256=ACCCmg19-vDFaDPY9vFl199SPXCQMN_bENs4DALjzms,76 
+attrs/filters.py,sha256=VOUMZug9uEU6dUuA0dF1jInUK0PL3fLgP0VBS5d-CDE,73 +attrs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +attrs/setters.py,sha256=eL1YidYQV3T2h9_SYIZSZR1FAcHGb1TuCTy0E0Lv2SU,73 +attrs/validators.py,sha256=xcy6wD5TtTkdCG1f4XWbocPSO0faBjk5IfVJfP6SUj0,76 diff --git a/vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/REQUESTED b/vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/licenses/LICENSE b/vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..2bd6453d255e19b973f19b128596a8b6dd65b2c3 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Hynek Schlawack and the attrs contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vllm/lib/python3.10/site-packages/click/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/click/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..323354e922ddb98bda8291f777148693ac46a5e7 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/click/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/tokenizers-0.21.0.dist-info/INSTALLER b/vllm/lib/python3.10/site-packages/tokenizers-0.21.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/tokenizers-0.21.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/vllm/lib/python3.10/site-packages/tokenizers-0.21.0.dist-info/RECORD b/vllm/lib/python3.10/site-packages/tokenizers-0.21.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..ad4dd6141958f110fe6118120dc2b66047318631 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/tokenizers-0.21.0.dist-info/RECORD @@ -0,0 +1,46 @@ +tokenizers-0.21.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +tokenizers-0.21.0.dist-info/METADATA,sha256=oNldYkLKpnavqOq1XABp8c_yNaR65mGu_qaFlD0St2M,6719 +tokenizers-0.21.0.dist-info/RECORD,, +tokenizers-0.21.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tokenizers-0.21.0.dist-info/WHEEL,sha256=PuLiPGpD0eVcoUkb9lueobt7VbCYShlDtLaTRPpT7Z0,127 +tokenizers/__init__.py,sha256=ZE5ZagUvobBScrHBQdEobhx4wqM0bsq9F9aLYkBNjYQ,2615 +tokenizers/__init__.pyi,sha256=jw34WZXaYu8NBBJ2_cypfOqJYxI7CXKPzlveisXw4XQ,40182 
+tokenizers/__pycache__/__init__.cpython-310.pyc,, +tokenizers/decoders/__init__.py,sha256=hfwM6CFUDvlMGGL4-xsaaYz81K9P5rQI5ZL5UHWK8Y4,372 +tokenizers/decoders/__init__.pyi,sha256=U0dfPVxoGpb-RmNKzZMZebe0fK2riRMbxQh9yJMHjYE,7378 +tokenizers/decoders/__pycache__/__init__.cpython-310.pyc,, +tokenizers/implementations/__init__.py,sha256=VzAsplaIo7rl4AFO8Miu7ig7MfZjvonwVblZw01zR6M,310 +tokenizers/implementations/__pycache__/__init__.cpython-310.pyc,, +tokenizers/implementations/__pycache__/base_tokenizer.cpython-310.pyc,, +tokenizers/implementations/__pycache__/bert_wordpiece.cpython-310.pyc,, +tokenizers/implementations/__pycache__/byte_level_bpe.cpython-310.pyc,, +tokenizers/implementations/__pycache__/char_level_bpe.cpython-310.pyc,, +tokenizers/implementations/__pycache__/sentencepiece_bpe.cpython-310.pyc,, +tokenizers/implementations/__pycache__/sentencepiece_unigram.cpython-310.pyc,, +tokenizers/implementations/base_tokenizer.py,sha256=2TFZhLupaJiMDYGJuUNmxYJv-cnR8bDHmbMzaYpFROs,14206 +tokenizers/implementations/bert_wordpiece.py,sha256=sKCum0FKPYdSgJFJN8LDerVBoTDRSqyqSdrcm-lvQqI,5520 +tokenizers/implementations/byte_level_bpe.py,sha256=OA_jyy3EQmYTa6hnf-EKwLOFuyroqFYOJz25ysM2BUk,4289 +tokenizers/implementations/char_level_bpe.py,sha256=Q2ZEAW0xMQHF7YCUtmplwaxbU-J0P2NK4PJGMxUb-_c,5466 +tokenizers/implementations/sentencepiece_bpe.py,sha256=LwrofoohnUfME2lK2lQYoyQIhP84RP0CIlHRaj0hyNs,3738 +tokenizers/implementations/sentencepiece_unigram.py,sha256=SYiVXL8ZtqLXKpuqwnwmrfxgGotu8yAkOu7dLztEXIo,7580 +tokenizers/models/__init__.py,sha256=eJZ4HTAQZpxnKILNylWaTFqxXy-Ba6OKswWN47feeV8,176 +tokenizers/models/__init__.pyi,sha256=clPTwiyjz7FlVdEuwo_3Wa_TmQrbZhW0SGmnNylepnY,16929 +tokenizers/models/__pycache__/__init__.cpython-310.pyc,, +tokenizers/normalizers/__init__.py,sha256=_06w4cqRItveEgIddYaLMScgkSOkIAMIzYCesb5AA4U,841 +tokenizers/normalizers/__init__.pyi,sha256=dwfVsvg0YbeYoAaBSmKsImqL-tyxiDyHaaTFsZK4aZw,20897 
+tokenizers/normalizers/__pycache__/__init__.cpython-310.pyc,, +tokenizers/pre_tokenizers/__init__.py,sha256=wd6KYQA_RsGSQK-HeG9opTRhv4ttSRkyno2dk6az-PM,557 +tokenizers/pre_tokenizers/__init__.pyi,sha256=dLtaxOgcBa85vQC6byvfKGCOWTEi4c42IcqimfatksQ,23602 +tokenizers/pre_tokenizers/__pycache__/__init__.cpython-310.pyc,, +tokenizers/processors/__init__.py,sha256=xM2DEKwKtHIumHsszM8AMkq-AlaqvBZFXWgLU8SNhOY,307 +tokenizers/processors/__init__.pyi,sha256=hx767ZY8SHhxb_hiXPRxm-f_KcoR4XDx7vfK2c0lR-Q,11357 +tokenizers/processors/__pycache__/__init__.cpython-310.pyc,, +tokenizers/tokenizers.abi3.so,sha256=uPe1mVjvIUFcXkPyyik0lcgOjPT3LKlTtAOTuDhZAN0,8942016 +tokenizers/tools/__init__.py,sha256=xG8caB9OHC8cbB01S5vYV14HZxhO6eWbLehsb70ppio,55 +tokenizers/tools/__pycache__/__init__.cpython-310.pyc,, +tokenizers/tools/__pycache__/visualizer.cpython-310.pyc,, +tokenizers/tools/visualizer-styles.css,sha256=zAydq1oGWD8QEll4-eyL8Llw0B1sty_hpIE3tYxL02k,4850 +tokenizers/tools/visualizer.py,sha256=gi-E2NCP7FuG6ujpQOdalSTXUlaV85V6NI-ZPPTvA_4,14625 +tokenizers/trainers/__init__.py,sha256=UTu22AGcp76IvpW45xLRbJWET04NxPW6NfCb2YYz0EM,248 +tokenizers/trainers/__init__.pyi,sha256=3TwFKts4me7zQfVRcSTmtXYiP4XwcRjfAYtwqoZVtoQ,5382 +tokenizers/trainers/__pycache__/__init__.cpython-310.pyc,, diff --git a/vllm/lib/python3.10/site-packages/torchvision/_C.so b/vllm/lib/python3.10/site-packages/torchvision/_C.so new file mode 100644 index 0000000000000000000000000000000000000000..67c670ee4d20374d4af669fddf43a94602419fcc --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torchvision/_C.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d19d6a3341eb3c0e248ae08c06bff9ec797d1fca30659dc7ae2e527d55206c2a +size 7746688 diff --git a/vllm/lib/python3.10/site-packages/torchvision/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..2570c16b0804c0d753467e3d3873b97112fe9ff9 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/__pycache__/_internally_replaced_utils.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/__pycache__/_internally_replaced_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7b50f1d9e9f614b948c9977802c91305dd32515 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/__pycache__/_internally_replaced_utils.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/__pycache__/_meta_registrations.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/__pycache__/_meta_registrations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8227c574cb01b333c2e8a75a4e9492fe0f4265dc Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/__pycache__/_meta_registrations.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/__pycache__/_utils.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29663fb700846e8988c4e6334b41fb8f7c3fd6cd Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/__pycache__/_utils.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/__pycache__/extension.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/__pycache__/extension.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1563161e8be15cec64f7aaae7e60fd5af46870d Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/__pycache__/extension.cpython-310.pyc differ diff --git 
a/vllm/lib/python3.10/site-packages/torchvision/__pycache__/version.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9773606136c472ed217c689be4f134ecfa32712 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/__pycache__/version.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ad5a4dc8e92c3f63e95f2d56ef22d68fb63cd98 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/_api.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73ef6f0a91cb2005e667b929342199f6f07c602d Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/_api.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/_meta.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/_meta.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5749d9544a11de1548ba54be0f8813bc898cb62 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/_meta.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/_utils.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6df3c658cb92061f1209fced4c9c36bfd410ec01 Binary files /dev/null and 
b/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/_utils.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/convnext.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/convnext.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a3c4acbf188e44a8fd399e5d3df3348339a35eb Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/convnext.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/efficientnet.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/efficientnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4dfa4403d0ffc4bc1a5804d1aecbd3647f15941 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/efficientnet.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/mobilenetv2.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/mobilenetv2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc7fea60a5414c61b123e4ddbeffeeea22e4724b Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/mobilenetv2.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/squeezenet.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/squeezenet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75058210f14251317eee7cd93e2e2634d3c4d167 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/squeezenet.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/swin_transformer.cpython-310.pyc 
b/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/swin_transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b437b94dd3c2abf465aafc8e131a3dc85d26692a Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/swin_transformer.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/vgg.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/vgg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2955010bcca57dd8c71bccc20c1b5c01786eaa6b Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/vgg.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/detection/__init__.py b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4146651c737971cc5a883b6750f2ded3051bc8ea --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__init__.py @@ -0,0 +1,7 @@ +from .faster_rcnn import * +from .fcos import * +from .keypoint_rcnn import * +from .mask_rcnn import * +from .retinanet import * +from .ssd import * +from .ssdlite import * diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91a0070f6449f54d3323aecfb805a9325a94dadc Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/_utils.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/_utils.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..85c38669787a7b596c8f3e65b0af3dc95be15559 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/_utils.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/anchor_utils.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/anchor_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06c1608cb0a0e841f405980fe4a7bcb3a9f161ec Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/anchor_utils.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/backbone_utils.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/backbone_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ee4045b59a2b7137f96c7c7ca657db3f03f9d55 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/backbone_utils.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/fcos.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/fcos.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3731a00a06cb0bd81f2251ac0143f9ff3b0bf24 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/fcos.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/keypoint_rcnn.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/keypoint_rcnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..289d754e5a27c651a573bd4aa53475acb8cd687f Binary files /dev/null 
and b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/keypoint_rcnn.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/mask_rcnn.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/mask_rcnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c8b8f698ef8fbcffd677c267d560eaf10138221 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/mask_rcnn.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/retinanet.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/retinanet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd0a7ee54644a4fb19b14626917516fb4d263ed3 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/retinanet.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/roi_heads.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/roi_heads.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77b0781785815d90242ce43abd56c3e683d448fc Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/roi_heads.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/ssd.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/ssd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8cba2f6e4b28d9e0cc1d08b1a1657955c9725f6 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/ssd.cpython-310.pyc differ diff --git 
a/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/ssdlite.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/ssdlite.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e82ddeaebc218a8e5dfc5a0e0d6ad8b8c3ea4f27 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/ssdlite.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/transform.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/transform.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..439fbf5d9ca2b7a2ef57506cb4d908c1612332b4 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/transform.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/detection/_utils.py b/vllm/lib/python3.10/site-packages/torchvision/models/detection/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..559db858ac32f3b9f157aff3c22da83abece2a73 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torchvision/models/detection/_utils.py @@ -0,0 +1,540 @@ +import math +from collections import OrderedDict +from typing import Dict, List, Optional, Tuple + +import torch +from torch import nn, Tensor +from torch.nn import functional as F +from torchvision.ops import complete_box_iou_loss, distance_box_iou_loss, FrozenBatchNorm2d, generalized_box_iou_loss + + +class BalancedPositiveNegativeSampler: + """ + This class samples batches, ensuring that they contain a fixed proportion of positives + """ + + def __init__(self, batch_size_per_image: int, positive_fraction: float) -> None: + """ + Args: + batch_size_per_image (int): number of elements to be selected per image + positive_fraction (float): percentage of positive elements per batch + """ + 
self.batch_size_per_image = batch_size_per_image + self.positive_fraction = positive_fraction + + def __call__(self, matched_idxs: List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]: + """ + Args: + matched_idxs: list of tensors containing -1, 0 or positive values. + Each tensor corresponds to a specific image. + -1 values are ignored, 0 are considered as negatives and > 0 as + positives. + + Returns: + pos_idx (list[tensor]) + neg_idx (list[tensor]) + + Returns two lists of binary masks for each image. + The first list contains the positive elements that were selected, + and the second list the negative example. + """ + pos_idx = [] + neg_idx = [] + for matched_idxs_per_image in matched_idxs: + positive = torch.where(matched_idxs_per_image >= 1)[0] + negative = torch.where(matched_idxs_per_image == 0)[0] + + num_pos = int(self.batch_size_per_image * self.positive_fraction) + # protect against not enough positive examples + num_pos = min(positive.numel(), num_pos) + num_neg = self.batch_size_per_image - num_pos + # protect against not enough negative examples + num_neg = min(negative.numel(), num_neg) + + # randomly select positive and negative examples + perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos] + perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg] + + pos_idx_per_image = positive[perm1] + neg_idx_per_image = negative[perm2] + + # create binary mask from indices + pos_idx_per_image_mask = torch.zeros_like(matched_idxs_per_image, dtype=torch.uint8) + neg_idx_per_image_mask = torch.zeros_like(matched_idxs_per_image, dtype=torch.uint8) + + pos_idx_per_image_mask[pos_idx_per_image] = 1 + neg_idx_per_image_mask[neg_idx_per_image] = 1 + + pos_idx.append(pos_idx_per_image_mask) + neg_idx.append(neg_idx_per_image_mask) + + return pos_idx, neg_idx + + +@torch.jit._script_if_tracing +def encode_boxes(reference_boxes: Tensor, proposals: Tensor, weights: Tensor) -> Tensor: + """ + Encode a set of proposals with respect 
to some + reference boxes + + Args: + reference_boxes (Tensor): reference boxes + proposals (Tensor): boxes to be encoded + weights (Tensor[4]): the weights for ``(x, y, w, h)`` + """ + + # perform some unpacking to make it JIT-fusion friendly + wx = weights[0] + wy = weights[1] + ww = weights[2] + wh = weights[3] + + proposals_x1 = proposals[:, 0].unsqueeze(1) + proposals_y1 = proposals[:, 1].unsqueeze(1) + proposals_x2 = proposals[:, 2].unsqueeze(1) + proposals_y2 = proposals[:, 3].unsqueeze(1) + + reference_boxes_x1 = reference_boxes[:, 0].unsqueeze(1) + reference_boxes_y1 = reference_boxes[:, 1].unsqueeze(1) + reference_boxes_x2 = reference_boxes[:, 2].unsqueeze(1) + reference_boxes_y2 = reference_boxes[:, 3].unsqueeze(1) + + # implementation starts here + ex_widths = proposals_x2 - proposals_x1 + ex_heights = proposals_y2 - proposals_y1 + ex_ctr_x = proposals_x1 + 0.5 * ex_widths + ex_ctr_y = proposals_y1 + 0.5 * ex_heights + + gt_widths = reference_boxes_x2 - reference_boxes_x1 + gt_heights = reference_boxes_y2 - reference_boxes_y1 + gt_ctr_x = reference_boxes_x1 + 0.5 * gt_widths + gt_ctr_y = reference_boxes_y1 + 0.5 * gt_heights + + targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths + targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights + targets_dw = ww * torch.log(gt_widths / ex_widths) + targets_dh = wh * torch.log(gt_heights / ex_heights) + + targets = torch.cat((targets_dx, targets_dy, targets_dw, targets_dh), dim=1) + return targets + + +class BoxCoder: + """ + This class encodes and decodes a set of bounding boxes into + the representation used for training the regressors. 
+ """ + + def __init__( + self, weights: Tuple[float, float, float, float], bbox_xform_clip: float = math.log(1000.0 / 16) + ) -> None: + """ + Args: + weights (4-element tuple) + bbox_xform_clip (float) + """ + self.weights = weights + self.bbox_xform_clip = bbox_xform_clip + + def encode(self, reference_boxes: List[Tensor], proposals: List[Tensor]) -> List[Tensor]: + boxes_per_image = [len(b) for b in reference_boxes] + reference_boxes = torch.cat(reference_boxes, dim=0) + proposals = torch.cat(proposals, dim=0) + targets = self.encode_single(reference_boxes, proposals) + return targets.split(boxes_per_image, 0) + + def encode_single(self, reference_boxes: Tensor, proposals: Tensor) -> Tensor: + """ + Encode a set of proposals with respect to some + reference boxes + + Args: + reference_boxes (Tensor): reference boxes + proposals (Tensor): boxes to be encoded + """ + dtype = reference_boxes.dtype + device = reference_boxes.device + weights = torch.as_tensor(self.weights, dtype=dtype, device=device) + targets = encode_boxes(reference_boxes, proposals, weights) + + return targets + + def decode(self, rel_codes: Tensor, boxes: List[Tensor]) -> Tensor: + torch._assert( + isinstance(boxes, (list, tuple)), + "This function expects boxes of type list or tuple.", + ) + torch._assert( + isinstance(rel_codes, torch.Tensor), + "This function expects rel_codes of type torch.Tensor.", + ) + boxes_per_image = [b.size(0) for b in boxes] + concat_boxes = torch.cat(boxes, dim=0) + box_sum = 0 + for val in boxes_per_image: + box_sum += val + if box_sum > 0: + rel_codes = rel_codes.reshape(box_sum, -1) + pred_boxes = self.decode_single(rel_codes, concat_boxes) + if box_sum > 0: + pred_boxes = pred_boxes.reshape(box_sum, -1, 4) + return pred_boxes + + def decode_single(self, rel_codes: Tensor, boxes: Tensor) -> Tensor: + """ + From a set of original boxes and encoded relative box offsets, + get the decoded boxes. 
+ + Args: + rel_codes (Tensor): encoded boxes + boxes (Tensor): reference boxes. + """ + + boxes = boxes.to(rel_codes.dtype) + + widths = boxes[:, 2] - boxes[:, 0] + heights = boxes[:, 3] - boxes[:, 1] + ctr_x = boxes[:, 0] + 0.5 * widths + ctr_y = boxes[:, 1] + 0.5 * heights + + wx, wy, ww, wh = self.weights + dx = rel_codes[:, 0::4] / wx + dy = rel_codes[:, 1::4] / wy + dw = rel_codes[:, 2::4] / ww + dh = rel_codes[:, 3::4] / wh + + # Prevent sending too large values into torch.exp() + dw = torch.clamp(dw, max=self.bbox_xform_clip) + dh = torch.clamp(dh, max=self.bbox_xform_clip) + + pred_ctr_x = dx * widths[:, None] + ctr_x[:, None] + pred_ctr_y = dy * heights[:, None] + ctr_y[:, None] + pred_w = torch.exp(dw) * widths[:, None] + pred_h = torch.exp(dh) * heights[:, None] + + # Distance from center to box's corner. + c_to_c_h = torch.tensor(0.5, dtype=pred_ctr_y.dtype, device=pred_h.device) * pred_h + c_to_c_w = torch.tensor(0.5, dtype=pred_ctr_x.dtype, device=pred_w.device) * pred_w + + pred_boxes1 = pred_ctr_x - c_to_c_w + pred_boxes2 = pred_ctr_y - c_to_c_h + pred_boxes3 = pred_ctr_x + c_to_c_w + pred_boxes4 = pred_ctr_y + c_to_c_h + pred_boxes = torch.stack((pred_boxes1, pred_boxes2, pred_boxes3, pred_boxes4), dim=2).flatten(1) + return pred_boxes + + +class BoxLinearCoder: + """ + The linear box-to-box transform defined in FCOS. The transformation is parameterized + by the distance from the center of (square) src box to 4 edges of the target box. + """ + + def __init__(self, normalize_by_size: bool = True) -> None: + """ + Args: + normalize_by_size (bool): normalize deltas by the size of src (anchor) boxes. 
+ """ + self.normalize_by_size = normalize_by_size + + def encode(self, reference_boxes: Tensor, proposals: Tensor) -> Tensor: + """ + Encode a set of proposals with respect to some reference boxes + + Args: + reference_boxes (Tensor): reference boxes + proposals (Tensor): boxes to be encoded + + Returns: + Tensor: the encoded relative box offsets that can be used to + decode the boxes. + + """ + + # get the center of reference_boxes + reference_boxes_ctr_x = 0.5 * (reference_boxes[..., 0] + reference_boxes[..., 2]) + reference_boxes_ctr_y = 0.5 * (reference_boxes[..., 1] + reference_boxes[..., 3]) + + # get box regression transformation deltas + target_l = reference_boxes_ctr_x - proposals[..., 0] + target_t = reference_boxes_ctr_y - proposals[..., 1] + target_r = proposals[..., 2] - reference_boxes_ctr_x + target_b = proposals[..., 3] - reference_boxes_ctr_y + + targets = torch.stack((target_l, target_t, target_r, target_b), dim=-1) + + if self.normalize_by_size: + reference_boxes_w = reference_boxes[..., 2] - reference_boxes[..., 0] + reference_boxes_h = reference_boxes[..., 3] - reference_boxes[..., 1] + reference_boxes_size = torch.stack( + (reference_boxes_w, reference_boxes_h, reference_boxes_w, reference_boxes_h), dim=-1 + ) + targets = targets / reference_boxes_size + return targets + + def decode(self, rel_codes: Tensor, boxes: Tensor) -> Tensor: + + """ + From a set of original boxes and encoded relative box offsets, + get the decoded boxes. + + Args: + rel_codes (Tensor): encoded boxes + boxes (Tensor): reference boxes. + + Returns: + Tensor: the predicted boxes with the encoded relative box offsets. + + .. note:: + This method assumes that ``rel_codes`` and ``boxes`` have same size for 0th dimension. i.e. ``len(rel_codes) == len(boxes)``. 
+ + """ + + boxes = boxes.to(dtype=rel_codes.dtype) + + ctr_x = 0.5 * (boxes[..., 0] + boxes[..., 2]) + ctr_y = 0.5 * (boxes[..., 1] + boxes[..., 3]) + + if self.normalize_by_size: + boxes_w = boxes[..., 2] - boxes[..., 0] + boxes_h = boxes[..., 3] - boxes[..., 1] + + list_box_size = torch.stack((boxes_w, boxes_h, boxes_w, boxes_h), dim=-1) + rel_codes = rel_codes * list_box_size + + pred_boxes1 = ctr_x - rel_codes[..., 0] + pred_boxes2 = ctr_y - rel_codes[..., 1] + pred_boxes3 = ctr_x + rel_codes[..., 2] + pred_boxes4 = ctr_y + rel_codes[..., 3] + + pred_boxes = torch.stack((pred_boxes1, pred_boxes2, pred_boxes3, pred_boxes4), dim=-1) + return pred_boxes + + +class Matcher: + """ + This class assigns to each predicted "element" (e.g., a box) a ground-truth + element. Each predicted element will have exactly zero or one matches; each + ground-truth element may be assigned to zero or more predicted elements. + + Matching is based on the MxN match_quality_matrix, that characterizes how well + each (ground-truth, predicted)-pair match. For example, if the elements are + boxes, the matrix may contain box IoU overlap values. + + The matcher returns a tensor of size N containing the index of the ground-truth + element m that matches to prediction n. If there is no match, a negative value + is returned. + """ + + BELOW_LOW_THRESHOLD = -1 + BETWEEN_THRESHOLDS = -2 + + __annotations__ = { + "BELOW_LOW_THRESHOLD": int, + "BETWEEN_THRESHOLDS": int, + } + + def __init__(self, high_threshold: float, low_threshold: float, allow_low_quality_matches: bool = False) -> None: + """ + Args: + high_threshold (float): quality values greater than or equal to + this value are candidate matches. 
+ low_threshold (float): a lower quality threshold used to stratify + matches into three levels: + 1) matches >= high_threshold + 2) BETWEEN_THRESHOLDS matches in [low_threshold, high_threshold) + 3) BELOW_LOW_THRESHOLD matches in [0, low_threshold) + allow_low_quality_matches (bool): if True, produce additional matches + for predictions that have only low-quality match candidates. See + set_low_quality_matches_ for more details. + """ + self.BELOW_LOW_THRESHOLD = -1 + self.BETWEEN_THRESHOLDS = -2 + torch._assert(low_threshold <= high_threshold, "low_threshold should be <= high_threshold") + self.high_threshold = high_threshold + self.low_threshold = low_threshold + self.allow_low_quality_matches = allow_low_quality_matches + + def __call__(self, match_quality_matrix: Tensor) -> Tensor: + """ + Args: + match_quality_matrix (Tensor[float]): an MxN tensor, containing the + pairwise quality between M ground-truth elements and N predicted elements. + + Returns: + matches (Tensor[int64]): an N tensor where N[i] is a matched gt in + [0, M - 1] or a negative value indicating that prediction i could not + be matched. 
+ """ + if match_quality_matrix.numel() == 0: + # empty targets or proposals not supported during training + if match_quality_matrix.shape[0] == 0: + raise ValueError("No ground-truth boxes available for one of the images during training") + else: + raise ValueError("No proposal boxes available for one of the images during training") + + # match_quality_matrix is M (gt) x N (predicted) + # Max over gt elements (dim 0) to find best gt candidate for each prediction + matched_vals, matches = match_quality_matrix.max(dim=0) + if self.allow_low_quality_matches: + all_matches = matches.clone() + else: + all_matches = None # type: ignore[assignment] + + # Assign candidate matches with low quality to negative (unassigned) values + below_low_threshold = matched_vals < self.low_threshold + between_thresholds = (matched_vals >= self.low_threshold) & (matched_vals < self.high_threshold) + matches[below_low_threshold] = self.BELOW_LOW_THRESHOLD + matches[between_thresholds] = self.BETWEEN_THRESHOLDS + + if self.allow_low_quality_matches: + if all_matches is None: + torch._assert(False, "all_matches should not be None") + else: + self.set_low_quality_matches_(matches, all_matches, match_quality_matrix) + + return matches + + def set_low_quality_matches_(self, matches: Tensor, all_matches: Tensor, match_quality_matrix: Tensor) -> None: + """ + Produce additional matches for predictions that have only low-quality matches. + Specifically, for each ground-truth find the set of predictions that have + maximum overlap with it (including ties); for each prediction in that set, if + it is unmatched, then match it to the ground-truth with which it has the highest + quality value. 
+ """ + # For each gt, find the prediction with which it has the highest quality + highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1) + # Find the highest quality match available, even if it is low, including ties + gt_pred_pairs_of_highest_quality = torch.where(match_quality_matrix == highest_quality_foreach_gt[:, None]) + # Example gt_pred_pairs_of_highest_quality: + # (tensor([0, 1, 1, 2, 2, 3, 3, 4, 5, 5]), + # tensor([39796, 32055, 32070, 39190, 40255, 40390, 41455, 45470, 45325, 46390])) + # Each element in the first tensor is a gt index, and each element in second tensor is a prediction index + # Note how gt items 1, 2, 3, and 5 each have two ties + + pred_inds_to_update = gt_pred_pairs_of_highest_quality[1] + matches[pred_inds_to_update] = all_matches[pred_inds_to_update] + + +class SSDMatcher(Matcher): + def __init__(self, threshold: float) -> None: + super().__init__(threshold, threshold, allow_low_quality_matches=False) + + def __call__(self, match_quality_matrix: Tensor) -> Tensor: + matches = super().__call__(match_quality_matrix) + + # For each gt, find the prediction with which it has the highest quality + _, highest_quality_pred_foreach_gt = match_quality_matrix.max(dim=1) + matches[highest_quality_pred_foreach_gt] = torch.arange( + highest_quality_pred_foreach_gt.size(0), dtype=torch.int64, device=highest_quality_pred_foreach_gt.device + ) + + return matches + + +def overwrite_eps(model: nn.Module, eps: float) -> None: + """ + This method overwrites the default eps values of all the + FrozenBatchNorm2d layers of the model with the provided value. + This is necessary to address the BC-breaking change introduced + by the bug-fix at pytorch/vision#2933. The overwrite is applied + only when the pretrained weights are loaded to maintain compatibility + with previous versions. + + Args: + model (nn.Module): The model on which we perform the overwrite. + eps (float): The new value of eps. 
+ """ + for module in model.modules(): + if isinstance(module, FrozenBatchNorm2d): + module.eps = eps + + +def retrieve_out_channels(model: nn.Module, size: Tuple[int, int]) -> List[int]: + """ + This method retrieves the number of output channels of a specific model. + + Args: + model (nn.Module): The model for which we estimate the out_channels. + It should return a single Tensor or an OrderedDict[Tensor]. + size (Tuple[int, int]): The size (wxh) of the input. + + Returns: + out_channels (List[int]): A list of the output channels of the model. + """ + in_training = model.training + model.eval() + + with torch.no_grad(): + # Use dummy data to retrieve the feature map sizes to avoid hard-coding their values + device = next(model.parameters()).device + tmp_img = torch.zeros((1, 3, size[1], size[0]), device=device) + features = model(tmp_img) + if isinstance(features, torch.Tensor): + features = OrderedDict([("0", features)]) + out_channels = [x.size(1) for x in features.values()] + + if in_training: + model.train() + + return out_channels + + +@torch.jit.unused +def _fake_cast_onnx(v: Tensor) -> int: + return v # type: ignore[return-value] + + +def _topk_min(input: Tensor, orig_kval: int, axis: int) -> int: + """ + ONNX spec requires the k-value to be less than or equal to the number of inputs along + provided dim. Certain models use the number of elements along a particular axis instead of K + if K exceeds the number of elements along that axis. Previously, python's min() function was + used to determine whether to use the provided k-value or the specified dim axis value. + + However, in cases where the model is being exported in tracing mode, python min() is + static causing the model to be traced incorrectly and eventually fail at the topk node. + In order to avoid this situation, in tracing mode, torch.min() is used instead. + + Args: + input (Tensor): The original input tensor. + orig_kval (int): The provided k-value. 
+ axis(int): Axis along which we retrieve the input size. + + Returns: + min_kval (int): Appropriately selected k-value. + """ + if not torch.jit.is_tracing(): + return min(orig_kval, input.size(axis)) + axis_dim_val = torch._shape_as_tensor(input)[axis].unsqueeze(0) + min_kval = torch.min(torch.cat((torch.tensor([orig_kval], dtype=axis_dim_val.dtype), axis_dim_val), 0)) + return _fake_cast_onnx(min_kval) + + +def _box_loss( + type: str, + box_coder: BoxCoder, + anchors_per_image: Tensor, + matched_gt_boxes_per_image: Tensor, + bbox_regression_per_image: Tensor, + cnf: Optional[Dict[str, float]] = None, +) -> Tensor: + torch._assert(type in ["l1", "smooth_l1", "ciou", "diou", "giou"], f"Unsupported loss: {type}") + + if type == "l1": + target_regression = box_coder.encode_single(matched_gt_boxes_per_image, anchors_per_image) + return F.l1_loss(bbox_regression_per_image, target_regression, reduction="sum") + elif type == "smooth_l1": + target_regression = box_coder.encode_single(matched_gt_boxes_per_image, anchors_per_image) + beta = cnf["beta"] if cnf is not None and "beta" in cnf else 1.0 + return F.smooth_l1_loss(bbox_regression_per_image, target_regression, reduction="sum", beta=beta) + else: + bbox_per_image = box_coder.decode_single(bbox_regression_per_image, anchors_per_image) + eps = cnf["eps"] if cnf is not None and "eps" in cnf else 1e-7 + if type == "ciou": + return complete_box_iou_loss(bbox_per_image, matched_gt_boxes_per_image, reduction="sum", eps=eps) + if type == "diou": + return distance_box_iou_loss(bbox_per_image, matched_gt_boxes_per_image, reduction="sum", eps=eps) + # otherwise giou + return generalized_box_iou_loss(bbox_per_image, matched_gt_boxes_per_image, reduction="sum", eps=eps) diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/detection/anchor_utils.py b/vllm/lib/python3.10/site-packages/torchvision/models/detection/anchor_utils.py new file mode 100644 index 
0000000000000000000000000000000000000000..253f6502a9b6344f5a3da239f2394179a256424e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torchvision/models/detection/anchor_utils.py @@ -0,0 +1,268 @@ +import math +from typing import List, Optional + +import torch +from torch import nn, Tensor + +from .image_list import ImageList + + +class AnchorGenerator(nn.Module): + """ + Module that generates anchors for a set of feature maps and + image sizes. + + The module support computing anchors at multiple sizes and aspect ratios + per feature map. This module assumes aspect ratio = height / width for + each anchor. + + sizes and aspect_ratios should have the same number of elements, and it should + correspond to the number of feature maps. + + sizes[i] and aspect_ratios[i] can have an arbitrary number of elements, + and AnchorGenerator will output a set of sizes[i] * aspect_ratios[i] anchors + per spatial location for feature map i. + + Args: + sizes (Tuple[Tuple[int]]): + aspect_ratios (Tuple[Tuple[float]]): + """ + + __annotations__ = { + "cell_anchors": List[torch.Tensor], + } + + def __init__( + self, + sizes=((128, 256, 512),), + aspect_ratios=((0.5, 1.0, 2.0),), + ): + super().__init__() + + if not isinstance(sizes[0], (list, tuple)): + # TODO change this + sizes = tuple((s,) for s in sizes) + if not isinstance(aspect_ratios[0], (list, tuple)): + aspect_ratios = (aspect_ratios,) * len(sizes) + + self.sizes = sizes + self.aspect_ratios = aspect_ratios + self.cell_anchors = [ + self.generate_anchors(size, aspect_ratio) for size, aspect_ratio in zip(sizes, aspect_ratios) + ] + + # TODO: https://github.com/pytorch/pytorch/issues/26792 + # For every (aspect_ratios, scales) combination, output a zero-centered anchor with those values. + # (scales, aspect_ratios) are usually an element of zip(self.scales, self.aspect_ratios) + # This method assumes aspect ratio = height / width for an anchor. 
+ def generate_anchors( + self, + scales: List[int], + aspect_ratios: List[float], + dtype: torch.dtype = torch.float32, + device: torch.device = torch.device("cpu"), + ) -> Tensor: + scales = torch.as_tensor(scales, dtype=dtype, device=device) + aspect_ratios = torch.as_tensor(aspect_ratios, dtype=dtype, device=device) + h_ratios = torch.sqrt(aspect_ratios) + w_ratios = 1 / h_ratios + + ws = (w_ratios[:, None] * scales[None, :]).view(-1) + hs = (h_ratios[:, None] * scales[None, :]).view(-1) + + base_anchors = torch.stack([-ws, -hs, ws, hs], dim=1) / 2 + return base_anchors.round() + + def set_cell_anchors(self, dtype: torch.dtype, device: torch.device): + self.cell_anchors = [cell_anchor.to(dtype=dtype, device=device) for cell_anchor in self.cell_anchors] + + def num_anchors_per_location(self) -> List[int]: + return [len(s) * len(a) for s, a in zip(self.sizes, self.aspect_ratios)] + + # For every combination of (a, (g, s), i) in (self.cell_anchors, zip(grid_sizes, strides), 0:2), + # output g[i] anchors that are s[i] distance apart in direction i, with the same dimensions as a. + def grid_anchors(self, grid_sizes: List[List[int]], strides: List[List[Tensor]]) -> List[Tensor]: + anchors = [] + cell_anchors = self.cell_anchors + torch._assert(cell_anchors is not None, "cell_anchors should not be None") + torch._assert( + len(grid_sizes) == len(strides) == len(cell_anchors), + "Anchors should be Tuple[Tuple[int]] because each feature " + "map could potentially have different sizes and aspect ratios. 
" + "There needs to be a match between the number of " + "feature maps passed and the number of sizes / aspect ratios specified.", + ) + + for size, stride, base_anchors in zip(grid_sizes, strides, cell_anchors): + grid_height, grid_width = size + stride_height, stride_width = stride + device = base_anchors.device + + # For output anchor, compute [x_center, y_center, x_center, y_center] + shifts_x = torch.arange(0, grid_width, dtype=torch.int32, device=device) * stride_width + shifts_y = torch.arange(0, grid_height, dtype=torch.int32, device=device) * stride_height + shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x, indexing="ij") + shift_x = shift_x.reshape(-1) + shift_y = shift_y.reshape(-1) + shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1) + + # For every (base anchor, output anchor) pair, + # offset each zero-centered base anchor by the center of the output anchor. + anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)) + + return anchors + + def forward(self, image_list: ImageList, feature_maps: List[Tensor]) -> List[Tensor]: + grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps] + image_size = image_list.tensors.shape[-2:] + dtype, device = feature_maps[0].dtype, feature_maps[0].device + strides = [ + [ + torch.empty((), dtype=torch.int64, device=device).fill_(image_size[0] // g[0]), + torch.empty((), dtype=torch.int64, device=device).fill_(image_size[1] // g[1]), + ] + for g in grid_sizes + ] + self.set_cell_anchors(dtype, device) + anchors_over_all_feature_maps = self.grid_anchors(grid_sizes, strides) + anchors: List[List[torch.Tensor]] = [] + for _ in range(len(image_list.image_sizes)): + anchors_in_image = [anchors_per_feature_map for anchors_per_feature_map in anchors_over_all_feature_maps] + anchors.append(anchors_in_image) + anchors = [torch.cat(anchors_per_image) for anchors_per_image in anchors] + return anchors + + +class DefaultBoxGenerator(nn.Module): + """ + This module 
generates the default boxes of SSD for a set of feature maps and image sizes.
+
+    Args:
+        aspect_ratios (List[List[int]]): A list with all the aspect ratios used in each feature map.
+        min_ratio (float): The minimum scale :math:`\text{s}_{\text{min}}` of the default boxes used in the estimation
+            of the scales of each feature map. It is used only if the ``scales`` parameter is not provided.
+        max_ratio (float): The maximum scale :math:`\text{s}_{\text{max}}` of the default boxes used in the estimation
+            of the scales of each feature map. It is used only if the ``scales`` parameter is not provided.
+        scales (List[float], optional): The scales of the default boxes. If not provided it will be estimated using
+            the ``min_ratio`` and ``max_ratio`` parameters.
+        steps (List[int], optional): It's a hyper-parameter that affects the tiling of default boxes. If not provided
+            it will be estimated from the data.
+        clip (bool): Whether the standardized values of default boxes should be clipped between 0 and 1. The clipping
+            is applied while the boxes are encoded in format ``(cx, cy, w, h)``. 
+ """ + + def __init__( + self, + aspect_ratios: List[List[int]], + min_ratio: float = 0.15, + max_ratio: float = 0.9, + scales: Optional[List[float]] = None, + steps: Optional[List[int]] = None, + clip: bool = True, + ): + super().__init__() + if steps is not None and len(aspect_ratios) != len(steps): + raise ValueError("aspect_ratios and steps should have the same length") + self.aspect_ratios = aspect_ratios + self.steps = steps + self.clip = clip + num_outputs = len(aspect_ratios) + + # Estimation of default boxes scales + if scales is None: + if num_outputs > 1: + range_ratio = max_ratio - min_ratio + self.scales = [min_ratio + range_ratio * k / (num_outputs - 1.0) for k in range(num_outputs)] + self.scales.append(1.0) + else: + self.scales = [min_ratio, max_ratio] + else: + self.scales = scales + + self._wh_pairs = self._generate_wh_pairs(num_outputs) + + def _generate_wh_pairs( + self, num_outputs: int, dtype: torch.dtype = torch.float32, device: torch.device = torch.device("cpu") + ) -> List[Tensor]: + _wh_pairs: List[Tensor] = [] + for k in range(num_outputs): + # Adding the 2 default width-height pairs for aspect ratio 1 and scale s'k + s_k = self.scales[k] + s_prime_k = math.sqrt(self.scales[k] * self.scales[k + 1]) + wh_pairs = [[s_k, s_k], [s_prime_k, s_prime_k]] + + # Adding 2 pairs for each aspect ratio of the feature map k + for ar in self.aspect_ratios[k]: + sq_ar = math.sqrt(ar) + w = self.scales[k] * sq_ar + h = self.scales[k] / sq_ar + wh_pairs.extend([[w, h], [h, w]]) + + _wh_pairs.append(torch.as_tensor(wh_pairs, dtype=dtype, device=device)) + return _wh_pairs + + def num_anchors_per_location(self) -> List[int]: + # Estimate num of anchors based on aspect ratios: 2 default boxes + 2 * ratios of feaure map. 
def num_anchors_per_location(self) -> List[int]:
    """Default boxes per spatial position: 2 squares + 2 per aspect ratio of the feature map."""
    return [2 + 2 * len(r) for r in self.aspect_ratios]


def _grid_default_boxes(
    self, grid_sizes: List[List[int]], image_size: List[int], dtype: torch.dtype = torch.float32
) -> Tensor:
    """All-level default boxes in normalized (cx, cy, w, h) form (SSD paper, page 6)."""
    per_level = []
    for k, f_k in enumerate(grid_sizes):
        # Tiling density: explicit steps when configured, else the grid itself.
        if self.steps is not None:
            x_f_k = image_size[1] / self.steps[k]
            y_f_k = image_size[0] / self.steps[k]
        else:
            y_f_k, x_f_k = f_k

        cx = ((torch.arange(0, f_k[1]) + 0.5) / x_f_k).to(dtype=dtype)
        cy = ((torch.arange(0, f_k[0]) + 0.5) / y_f_k).to(dtype=dtype)
        mesh_y, mesh_x = torch.meshgrid(cy, cx, indexing="ij")
        flat_x = mesh_x.reshape(-1)
        flat_y = mesh_y.reshape(-1)

        # One (cx, cy) row per default box at every cell.
        n_pairs = len(self._wh_pairs[k])
        centers = torch.stack((flat_x, flat_y) * n_pairs, dim=-1).reshape(-1, 2)
        # Clip (w, h) to [0, 1] while still in normalized coordinates, if requested.
        wh = self._wh_pairs[k].clamp(min=0, max=1) if self.clip else self._wh_pairs[k]
        sizes = wh.repeat((f_k[0] * f_k[1]), 1)

        per_level.append(torch.cat((centers, sizes), dim=1))

    return torch.cat(per_level, dim=0)


def __repr__(self) -> str:
    """Debug representation mirroring the constructor arguments."""
    return (
        f"{self.__class__.__name__}("
        f"aspect_ratios={self.aspect_ratios}"
        f", clip={self.clip}"
        f", scales={self.scales}"
        f", steps={self.steps}"
        ")"
    )
def forward(self, image_list: ImageList, feature_maps: List[Tensor]) -> List[Tensor]:
    """Return per-image default boxes in absolute (x1, y1, x2, y2) pixel coordinates."""
    grid_sizes = [fm.shape[-2:] for fm in feature_maps]
    image_size = image_list.tensors.shape[-2:]
    dtype, device = feature_maps[0].dtype, feature_maps[0].device
    normalized = self._grid_default_boxes(grid_sizes, image_size, dtype=dtype)
    normalized = normalized.to(device)

    # Scale factors from normalized coordinates to pixels, ordered (x, y).
    xy_scale = torch.tensor([image_size[1], image_size[0]], device=normalized.device)
    out = []
    for _ in image_list.image_sizes:
        boxes = normalized
        # (cx, cy, w, h) -> (x1, y1, x2, y2), scaled to the padded image size.
        boxes = torch.cat(
            (
                (boxes[:, :2] - 0.5 * boxes[:, 2:]) * xy_scale,
                (boxes[:, :2] + 0.5 * boxes[:, 2:]) * xy_scale,
            ),
            -1,
        )
        out.append(boxes)
    return out
def __init__(
    self,
    backbone: nn.Module,
    return_layers: Dict[str, str],
    in_channels_list: List[int],
    out_channels: int,
    extra_blocks: Optional[ExtraFPNBlock] = None,
    norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
    """Wrap ``backbone`` with an IntermediateLayerGetter and an FPN on top.

    ``return_layers`` names the backbone modules whose activations feed the
    FPN; ``in_channels_list`` gives their channel counts in the same order.
    ``out_channels`` is exposed as an attribute for downstream heads.
    """
    super().__init__()

    # Default extra block: an additional max-pool level above the FPN outputs.
    if extra_blocks is None:
        extra_blocks = LastLevelMaxPool()

    self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
    self.fpn = FeaturePyramidNetwork(
        in_channels_list=in_channels_list,
        out_channels=out_channels,
        extra_blocks=extra_blocks,
        norm_layer=norm_layer,
    )
    self.out_channels = out_channels


def forward(self, x: Tensor) -> Dict[str, Tensor]:
    """Run the backbone, then the FPN; returns an ordered dict of feature maps."""
    features = self.body(x)
    return self.fpn(features)
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: _get_enum_from_fn(resnet.__dict__[kwargs["backbone_name"]])["IMAGENET1K_V1"],
    ),
)
def resnet_fpn_backbone(
    *,
    backbone_name: str,
    weights: Optional[WeightsEnum],
    norm_layer: Callable[..., nn.Module] = misc_nn_ops.FrozenBatchNorm2d,
    trainable_layers: int = 3,
    returned_layers: Optional[List[int]] = None,
    extra_blocks: Optional[ExtraFPNBlock] = None,
) -> BackboneWithFPN:
    """Build a ResNet backbone with an FPN, freezing all but the top layers.

    Args:
        backbone_name (string): resnet architecture, e.g. 'resnet18' ... 'wide_resnet101_2'.
        weights (WeightsEnum, optional): pretrained weights for the backbone.
        norm_layer (callable): the default (FrozenBatchNorm2d) is recommended; see
            https://github.com/facebookresearch/maskrcnn-benchmark/issues/267
        trainable_layers (int): number of trainable (not frozen) layers, counted
            from the final block; valid range [0, 5], 5 meaning all trainable.
        returned_layers (list of int): which residual stages (in [1, 4]) to return;
            all four by default.
        extra_blocks (ExtraFPNBlock or None): optional extra op on the FPN outputs;
            defaults to a ``LastLevelMaxPool``.

    Example::

        >>> backbone = resnet_fpn_backbone(backbone_name='resnet50',
        ...                                weights=ResNet50_Weights.DEFAULT,
        ...                                trainable_layers=3)
        >>> output = backbone(torch.rand(1, 3, 64, 64))
        >>> # dict of 5 maps: '0'..'3' plus 'pool', each with 256 channels
    """
    backbone = resnet.__dict__[backbone_name](weights=weights, norm_layer=norm_layer)
    return _resnet_fpn_extractor(backbone, trainable_layers, returned_layers, extra_blocks)


def _resnet_fpn_extractor(
    backbone: resnet.ResNet,
    trainable_layers: int,
    returned_layers: Optional[List[int]] = None,
    extra_blocks: Optional[ExtraFPNBlock] = None,
    norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> BackboneWithFPN:
    """Freeze all but the top ``trainable_layers`` stages of ``backbone`` and attach an FPN."""
    # Stages are frozen bottom-up; 5 means nothing frozen (bn1 included in training).
    if not 0 <= trainable_layers <= 5:
        raise ValueError(f"Trainable layers should be in the range [0,5], got {trainable_layers}")
    layers_to_train = ["layer4", "layer3", "layer2", "layer1", "conv1"][:trainable_layers]
    if trainable_layers == 5:
        layers_to_train.append("bn1")
    for name, parameter in backbone.named_parameters():
        if not any(name.startswith(layer) for layer in layers_to_train):
            parameter.requires_grad_(False)

    if extra_blocks is None:
        extra_blocks = LastLevelMaxPool()

    if returned_layers is None:
        returned_layers = [1, 2, 3, 4]
    if min(returned_layers) <= 0 or max(returned_layers) >= 5:
        raise ValueError(f"Each returned layer should be in the range [1,4]. Got {returned_layers}")
    return_layers = {f"layer{k}": str(v) for v, k in enumerate(returned_layers)}

    # layer1's output width; each subsequent stage doubles the channel count.
    in_channels_stage2 = backbone.inplanes // 8
    in_channels_list = [in_channels_stage2 * 2 ** (i - 1) for i in returned_layers]
    out_channels = 256
    return BackboneWithFPN(
        backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks, norm_layer=norm_layer
    )
Got {returned_layers}") + return_layers = {f"layer{k}": str(v) for v, k in enumerate(returned_layers)} + + in_channels_stage2 = backbone.inplanes // 8 + in_channels_list = [in_channels_stage2 * 2 ** (i - 1) for i in returned_layers] + out_channels = 256 + return BackboneWithFPN( + backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks, norm_layer=norm_layer + ) + + +def _validate_trainable_layers( + is_trained: bool, + trainable_backbone_layers: Optional[int], + max_value: int, + default_value: int, +) -> int: + # don't freeze any layers if pretrained model or backbone is not used + if not is_trained: + if trainable_backbone_layers is not None: + warnings.warn( + "Changing trainable_backbone_layers has no effect if " + "neither pretrained nor pretrained_backbone have been set to True, " + f"falling back to trainable_backbone_layers={max_value} so that all layers are trainable" + ) + trainable_backbone_layers = max_value + + # by default freeze first blocks + if trainable_backbone_layers is None: + trainable_backbone_layers = default_value + if trainable_backbone_layers < 0 or trainable_backbone_layers > max_value: + raise ValueError( + f"Trainable backbone layers should be in the range [0,{max_value}], got {trainable_backbone_layers} " + ) + return trainable_backbone_layers + + +@handle_legacy_interface( + weights=( + "pretrained", + lambda kwargs: _get_enum_from_fn(mobilenet.__dict__[kwargs["backbone_name"]])["IMAGENET1K_V1"], + ), +) +def mobilenet_backbone( + *, + backbone_name: str, + weights: Optional[WeightsEnum], + fpn: bool, + norm_layer: Callable[..., nn.Module] = misc_nn_ops.FrozenBatchNorm2d, + trainable_layers: int = 2, + returned_layers: Optional[List[int]] = None, + extra_blocks: Optional[ExtraFPNBlock] = None, +) -> nn.Module: + backbone = mobilenet.__dict__[backbone_name](weights=weights, norm_layer=norm_layer) + return _mobilenet_extractor(backbone, fpn, trainable_layers, returned_layers, extra_blocks) + + +def 
def _mobilenet_extractor(
    backbone: Union[mobilenet.MobileNetV2, mobilenet.MobileNetV3],
    fpn: bool,
    trainable_layers: int,
    returned_layers: Optional[List[int]] = None,
    extra_blocks: Optional[ExtraFPNBlock] = None,
    norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> nn.Module:
    """Freeze the bottom of a MobileNet and expose it raw or behind an FPN."""
    backbone = backbone.features
    # Stage boundaries: C0 (first conv) and Cn (last block) are always included,
    # plus every block flagged as strided (the C1..Cn-1 locations).
    stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1]
    num_stages = len(stage_indices)

    # Everything below the first trainable stage gets frozen.
    if not 0 <= trainable_layers <= num_stages:
        raise ValueError(f"Trainable layers should be in the range [0,{num_stages}], got {trainable_layers} ")
    freeze_before = len(backbone) if trainable_layers == 0 else stage_indices[num_stages - trainable_layers]

    for module in backbone[:freeze_before]:
        for p in module.parameters():
            p.requires_grad_(False)

    out_channels = 256
    if not fpn:
        head = nn.Sequential(
            backbone,
            # depthwise linear combination of channels to reduce their size
            nn.Conv2d(backbone[-1].out_channels, out_channels, 1),
        )
        head.out_channels = out_channels  # type: ignore[assignment]
        return head

    if extra_blocks is None:
        extra_blocks = LastLevelMaxPool()

    if returned_layers is None:
        returned_layers = [num_stages - 2, num_stages - 1]
    if min(returned_layers) < 0 or max(returned_layers) >= num_stages:
        raise ValueError(f"Each returned layer should be in the range [0,{num_stages - 1}], got {returned_layers} ")
    return_layers = {f"{stage_indices[k]}": str(v) for v, k in enumerate(returned_layers)}

    in_channels_list = [backbone[stage_indices[i]].out_channels for i in returned_layers]
    return BackboneWithFPN(
        backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks, norm_layer=norm_layer
    )
class ImageList:
    """A batch of possibly differently-sized images padded into one tensor.

    Keeps the padded batch alongside the original (pre-padding) size of every
    image, so downstream code can undo the padding.

    Args:
        tensors (Tensor): the batched, padded image data.
        image_sizes (list[tuple[int, int]]): original size of each image.
    """

    def __init__(self, tensors: Tensor, image_sizes: List[Tuple[int, int]]) -> None:
        self.tensors = tensors
        self.image_sizes = image_sizes

    def to(self, device: torch.device) -> "ImageList":
        """Return a new ImageList with the pixel data moved to ``device``."""
        moved = self.tensors.to(device)
        return ImageList(moved, self.image_sizes)
class KeypointRCNN(FasterRCNN):
    """
    Implements Keypoint R-CNN: Faster R-CNN plus a keypoint-heatmap branch.

    Inputs are 0-1 ``[C, H, W]`` tensors (sizes may differ per image). In
    training mode the targets must provide ``boxes`` (``[N, 4]`` in
    ``[x1, y1, x2, y2]``), ``labels`` (``Int64Tensor[N]``) and ``keypoints``
    (``[N, K, 3]`` as ``[x, y, visibility]``), and a dict of losses is
    returned. In eval mode the post-processed detections are returned per
    image: ``boxes``, ``labels``, ``scores`` and ``keypoints``.

    The ``backbone`` must expose an ``out_channels`` attribute. All
    ``rpn_*`` / ``box_*`` / transform arguments are forwarded unchanged to
    :class:`FasterRCNN`. The keypoint branch is configured by:

    Args:
        keypoint_roi_pool (MultiScaleRoIAlign): crops/resizes feature maps at the
            detected boxes for the keypoint head; defaults to a 14x14 RoI-align
            over feature maps "0".."3".
        keypoint_head (nn.Module): processes the cropped features; defaults to
            eight 512-channel conv layers (``KeypointRCNNHeads``).
        keypoint_predictor (nn.Module): maps head output to per-keypoint heatmap
            logits; defaults to ``KeypointRCNNPredictor``.
        num_keypoints (int, optional): number of keypoints; defaults to 17 (the
            COCO skeleton). Must be None when ``keypoint_predictor`` is given.
    """

    def __init__(
        self,
        backbone,
        num_classes=None,
        # transform parameters
        min_size=None,
        max_size=1333,
        image_mean=None,
        image_std=None,
        # RPN parameters
        rpn_anchor_generator=None,
        rpn_head=None,
        rpn_pre_nms_top_n_train=2000,
        rpn_pre_nms_top_n_test=1000,
        rpn_post_nms_top_n_train=2000,
        rpn_post_nms_top_n_test=1000,
        rpn_nms_thresh=0.7,
        rpn_fg_iou_thresh=0.7,
        rpn_bg_iou_thresh=0.3,
        rpn_batch_size_per_image=256,
        rpn_positive_fraction=0.5,
        rpn_score_thresh=0.0,
        # Box parameters
        box_roi_pool=None,
        box_head=None,
        box_predictor=None,
        box_score_thresh=0.05,
        box_nms_thresh=0.5,
        box_detections_per_img=100,
        box_fg_iou_thresh=0.5,
        box_bg_iou_thresh=0.5,
        box_batch_size_per_image=512,
        box_positive_fraction=0.25,
        bbox_reg_weights=None,
        # keypoint parameters
        keypoint_roi_pool=None,
        keypoint_head=None,
        keypoint_predictor=None,
        num_keypoints=None,
        **kwargs,
    ):

        if not isinstance(keypoint_roi_pool, (MultiScaleRoIAlign, type(None))):
            raise TypeError(
                # BUG FIX: the f-prefix was missing, so "{type(keypoint_roi_pool)}"
                # was printed literally instead of the offending type.
                f"keypoint_roi_pool should be of type MultiScaleRoIAlign or None instead of {type(keypoint_roi_pool)}"
            )
        if min_size is None:
            min_size = (640, 672, 704, 736, 768, 800)

        if num_keypoints is not None:
            if keypoint_predictor is not None:
                raise ValueError("num_keypoints should be None when keypoint_predictor is specified")
        else:
            num_keypoints = 17

        out_channels = backbone.out_channels

        if keypoint_roi_pool is None:
            keypoint_roi_pool = MultiScaleRoIAlign(featmap_names=["0", "1", "2", "3"], output_size=14, sampling_ratio=2)

        if keypoint_head is None:
            keypoint_layers = tuple(512 for _ in range(8))
            keypoint_head = KeypointRCNNHeads(out_channels, keypoint_layers)

        if keypoint_predictor is None:
            keypoint_dim_reduced = 512  # == keypoint_layers[-1]
            keypoint_predictor = KeypointRCNNPredictor(keypoint_dim_reduced, num_keypoints)

        super().__init__(
            backbone,
            num_classes,
            # transform parameters
            min_size,
            max_size,
            image_mean,
            image_std,
            # RPN-specific parameters
            rpn_anchor_generator,
            rpn_head,
            rpn_pre_nms_top_n_train,
            rpn_pre_nms_top_n_test,
            rpn_post_nms_top_n_train,
            rpn_post_nms_top_n_test,
            rpn_nms_thresh,
            rpn_fg_iou_thresh,
            rpn_bg_iou_thresh,
            rpn_batch_size_per_image,
            rpn_positive_fraction,
            rpn_score_thresh,
            # Box parameters
            box_roi_pool,
            box_head,
            box_predictor,
            box_score_thresh,
            box_nms_thresh,
            box_detections_per_img,
            box_fg_iou_thresh,
            box_bg_iou_thresh,
            box_batch_size_per_image,
            box_positive_fraction,
            bbox_reg_weights,
            **kwargs,
        )

        self.roi_heads.keypoint_roi_pool = keypoint_roi_pool
        self.roi_heads.keypoint_head = keypoint_head
        self.roi_heads.keypoint_predictor = keypoint_predictor
class KeypointRCNNHeads(nn.Sequential):
    """Stack of 3x3 conv + ReLU layers applied to RoI-pooled keypoint features."""

    def __init__(self, in_channels, layers):
        blocks = []
        prev = in_channels
        for width in layers:
            blocks.append(nn.Conv2d(prev, width, 3, stride=1, padding=1))
            blocks.append(nn.ReLU(inplace=True))
            prev = width
        super().__init__(*blocks)
        # He initialization, matching the ReLU nonlinearity of every conv.
        for m in self.children():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
                nn.init.constant_(m.bias, 0)


class KeypointRCNNPredictor(nn.Module):
    """Transposed conv (x2) then bilinear upsample (x2): one heatmap per keypoint."""

    def __init__(self, in_channels, num_keypoints):
        super().__init__()
        deconv_kernel = 4
        self.kps_score_lowres = nn.ConvTranspose2d(
            in_channels,
            num_keypoints,
            deconv_kernel,
            stride=2,
            padding=deconv_kernel // 2 - 1,
        )
        nn.init.kaiming_normal_(self.kps_score_lowres.weight, mode="fan_out", nonlinearity="relu")
        nn.init.constant_(self.kps_score_lowres.bias, 0)
        self.up_scale = 2
        self.out_channels = num_keypoints

    def forward(self, x):
        lowres = self.kps_score_lowres(x)
        return torch.nn.functional.interpolate(
            lowres,
            scale_factor=float(self.up_scale),
            mode="bilinear",
            align_corners=False,
            recompute_scale_factor=False,
        )


# Metadata shared by every pretrained weight entry below.
_COMMON_META = {
    "categories": _COCO_PERSON_CATEGORIES,
    "keypoint_names": _COCO_PERSON_KEYPOINT_NAMES,
    "min_size": (1, 1),
}
+ """, + }, + ) + COCO_V1 = Weights( + url="https://download.pytorch.org/models/keypointrcnn_resnet50_fpn_coco-fc266e95.pth", + transforms=ObjectDetection, + meta={ + **_COMMON_META, + "num_params": 59137258, + "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#keypoint-r-cnn", + "_metrics": { + "COCO-val2017": { + "box_map": 54.6, + "kp_map": 65.0, + } + }, + "_ops": 137.42, + "_file_size": 226.054, + "_docs": """These weights were produced by following a similar training recipe as on the paper.""", + }, + ) + DEFAULT = COCO_V1 + + +@register_model() +@handle_legacy_interface( + weights=( + "pretrained", + lambda kwargs: KeypointRCNN_ResNet50_FPN_Weights.COCO_LEGACY + if kwargs["pretrained"] == "legacy" + else KeypointRCNN_ResNet50_FPN_Weights.COCO_V1, + ), + weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1), +) +def keypointrcnn_resnet50_fpn( + *, + weights: Optional[KeypointRCNN_ResNet50_FPN_Weights] = None, + progress: bool = True, + num_classes: Optional[int] = None, + num_keypoints: Optional[int] = None, + weights_backbone: Optional[ResNet50_Weights] = ResNet50_Weights.IMAGENET1K_V1, + trainable_backbone_layers: Optional[int] = None, + **kwargs: Any, +) -> KeypointRCNN: + """ + Constructs a Keypoint R-CNN model with a ResNet-50-FPN backbone. + + .. betastatus:: detection module + + Reference: `Mask R-CNN `__. + + The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each + image, and should be in ``0-1`` range. Different images can have different sizes. + + The behavior of the model changes depending on if it is in training or evaluation mode. + + During training, the model expects both the input tensors and targets (list of dictionary), + containing: + + - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. 
+ - labels (``Int64Tensor[N]``): the class label for each ground-truth box + - keypoints (``FloatTensor[N, K, 3]``): the ``K`` keypoints location for each of the ``N`` instances, in the + format ``[x, y, visibility]``, where ``visibility=0`` means that the keypoint is not visible. + + The model returns a ``Dict[Tensor]`` during training, containing the classification and regression + losses for both the RPN and the R-CNN, and the keypoint loss. + + During inference, the model requires only the input tensors, and returns the post-processed + predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as + follows, where ``N`` is the number of detected instances: + + - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (``Int64Tensor[N]``): the predicted labels for each instance + - scores (``Tensor[N]``): the scores or each instance + - keypoints (``FloatTensor[N, K, 3]``): the locations of the predicted keypoints, in ``[x, y, v]`` format. + + For more details on the output, you may refer to :ref:`instance_seg_output`. + + Keypoint R-CNN is exportable to ONNX for a fixed batch size with inputs images of fixed size. + + Example:: + + >>> model = torchvision.models.detection.keypointrcnn_resnet50_fpn(weights=KeypointRCNN_ResNet50_FPN_Weights.DEFAULT) + >>> model.eval() + >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] + >>> predictions = model(x) + >>> + >>> # optionally, if you want to export the model to ONNX: + >>> torch.onnx.export(model, x, "keypoint_rcnn.onnx", opset_version = 11) + + Args: + weights (:class:`~torchvision.models.detection.KeypointRCNN_ResNet50_FPN_Weights`, optional): The + pretrained weights to use. See + :class:`~torchvision.models.detection.KeypointRCNN_ResNet50_FPN_Weights` + below for more details, and possible values. By default, no + pre-trained weights are used. 
+ progress (bool): If True, displays a progress bar of the download to stderr + num_classes (int, optional): number of output classes of the model (including the background) + num_keypoints (int, optional): number of keypoints + weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The + pretrained weights for the backbone. + trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from final block. + Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. If ``None`` is + passed (the default) this value is set to 3. + + .. autoclass:: torchvision.models.detection.KeypointRCNN_ResNet50_FPN_Weights + :members: + """ + weights = KeypointRCNN_ResNet50_FPN_Weights.verify(weights) + weights_backbone = ResNet50_Weights.verify(weights_backbone) + + if weights is not None: + weights_backbone = None + num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"])) + num_keypoints = _ovewrite_value_param("num_keypoints", num_keypoints, len(weights.meta["keypoint_names"])) + else: + if num_classes is None: + num_classes = 2 + if num_keypoints is None: + num_keypoints = 17 + + is_trained = weights is not None or weights_backbone is not None + trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3) + norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d + + backbone = resnet50(weights=weights_backbone, progress=progress, norm_layer=norm_layer) + backbone = _resnet_fpn_extractor(backbone, trainable_backbone_layers) + model = KeypointRCNN(backbone, num_classes, num_keypoints=num_keypoints, **kwargs) + + if weights is not None: + model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True)) + if weights == KeypointRCNN_ResNet50_FPN_Weights.COCO_V1: + overwrite_eps(model, 0.0) + + return model diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/detection/mask_rcnn.py 
b/vllm/lib/python3.10/site-packages/torchvision/models/detection/mask_rcnn.py new file mode 100644 index 0000000000000000000000000000000000000000..cdabbfd26ca8bbefaefdb6fb8b098afac217b595 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torchvision/models/detection/mask_rcnn.py @@ -0,0 +1,590 @@ +from collections import OrderedDict +from typing import Any, Callable, Optional + +from torch import nn +from torchvision.ops import MultiScaleRoIAlign + +from ...ops import misc as misc_nn_ops +from ...transforms._presets import ObjectDetection +from .._api import register_model, Weights, WeightsEnum +from .._meta import _COCO_CATEGORIES +from .._utils import _ovewrite_value_param, handle_legacy_interface +from ..resnet import resnet50, ResNet50_Weights +from ._utils import overwrite_eps +from .backbone_utils import _resnet_fpn_extractor, _validate_trainable_layers +from .faster_rcnn import _default_anchorgen, FasterRCNN, FastRCNNConvFCHead, RPNHead + + +__all__ = [ + "MaskRCNN", + "MaskRCNN_ResNet50_FPN_Weights", + "MaskRCNN_ResNet50_FPN_V2_Weights", + "maskrcnn_resnet50_fpn", + "maskrcnn_resnet50_fpn_v2", +] + + +class MaskRCNN(FasterRCNN): + """ + Implements Mask R-CNN. + + The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each + image, and should be in 0-1 range. Different images can have different sizes. + + The behavior of the model changes depending on if it is in training or evaluation mode. + + During training, the model expects both the input tensors and targets (list of dictionary), + containing: + - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. 
+ - labels (Int64Tensor[N]): the class label for each ground-truth box + - masks (UInt8Tensor[N, H, W]): the segmentation binary masks for each instance + + The model returns a Dict[Tensor] during training, containing the classification and regression + losses for both the RPN and the R-CNN, and the mask loss. + + During inference, the model requires only the input tensors, and returns the post-processed + predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as + follows: + - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (Int64Tensor[N]): the predicted labels for each image + - scores (Tensor[N]): the scores or each prediction + - masks (UInt8Tensor[N, 1, H, W]): the predicted masks for each instance, in 0-1 range. In order to + obtain the final segmentation masks, the soft masks can be thresholded, generally + with a value of 0.5 (mask >= 0.5) + + Args: + backbone (nn.Module): the network used to compute the features for the model. + It should contain an out_channels attribute, which indicates the number of output + channels that each feature map has (and it should be the same for all feature maps). + The backbone should return a single Tensor or and OrderedDict[Tensor]. + num_classes (int): number of output classes of the model (including the background). + If box_predictor is specified, num_classes should be None. + min_size (int): Images are rescaled before feeding them to the backbone: + we attempt to preserve the aspect ratio and scale the shorter edge + to ``min_size``. If the resulting longer edge exceeds ``max_size``, + then downscale so that the longer edge does not exceed ``max_size``. + This may result in the shorter edge beeing lower than ``min_size``. + max_size (int): See ``min_size``. + image_mean (Tuple[float, float, float]): mean values used for input normalization. 
+ They are generally the mean values of the dataset on which the backbone has been trained + on + image_std (Tuple[float, float, float]): std values used for input normalization. + They are generally the std values of the dataset on which the backbone has been trained on + rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature + maps. + rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN + rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training + rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing + rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training + rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing + rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals + rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be + considered as positive during training of the RPN. + rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be + considered as negative during training of the RPN. + rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN + for computing the loss + rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training + of the RPN + rpn_score_thresh (float): only return proposals with an objectness score greater than rpn_score_thresh + box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in + the locations indicated by the bounding boxes + box_head (nn.Module): module that takes the cropped feature maps as input + box_predictor (nn.Module): module that takes the output of box_head and returns the + classification logits and box regression deltas. 
+ box_score_thresh (float): during inference, only return proposals with a classification score + greater than box_score_thresh + box_nms_thresh (float): NMS threshold for the prediction head. Used during inference + box_detections_per_img (int): maximum number of detections per image, for all classes. + box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be + considered as positive during training of the classification head + box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be + considered as negative during training of the classification head + box_batch_size_per_image (int): number of proposals that are sampled during training of the + classification head + box_positive_fraction (float): proportion of positive proposals in a mini-batch during training + of the classification head + bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the + bounding boxes + mask_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in + the locations indicated by the bounding boxes, which will be used for the mask head. + mask_head (nn.Module): module that takes the cropped feature maps as input + mask_predictor (nn.Module): module that takes the output of the mask_head and returns the + segmentation mask logits + + Example:: + + >>> import torch + >>> import torchvision + >>> from torchvision.models.detection import MaskRCNN + >>> from torchvision.models.detection.anchor_utils import AnchorGenerator + >>> + >>> # load a pre-trained model for classification and return + >>> # only the features + >>> backbone = torchvision.models.mobilenet_v2(weights=MobileNet_V2_Weights.DEFAULT).features + >>> # MaskRCNN needs to know the number of + >>> # output channels in a backbone. 
For mobilenet_v2, it's 1280 + >>> # so we need to add it here, + >>> backbone.out_channels = 1280 + >>> + >>> # let's make the RPN generate 5 x 3 anchors per spatial + >>> # location, with 5 different sizes and 3 different aspect + >>> # ratios. We have a Tuple[Tuple[int]] because each feature + >>> # map could potentially have different sizes and + >>> # aspect ratios + >>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), + >>> aspect_ratios=((0.5, 1.0, 2.0),)) + >>> + >>> # let's define what are the feature maps that we will + >>> # use to perform the region of interest cropping, as well as + >>> # the size of the crop after rescaling. + >>> # if your backbone returns a Tensor, featmap_names is expected to + >>> # be ['0']. More generally, the backbone should return an + >>> # OrderedDict[Tensor], and in featmap_names you can choose which + >>> # feature maps to use. + >>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'], + >>> output_size=7, + >>> sampling_ratio=2) + >>> + >>> mask_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'], + >>> output_size=14, + >>> sampling_ratio=2) + >>> # put the pieces together inside a MaskRCNN model + >>> model = MaskRCNN(backbone, + >>> num_classes=2, + >>> rpn_anchor_generator=anchor_generator, + >>> box_roi_pool=roi_pooler, + >>> mask_roi_pool=mask_roi_pooler) + >>> model.eval() + >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] + >>> predictions = model(x) + """ + + def __init__( + self, + backbone, + num_classes=None, + # transform parameters + min_size=800, + max_size=1333, + image_mean=None, + image_std=None, + # RPN parameters + rpn_anchor_generator=None, + rpn_head=None, + rpn_pre_nms_top_n_train=2000, + rpn_pre_nms_top_n_test=1000, + rpn_post_nms_top_n_train=2000, + rpn_post_nms_top_n_test=1000, + rpn_nms_thresh=0.7, + rpn_fg_iou_thresh=0.7, + rpn_bg_iou_thresh=0.3, + rpn_batch_size_per_image=256, + rpn_positive_fraction=0.5, + rpn_score_thresh=0.0, + # 
Box parameters + box_roi_pool=None, + box_head=None, + box_predictor=None, + box_score_thresh=0.05, + box_nms_thresh=0.5, + box_detections_per_img=100, + box_fg_iou_thresh=0.5, + box_bg_iou_thresh=0.5, + box_batch_size_per_image=512, + box_positive_fraction=0.25, + bbox_reg_weights=None, + # Mask parameters + mask_roi_pool=None, + mask_head=None, + mask_predictor=None, + **kwargs, + ): + + if not isinstance(mask_roi_pool, (MultiScaleRoIAlign, type(None))): + raise TypeError( + f"mask_roi_pool should be of type MultiScaleRoIAlign or None instead of {type(mask_roi_pool)}" + ) + + if num_classes is not None: + if mask_predictor is not None: + raise ValueError("num_classes should be None when mask_predictor is specified") + + out_channels = backbone.out_channels + + if mask_roi_pool is None: + mask_roi_pool = MultiScaleRoIAlign(featmap_names=["0", "1", "2", "3"], output_size=14, sampling_ratio=2) + + if mask_head is None: + mask_layers = (256, 256, 256, 256) + mask_dilation = 1 + mask_head = MaskRCNNHeads(out_channels, mask_layers, mask_dilation) + + if mask_predictor is None: + mask_predictor_in_channels = 256 # == mask_layers[-1] + mask_dim_reduced = 256 + mask_predictor = MaskRCNNPredictor(mask_predictor_in_channels, mask_dim_reduced, num_classes) + + super().__init__( + backbone, + num_classes, + # transform parameters + min_size, + max_size, + image_mean, + image_std, + # RPN-specific parameters + rpn_anchor_generator, + rpn_head, + rpn_pre_nms_top_n_train, + rpn_pre_nms_top_n_test, + rpn_post_nms_top_n_train, + rpn_post_nms_top_n_test, + rpn_nms_thresh, + rpn_fg_iou_thresh, + rpn_bg_iou_thresh, + rpn_batch_size_per_image, + rpn_positive_fraction, + rpn_score_thresh, + # Box parameters + box_roi_pool, + box_head, + box_predictor, + box_score_thresh, + box_nms_thresh, + box_detections_per_img, + box_fg_iou_thresh, + box_bg_iou_thresh, + box_batch_size_per_image, + box_positive_fraction, + bbox_reg_weights, + **kwargs, + ) + + self.roi_heads.mask_roi_pool = 
mask_roi_pool + self.roi_heads.mask_head = mask_head + self.roi_heads.mask_predictor = mask_predictor + + +class MaskRCNNHeads(nn.Sequential): + _version = 2 + + def __init__(self, in_channels, layers, dilation, norm_layer: Optional[Callable[..., nn.Module]] = None): + """ + Args: + in_channels (int): number of input channels + layers (list): feature dimensions of each FCN layer + dilation (int): dilation rate of kernel + norm_layer (callable, optional): Module specifying the normalization layer to use. Default: None + """ + blocks = [] + next_feature = in_channels + for layer_features in layers: + blocks.append( + misc_nn_ops.Conv2dNormActivation( + next_feature, + layer_features, + kernel_size=3, + stride=1, + padding=dilation, + dilation=dilation, + norm_layer=norm_layer, + ) + ) + next_feature = layer_features + + super().__init__(*blocks) + for layer in self.modules(): + if isinstance(layer, nn.Conv2d): + nn.init.kaiming_normal_(layer.weight, mode="fan_out", nonlinearity="relu") + if layer.bias is not None: + nn.init.zeros_(layer.bias) + + def _load_from_state_dict( + self, + state_dict, + prefix, + local_metadata, + strict, + missing_keys, + unexpected_keys, + error_msgs, + ): + version = local_metadata.get("version", None) + + if version is None or version < 2: + num_blocks = len(self) + for i in range(num_blocks): + for type in ["weight", "bias"]: + old_key = f"{prefix}mask_fcn{i+1}.{type}" + new_key = f"{prefix}{i}.0.{type}" + if old_key in state_dict: + state_dict[new_key] = state_dict.pop(old_key) + + super()._load_from_state_dict( + state_dict, + prefix, + local_metadata, + strict, + missing_keys, + unexpected_keys, + error_msgs, + ) + + +class MaskRCNNPredictor(nn.Sequential): + def __init__(self, in_channels, dim_reduced, num_classes): + super().__init__( + OrderedDict( + [ + ("conv5_mask", nn.ConvTranspose2d(in_channels, dim_reduced, 2, 2, 0)), + ("relu", nn.ReLU(inplace=True)), + ("mask_fcn_logits", nn.Conv2d(dim_reduced, num_classes, 1, 1, 0)), + ] 
+ ) + ) + + for name, param in self.named_parameters(): + if "weight" in name: + nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu") + # elif "bias" in name: + # nn.init.constant_(param, 0) + + +_COMMON_META = { + "categories": _COCO_CATEGORIES, + "min_size": (1, 1), +} + + +class MaskRCNN_ResNet50_FPN_Weights(WeightsEnum): + COCO_V1 = Weights( + url="https://download.pytorch.org/models/maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth", + transforms=ObjectDetection, + meta={ + **_COMMON_META, + "num_params": 44401393, + "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#mask-r-cnn", + "_metrics": { + "COCO-val2017": { + "box_map": 37.9, + "mask_map": 34.6, + } + }, + "_ops": 134.38, + "_file_size": 169.84, + "_docs": """These weights were produced by following a similar training recipe as on the paper.""", + }, + ) + DEFAULT = COCO_V1 + + +class MaskRCNN_ResNet50_FPN_V2_Weights(WeightsEnum): + COCO_V1 = Weights( + url="https://download.pytorch.org/models/maskrcnn_resnet50_fpn_v2_coco-73cbd019.pth", + transforms=ObjectDetection, + meta={ + **_COMMON_META, + "num_params": 46359409, + "recipe": "https://github.com/pytorch/vision/pull/5773", + "_metrics": { + "COCO-val2017": { + "box_map": 47.4, + "mask_map": 41.8, + } + }, + "_ops": 333.577, + "_file_size": 177.219, + "_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""", + }, + ) + DEFAULT = COCO_V1 + + +@register_model() +@handle_legacy_interface( + weights=("pretrained", MaskRCNN_ResNet50_FPN_Weights.COCO_V1), + weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1), +) +def maskrcnn_resnet50_fpn( + *, + weights: Optional[MaskRCNN_ResNet50_FPN_Weights] = None, + progress: bool = True, + num_classes: Optional[int] = None, + weights_backbone: Optional[ResNet50_Weights] = ResNet50_Weights.IMAGENET1K_V1, + trainable_backbone_layers: Optional[int] = None, + **kwargs: Any, +) -> MaskRCNN: + """Mask R-CNN model with a 
ResNet-50-FPN backbone from the `Mask R-CNN + `_ paper. + + .. betastatus:: detection module + + The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each + image, and should be in ``0-1`` range. Different images can have different sizes. + + The behavior of the model changes depending on if it is in training or evaluation mode. + + During training, the model expects both the input tensors and targets (list of dictionary), + containing: + + - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (``Int64Tensor[N]``): the class label for each ground-truth box + - masks (``UInt8Tensor[N, H, W]``): the segmentation binary masks for each instance + + The model returns a ``Dict[Tensor]`` during training, containing the classification and regression + losses for both the RPN and the R-CNN, and the mask loss. + + During inference, the model requires only the input tensors, and returns the post-processed + predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as + follows, where ``N`` is the number of detected instances: + + - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (``Int64Tensor[N]``): the predicted labels for each instance + - scores (``Tensor[N]``): the scores or each instance + - masks (``UInt8Tensor[N, 1, H, W]``): the predicted masks for each instance, in ``0-1`` range. In order to + obtain the final segmentation masks, the soft masks can be thresholded, generally + with a value of 0.5 (``mask >= 0.5``) + + For more details on the output and on how to plot the masks, you may refer to :ref:`instance_seg_output`. + + Mask R-CNN is exportable to ONNX for a fixed batch size with inputs images of fixed size. 
+ + Example:: + + >>> model = torchvision.models.detection.maskrcnn_resnet50_fpn(weights=MaskRCNN_ResNet50_FPN_Weights.DEFAULT) + >>> model.eval() + >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] + >>> predictions = model(x) + >>> + >>> # optionally, if you want to export the model to ONNX: + >>> torch.onnx.export(model, x, "mask_rcnn.onnx", opset_version = 11) + + Args: + weights (:class:`~torchvision.models.detection.MaskRCNN_ResNet50_FPN_Weights`, optional): The + pretrained weights to use. See + :class:`~torchvision.models.detection.MaskRCNN_ResNet50_FPN_Weights` below for + more details, and possible values. By default, no pre-trained + weights are used. + progress (bool, optional): If True, displays a progress bar of the + download to stderr. Default is True. + num_classes (int, optional): number of output classes of the model (including the background) + weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The + pretrained weights for the backbone. + trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from + final block. Valid values are between 0 and 5, with 5 meaning all backbone layers are + trainable. If ``None`` is passed (the default) this value is set to 3. + **kwargs: parameters passed to the ``torchvision.models.detection.mask_rcnn.MaskRCNN`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. 
autoclass:: torchvision.models.detection.MaskRCNN_ResNet50_FPN_Weights + :members: + """ + weights = MaskRCNN_ResNet50_FPN_Weights.verify(weights) + weights_backbone = ResNet50_Weights.verify(weights_backbone) + + if weights is not None: + weights_backbone = None + num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"])) + elif num_classes is None: + num_classes = 91 + + is_trained = weights is not None or weights_backbone is not None + trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3) + norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d + + backbone = resnet50(weights=weights_backbone, progress=progress, norm_layer=norm_layer) + backbone = _resnet_fpn_extractor(backbone, trainable_backbone_layers) + model = MaskRCNN(backbone, num_classes=num_classes, **kwargs) + + if weights is not None: + model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True)) + if weights == MaskRCNN_ResNet50_FPN_Weights.COCO_V1: + overwrite_eps(model, 0.0) + + return model + + +@register_model() +@handle_legacy_interface( + weights=("pretrained", MaskRCNN_ResNet50_FPN_V2_Weights.COCO_V1), + weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1), +) +def maskrcnn_resnet50_fpn_v2( + *, + weights: Optional[MaskRCNN_ResNet50_FPN_V2_Weights] = None, + progress: bool = True, + num_classes: Optional[int] = None, + weights_backbone: Optional[ResNet50_Weights] = None, + trainable_backbone_layers: Optional[int] = None, + **kwargs: Any, +) -> MaskRCNN: + """Improved Mask R-CNN model with a ResNet-50-FPN backbone from the `Benchmarking Detection Transfer + Learning with Vision Transformers `_ paper. + + .. betastatus:: detection module + + :func:`~torchvision.models.detection.maskrcnn_resnet50_fpn` for more details. 
+ + Args: + weights (:class:`~torchvision.models.detection.MaskRCNN_ResNet50_FPN_V2_Weights`, optional): The + pretrained weights to use. See + :class:`~torchvision.models.detection.MaskRCNN_ResNet50_FPN_V2_Weights` below for + more details, and possible values. By default, no pre-trained + weights are used. + progress (bool, optional): If True, displays a progress bar of the + download to stderr. Default is True. + num_classes (int, optional): number of output classes of the model (including the background) + weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The + pretrained weights for the backbone. + trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from + final block. Valid values are between 0 and 5, with 5 meaning all backbone layers are + trainable. If ``None`` is passed (the default) this value is set to 3. + **kwargs: parameters passed to the ``torchvision.models.detection.mask_rcnn.MaskRCNN`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. 
autoclass:: torchvision.models.detection.MaskRCNN_ResNet50_FPN_V2_Weights + :members: + """ + weights = MaskRCNN_ResNet50_FPN_V2_Weights.verify(weights) + weights_backbone = ResNet50_Weights.verify(weights_backbone) + + if weights is not None: + weights_backbone = None + num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"])) + elif num_classes is None: + num_classes = 91 + + is_trained = weights is not None or weights_backbone is not None + trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3) + + backbone = resnet50(weights=weights_backbone, progress=progress) + backbone = _resnet_fpn_extractor(backbone, trainable_backbone_layers, norm_layer=nn.BatchNorm2d) + rpn_anchor_generator = _default_anchorgen() + rpn_head = RPNHead(backbone.out_channels, rpn_anchor_generator.num_anchors_per_location()[0], conv_depth=2) + box_head = FastRCNNConvFCHead( + (backbone.out_channels, 7, 7), [256, 256, 256, 256], [1024], norm_layer=nn.BatchNorm2d + ) + mask_head = MaskRCNNHeads(backbone.out_channels, [256, 256, 256, 256], 1, norm_layer=nn.BatchNorm2d) + model = MaskRCNN( + backbone, + num_classes=num_classes, + rpn_anchor_generator=rpn_anchor_generator, + rpn_head=rpn_head, + box_head=box_head, + mask_head=mask_head, + **kwargs, + ) + + if weights is not None: + model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True)) + + return model diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/detection/roi_heads.py b/vllm/lib/python3.10/site-packages/torchvision/models/detection/roi_heads.py new file mode 100644 index 0000000000000000000000000000000000000000..51b210cb6f368c1f4914ffe99287efef6057cba4 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torchvision/models/detection/roi_heads.py @@ -0,0 +1,876 @@ +from typing import Dict, List, Optional, Tuple + +import torch +import torch.nn.functional as F +import torchvision +from torch import nn, Tensor +from 
torchvision.ops import boxes as box_ops, roi_align + +from . import _utils as det_utils + + +def fastrcnn_loss(class_logits, box_regression, labels, regression_targets): + # type: (Tensor, Tensor, List[Tensor], List[Tensor]) -> Tuple[Tensor, Tensor] + """ + Computes the loss for Faster R-CNN. + + Args: + class_logits (Tensor) + box_regression (Tensor) + labels (list[BoxList]) + regression_targets (Tensor) + + Returns: + classification_loss (Tensor) + box_loss (Tensor) + """ + + labels = torch.cat(labels, dim=0) + regression_targets = torch.cat(regression_targets, dim=0) + + classification_loss = F.cross_entropy(class_logits, labels) + + # get indices that correspond to the regression targets for + # the corresponding ground truth labels, to be used with + # advanced indexing + sampled_pos_inds_subset = torch.where(labels > 0)[0] + labels_pos = labels[sampled_pos_inds_subset] + N, num_classes = class_logits.shape + box_regression = box_regression.reshape(N, box_regression.size(-1) // 4, 4) + + box_loss = F.smooth_l1_loss( + box_regression[sampled_pos_inds_subset, labels_pos], + regression_targets[sampled_pos_inds_subset], + beta=1 / 9, + reduction="sum", + ) + box_loss = box_loss / labels.numel() + + return classification_loss, box_loss + + +def maskrcnn_inference(x, labels): + # type: (Tensor, List[Tensor]) -> List[Tensor] + """ + From the results of the CNN, post process the masks + by taking the mask corresponding to the class with max + probability (which are of fixed size and directly output + by the CNN) and return the masks in the mask field of the BoxList. 
+ + Args: + x (Tensor): the mask logits + labels (list[BoxList]): bounding boxes that are used as + reference, one for ech image + + Returns: + results (list[BoxList]): one BoxList for each image, containing + the extra field mask + """ + mask_prob = x.sigmoid() + + # select masks corresponding to the predicted classes + num_masks = x.shape[0] + boxes_per_image = [label.shape[0] for label in labels] + labels = torch.cat(labels) + index = torch.arange(num_masks, device=labels.device) + mask_prob = mask_prob[index, labels][:, None] + mask_prob = mask_prob.split(boxes_per_image, dim=0) + + return mask_prob + + +def project_masks_on_boxes(gt_masks, boxes, matched_idxs, M): + # type: (Tensor, Tensor, Tensor, int) -> Tensor + """ + Given segmentation masks and the bounding boxes corresponding + to the location of the masks in the image, this function + crops and resizes the masks in the position defined by the + boxes. This prepares the masks for them to be fed to the + loss computation as the targets. 
+ """ + matched_idxs = matched_idxs.to(boxes) + rois = torch.cat([matched_idxs[:, None], boxes], dim=1) + gt_masks = gt_masks[:, None].to(rois) + return roi_align(gt_masks, rois, (M, M), 1.0)[:, 0] + + +def maskrcnn_loss(mask_logits, proposals, gt_masks, gt_labels, mask_matched_idxs): + # type: (Tensor, List[Tensor], List[Tensor], List[Tensor], List[Tensor]) -> Tensor + """ + Args: + proposals (list[BoxList]) + mask_logits (Tensor) + targets (list[BoxList]) + + Return: + mask_loss (Tensor): scalar tensor containing the loss + """ + + discretization_size = mask_logits.shape[-1] + labels = [gt_label[idxs] for gt_label, idxs in zip(gt_labels, mask_matched_idxs)] + mask_targets = [ + project_masks_on_boxes(m, p, i, discretization_size) for m, p, i in zip(gt_masks, proposals, mask_matched_idxs) + ] + + labels = torch.cat(labels, dim=0) + mask_targets = torch.cat(mask_targets, dim=0) + + # torch.mean (in binary_cross_entropy_with_logits) doesn't + # accept empty tensors, so handle it separately + if mask_targets.numel() == 0: + return mask_logits.sum() * 0 + + mask_loss = F.binary_cross_entropy_with_logits( + mask_logits[torch.arange(labels.shape[0], device=labels.device), labels], mask_targets + ) + return mask_loss + + +def keypoints_to_heatmap(keypoints, rois, heatmap_size): + # type: (Tensor, Tensor, int) -> Tuple[Tensor, Tensor] + offset_x = rois[:, 0] + offset_y = rois[:, 1] + scale_x = heatmap_size / (rois[:, 2] - rois[:, 0]) + scale_y = heatmap_size / (rois[:, 3] - rois[:, 1]) + + offset_x = offset_x[:, None] + offset_y = offset_y[:, None] + scale_x = scale_x[:, None] + scale_y = scale_y[:, None] + + x = keypoints[..., 0] + y = keypoints[..., 1] + + x_boundary_inds = x == rois[:, 2][:, None] + y_boundary_inds = y == rois[:, 3][:, None] + + x = (x - offset_x) * scale_x + x = x.floor().long() + y = (y - offset_y) * scale_y + y = y.floor().long() + + x[x_boundary_inds] = heatmap_size - 1 + y[y_boundary_inds] = heatmap_size - 1 + + valid_loc = (x >= 0) & (y >= 0) & 
(x < heatmap_size) & (y < heatmap_size) + vis = keypoints[..., 2] > 0 + valid = (valid_loc & vis).long() + + lin_ind = y * heatmap_size + x + heatmaps = lin_ind * valid + + return heatmaps, valid + + +def _onnx_heatmaps_to_keypoints( + maps, maps_i, roi_map_width, roi_map_height, widths_i, heights_i, offset_x_i, offset_y_i +): + num_keypoints = torch.scalar_tensor(maps.size(1), dtype=torch.int64) + + width_correction = widths_i / roi_map_width + height_correction = heights_i / roi_map_height + + roi_map = F.interpolate( + maps_i[:, None], size=(int(roi_map_height), int(roi_map_width)), mode="bicubic", align_corners=False + )[:, 0] + + w = torch.scalar_tensor(roi_map.size(2), dtype=torch.int64) + pos = roi_map.reshape(num_keypoints, -1).argmax(dim=1) + + x_int = pos % w + y_int = (pos - x_int) // w + + x = (torch.tensor(0.5, dtype=torch.float32) + x_int.to(dtype=torch.float32)) * width_correction.to( + dtype=torch.float32 + ) + y = (torch.tensor(0.5, dtype=torch.float32) + y_int.to(dtype=torch.float32)) * height_correction.to( + dtype=torch.float32 + ) + + xy_preds_i_0 = x + offset_x_i.to(dtype=torch.float32) + xy_preds_i_1 = y + offset_y_i.to(dtype=torch.float32) + xy_preds_i_2 = torch.ones(xy_preds_i_1.shape, dtype=torch.float32) + xy_preds_i = torch.stack( + [ + xy_preds_i_0.to(dtype=torch.float32), + xy_preds_i_1.to(dtype=torch.float32), + xy_preds_i_2.to(dtype=torch.float32), + ], + 0, + ) + + # TODO: simplify when indexing without rank will be supported by ONNX + base = num_keypoints * num_keypoints + num_keypoints + 1 + ind = torch.arange(num_keypoints) + ind = ind.to(dtype=torch.int64) * base + end_scores_i = ( + roi_map.index_select(1, y_int.to(dtype=torch.int64)) + .index_select(2, x_int.to(dtype=torch.int64)) + .view(-1) + .index_select(0, ind.to(dtype=torch.int64)) + ) + + return xy_preds_i, end_scores_i + + +@torch.jit._script_if_tracing +def _onnx_heatmaps_to_keypoints_loop( + maps, rois, widths_ceil, heights_ceil, widths, heights, offset_x, offset_y, 
@torch.jit._script_if_tracing
def _onnx_heatmaps_to_keypoints_loop(
    maps, rois, widths_ceil, heights_ceil, widths, heights, offset_x, offset_y, num_keypoints
):
    """ONNX-traceable loop over RoIs: accumulates per-RoI keypoint
    predictions and scores by concatenation (ONNX cannot index-assign).

    Returns ``(xy_preds, end_scores)`` with shapes
    (#rois, 3, #keypoints) and (#rois, #keypoints).
    """
    xy_preds = torch.zeros((0, 3, int(num_keypoints)), dtype=torch.float32, device=maps.device)
    end_scores = torch.zeros((0, int(num_keypoints)), dtype=torch.float32, device=maps.device)

    for i in range(int(rois.size(0))):
        xy_preds_i, end_scores_i = _onnx_heatmaps_to_keypoints(
            maps, maps[i], widths_ceil[i], heights_ceil[i], widths[i], heights[i], offset_x[i], offset_y[i]
        )
        xy_preds = torch.cat((xy_preds.to(dtype=torch.float32), xy_preds_i.unsqueeze(0).to(dtype=torch.float32)), 0)
        end_scores = torch.cat(
            (end_scores.to(dtype=torch.float32), end_scores_i.to(dtype=torch.float32).unsqueeze(0)), 0
        )
    return xy_preds, end_scores
def heatmaps_to_keypoints(maps, rois):
    """Extract predicted keypoint locations from heatmaps.

    Returns ``(xy_preds, end_scores)``: ``xy_preds`` has shape
    (#rois, #keypoints, 3) with the 3 columns being (x, y, 1), and
    ``end_scores`` has shape (#rois, #keypoints) holding the heatmap value
    at each predicted location.
    """
    # This function converts a discrete image coordinate in a HEATMAP_SIZE x
    # HEATMAP_SIZE image to a continuous keypoint coordinate. We maintain
    # consistency with keypoints_to_heatmap_labels by using the conversion from
    # Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a
    # continuous coordinate.

    offset_x = rois[:, 0]
    offset_y = rois[:, 1]

    widths = rois[:, 2] - rois[:, 0]
    heights = rois[:, 3] - rois[:, 1]
    # avoid degenerate (zero-sized) boxes
    widths = widths.clamp(min=1)
    heights = heights.clamp(min=1)
    widths_ceil = widths.ceil()
    heights_ceil = heights.ceil()

    num_keypoints = maps.shape[1]

    if torchvision._is_tracing():
        # ONNX export path: use the traceable concatenation-based loop
        xy_preds, end_scores = _onnx_heatmaps_to_keypoints_loop(
            maps,
            rois,
            widths_ceil,
            heights_ceil,
            widths,
            heights,
            offset_x,
            offset_y,
            torch.scalar_tensor(num_keypoints, dtype=torch.int64),
        )
        return xy_preds.permute(0, 2, 1), end_scores

    xy_preds = torch.zeros((len(rois), 3, num_keypoints), dtype=torch.float32, device=maps.device)
    end_scores = torch.zeros((len(rois), num_keypoints), dtype=torch.float32, device=maps.device)
    for i in range(len(rois)):
        roi_map_width = int(widths_ceil[i].item())
        roi_map_height = int(heights_ceil[i].item())
        width_correction = widths[i] / roi_map_width
        height_correction = heights[i] / roi_map_height
        # upscale the heatmaps of this RoI to the RoI's own size
        roi_map = F.interpolate(
            maps[i][:, None], size=(roi_map_height, roi_map_width), mode="bicubic", align_corners=False
        )[:, 0]
        # roi_map_probs = scores_to_probs(roi_map.copy())
        w = roi_map.shape[2]
        pos = roi_map.reshape(num_keypoints, -1).argmax(dim=1)

        x_int = pos % w
        y_int = torch.div(pos - x_int, w, rounding_mode="floor")
        # assert (roi_map_probs[k, y_int, x_int] ==
        #         roi_map_probs[k, :, :].max())
        # +0.5: discrete cell index -> continuous center coordinate
        x = (x_int.float() + 0.5) * width_correction
        y = (y_int.float() + 0.5) * height_correction
        xy_preds[i, 0, :] = x + offset_x[i]
        xy_preds[i, 1, :] = y + offset_y[i]
        xy_preds[i, 2, :] = 1
        end_scores[i, :] = roi_map[torch.arange(num_keypoints, device=roi_map.device), y_int, x_int]

    return xy_preds.permute(0, 2, 1), end_scores
f"keypoint_logits height and width (last two elements of shape) should be equal. Instead got H = {H} and W = {W}" + ) + discretization_size = H + heatmaps = [] + valid = [] + for proposals_per_image, gt_kp_in_image, midx in zip(proposals, gt_keypoints, keypoint_matched_idxs): + kp = gt_kp_in_image[midx] + heatmaps_per_image, valid_per_image = keypoints_to_heatmap(kp, proposals_per_image, discretization_size) + heatmaps.append(heatmaps_per_image.view(-1)) + valid.append(valid_per_image.view(-1)) + + keypoint_targets = torch.cat(heatmaps, dim=0) + valid = torch.cat(valid, dim=0).to(dtype=torch.uint8) + valid = torch.where(valid)[0] + + # torch.mean (in binary_cross_entropy_with_logits) doesn't + # accept empty tensors, so handle it sepaartely + if keypoint_targets.numel() == 0 or len(valid) == 0: + return keypoint_logits.sum() * 0 + + keypoint_logits = keypoint_logits.view(N * K, H * W) + + keypoint_loss = F.cross_entropy(keypoint_logits[valid], keypoint_targets[valid]) + return keypoint_loss + + +def keypointrcnn_inference(x, boxes): + # type: (Tensor, List[Tensor]) -> Tuple[List[Tensor], List[Tensor]] + kp_probs = [] + kp_scores = [] + + boxes_per_image = [box.size(0) for box in boxes] + x2 = x.split(boxes_per_image, dim=0) + + for xx, bb in zip(x2, boxes): + kp_prob, scores = heatmaps_to_keypoints(xx, bb) + kp_probs.append(kp_prob) + kp_scores.append(scores) + + return kp_probs, kp_scores + + +def _onnx_expand_boxes(boxes, scale): + # type: (Tensor, float) -> Tensor + w_half = (boxes[:, 2] - boxes[:, 0]) * 0.5 + h_half = (boxes[:, 3] - boxes[:, 1]) * 0.5 + x_c = (boxes[:, 2] + boxes[:, 0]) * 0.5 + y_c = (boxes[:, 3] + boxes[:, 1]) * 0.5 + + w_half = w_half.to(dtype=torch.float32) * scale + h_half = h_half.to(dtype=torch.float32) * scale + + boxes_exp0 = x_c - w_half + boxes_exp1 = y_c - h_half + boxes_exp2 = x_c + w_half + boxes_exp3 = y_c + h_half + boxes_exp = torch.stack((boxes_exp0, boxes_exp1, boxes_exp2, boxes_exp3), 1) + return boxes_exp + + +# the next two 
# the next two functions should be merged inside Masker
# but are kept here for the moment while we need them
# temporarily for paste_mask_in_image
def expand_boxes(boxes, scale):
    # type: (Tensor, float) -> Tensor
    """Scale each box about its own center by ``scale``."""
    if torchvision._is_tracing():
        return _onnx_expand_boxes(boxes, scale)
    half_w = (boxes[:, 2] - boxes[:, 0]) * 0.5
    half_h = (boxes[:, 3] - boxes[:, 1]) * 0.5
    ctr_x = (boxes[:, 2] + boxes[:, 0]) * 0.5
    ctr_y = (boxes[:, 3] + boxes[:, 1]) * 0.5

    half_w *= scale
    half_h *= scale

    expanded = torch.zeros_like(boxes)
    expanded[:, 0] = ctr_x - half_w
    expanded[:, 1] = ctr_y - half_h
    expanded[:, 2] = ctr_x + half_w
    expanded[:, 3] = ctr_y + half_h
    return expanded


@torch.jit.unused
def expand_masks_tracing_scale(M, padding):
    # type: (int, int) -> float
    # Tensor-based computation so ONNX tracing records the arithmetic.
    return torch.tensor(M + 2 * padding).to(torch.float32) / torch.tensor(M).to(torch.float32)


def expand_masks(mask, padding):
    # type: (Tensor, int) -> Tuple[Tensor, float]
    """Zero-pad masks on all four sides and return the padded masks together
    with the scale factor the boxes must be expanded by to match."""
    M = mask.shape[-1]
    if torch._C._get_tracing_state():  # could not import is_tracing(), not sure why
        scale = expand_masks_tracing_scale(M, padding)
    else:
        scale = float(M + 2 * padding) / M
    padded = F.pad(mask, (padding,) * 4)
    return padded, scale


def paste_mask_in_image(mask, box, im_h, im_w):
    # type: (Tensor, Tensor, int, int) -> Tensor
    """Resize ``mask`` to the size of ``box`` and paste it into a zeroed
    (im_h, im_w) canvas, clipping to the image bounds."""
    TO_REMOVE = 1
    w = max(int(box[2] - box[0] + TO_REMOVE), 1)
    h = max(int(box[3] - box[1] + TO_REMOVE), 1)

    # Run interpolate on a [1, 1, H, W] view, then drop the batch dims.
    batched = mask.expand((1, 1, -1, -1))
    resized = F.interpolate(batched, size=(h, w), mode="bilinear", align_corners=False)[0][0]

    canvas = torch.zeros((im_h, im_w), dtype=resized.dtype, device=resized.device)
    x_0 = max(box[0], 0)
    x_1 = min(box[2] + 1, im_w)
    y_0 = max(box[1], 0)
    y_1 = min(box[3] + 1, im_h)

    canvas[y_0:y_1, x_0:x_1] = resized[(y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0])]
    return canvas
im_w): + one = torch.ones(1, dtype=torch.int64) + zero = torch.zeros(1, dtype=torch.int64) + + w = box[2] - box[0] + one + h = box[3] - box[1] + one + w = torch.max(torch.cat((w, one))) + h = torch.max(torch.cat((h, one))) + + # Set shape to [batchxCxHxW] + mask = mask.expand((1, 1, mask.size(0), mask.size(1))) + + # Resize mask + mask = F.interpolate(mask, size=(int(h), int(w)), mode="bilinear", align_corners=False) + mask = mask[0][0] + + x_0 = torch.max(torch.cat((box[0].unsqueeze(0), zero))) + x_1 = torch.min(torch.cat((box[2].unsqueeze(0) + one, im_w.unsqueeze(0)))) + y_0 = torch.max(torch.cat((box[1].unsqueeze(0), zero))) + y_1 = torch.min(torch.cat((box[3].unsqueeze(0) + one, im_h.unsqueeze(0)))) + + unpaded_im_mask = mask[(y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0])] + + # TODO : replace below with a dynamic padding when support is added in ONNX + + # pad y + zeros_y0 = torch.zeros(y_0, unpaded_im_mask.size(1)) + zeros_y1 = torch.zeros(im_h - y_1, unpaded_im_mask.size(1)) + concat_0 = torch.cat((zeros_y0, unpaded_im_mask.to(dtype=torch.float32), zeros_y1), 0)[0:im_h, :] + # pad x + zeros_x0 = torch.zeros(concat_0.size(0), x_0) + zeros_x1 = torch.zeros(concat_0.size(0), im_w - x_1) + im_mask = torch.cat((zeros_x0, concat_0, zeros_x1), 1)[:, :im_w] + return im_mask + + +@torch.jit._script_if_tracing +def _onnx_paste_masks_in_image_loop(masks, boxes, im_h, im_w): + res_append = torch.zeros(0, im_h, im_w) + for i in range(masks.size(0)): + mask_res = _onnx_paste_mask_in_image(masks[i][0], boxes[i], im_h, im_w) + mask_res = mask_res.unsqueeze(0) + res_append = torch.cat((res_append, mask_res)) + return res_append + + +def paste_masks_in_image(masks, boxes, img_shape, padding=1): + # type: (Tensor, Tensor, Tuple[int, int], int) -> Tensor + masks, scale = expand_masks(masks, padding=padding) + boxes = expand_boxes(boxes, scale).to(dtype=torch.int64) + im_h, im_w = img_shape + + if torchvision._is_tracing(): + return 
def paste_masks_in_image(masks, boxes, img_shape, padding=1):
    # type: (Tensor, Tensor, Tuple[int, int], int) -> Tensor
    """Paste instance masks (one per box) into full-image canvases.

    Returns a tensor of shape (#masks, 1, im_h, im_w).
    """
    masks, scale = expand_masks(masks, padding=padding)
    # expand boxes by the same factor the masks were padded by
    boxes = expand_boxes(boxes, scale).to(dtype=torch.int64)
    im_h, im_w = img_shape

    if torchvision._is_tracing():
        return _onnx_paste_masks_in_image_loop(
            masks, boxes, torch.scalar_tensor(im_h, dtype=torch.int64), torch.scalar_tensor(im_w, dtype=torch.int64)
        )[:, None]
    res = [paste_mask_in_image(m[0], b, im_h, im_w) for m, b in zip(masks, boxes)]
    if len(res) > 0:
        ret = torch.stack(res, dim=0)[:, None]
    else:
        # no detections: keep the expected 4-D shape
        ret = masks.new_empty((0, 1, im_h, im_w))
    return ret


class RoIHeads(nn.Module):
    """Second stage of the R-CNN family: box classification/regression plus
    optional mask and keypoint branches."""

    __annotations__ = {
        "box_coder": det_utils.BoxCoder,
        "proposal_matcher": det_utils.Matcher,
        "fg_bg_sampler": det_utils.BalancedPositiveNegativeSampler,
    }

    def __init__(
        self,
        box_roi_pool,
        box_head,
        box_predictor,
        # Faster R-CNN training
        fg_iou_thresh,
        bg_iou_thresh,
        batch_size_per_image,
        positive_fraction,
        bbox_reg_weights,
        # Faster R-CNN inference
        score_thresh,
        nms_thresh,
        detections_per_img,
        # Mask
        mask_roi_pool=None,
        mask_head=None,
        mask_predictor=None,
        keypoint_roi_pool=None,
        keypoint_head=None,
        keypoint_predictor=None,
    ):
        super().__init__()

        self.box_similarity = box_ops.box_iou
        # assign ground-truth boxes for each proposal
        self.proposal_matcher = det_utils.Matcher(fg_iou_thresh, bg_iou_thresh, allow_low_quality_matches=False)

        self.fg_bg_sampler = det_utils.BalancedPositiveNegativeSampler(batch_size_per_image, positive_fraction)

        if bbox_reg_weights is None:
            bbox_reg_weights = (10.0, 10.0, 5.0, 5.0)
        self.box_coder = det_utils.BoxCoder(bbox_reg_weights)

        self.box_roi_pool = box_roi_pool
        self.box_head = box_head
        self.box_predictor = box_predictor

        self.score_thresh = score_thresh
        self.nms_thresh = nms_thresh
        self.detections_per_img = detections_per_img

        # mask branch components; all three must be set for has_mask()
        self.mask_roi_pool = mask_roi_pool
        self.mask_head = mask_head
        self.mask_predictor = mask_predictor

        # keypoint branch components; all three must be set for has_keypoint()
        self.keypoint_roi_pool = keypoint_roi_pool
        self.keypoint_head = keypoint_head
        self.keypoint_predictor = keypoint_predictor
    def has_mask(self):
        # The mask branch is active only when all three components were given.
        if self.mask_roi_pool is None:
            return False
        if self.mask_head is None:
            return False
        if self.mask_predictor is None:
            return False
        return True

    def has_keypoint(self):
        # The keypoint branch is active only when all three components were given.
        if self.keypoint_roi_pool is None:
            return False
        if self.keypoint_head is None:
            return False
        if self.keypoint_predictor is None:
            return False
        return True

    def assign_targets_to_proposals(self, proposals, gt_boxes, gt_labels):
        # type: (List[Tensor], List[Tensor], List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]
        """Match each proposal to a ground-truth box per image.

        Returns per-image matched gt indices (clamped to >= 0) and per-proposal
        labels, where 0 marks background and -1 marks proposals ignored by the
        sampler.
        """
        matched_idxs = []
        labels = []
        for proposals_in_image, gt_boxes_in_image, gt_labels_in_image in zip(proposals, gt_boxes, gt_labels):

            if gt_boxes_in_image.numel() == 0:
                # Background image: everything is matched to index 0 / label 0
                device = proposals_in_image.device
                clamped_matched_idxs_in_image = torch.zeros(
                    (proposals_in_image.shape[0],), dtype=torch.int64, device=device
                )
                labels_in_image = torch.zeros((proposals_in_image.shape[0],), dtype=torch.int64, device=device)
            else:
                # set to self.box_similarity when https://github.com/pytorch/pytorch/issues/27495 lands
                match_quality_matrix = box_ops.box_iou(gt_boxes_in_image, proposals_in_image)
                matched_idxs_in_image = self.proposal_matcher(match_quality_matrix)

                # negative matches get sentinel values < 0; clamp for indexing
                clamped_matched_idxs_in_image = matched_idxs_in_image.clamp(min=0)

                labels_in_image = gt_labels_in_image[clamped_matched_idxs_in_image]
                labels_in_image = labels_in_image.to(dtype=torch.int64)

                # Label background (below the low threshold)
                bg_inds = matched_idxs_in_image == self.proposal_matcher.BELOW_LOW_THRESHOLD
                labels_in_image[bg_inds] = 0

                # Label ignore proposals (between low and high thresholds)
                ignore_inds = matched_idxs_in_image == self.proposal_matcher.BETWEEN_THRESHOLDS
                labels_in_image[ignore_inds] = -1  # -1 is ignored by sampler

            matched_idxs.append(clamped_matched_idxs_in_image)
            labels.append(labels_in_image)
        return matched_idxs, labels
(pos_inds_img, neg_inds_img) in enumerate(zip(sampled_pos_inds, sampled_neg_inds)): + img_sampled_inds = torch.where(pos_inds_img | neg_inds_img)[0] + sampled_inds.append(img_sampled_inds) + return sampled_inds + + def add_gt_proposals(self, proposals, gt_boxes): + # type: (List[Tensor], List[Tensor]) -> List[Tensor] + proposals = [torch.cat((proposal, gt_box)) for proposal, gt_box in zip(proposals, gt_boxes)] + + return proposals + + def check_targets(self, targets): + # type: (Optional[List[Dict[str, Tensor]]]) -> None + if targets is None: + raise ValueError("targets should not be None") + if not all(["boxes" in t for t in targets]): + raise ValueError("Every element of targets should have a boxes key") + if not all(["labels" in t for t in targets]): + raise ValueError("Every element of targets should have a labels key") + if self.has_mask(): + if not all(["masks" in t for t in targets]): + raise ValueError("Every element of targets should have a masks key") + + def select_training_samples( + self, + proposals, # type: List[Tensor] + targets, # type: Optional[List[Dict[str, Tensor]]] + ): + # type: (...) 
    def select_training_samples(
        self,
        proposals,  # type: List[Tensor]
        targets,  # type: Optional[List[Dict[str, Tensor]]]
    ):
        # type: (...) -> Tuple[List[Tensor], List[Tensor], List[Tensor], List[Tensor]]
        """Prepare the training batch for the box head: match proposals to
        ground truth, subsample a balanced set, and encode regression targets.

        Returns the sampled proposals plus their matched gt indices, labels
        and encoded box regression targets, all as per-image lists.
        """
        self.check_targets(targets)
        if targets is None:
            raise ValueError("targets should not be None")
        dtype = proposals[0].dtype
        device = proposals[0].device

        gt_boxes = [t["boxes"].to(dtype) for t in targets]
        gt_labels = [t["labels"] for t in targets]

        # append ground-truth bboxes to proposals
        proposals = self.add_gt_proposals(proposals, gt_boxes)

        # get matching gt indices for each proposal
        matched_idxs, labels = self.assign_targets_to_proposals(proposals, gt_boxes, gt_labels)
        # sample a fixed proportion of positive-negative proposals
        sampled_inds = self.subsample(labels)
        matched_gt_boxes = []
        num_images = len(proposals)
        for img_id in range(num_images):
            img_sampled_inds = sampled_inds[img_id]
            proposals[img_id] = proposals[img_id][img_sampled_inds]
            labels[img_id] = labels[img_id][img_sampled_inds]
            matched_idxs[img_id] = matched_idxs[img_id][img_sampled_inds]

            gt_boxes_in_image = gt_boxes[img_id]
            if gt_boxes_in_image.numel() == 0:
                # background-only image: use a dummy gt box for encoding
                gt_boxes_in_image = torch.zeros((1, 4), dtype=dtype, device=device)
            matched_gt_boxes.append(gt_boxes_in_image[matched_idxs[img_id]])

        regression_targets = self.box_coder.encode(matched_gt_boxes, proposals)
        return proposals, matched_idxs, labels, regression_targets
    def postprocess_detections(
        self,
        class_logits,  # type: Tensor
        box_regression,  # type: Tensor
        proposals,  # type: List[Tensor]
        image_shapes,  # type: List[Tuple[int, int]]
    ):
        # type: (...) -> Tuple[List[Tensor], List[Tensor], List[Tensor]]
        """Decode the box head outputs into final per-image detections:
        decode boxes, score-threshold, remove tiny boxes, run class-wise NMS
        and keep the top ``detections_per_img`` results per image."""
        device = class_logits.device
        num_classes = class_logits.shape[-1]

        boxes_per_image = [boxes_in_image.shape[0] for boxes_in_image in proposals]
        pred_boxes = self.box_coder.decode(box_regression, proposals)

        pred_scores = F.softmax(class_logits, -1)

        # split the flat batch back into per-image chunks
        pred_boxes_list = pred_boxes.split(boxes_per_image, 0)
        pred_scores_list = pred_scores.split(boxes_per_image, 0)

        all_boxes = []
        all_scores = []
        all_labels = []
        for boxes, scores, image_shape in zip(pred_boxes_list, pred_scores_list, image_shapes):
            boxes = box_ops.clip_boxes_to_image(boxes, image_shape)

            # create labels for each prediction
            labels = torch.arange(num_classes, device=device)
            labels = labels.view(1, -1).expand_as(scores)

            # remove predictions with the background label (class 0)
            boxes = boxes[:, 1:]
            scores = scores[:, 1:]
            labels = labels[:, 1:]

            # batch everything, by making every class prediction be a separate instance
            boxes = boxes.reshape(-1, 4)
            scores = scores.reshape(-1)
            labels = labels.reshape(-1)

            # remove low scoring boxes
            inds = torch.where(scores > self.score_thresh)[0]
            boxes, scores, labels = boxes[inds], scores[inds], labels[inds]

            # remove empty boxes
            keep = box_ops.remove_small_boxes(boxes, min_size=1e-2)
            boxes, scores, labels = boxes[keep], scores[keep], labels[keep]

            # non-maximum suppression, independently done per class
            keep = box_ops.batched_nms(boxes, scores, labels, self.nms_thresh)
            # keep only topk scoring predictions
            keep = keep[: self.detections_per_img]
            boxes, scores, labels = boxes[keep], scores[keep], labels[keep]

            all_boxes.append(boxes)
            all_scores.append(scores)
            all_labels.append(labels)

        return all_boxes, all_scores, all_labels
-> Tuple[List[Dict[str, Tensor]], Dict[str, Tensor]] + """ + Args: + features (List[Tensor]) + proposals (List[Tensor[N, 4]]) + image_shapes (List[Tuple[H, W]]) + targets (List[Dict]) + """ + if targets is not None: + for t in targets: + # TODO: https://github.com/pytorch/pytorch/issues/26731 + floating_point_types = (torch.float, torch.double, torch.half) + if not t["boxes"].dtype in floating_point_types: + raise TypeError(f"target boxes must of float type, instead got {t['boxes'].dtype}") + if not t["labels"].dtype == torch.int64: + raise TypeError(f"target labels must of int64 type, instead got {t['labels'].dtype}") + if self.has_keypoint(): + if not t["keypoints"].dtype == torch.float32: + raise TypeError(f"target keypoints must of float type, instead got {t['keypoints'].dtype}") + + if self.training: + proposals, matched_idxs, labels, regression_targets = self.select_training_samples(proposals, targets) + else: + labels = None + regression_targets = None + matched_idxs = None + + box_features = self.box_roi_pool(features, proposals, image_shapes) + box_features = self.box_head(box_features) + class_logits, box_regression = self.box_predictor(box_features) + + result: List[Dict[str, torch.Tensor]] = [] + losses = {} + if self.training: + if labels is None: + raise ValueError("labels cannot be None") + if regression_targets is None: + raise ValueError("regression_targets cannot be None") + loss_classifier, loss_box_reg = fastrcnn_loss(class_logits, box_regression, labels, regression_targets) + losses = {"loss_classifier": loss_classifier, "loss_box_reg": loss_box_reg} + else: + boxes, scores, labels = self.postprocess_detections(class_logits, box_regression, proposals, image_shapes) + num_images = len(boxes) + for i in range(num_images): + result.append( + { + "boxes": boxes[i], + "labels": labels[i], + "scores": scores[i], + } + ) + + if self.has_mask(): + mask_proposals = [p["boxes"] for p in result] + if self.training: + if matched_idxs is None: + raise 
ValueError("if in training, matched_idxs should not be None") + + # during training, only focus on positive boxes + num_images = len(proposals) + mask_proposals = [] + pos_matched_idxs = [] + for img_id in range(num_images): + pos = torch.where(labels[img_id] > 0)[0] + mask_proposals.append(proposals[img_id][pos]) + pos_matched_idxs.append(matched_idxs[img_id][pos]) + else: + pos_matched_idxs = None + + if self.mask_roi_pool is not None: + mask_features = self.mask_roi_pool(features, mask_proposals, image_shapes) + mask_features = self.mask_head(mask_features) + mask_logits = self.mask_predictor(mask_features) + else: + raise Exception("Expected mask_roi_pool to be not None") + + loss_mask = {} + if self.training: + if targets is None or pos_matched_idxs is None or mask_logits is None: + raise ValueError("targets, pos_matched_idxs, mask_logits cannot be None when training") + + gt_masks = [t["masks"] for t in targets] + gt_labels = [t["labels"] for t in targets] + rcnn_loss_mask = maskrcnn_loss(mask_logits, mask_proposals, gt_masks, gt_labels, pos_matched_idxs) + loss_mask = {"loss_mask": rcnn_loss_mask} + else: + labels = [r["labels"] for r in result] + masks_probs = maskrcnn_inference(mask_logits, labels) + for mask_prob, r in zip(masks_probs, result): + r["masks"] = mask_prob + + losses.update(loss_mask) + + # keep none checks in if conditional so torchscript will conditionally + # compile each branch + if ( + self.keypoint_roi_pool is not None + and self.keypoint_head is not None + and self.keypoint_predictor is not None + ): + keypoint_proposals = [p["boxes"] for p in result] + if self.training: + # during training, only focus on positive boxes + num_images = len(proposals) + keypoint_proposals = [] + pos_matched_idxs = [] + if matched_idxs is None: + raise ValueError("if in trainning, matched_idxs should not be None") + + for img_id in range(num_images): + pos = torch.where(labels[img_id] > 0)[0] + keypoint_proposals.append(proposals[img_id][pos]) + 
pos_matched_idxs.append(matched_idxs[img_id][pos]) + else: + pos_matched_idxs = None + + keypoint_features = self.keypoint_roi_pool(features, keypoint_proposals, image_shapes) + keypoint_features = self.keypoint_head(keypoint_features) + keypoint_logits = self.keypoint_predictor(keypoint_features) + + loss_keypoint = {} + if self.training: + if targets is None or pos_matched_idxs is None: + raise ValueError("both targets and pos_matched_idxs should not be None when in training mode") + + gt_keypoints = [t["keypoints"] for t in targets] + rcnn_loss_keypoint = keypointrcnn_loss( + keypoint_logits, keypoint_proposals, gt_keypoints, pos_matched_idxs + ) + loss_keypoint = {"loss_keypoint": rcnn_loss_keypoint} + else: + if keypoint_logits is None or keypoint_proposals is None: + raise ValueError( + "both keypoint_logits and keypoint_proposals should not be None when not in training mode" + ) + + keypoints_probs, kp_scores = keypointrcnn_inference(keypoint_logits, keypoint_proposals) + for keypoint_prob, kps, r in zip(keypoints_probs, kp_scores, result): + r["keypoints"] = keypoint_prob + r["keypoints_scores"] = kps + losses.update(loss_keypoint) + + return result, losses diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/detection/ssd.py b/vllm/lib/python3.10/site-packages/torchvision/models/detection/ssd.py new file mode 100644 index 0000000000000000000000000000000000000000..87062d2bc88a5bf17625e0530116aba22941c538 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torchvision/models/detection/ssd.py @@ -0,0 +1,682 @@ +import warnings +from collections import OrderedDict +from typing import Any, Dict, List, Optional, Tuple + +import torch +import torch.nn.functional as F +from torch import nn, Tensor + +from ...ops import boxes as box_ops +from ...transforms._presets import ObjectDetection +from ...utils import _log_api_usage_once +from .._api import register_model, Weights, WeightsEnum +from .._meta import _COCO_CATEGORIES +from .._utils import 
__all__ = [
    "SSD300_VGG16_Weights",
    "ssd300_vgg16",
]


class SSD300_VGG16_Weights(WeightsEnum):
    # Pretrained weights for SSD300 with a VGG16 backbone, trained on COCO.
    COCO_V1 = Weights(
        url="https://download.pytorch.org/models/ssd300_vgg16_coco-b556d3b4.pth",
        transforms=ObjectDetection,
        meta={
            "num_params": 35641826,
            "categories": _COCO_CATEGORIES,
            "min_size": (1, 1),
            "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#ssd300-vgg16",
            "_metrics": {
                "COCO-val2017": {
                    "box_map": 25.1,
                }
            },
            "_ops": 34.858,
            "_file_size": 135.988,
            "_docs": """These weights were produced by following a similar training recipe as on the paper.""",
        },
    )
    DEFAULT = COCO_V1


def _xavier_init(conv: nn.Module):
    """Apply Xavier-uniform init to every Conv2d in ``conv`` and zero its biases."""
    for layer in conv.modules():
        if isinstance(layer, nn.Conv2d):
            torch.nn.init.xavier_uniform_(layer.weight)
            if layer.bias is not None:
                torch.nn.init.constant_(layer.bias, 0.0)


class SSDHead(nn.Module):
    """SSD prediction head: a classification head and a box regression head
    run over the same list of feature maps."""

    def __init__(self, in_channels: List[int], num_anchors: List[int], num_classes: int):
        super().__init__()
        self.classification_head = SSDClassificationHead(in_channels, num_anchors, num_classes)
        self.regression_head = SSDRegressionHead(in_channels, num_anchors)

    def forward(self, x: List[Tensor]) -> Dict[str, Tensor]:
        return {
            "bbox_regression": self.regression_head(x),
            "cls_logits": self.classification_head(x),
        }
class SSDScoringHead(nn.Module):
    """Shared base for the SSD classification and regression heads: applies
    one conv per feature map and flattens the outputs to (N, HWA, K)."""

    def __init__(self, module_list: nn.ModuleList, num_columns: int):
        super().__init__()
        self.module_list = module_list
        # K: values predicted per anchor (num_classes or 4 box deltas)
        self.num_columns = num_columns

    def _get_result_from_module_list(self, x: Tensor, idx: int) -> Tensor:
        """
        This is equivalent to self.module_list[idx](x),
        but torchscript doesn't support this yet
        """
        num_blocks = len(self.module_list)
        if idx < 0:
            idx += num_blocks
        out = x
        # iterate all modules, only keeping the output of the idx-th one
        for i, module in enumerate(self.module_list):
            if i == idx:
                out = module(x)
        return out

    def forward(self, x: List[Tensor]) -> Tensor:
        all_results = []

        for i, features in enumerate(x):
            results = self._get_result_from_module_list(features, i)

            # Permute output from (N, A * K, H, W) to (N, HWA, K).
            N, _, H, W = results.shape
            results = results.view(N, -1, self.num_columns, H, W)
            results = results.permute(0, 3, 4, 1, 2)
            results = results.reshape(N, -1, self.num_columns)  # Size=(N, HWA, K)

            all_results.append(results)

        return torch.cat(all_results, dim=1)


class SSDClassificationHead(SSDScoringHead):
    """One 3x3 conv per feature map predicting ``num_classes`` scores per anchor."""

    def __init__(self, in_channels: List[int], num_anchors: List[int], num_classes: int):
        cls_logits = nn.ModuleList()
        for channels, anchors in zip(in_channels, num_anchors):
            cls_logits.append(nn.Conv2d(channels, num_classes * anchors, kernel_size=3, padding=1))
        _xavier_init(cls_logits)
        super().__init__(cls_logits, num_classes)


class SSDRegressionHead(SSDScoringHead):
    """One 3x3 conv per feature map predicting 4 box deltas per anchor."""

    def __init__(self, in_channels: List[int], num_anchors: List[int]):
        bbox_reg = nn.ModuleList()
        for channels, anchors in zip(in_channels, num_anchors):
            bbox_reg.append(nn.Conv2d(channels, 4 * anchors, kernel_size=3, padding=1))
        _xavier_init(bbox_reg)
        super().__init__(bbox_reg, 4)
+ + During training, the model expects both the input tensors and targets (list of dictionary), + containing: + - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (Int64Tensor[N]): the class label for each ground-truth box + + The model returns a Dict[Tensor] during training, containing the classification and regression + losses. + + During inference, the model requires only the input tensors, and returns the post-processed + predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as + follows, where ``N`` is the number of detections: + + - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (Int64Tensor[N]): the predicted labels for each detection + - scores (Tensor[N]): the scores for each detection + + Args: + backbone (nn.Module): the network used to compute the features for the model. + It should contain an out_channels attribute with the list of the output channels of + each feature map. The backbone should return a single Tensor or an OrderedDict[Tensor]. + anchor_generator (DefaultBoxGenerator): module that generates the default boxes for a + set of feature maps. + size (Tuple[int, int]): the width and height to which images will be rescaled before feeding them + to the backbone. + num_classes (int): number of output classes of the model (including the background). + image_mean (Tuple[float, float, float]): mean values used for input normalization. + They are generally the mean values of the dataset on which the backbone has been trained + on + image_std (Tuple[float, float, float]): std values used for input normalization. + They are generally the std values of the dataset on which the backbone has been trained on + head (nn.Module, optional): Module run on top of the backbone features. 
    __annotations__ = {
        "box_coder": det_utils.BoxCoder,
        "proposal_matcher": det_utils.Matcher,
    }

    def __init__(
        self,
        backbone: nn.Module,
        anchor_generator: DefaultBoxGenerator,
        size: Tuple[int, int],
        num_classes: int,
        image_mean: Optional[List[float]] = None,
        image_std: Optional[List[float]] = None,
        head: Optional[nn.Module] = None,
        score_thresh: float = 0.01,
        nms_thresh: float = 0.45,
        detections_per_img: int = 200,
        iou_thresh: float = 0.5,
        topk_candidates: int = 400,
        positive_fraction: float = 0.25,
        **kwargs: Any,
    ):
        super().__init__()
        _log_api_usage_once(self)

        self.backbone = backbone

        self.anchor_generator = anchor_generator

        self.box_coder = det_utils.BoxCoder(weights=(10.0, 10.0, 5.0, 5.0))

        if head is None:
            # build the default SSD head from the backbone's output channels
            if hasattr(backbone, "out_channels"):
                out_channels = backbone.out_channels
            else:
                out_channels = det_utils.retrieve_out_channels(backbone, size)

            if len(out_channels) != len(anchor_generator.aspect_ratios):
                raise ValueError(
                    f"The length of the output channels from the backbone ({len(out_channels)}) do not match the length of the anchor generator aspect ratios ({len(anchor_generator.aspect_ratios)})"
                )

            num_anchors = self.anchor_generator.num_anchors_per_location()
            head = SSDHead(out_channels, num_anchors, num_classes)
        self.head = head

        self.proposal_matcher = det_utils.SSDMatcher(iou_thresh)

        # ImageNet normalization defaults
        if image_mean is None:
            image_mean = [0.485, 0.456, 0.406]
        if image_std is None:
            image_std = [0.229, 0.224, 0.225]
        self.transform = GeneralizedRCNNTransform(
            min(size), max(size), image_mean, image_std, size_divisible=1, fixed_size=size, **kwargs
        )

        self.score_thresh = score_thresh
        self.nms_thresh = nms_thresh
        self.detections_per_img = detections_per_img
        self.topk_candidates = topk_candidates
        # negatives kept per positive in hard-negative mining (compute_loss)
        self.neg_to_pos_ratio = (1.0 - positive_fraction) / positive_fraction

        # used only on torchscript mode
        self._has_warned = False

    @torch.jit.unused
    def eager_outputs(
        self, losses: Dict[str, Tensor], detections: List[Dict[str, Tensor]]
    ) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]:
        # Eager-mode convenience: losses while training, detections otherwise.
        if self.training:
            return losses

        return detections
bbox_regression_per_image[foreground_idxs_per_image, :] + anchors_per_image = anchors_per_image[foreground_idxs_per_image, :] + target_regression = self.box_coder.encode_single(matched_gt_boxes_per_image, anchors_per_image) + bbox_loss.append( + torch.nn.functional.smooth_l1_loss(bbox_regression_per_image, target_regression, reduction="sum") + ) + + # Estimate ground truth for class targets + gt_classes_target = torch.zeros( + (cls_logits_per_image.size(0),), + dtype=targets_per_image["labels"].dtype, + device=targets_per_image["labels"].device, + ) + gt_classes_target[foreground_idxs_per_image] = targets_per_image["labels"][ + foreground_matched_idxs_per_image + ] + cls_targets.append(gt_classes_target) + + bbox_loss = torch.stack(bbox_loss) + cls_targets = torch.stack(cls_targets) + + # Calculate classification loss + num_classes = cls_logits.size(-1) + cls_loss = F.cross_entropy(cls_logits.view(-1, num_classes), cls_targets.view(-1), reduction="none").view( + cls_targets.size() + ) + + # Hard Negative Sampling + foreground_idxs = cls_targets > 0 + num_negative = self.neg_to_pos_ratio * foreground_idxs.sum(1, keepdim=True) + # num_negative[num_negative < self.neg_to_pos_ratio] = self.neg_to_pos_ratio + negative_loss = cls_loss.clone() + negative_loss[foreground_idxs] = -float("inf") # use -inf to detect positive values that creeped in the sample + values, idx = negative_loss.sort(1, descending=True) + # background_idxs = torch.logical_and(idx.sort(1)[1] < num_negative, torch.isfinite(values)) + background_idxs = idx.sort(1)[1] < num_negative + + N = max(1, num_foreground) + return { + "bbox_regression": bbox_loss.sum() / N, + "classification": (cls_loss[foreground_idxs].sum() + cls_loss[background_idxs].sum()) / N, + } + + def forward( + self, images: List[Tensor], targets: Optional[List[Dict[str, Tensor]]] = None + ) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]: + if self.training: + if targets is None: + torch._assert(False, "targets should not be none 
when in training mode") + else: + for target in targets: + boxes = target["boxes"] + if isinstance(boxes, torch.Tensor): + torch._assert( + len(boxes.shape) == 2 and boxes.shape[-1] == 4, + f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.", + ) + else: + torch._assert(False, f"Expected target boxes to be of type Tensor, got {type(boxes)}.") + + # get the original image sizes + original_image_sizes: List[Tuple[int, int]] = [] + for img in images: + val = img.shape[-2:] + torch._assert( + len(val) == 2, + f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}", + ) + original_image_sizes.append((val[0], val[1])) + + # transform the input + images, targets = self.transform(images, targets) + + # Check for degenerate boxes + if targets is not None: + for target_idx, target in enumerate(targets): + boxes = target["boxes"] + degenerate_boxes = boxes[:, 2:] <= boxes[:, :2] + if degenerate_boxes.any(): + bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0] + degen_bb: List[float] = boxes[bb_idx].tolist() + torch._assert( + False, + "All bounding boxes should have positive height and width." 
+ f" Found invalid box {degen_bb} for target at index {target_idx}.", + ) + + # get the features from the backbone + features = self.backbone(images.tensors) + if isinstance(features, torch.Tensor): + features = OrderedDict([("0", features)]) + + features = list(features.values()) + + # compute the ssd heads outputs using the features + head_outputs = self.head(features) + + # create the set of anchors + anchors = self.anchor_generator(images, features) + + losses = {} + detections: List[Dict[str, Tensor]] = [] + if self.training: + matched_idxs = [] + if targets is None: + torch._assert(False, "targets should not be none when in training mode") + else: + for anchors_per_image, targets_per_image in zip(anchors, targets): + if targets_per_image["boxes"].numel() == 0: + matched_idxs.append( + torch.full( + (anchors_per_image.size(0),), -1, dtype=torch.int64, device=anchors_per_image.device + ) + ) + continue + + match_quality_matrix = box_ops.box_iou(targets_per_image["boxes"], anchors_per_image) + matched_idxs.append(self.proposal_matcher(match_quality_matrix)) + + losses = self.compute_loss(targets, head_outputs, anchors, matched_idxs) + else: + detections = self.postprocess_detections(head_outputs, anchors, images.image_sizes) + detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes) + + if torch.jit.is_scripting(): + if not self._has_warned: + warnings.warn("SSD always returns a (Losses, Detections) tuple in scripting") + self._has_warned = True + return losses, detections + return self.eager_outputs(losses, detections) + + def postprocess_detections( + self, head_outputs: Dict[str, Tensor], image_anchors: List[Tensor], image_shapes: List[Tuple[int, int]] + ) -> List[Dict[str, Tensor]]: + bbox_regression = head_outputs["bbox_regression"] + pred_scores = F.softmax(head_outputs["cls_logits"], dim=-1) + + num_classes = pred_scores.size(-1) + device = pred_scores.device + + detections: List[Dict[str, Tensor]] = [] + + for boxes, 
scores, anchors, image_shape in zip(bbox_regression, pred_scores, image_anchors, image_shapes): + boxes = self.box_coder.decode_single(boxes, anchors) + boxes = box_ops.clip_boxes_to_image(boxes, image_shape) + + image_boxes = [] + image_scores = [] + image_labels = [] + for label in range(1, num_classes): + score = scores[:, label] + + keep_idxs = score > self.score_thresh + score = score[keep_idxs] + box = boxes[keep_idxs] + + # keep only topk scoring predictions + num_topk = det_utils._topk_min(score, self.topk_candidates, 0) + score, idxs = score.topk(num_topk) + box = box[idxs] + + image_boxes.append(box) + image_scores.append(score) + image_labels.append(torch.full_like(score, fill_value=label, dtype=torch.int64, device=device)) + + image_boxes = torch.cat(image_boxes, dim=0) + image_scores = torch.cat(image_scores, dim=0) + image_labels = torch.cat(image_labels, dim=0) + + # non-maximum suppression + keep = box_ops.batched_nms(image_boxes, image_scores, image_labels, self.nms_thresh) + keep = keep[: self.detections_per_img] + + detections.append( + { + "boxes": image_boxes[keep], + "scores": image_scores[keep], + "labels": image_labels[keep], + } + ) + return detections + + +class SSDFeatureExtractorVGG(nn.Module): + def __init__(self, backbone: nn.Module, highres: bool): + super().__init__() + + _, _, maxpool3_pos, maxpool4_pos, _ = (i for i, layer in enumerate(backbone) if isinstance(layer, nn.MaxPool2d)) + + # Patch ceil_mode for maxpool3 to get the same WxH output sizes as the paper + backbone[maxpool3_pos].ceil_mode = True + + # parameters used for L2 regularization + rescaling + self.scale_weight = nn.Parameter(torch.ones(512) * 20) + + # Multiple Feature maps - page 4, Fig 2 of SSD paper + self.features = nn.Sequential(*backbone[:maxpool4_pos]) # until conv4_3 + + # SSD300 case - page 4, Fig 2 of SSD paper + extra = nn.ModuleList( + [ + nn.Sequential( + nn.Conv2d(1024, 256, kernel_size=1), + nn.ReLU(inplace=True), + nn.Conv2d(256, 512, kernel_size=3, 
padding=1, stride=2), # conv8_2 + nn.ReLU(inplace=True), + ), + nn.Sequential( + nn.Conv2d(512, 128, kernel_size=1), + nn.ReLU(inplace=True), + nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=2), # conv9_2 + nn.ReLU(inplace=True), + ), + nn.Sequential( + nn.Conv2d(256, 128, kernel_size=1), + nn.ReLU(inplace=True), + nn.Conv2d(128, 256, kernel_size=3), # conv10_2 + nn.ReLU(inplace=True), + ), + nn.Sequential( + nn.Conv2d(256, 128, kernel_size=1), + nn.ReLU(inplace=True), + nn.Conv2d(128, 256, kernel_size=3), # conv11_2 + nn.ReLU(inplace=True), + ), + ] + ) + if highres: + # Additional layers for the SSD512 case. See page 11, footernote 5. + extra.append( + nn.Sequential( + nn.Conv2d(256, 128, kernel_size=1), + nn.ReLU(inplace=True), + nn.Conv2d(128, 256, kernel_size=4), # conv12_2 + nn.ReLU(inplace=True), + ) + ) + _xavier_init(extra) + + fc = nn.Sequential( + nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=False), # add modified maxpool5 + nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=3, padding=6, dilation=6), # FC6 with atrous + nn.ReLU(inplace=True), + nn.Conv2d(in_channels=1024, out_channels=1024, kernel_size=1), # FC7 + nn.ReLU(inplace=True), + ) + _xavier_init(fc) + extra.insert( + 0, + nn.Sequential( + *backbone[maxpool4_pos:-1], # until conv5_3, skip maxpool5 + fc, + ), + ) + self.extra = extra + + def forward(self, x: Tensor) -> Dict[str, Tensor]: + # L2 regularization + Rescaling of 1st block's feature map + x = self.features(x) + rescaled = self.scale_weight.view(1, -1, 1, 1) * F.normalize(x) + output = [rescaled] + + # Calculating Feature maps for the rest blocks + for block in self.extra: + x = block(x) + output.append(x) + + return OrderedDict([(str(i), v) for i, v in enumerate(output)]) + + +def _vgg_extractor(backbone: VGG, highres: bool, trainable_layers: int): + backbone = backbone.features + # Gather the indices of maxpools. These are the locations of output blocks. 
+ stage_indices = [0] + [i for i, b in enumerate(backbone) if isinstance(b, nn.MaxPool2d)][:-1] + num_stages = len(stage_indices) + + # find the index of the layer from which we won't freeze + torch._assert( + 0 <= trainable_layers <= num_stages, + f"trainable_layers should be in the range [0, {num_stages}]. Instead got {trainable_layers}", + ) + freeze_before = len(backbone) if trainable_layers == 0 else stage_indices[num_stages - trainable_layers] + + for b in backbone[:freeze_before]: + for parameter in b.parameters(): + parameter.requires_grad_(False) + + return SSDFeatureExtractorVGG(backbone, highres) + + +@register_model() +@handle_legacy_interface( + weights=("pretrained", SSD300_VGG16_Weights.COCO_V1), + weights_backbone=("pretrained_backbone", VGG16_Weights.IMAGENET1K_FEATURES), +) +def ssd300_vgg16( + *, + weights: Optional[SSD300_VGG16_Weights] = None, + progress: bool = True, + num_classes: Optional[int] = None, + weights_backbone: Optional[VGG16_Weights] = VGG16_Weights.IMAGENET1K_FEATURES, + trainable_backbone_layers: Optional[int] = None, + **kwargs: Any, +) -> SSD: + """The SSD300 model is based on the `SSD: Single Shot MultiBox Detector + `_ paper. + + .. betastatus:: detection module + + The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each + image, and should be in 0-1 range. Different images can have different sizes, but they will be resized + to a fixed size before passing it to the backbone. + + The behavior of the model changes depending on if it is in training or evaluation mode. + + During training, the model expects both the input tensors and targets (list of dictionary), + containing: + + - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. 
+ - labels (Int64Tensor[N]): the class label for each ground-truth box + + The model returns a Dict[Tensor] during training, containing the classification and regression + losses. + + During inference, the model requires only the input tensors, and returns the post-processed + predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as + follows, where ``N`` is the number of detections: + + - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (Int64Tensor[N]): the predicted labels for each detection + - scores (Tensor[N]): the scores for each detection + + Example: + + >>> model = torchvision.models.detection.ssd300_vgg16(weights=SSD300_VGG16_Weights.DEFAULT) + >>> model.eval() + >>> x = [torch.rand(3, 300, 300), torch.rand(3, 500, 400)] + >>> predictions = model(x) + + Args: + weights (:class:`~torchvision.models.detection.SSD300_VGG16_Weights`, optional): The pretrained + weights to use. See + :class:`~torchvision.models.detection.SSD300_VGG16_Weights` + below for more details, and possible values. By default, no + pre-trained weights are used. + progress (bool, optional): If True, displays a progress bar of the download to stderr + Default is True. + num_classes (int, optional): number of output classes of the model (including the background) + weights_backbone (:class:`~torchvision.models.VGG16_Weights`, optional): The pretrained weights for the + backbone + trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from final block. + Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. If ``None`` is + passed (the default) this value is set to 4. + **kwargs: parameters passed to the ``torchvision.models.detection.SSD`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. 
autoclass:: torchvision.models.detection.SSD300_VGG16_Weights + :members: + """ + weights = SSD300_VGG16_Weights.verify(weights) + weights_backbone = VGG16_Weights.verify(weights_backbone) + + if "size" in kwargs: + warnings.warn("The size of the model is already fixed; ignoring the parameter.") + + if weights is not None: + weights_backbone = None + num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"])) + elif num_classes is None: + num_classes = 91 + + trainable_backbone_layers = _validate_trainable_layers( + weights is not None or weights_backbone is not None, trainable_backbone_layers, 5, 4 + ) + + # Use custom backbones more appropriate for SSD + backbone = vgg16(weights=weights_backbone, progress=progress) + backbone = _vgg_extractor(backbone, False, trainable_backbone_layers) + anchor_generator = DefaultBoxGenerator( + [[2], [2, 3], [2, 3], [2, 3], [2], [2]], + scales=[0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05], + steps=[8, 16, 32, 64, 100, 300], + ) + + defaults = { + # Rescale the input in a way compatible to the backbone + "image_mean": [0.48235, 0.45882, 0.40784], + "image_std": [1.0 / 255.0, 1.0 / 255.0, 1.0 / 255.0], # undo the 0-1 scaling of toTensor + } + kwargs: Any = {**defaults, **kwargs} + model = SSD(backbone, anchor_generator, (300, 300), num_classes, **kwargs) + + if weights is not None: + model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True)) + + return model diff --git a/vllm/lib/python3.10/site-packages/torchvision/models/detection/ssdlite.py b/vllm/lib/python3.10/site-packages/torchvision/models/detection/ssdlite.py new file mode 100644 index 0000000000000000000000000000000000000000..eda21bf941ef0d4a9051312ebdba6911c6760e8d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torchvision/models/detection/ssdlite.py @@ -0,0 +1,331 @@ +import warnings +from collections import OrderedDict +from functools import partial +from typing import Any, Callable, Dict, List, Optional, 
Union + +import torch +from torch import nn, Tensor + +from ...ops.misc import Conv2dNormActivation +from ...transforms._presets import ObjectDetection +from ...utils import _log_api_usage_once +from .. import mobilenet +from .._api import register_model, Weights, WeightsEnum +from .._meta import _COCO_CATEGORIES +from .._utils import _ovewrite_value_param, handle_legacy_interface +from ..mobilenetv3 import mobilenet_v3_large, MobileNet_V3_Large_Weights +from . import _utils as det_utils +from .anchor_utils import DefaultBoxGenerator +from .backbone_utils import _validate_trainable_layers +from .ssd import SSD, SSDScoringHead + + +__all__ = [ + "SSDLite320_MobileNet_V3_Large_Weights", + "ssdlite320_mobilenet_v3_large", +] + + +# Building blocks of SSDlite as described in section 6.2 of MobileNetV2 paper +def _prediction_block( + in_channels: int, out_channels: int, kernel_size: int, norm_layer: Callable[..., nn.Module] +) -> nn.Sequential: + return nn.Sequential( + # 3x3 depthwise with stride 1 and padding 1 + Conv2dNormActivation( + in_channels, + in_channels, + kernel_size=kernel_size, + groups=in_channels, + norm_layer=norm_layer, + activation_layer=nn.ReLU6, + ), + # 1x1 projetion to output channels + nn.Conv2d(in_channels, out_channels, 1), + ) + + +def _extra_block(in_channels: int, out_channels: int, norm_layer: Callable[..., nn.Module]) -> nn.Sequential: + activation = nn.ReLU6 + intermediate_channels = out_channels // 2 + return nn.Sequential( + # 1x1 projection to half output channels + Conv2dNormActivation( + in_channels, intermediate_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=activation + ), + # 3x3 depthwise with stride 2 and padding 1 + Conv2dNormActivation( + intermediate_channels, + intermediate_channels, + kernel_size=3, + stride=2, + groups=intermediate_channels, + norm_layer=norm_layer, + activation_layer=activation, + ), + # 1x1 projetion to output channels + Conv2dNormActivation( + intermediate_channels, out_channels, 
kernel_size=1, norm_layer=norm_layer, activation_layer=activation + ), + ) + + +def _normal_init(conv: nn.Module): + for layer in conv.modules(): + if isinstance(layer, nn.Conv2d): + torch.nn.init.normal_(layer.weight, mean=0.0, std=0.03) + if layer.bias is not None: + torch.nn.init.constant_(layer.bias, 0.0) + + +class SSDLiteHead(nn.Module): + def __init__( + self, in_channels: List[int], num_anchors: List[int], num_classes: int, norm_layer: Callable[..., nn.Module] + ): + super().__init__() + self.classification_head = SSDLiteClassificationHead(in_channels, num_anchors, num_classes, norm_layer) + self.regression_head = SSDLiteRegressionHead(in_channels, num_anchors, norm_layer) + + def forward(self, x: List[Tensor]) -> Dict[str, Tensor]: + return { + "bbox_regression": self.regression_head(x), + "cls_logits": self.classification_head(x), + } + + +class SSDLiteClassificationHead(SSDScoringHead): + def __init__( + self, in_channels: List[int], num_anchors: List[int], num_classes: int, norm_layer: Callable[..., nn.Module] + ): + cls_logits = nn.ModuleList() + for channels, anchors in zip(in_channels, num_anchors): + cls_logits.append(_prediction_block(channels, num_classes * anchors, 3, norm_layer)) + _normal_init(cls_logits) + super().__init__(cls_logits, num_classes) + + +class SSDLiteRegressionHead(SSDScoringHead): + def __init__(self, in_channels: List[int], num_anchors: List[int], norm_layer: Callable[..., nn.Module]): + bbox_reg = nn.ModuleList() + for channels, anchors in zip(in_channels, num_anchors): + bbox_reg.append(_prediction_block(channels, 4 * anchors, 3, norm_layer)) + _normal_init(bbox_reg) + super().__init__(bbox_reg, 4) + + +class SSDLiteFeatureExtractorMobileNet(nn.Module): + def __init__( + self, + backbone: nn.Module, + c4_pos: int, + norm_layer: Callable[..., nn.Module], + width_mult: float = 1.0, + min_depth: int = 16, + ): + super().__init__() + _log_api_usage_once(self) + + if backbone[c4_pos].use_res_connect: + raise 
ValueError("backbone[c4_pos].use_res_connect should be False") + + self.features = nn.Sequential( + # As described in section 6.3 of MobileNetV3 paper + nn.Sequential(*backbone[:c4_pos], backbone[c4_pos].block[0]), # from start until C4 expansion layer + nn.Sequential(backbone[c4_pos].block[1:], *backbone[c4_pos + 1 :]), # from C4 depthwise until end + ) + + get_depth = lambda d: max(min_depth, int(d * width_mult)) # noqa: E731 + extra = nn.ModuleList( + [ + _extra_block(backbone[-1].out_channels, get_depth(512), norm_layer), + _extra_block(get_depth(512), get_depth(256), norm_layer), + _extra_block(get_depth(256), get_depth(256), norm_layer), + _extra_block(get_depth(256), get_depth(128), norm_layer), + ] + ) + _normal_init(extra) + + self.extra = extra + + def forward(self, x: Tensor) -> Dict[str, Tensor]: + # Get feature maps from backbone and extra. Can't be refactored due to JIT limitations. + output = [] + for block in self.features: + x = block(x) + output.append(x) + + for block in self.extra: + x = block(x) + output.append(x) + + return OrderedDict([(str(i), v) for i, v in enumerate(output)]) + + +def _mobilenet_extractor( + backbone: Union[mobilenet.MobileNetV2, mobilenet.MobileNetV3], + trainable_layers: int, + norm_layer: Callable[..., nn.Module], +): + backbone = backbone.features + # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks. + # The first and last blocks are always included because they are the C0 (conv1) and Cn. 
+ stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1] + num_stages = len(stage_indices) + + # find the index of the layer from which we won't freeze + if not 0 <= trainable_layers <= num_stages: + raise ValueError("trainable_layers should be in the range [0, {num_stages}], instead got {trainable_layers}") + freeze_before = len(backbone) if trainable_layers == 0 else stage_indices[num_stages - trainable_layers] + + for b in backbone[:freeze_before]: + for parameter in b.parameters(): + parameter.requires_grad_(False) + + return SSDLiteFeatureExtractorMobileNet(backbone, stage_indices[-2], norm_layer) + + +class SSDLite320_MobileNet_V3_Large_Weights(WeightsEnum): + COCO_V1 = Weights( + url="https://download.pytorch.org/models/ssdlite320_mobilenet_v3_large_coco-a79551df.pth", + transforms=ObjectDetection, + meta={ + "num_params": 3440060, + "categories": _COCO_CATEGORIES, + "min_size": (1, 1), + "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#ssdlite320-mobilenetv3-large", + "_metrics": { + "COCO-val2017": { + "box_map": 21.3, + } + }, + "_ops": 0.583, + "_file_size": 13.418, + "_docs": """These weights were produced by following a similar training recipe as on the paper.""", + }, + ) + DEFAULT = COCO_V1 + + +@register_model() +@handle_legacy_interface( + weights=("pretrained", SSDLite320_MobileNet_V3_Large_Weights.COCO_V1), + weights_backbone=("pretrained_backbone", MobileNet_V3_Large_Weights.IMAGENET1K_V1), +) +def ssdlite320_mobilenet_v3_large( + *, + weights: Optional[SSDLite320_MobileNet_V3_Large_Weights] = None, + progress: bool = True, + num_classes: Optional[int] = None, + weights_backbone: Optional[MobileNet_V3_Large_Weights] = MobileNet_V3_Large_Weights.IMAGENET1K_V1, + trainable_backbone_layers: Optional[int] = None, + norm_layer: Optional[Callable[..., nn.Module]] = None, + **kwargs: Any, +) -> SSD: + """SSDlite model architecture with input size 320x320 and a 
MobileNetV3 Large backbone, as + described at `Searching for MobileNetV3 `__ and + `MobileNetV2: Inverted Residuals and Linear Bottlenecks `__. + + .. betastatus:: detection module + + See :func:`~torchvision.models.detection.ssd300_vgg16` for more details. + + Example: + + >>> model = torchvision.models.detection.ssdlite320_mobilenet_v3_large(weights=SSDLite320_MobileNet_V3_Large_Weights.DEFAULT) + >>> model.eval() + >>> x = [torch.rand(3, 320, 320), torch.rand(3, 500, 400)] + >>> predictions = model(x) + + Args: + weights (:class:`~torchvision.models.detection.SSDLite320_MobileNet_V3_Large_Weights`, optional): The + pretrained weights to use. See + :class:`~torchvision.models.detection.SSDLite320_MobileNet_V3_Large_Weights` below for + more details, and possible values. By default, no pre-trained + weights are used. + progress (bool, optional): If True, displays a progress bar of the + download to stderr. Default is True. + num_classes (int, optional): number of output classes of the model + (including the background). + weights_backbone (:class:`~torchvision.models.MobileNet_V3_Large_Weights`, optional): The pretrained + weights for the backbone. + trainable_backbone_layers (int, optional): number of trainable (not frozen) layers + starting from final block. Valid values are between 0 and 6, with 6 meaning all + backbone layers are trainable. If ``None`` is passed (the default) this value is + set to 6. + norm_layer (callable, optional): Module specifying the normalization layer to use. + **kwargs: parameters passed to the ``torchvision.models.detection.ssd.SSD`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. 
autoclass:: torchvision.models.detection.SSDLite320_MobileNet_V3_Large_Weights + :members: + """ + + weights = SSDLite320_MobileNet_V3_Large_Weights.verify(weights) + weights_backbone = MobileNet_V3_Large_Weights.verify(weights_backbone) + + if "size" in kwargs: + warnings.warn("The size of the model is already fixed; ignoring the parameter.") + + if weights is not None: + weights_backbone = None + num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"])) + elif num_classes is None: + num_classes = 91 + + trainable_backbone_layers = _validate_trainable_layers( + weights is not None or weights_backbone is not None, trainable_backbone_layers, 6, 6 + ) + + # Enable reduced tail if no pretrained backbone is selected. See Table 6 of MobileNetV3 paper. + reduce_tail = weights_backbone is None + + if norm_layer is None: + norm_layer = partial(nn.BatchNorm2d, eps=0.001, momentum=0.03) + + backbone = mobilenet_v3_large( + weights=weights_backbone, progress=progress, norm_layer=norm_layer, reduced_tail=reduce_tail, **kwargs + ) + if weights_backbone is None: + # Change the default initialization scheme if not pretrained + _normal_init(backbone) + backbone = _mobilenet_extractor( + backbone, + trainable_backbone_layers, + norm_layer, + ) + + size = (320, 320) + anchor_generator = DefaultBoxGenerator([[2, 3] for _ in range(6)], min_ratio=0.2, max_ratio=0.95) + out_channels = det_utils.retrieve_out_channels(backbone, size) + num_anchors = anchor_generator.num_anchors_per_location() + if len(out_channels) != len(anchor_generator.aspect_ratios): + raise ValueError( + f"The length of the output channels from the backbone {len(out_channels)} do not match the length of the anchor generator aspect ratios {len(anchor_generator.aspect_ratios)}" + ) + + defaults = { + "score_thresh": 0.001, + "nms_thresh": 0.55, + "detections_per_img": 300, + "topk_candidates": 300, + # Rescale the input in a way compatible to the backbone: + # The following 
mean/std rescale the data from [0, 1] to [-1, 1] + "image_mean": [0.5, 0.5, 0.5], + "image_std": [0.5, 0.5, 0.5], + } + kwargs: Any = {**defaults, **kwargs} + model = SSD( + backbone, + anchor_generator, + size, + num_classes, + head=SSDLiteHead(out_channels, num_anchors, num_classes, norm_layer), + **kwargs, + ) + + if weights is not None: + model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True)) + + return model diff --git a/vllm/lib/python3.10/site-packages/torchvision/tv_tensors/__init__.py b/vllm/lib/python3.10/site-packages/torchvision/tv_tensors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1ba47f60a36ad3be8cb2f557adb57f1d2f1ba470 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torchvision/tv_tensors/__init__.py @@ -0,0 +1,35 @@ +import torch + +from ._bounding_boxes import BoundingBoxes, BoundingBoxFormat +from ._image import Image +from ._mask import Mask +from ._torch_function_helpers import set_return_type +from ._tv_tensor import TVTensor +from ._video import Video + + +# TODO: Fix this. We skip this method as it leads to +# RecursionError: maximum recursion depth exceeded while calling a Python object +# Until `disable` is removed, there will be graph breaks after all calls to functional transforms +@torch.compiler.disable +def wrap(wrappee, *, like, **kwargs): + """Convert a :class:`torch.Tensor` (``wrappee``) into the same :class:`~torchvision.tv_tensors.TVTensor` subclass as ``like``. + + If ``like`` is a :class:`~torchvision.tv_tensors.BoundingBoxes`, the ``format`` and ``canvas_size`` of + ``like`` are assigned to ``wrappee``, unless they are passed as ``kwargs``. + + Args: + wrappee (Tensor): The tensor to convert. + like (:class:`~torchvision.tv_tensors.TVTensor`): The reference. + ``wrappee`` will be converted into the same subclass as ``like``. + kwargs: Can contain "format" and "canvas_size" if ``like`` is a :class:`~torchvision.tv_tensor.BoundingBoxes`. + Ignored otherwise. 
+ """ + if isinstance(like, BoundingBoxes): + return BoundingBoxes._wrap( + wrappee, + format=kwargs.get("format", like.format), + canvas_size=kwargs.get("canvas_size", like.canvas_size), + ) + else: + return wrappee.as_subclass(type(like)) diff --git a/vllm/lib/python3.10/site-packages/torchvision/tv_tensors/_image.py b/vllm/lib/python3.10/site-packages/torchvision/tv_tensors/_image.py new file mode 100644 index 0000000000000000000000000000000000000000..2a0a2ec720966f849f8d832a1b9f2e640ba7dc2c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torchvision/tv_tensors/_image.py @@ -0,0 +1,53 @@ +from __future__ import annotations + +from typing import Any, Optional, Union + +import PIL.Image +import torch + +from ._tv_tensor import TVTensor + + +class Image(TVTensor): + """:class:`torch.Tensor` subclass for images with shape ``[..., C, H, W]``. + + .. note:: + + In the :ref:`transforms `, ``Image`` instances are largely + interchangeable with pure :class:`torch.Tensor`. See + :ref:`this note ` for more details. + + Args: + data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as + well as PIL images. + dtype (torch.dtype, optional): Desired data type. If omitted, will be inferred from + ``data``. + device (torch.device, optional): Desired device. If omitted and ``data`` is a + :class:`torch.Tensor`, the device is taken from it. Otherwise, the image is constructed on the CPU. + requires_grad (bool, optional): Whether autograd should record operations. If omitted and + ``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``. 
+ """ + + def __new__( + cls, + data: Any, + *, + dtype: Optional[torch.dtype] = None, + device: Optional[Union[torch.device, str, int]] = None, + requires_grad: Optional[bool] = None, + ) -> Image: + if isinstance(data, PIL.Image.Image): + from torchvision.transforms.v2 import functional as F + + data = F.pil_to_tensor(data) + + tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad) + if tensor.ndim < 2: + raise ValueError + elif tensor.ndim == 2: + tensor = tensor.unsqueeze(0) + + return tensor.as_subclass(cls) + + def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override] + return self._make_repr()