diff --git a/.gitattributes b/.gitattributes
index 54f761ec9d7e229835f54daba21d27a1fbfa03eb..11e762047491e2a227675e43ccce46de113720f7 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1646,3 +1646,4 @@ parrot/lib/python3.10/site-packages/numpy/random/_common.cpython-310-x86_64-linu
 parrot/lib/python3.10/site-packages/scipy/spatial/_distance_pybind.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 parrot/lib/python3.10/site-packages/scipy/linalg/_matfuncs_sqrtm_triu.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 vllm/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+vllm/lib/python3.10/site-packages/cupyx/cutensor.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Float_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Float_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a40f8edfc1397152c931fc33f076f8f658621373
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Float_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _cast_Float(const at::Tensor & self, bool non_blocking=false);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_backward_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..18400a07c430f224173fd3197230d7d479b9db23
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _cudnn_rnn_backward {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, int64_t, c10::SymInt, c10::SymInt, int64_t, bool, double, bool, bool, c10::SymIntArrayRef, const c10::optional<at::Tensor> &, const at::Tensor &, ::std::array<bool,4>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cudnn_rnn_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask);
+};
+
+struct TORCH_API _cudnn_rnn_backward_out {
+  using schema = void (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, int64_t, c10::SymInt, c10::SymInt, int64_t, bool, double, bool, bool, c10::SymIntArrayRef, const c10::optional<at::Tensor> &, const at::Tensor &, ::std::array<bool,4>, at::Tensor &, at::Tensor &, at::Tensor &, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cudnn_rnn_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()")
+  static void call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3);
+};
+
+}} // namespace at::_ops
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..afa0bd326fc664e5734345e474beb4991258e72a
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional<at::Tensor> & per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1);
+
+} // namespace cpu
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_per_channel_affine_quantized_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_per_channel_affine_quantized_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..65f177a0f4dd35150ef7678330723a1be062aa10
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_per_channel_affine_quantized_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _empty_per_channel_affine_quantized_out(at::Tensor & out, at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous);
+TORCH_API at::Tensor & _empty_per_channel_affine_quantized_outf(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+TORCH_API at::Tensor & _empty_per_channel_affine_quantized_symint_out(at::Tensor & out, c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous);
+TORCH_API at::Tensor & _empty_per_channel_affine_quantized_symint_outf(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_ceil_cuda_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_ceil_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8bfd3e0005d275acc723524d8c73dcc96ffabd6f
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_ceil_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_ceil(at::TensorList self);
+TORCH_API void _foreach_ceil_(at::TensorList self);
+
+} // namespace cuda
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a8eb7139c497f858f2971b9e41633fb86899ebd8
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API void _foreach_log_out(at::TensorList out, at::TensorList self);
+TORCH_API void _foreach_log_outf(at::TensorList self, at::TensorList out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_moving_avg_obs_fq_helper_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_moving_avg_obs_fq_helper_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..9d1a3ddcafdb6d82050821eccf15b6dcc92446fc
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_moving_avg_obs_fq_helper_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _fused_moving_avg_obs_fq_helper {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, double, int64_t, int64_t, int64_t, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_moving_avg_obs_fq_helper")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant);
+};
+
+struct TORCH_API _fused_moving_avg_obs_fq_helper_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, double, int64_t, int64_t, int64_t, bool, bool, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_moving_avg_obs_fq_helper")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_moving_avg_obs_fq_helper.out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant, at::Tensor & out0, at::Tensor & out1);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant, at::Tensor & out0, at::Tensor & out1);
+};
+
+struct TORCH_API _fused_moving_avg_obs_fq_helper_functional {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, double, int64_t, int64_t, int64_t, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_moving_avg_obs_fq_helper_functional")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_moving_avg_obs_fq_helper_functional(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor running_min, Tensor running_max, Tensor scale, Tensor zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask, Tensor running_min_out, Tensor running_max_out, Tensor scale_out, Tensor zero_point_out)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, const at::Tensor & running_min, const at::Tensor & running_max, const at::Tensor & scale, const at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, const at::Tensor & running_min, const at::Tensor & running_max, const at::Tensor & scale, const at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant);
+};
+
+}} // namespace at::_ops
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigh.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigh.h
new file mode 100644
index 0000000000000000000000000000000000000000..e478b690d98de2ea6570b81e0a01cd8b87c03e1a
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigh.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_linalg_eigh_ops.h>
+
+namespace at {
+
+
+// aten::_linalg_eigh(Tensor A, str UPLO="L", bool compute_v=True) -> (Tensor eigenvalues, Tensor eigenvectors)
+inline ::std::tuple<at::Tensor,at::Tensor> _linalg_eigh(const at::Tensor & A, c10::string_view UPLO="L", bool compute_v=true) {
+    return at::_ops::_linalg_eigh::call(A, UPLO, compute_v);
+}
+
+// aten::_linalg_eigh.eigenvalues(Tensor A, str UPLO="L", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
+inline ::std::tuple<at::Tensor &,at::Tensor &> _linalg_eigh_out(at::Tensor & eigenvalues, at::Tensor & eigenvectors, const at::Tensor & A, c10::string_view UPLO="L", bool compute_v=true) {
+    return at::_ops::_linalg_eigh_eigenvalues::call(A, UPLO, compute_v, eigenvalues, eigenvectors);
+}
+// aten::_linalg_eigh.eigenvalues(Tensor A, str UPLO="L", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
+inline ::std::tuple<at::Tensor &,at::Tensor &> _linalg_eigh_outf(const at::Tensor & A, c10::string_view UPLO, bool compute_v, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
+    return at::_ops::_linalg_eigh_eigenvalues::call(A, UPLO, compute_v, eigenvalues, eigenvectors);
+}
+
+}
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_scale.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_scale.h
new file mode 100644
index 0000000000000000000000000000000000000000..929cbb03218acc08c7716bf142215821a9c56c53
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_scale.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_masked_scale_ops.h>
+
+namespace at {
+
+
+// aten::_masked_scale(Tensor self, Tensor mask, float scale) -> Tensor
+inline at::Tensor _masked_scale(const at::Tensor & self, const at::Tensor & mask, double scale) {
+    return at::_ops::_masked_scale::call(self, mask, scale);
+}
+
+// aten::_masked_scale.out(Tensor self, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _masked_scale_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, double scale) {
+    return at::_ops::_masked_scale_out::call(self, mask, scale, out);
+}
+// aten::_masked_scale.out(Tensor self, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _masked_scale_outf(const at::Tensor & self, const at::Tensor & mask, double scale, at::Tensor & out) {
+    return at::_ops::_masked_scale_out::call(self, mask, scale, out);
+}
+
+}
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_transpose_meta_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_transpose_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b0c8ac78e6442e19ae12e655bf04c900a5ad43d5
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_transpose_meta_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor & _mkldnn_transpose_(at::Tensor & self, int64_t dim0, int64_t dim1);
+
+} // namespace meta
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_mask_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_mask_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cc262cfd9ce0e0cfefcfb63388fe2e6b2dbeedc6
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_mask_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _nested_tensor_from_mask(const at::Tensor & t, const at::Tensor & mask, bool mask_check=true);
+
+} // namespace cpu
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e1b58af2e3971a63cc4d697ddf89bd0558280478
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_out(at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<bool> is_coalesced=c10::nullopt);
+TORCH_API at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_outf(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<bool> is_coalesced, at::Tensor & out);
+TORCH_API at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_symint_out(at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<bool> is_coalesced=c10::nullopt);
+TORCH_API at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_symint_outf(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<bool> is_coalesced, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csc_tensor_unsafe_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csc_tensor_unsafe_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f24c77aa1f30e59e52be5b0e4260d66559a2b7c6
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csc_tensor_unsafe_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={});
+TORCH_API at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sparse_matmul.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sparse_matmul.h
new file mode 100644
index 0000000000000000000000000000000000000000..9ce383d5c70f0dc6fa728ac7f6f973a986fcb89c
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sparse_matmul.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_sparse_sparse_matmul_ops.h>
+
+namespace at {
+
+
+// aten::_sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor
+inline at::Tensor _sparse_sparse_matmul(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::_sparse_sparse_matmul::call(self, other);
+}
+
+// aten::_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _sparse_sparse_matmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::_sparse_sparse_matmul_out::call(self, other, out);
+}
+// aten::_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _sparse_sparse_matmul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::_sparse_sparse_matmul_out::call(self, other, out);
+}
+
+}
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_index_put.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_index_put.h
new file mode 100644
index 0000000000000000000000000000000000000000..64c68f337c627d9322b8a7b3c704ccb6075e282d
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_index_put.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_unsafe_index_put_ops.h>
+
+namespace at {
+
+
+// aten::_unsafe_index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
+inline at::Tensor _unsafe_index_put(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false) {
+    return at::_ops::_unsafe_index_put::call(self, indices, values, accumulate);
+}
+
+}
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_backward.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..d069aeea25e4ae5adf608e7a289c1a50fdaad61c
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_backward.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_upsample_nearest_exact3d_backward_ops.h>
+
+namespace at {
+
+
+// aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & _upsample_nearest_exact3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::_upsample_nearest_exact3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _upsample_nearest_exact3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::_upsample_nearest_exact3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w, grad_input);
+  }
+}
+
+// aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & _upsample_nearest_exact3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+    return at::_ops::_upsample_nearest_exact3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _upsample_nearest_exact3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+    return at::_ops::_upsample_nearest_exact3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w, grad_input);
+  }
+}
+
+// aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & _upsample_nearest_exact3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::_upsample_nearest_exact3d_backward_grad_input::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _upsample_nearest_exact3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::_upsample_nearest_exact3d_backward_grad_input::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
+  }
+}
+
+// aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & _upsample_nearest_exact3d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+    return at::_ops::_upsample_nearest_exact3d_backward_grad_input::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _upsample_nearest_exact3d_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+    return at::_ops::_upsample_nearest_exact3d_backward_grad_input::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
+  }
+}
+
+// aten::_upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+inline at::Tensor _upsample_nearest_exact3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::_upsample_nearest_exact3d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _upsample_nearest_exact3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::_upsample_nearest_exact3d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w);
+  }
+}
+
+// aten::_upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+inline at::Tensor _upsample_nearest_exact3d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::_upsample_nearest_exact3d_backward::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _upsample_nearest_exact3d_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::_upsample_nearest_exact3d_backward::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
+  }
+}
+
+}
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_meta.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..d99cae2da8a555fb2d4b5df5e52f46267f2662b5
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_adaptive_max_pool3d : public at::impl::MetaBase {
+
+
+    void meta(const at::Tensor & self, at::IntArrayRef output_size);
+};
+
+} // namespace native
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/addmm_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/addmm_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5e4d348d2233fe80bec99157752079ad3f203168
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/addmm_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+
+} // namespace cpu
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/allclose_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/allclose_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..d06d894cbe10f87eb99135fee0d96db70ddde8e4
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/allclose_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API bool allclose(const at::Tensor & self, const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false);
+} // namespace native
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/allclose_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/allclose_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c6fb96ef8c2dea0482e71831a7fbd14be60c9d15
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/allclose_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API allclose {
+  using schema = bool (const at::Tensor &, const at::Tensor &, double, double, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::allclose")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool")
+  static bool call(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan);
+  static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan);
+};
+
+}} // namespace at::_ops
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/arcsin_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/arcsin_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd43b0bbfab2a176c238000147a20db046a6056d
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/arcsin_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor arcsin(const at::Tensor & self);
+TORCH_API at::Tensor & arcsin_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & arcsin_(at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_backward_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_backward_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..179abb68f754e9d79e45f18988572ee10dde3723
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_backward_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_symint(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cummax_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cummax_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd1cb4ab2773760351b33fd456718fd9d11aec2b
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cummax_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> cummax(const at::Tensor & self, int64_t dim);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> cummax_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> cummax_outf(const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/einsum.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/einsum.h
new file mode 100644
index 0000000000000000000000000000000000000000..834d6ce6d690ebb8dbb63e55b9a3b1f33e9c9126
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/einsum.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/einsum_ops.h>
+
+namespace at {
+
+
+// aten::einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor
+inline at::Tensor einsum(c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path=c10::nullopt) {
+    return at::_ops::einsum::call(equation, tensors, path);
+}
+
+}
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft.h
new file mode 100644
index 0000000000000000000000000000000000000000..be7f26e580c61557c861ad3153f20b13c71502a0
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/fft_rfft_ops.h>
+
+namespace at {
+
+
+// aten::fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
+inline at::Tensor fft_rfft(const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_rfft::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor fft_rfft(const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_rfft::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm);
+  }
+}
+
+// aten::fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
+inline at::Tensor fft_rfft_symint(const at::Tensor & self, c10::optional<c10::SymInt> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_rfft::call(self, n, dim, norm);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor fft_rfft(const at::Tensor & self, c10::optional<c10::SymInt> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_rfft::call(self, n, dim, norm);
+  }
+}
+
+// aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_rfft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_rfft_out::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & fft_rfft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_rfft_out::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out);
+  }
+}
+
+// aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_rfft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+    return at::_ops::fft_rfft_out::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & fft_rfft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+    return at::_ops::fft_rfft_out::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out);
+  }
+}
+
+// aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_rfft_symint_out(at::Tensor & out, const at::Tensor & self, c10::optional<c10::SymInt> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_rfft_out::call(self, n, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & fft_rfft_out(at::Tensor & out, const at::Tensor & self, c10::optional<c10::SymInt> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_rfft_out::call(self, n, dim, norm, out);
+  }
+}
+
+// aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_rfft_symint_outf(const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+    return at::_ops::fft_rfft_out::call(self, n, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & fft_rfft_outf(const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+    return at::_ops::fft_rfft_out::call(self, n, dim, norm, out);
+  }
+}
+
+}
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft2_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft2_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..55e9d94a4a0f5fcaca0349e497297131742a91ed
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft2_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor fft_rfft2(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API at::Tensor fft_rfft2_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API at::Tensor & fft_rfft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API at::Tensor & fft_rfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out);
+TORCH_API at::Tensor & fft_rfft2_symint_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API at::Tensor & fft_rfft2_symint_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..6dc7f71fac13aa76443b220aa548b7aa414284cc
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/gelu_backward_ops.h>
+
+namespace at {
+
+
+// aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & gelu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none") {
+    return at::_ops::gelu_backward_grad_input::call(grad_output, self, approximate, grad_input);
+}
+// aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & gelu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input) {
+    return at::_ops::gelu_backward_grad_input::call(grad_output, self, approximate, grad_input);
+}
+
+// aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor
+inline at::Tensor gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none") {
+    return at::_ops::gelu_backward::call(grad_output, self, approximate);
+}
+
+}
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/glu_compositeexplicitautogradnonfunctional_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/glu_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a454a6da1ee244b5bedbaccdca79d05e7e36411d
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/glu_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor glu(const at::Tensor & self, int64_t dim=-1);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_3d_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_3d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..874077418e01e2b8d38a4823e6d2f03feefffdd1
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_3d_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & grid_sampler_3d_out(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out);
+TORCH_API at::Tensor grid_sampler_3d_cpu(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+TORCH_API at::Tensor grid_sampler_3d_cuda(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+} // namespace native
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack.h
new file mode 100644
index 0000000000000000000000000000000000000000..55b7577a8a6e634ce8805a42bbfb4fec9edc5882
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include +#include +#include + + + +#include + +namespace at { + + +// aten::lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U) +inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lu_unpack(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true) { + return at::_ops::lu_unpack::call(LU_data, LU_pivots, unpack_data, unpack_pivots); +} + +// aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) +inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true) { + return at::_ops::lu_unpack_out::call(LU_data, LU_pivots, unpack_data, unpack_pivots, P, L, U); +} +// aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) +inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_outf(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U) { + return at::_ops::lu_unpack_out::call(LU_data, LU_pivots, unpack_data, unpack_pivots, P, L, U); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_backward_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..aee685b18fdc45b216a774bb09d44d2519bc713b --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & max_pool2d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..0e163b0b4661f3d3b5518a4999fb305513b455b6 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & max_pool2d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) { + return at::_ops::max_pool2d_with_indices_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input); +} +// aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & max_pool2d_with_indices_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) { + return at::_ops::max_pool2d_with_indices_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input); +} + +// aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor +inline at::Tensor max_pool2d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) { + return at::_ops::max_pool2d_with_indices_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_meta.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..6178f74f803a509c00019b98d7fb746d32f327c0 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_max_pool2d_with_indices : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); +}; + +} // namespace meta +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm.h new file mode 100644 index 0000000000000000000000000000000000000000..cdd9b683764ef25f126062b4eef3a0d12bbd6f52 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor?
running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor) +inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) { + return at::_ops::miopen_batch_norm::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon); +} + +// aten::miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) { + return at::_ops::miopen_batch_norm_out::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2); +} +// aten::miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::miopen_batch_norm_out::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..fbd86438777ae263e2bba8849c73b590abdb93a5 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_linear_backward_out(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); +TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> mkldnn_linear_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights.h new file mode 100644 index 0000000000000000000000000000000000000000..4a6662c7b0cadc70454309e0962def41f60b6c0d --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights.h
@@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor) +inline ::std::tuple<at::Tensor,at::Tensor> mkldnn_linear_backward_weights(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined) { + return at::_ops::mkldnn_linear_backward_weights::call(grad_output, input, weight, bias_defined); +} + +// aten::mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> mkldnn_linear_backward_weights_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined) { + return at::_ops::mkldnn_linear_backward_weights_out::call(grad_output, input, weight, bias_defined, out0, out1); +} +// aten::mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> mkldnn_linear_backward_weights_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::mkldnn_linear_backward_weights_out::call(grad_output, input, weight, bias_defined, out0, out1); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mm_compositeexplicitautogradnonfunctional_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mm_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0fee6727424a90532d2288f51c35b8d7ec01f47a --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mm_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor mm(const at::Tensor & self, const at::Tensor & mat2); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/narrow_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/narrow_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..780cef22d8a893fbddc59ba98278afd7d9db45ae --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/narrow_compositeimplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor narrow(const at::Tensor & self, int64_t dim, int64_t start, int64_t length); +TORCH_API at::Tensor narrow_symint(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length); +TORCH_API at::Tensor narrow(const at::Tensor & self, int64_t dim, const at::Tensor & start, int64_t length); +TORCH_API at::Tensor narrow_symint(const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_channel_shuffle_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_channel_shuffle_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..47f776d1870ee79c0cb6797ee1bfd13e07a1ab03 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_channel_shuffle_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor native_channel_shuffle(const at::Tensor & self, int64_t groups); +TORCH_API at::Tensor native_channel_shuffle_symint(const at::Tensor & self, c10::SymInt groups); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad2d_backward_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad2d_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6e046476e70837f951a611b7a2d7b89c4ded5b9b --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad2d_backward_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor reflection_pad2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor reflection_pad2d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & reflection_pad2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor & reflection_pad2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input); +TORCH_API at::Tensor & reflection_pad2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & reflection_pad2d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/scaled_dot_product_attention.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/scaled_dot_product_attention.h new file mode 100644 index 0000000000000000000000000000000000000000..264ed71a2fcf8bf838504e1b4cf3d0e3caffbf53 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/scaled_dot_product_attention.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? 
scale=None) -> Tensor +inline at::Tensor scaled_dot_product_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask={}, double dropout_p=0.0, bool is_causal=false, c10::optional<double> scale=c10::nullopt) { + return at::_ops::scaled_dot_product_attention::call(query, key, value, attn_mask, dropout_p, is_causal, scale); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/silu_compositeexplicitautogradnonfunctional_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/silu_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f5f19532df803e04b495f8f02703407e73fd9684 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/silu_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor silu(const at::Tensor & self); +TORCH_API at::Tensor & silu_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sinc_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sinc_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4add03076f24177f3151d7d7b1b59f7d1dbbe9c1 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sinc_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor sinc(const at::Tensor & self); +TORCH_API at::Tensor & sinc_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & sinc_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & sinc_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/slice_copy_compositeexplicitautogradnonfunctional_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/slice_copy_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ca6528ed3d997b139340a33201045b60afb8f124 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/slice_copy_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor slice_copy(const at::Tensor & self, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1); +TORCH_API at::Tensor slice_copy_symint(const at::Tensor & self, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/smooth_l1_loss_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/smooth_l1_loss_native.h new file mode 100644 index 0000000000000000000000000000000000000000..68f82cf5ff8d46066e4b9dd7800f60f20778fc8b --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/smooth_l1_loss_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_smooth_l1_loss_out : public at::meta::structured_smooth_l1_loss { +void impl(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_v_cuda_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_v_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0f82570df18593a6168821ad91a54de3704db2b2 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_v_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward
declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor special_chebyshev_polynomial_v(const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_chebyshev_polynomial_v_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_chebyshev_polynomial_v_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1e_meta.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1e_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..b6ed2d12c7d6b5ad04e0ee4ab5b8bbaf4c89f711 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1e_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_special_i1e : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace meta +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/tril_cuda_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/tril_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0fbfeb0c4047681ade10cf61c1408749b6fdece2 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/tril_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor tril(const at::Tensor & self, int64_t diagonal=0); +TORCH_API at::Tensor & tril_out(at::Tensor & out, const at::Tensor & self, int64_t diagonal=0); +TORCH_API at::Tensor & tril_outf(const at::Tensor & self, int64_t diagonal, at::Tensor & out); +TORCH_API at::Tensor & tril_(at::Tensor & self, int64_t diagonal=0); + +} // namespace cuda +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_backward_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..1c1557e313e203122fdf83def21fa937222f2f77 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_backward_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & unfold_backward_out_symint(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out); +TORCH_API at::Tensor unfold_backward(const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real.h new file mode 100644 index 0000000000000000000000000000000000000000..1195c5ba49c43e0f5dd1086e4a46ae66befcd490 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::view_as_real(Tensor(a) self) -> Tensor(a) +inline at::Tensor view_as_real(const at::Tensor & self) { + return at::_ops::view_as_real::call(self); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/where_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/where_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f47c36a91f2d70d6fc347e2b9940a4ab754202a7 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/where_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor where(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & where_out(at::Tensor & out, const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & where_outf(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/cupyx/cutensor.cpython-310-x86_64-linux-gnu.so b/vllm/lib/python3.10/site-packages/cupyx/cutensor.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..4c4376e0a6a0ef89d67b4f16c13c8d661d5ebe55 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/cutensor.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b729c4578945cefb8a154a34c6a8b0e7f1dce8d28970a4c57db435af2bba99c +size 2131864 diff --git a/vllm/lib/python3.10/site-packages/transformers/commands/__init__.py b/vllm/lib/python3.10/site-packages/transformers/commands/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..aa5d95a85b538171ec9cf4fa16e892df1efdef6b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/transformers/commands/__init__.py @@ -0,0 +1,27 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from abc import ABC, abstractmethod +from argparse import ArgumentParser + + +class BaseTransformersCLICommand(ABC): + @staticmethod + @abstractmethod + def register_subcommand(parser: ArgumentParser): + raise NotImplementedError() + + @abstractmethod + def run(self): + raise NotImplementedError() diff --git a/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abcae2217b09012ab4691d47c9837501fcfe1efb Binary files /dev/null and b/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/add_fast_image_processor.cpython-310.pyc b/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/add_fast_image_processor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca33cee6ee21f2c96e8128155701349c41d3cccb Binary files /dev/null and b/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/add_fast_image_processor.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model_like.cpython-310.pyc b/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model_like.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59d92ffc9de5b840856dd69cefd4237c87c30083 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model_like.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/chat.cpython-310.pyc b/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/chat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2533499b0189283adbb899866d6fa3bd3662c8f8 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/chat.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/convert.cpython-310.pyc b/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/convert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6d1d079b031a4db375db1bbd3fe7e3646a89bb0 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/convert.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/download.cpython-310.pyc b/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/download.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2380c0f4dc456248adddcdee8306582c97a9e32b Binary files /dev/null and b/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/download.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/env.cpython-310.pyc b/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/env.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..954bfbd5a066fd7650f2f7b070bed3d8663bfd67 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/env.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/run.cpython-310.pyc 
b/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/run.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a313f19c0f9d2a63bb2a2ed06050e3991a65c534 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/run.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/serving.cpython-310.pyc b/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/serving.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2fe55c99ffd1db558226b70f5bdab175704f50c6 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/serving.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/train.cpython-310.pyc b/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/train.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3df918860c40bb0825514b1812f90262b71f044 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/train.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/transformers_cli.cpython-310.pyc b/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/transformers_cli.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8731f7196393628662dd1aa2612d9613c07845b5 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/transformers/commands/__pycache__/transformers_cli.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/transformers/commands/add_fast_image_processor.py b/vllm/lib/python3.10/site-packages/transformers/commands/add_fast_image_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..a78fc2a7cf2d69f786f0ea2f1cb06af7ec5304b5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/transformers/commands/add_fast_image_processor.py @@ -0,0 +1,679 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import re +from argparse import ArgumentParser, Namespace +from datetime import date +from pathlib import Path + +from ..utils import logging +from . import BaseTransformersCLICommand + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +CURRENT_YEAR = date.today().year +TRANSFORMERS_PATH = Path(__file__).parent.parent +REPO_PATH = TRANSFORMERS_PATH.parent.parent + + +def add_import_structure_entry_init(content: str, fast_image_processor_name: str, model_name: str): + """ + Add an entry to the `_import_structure` dictionary in the `__init__.py` file of the transformers package. 
+ """ + # Step 1: Find the block + block_regex = re.compile( + r"if not is_torchvision_available\(\):.*?else:\s*(\n(?P<indent>\s+)_import_structure\[.*?\].*?\n(?:\s*(?P=indent)_import_structure\[.*?\].*?\n)*)", + re.DOTALL, + ) + match = block_regex.search(content) + + if not match: + raise ValueError("Couldn't find the '_import_structure' block.") + + # Capture the block content and indentation + block_content = match.group(1) + indent = match.group("indent") + + # Step 2: Parse existing entries + lines = block_content.strip().split("\n") + entries = [] + + import_structure_header = indent + lines[0] + entries = lines[1:] + + # Add the new entry, maintaining alphabetical order + new_entry = f'{indent}_import_structure["models.{model_name}"].append("{fast_image_processor_name}")' + if new_entry not in entries: + entries.append(new_entry) + + entries.sort() + entries = [import_structure_header] + entries + + # Step 3: Reconstruct the block + updated_block = "\n".join(entry for entry in entries) + + # Replace the original block in the content + updated_content = content[: match.start(1)] + "\n" + updated_block + "\n" + content[match.end(1) :] + + return updated_content + + +def add_import_statement_init(content: str, fast_image_processor_name: str, model_name: str): + """ + Add an import statement to the `__init__.py` file of the transformers package. + """ + # Step 1: Find the block + block_regex = re.compile( + r"if not is_torchvision_available\(\):\s+raise OptionalDependencyNotAvailable\(\)\s+except OptionalDependencyNotAvailable:\s+from \.utils\.dummy_torchvision_objects import \*\s+else:(?P<else_block>\s*(\n\s*from .+ import .*\n)+)(?=\s*try:\s+if not \(is_torchvision_available\(\) and is_timm_available\(\)\):)", + re.DOTALL, + ) + match = block_regex.search(content) + + if not match: + raise ValueError("Couldn't find the import statement block.") + + block_content = match.group("else_block") # The captured import block + + # Step 2: Parse existing entries + lines = block_content.strip().split("\n") + entries = [] + + indent = " " * (len(lines[1]) - len(lines[1].lstrip())) + import_structure_header = indent + lines[0] + entries = lines[1:] + + # Add the new entry, maintaining alphabetical order + new_entry = f"{indent}from .models.{model_name} import {fast_image_processor_name}" + if new_entry not in entries: + entries.append(new_entry) + + entries.sort() + entries = [import_structure_header] + entries + + # Step 3: Reconstruct the block + updated_block = "\n".join(entry for entry in entries) + + # Replace the original block in the content + updated_content = ( + content[: match.start("else_block")] + "\n" + updated_block + "\n\n" + content[match.end("else_block") :] + ) + + return updated_content + + +def add_fast_image_processor_to_main_init(fast_image_processor_name: str, model_name: str): + """ + Add the fast image processor to the main __init__.py file of the transformers package.
+ """ + with open(TRANSFORMERS_PATH / "__init__.py", "r", encoding="utf-8") as f: + content = f.read() + + # add _import_structure entry + content = add_import_structure_entry_init(content, fast_image_processor_name, model_name) + # add import statement + content = add_import_statement_init(content, fast_image_processor_name, model_name) + + # write the updated content + with open(TRANSFORMERS_PATH / "__init__.py", "w", encoding="utf-8") as f: + f.write(content) + + +def add_fast_image_processor_to_model_init( + fast_image_processing_module_file: str, fast_image_processor_name, model_name: str +): + """ + Add the fast image processor to the __init__.py file of the model. + """ + with open(TRANSFORMERS_PATH / "models" / model_name / "__init__.py", "r", encoding="utf-8") as f: + content = f.read() + + fast_image_processing_module_file = fast_image_processing_module_file.split(os.sep)[-1].replace(".py", "") + + if "import *" in content: + # we have an init file in the updated format + # get the indented block after if TYPE_CHECKING: and before else:, append the new import, sort the imports and write the updated content + # Step 1: Find the block + block_regex = re.compile( + r"if TYPE_CHECKING:\n(?P<if_block>.*?)(?=\s*else:)", + re.DOTALL, + ) + match = block_regex.search(content) + + if not match: + raise ValueError("Couldn't find the 'if TYPE_CHECKING' block.") + + block_content = match.group("if_block") # The captured import block + + # Step 2: Parse existing entries + entries = block_content.split("\n") + indent = " " * (len(entries[0]) - len(entries[0].lstrip())) + new_entry = f"{indent}from .{fast_image_processing_module_file} import *" + if new_entry not in entries: + entries.append(new_entry) + entries.sort() + updated_block = "\n".join(entry for entry in entries) + + # Replace the original block in the content + updated_content = content[: match.start("if_block")] + updated_block + content[match.end("if_block") :] + else: + # we have an init file in the old format + + # add "is_torchvision_available" import to from ...utils import ( + # Regex to match import statements from transformers.utils + pattern = r""" + from\s+\.\.\.utils\s+import\s+ + (?: # Non-capturing group for either: + ([\w, ]+) # 1. Single-line imports (e.g., 'a, b') + | # OR + \((.*?)\) # 2. Multi-line imports (e.g., '(a, ...
b)') + ) + """ + regex = re.compile(pattern, re.VERBOSE | re.DOTALL) + + def replacement_function(match): + # Extract existing imports + imports = (match.group(1) or match.group(2)).split(",") + imports = imports[:-1] if imports[-1] == "\n" else imports + imports = [imp.strip() for imp in imports] + + # Add the new import if not already present + if "is_torchvision_available" not in imports: + imports.append("is_torchvision_available") + imports.sort() + + # Convert to multi-line import in all cases + updated_imports = "(\n " + ",\n ".join(imports) + ",\n)" + + return f"from ...utils import {updated_imports}" + + # Replace all matches in the file content + updated_content = regex.sub(replacement_function, content) + + vision_import_structure_block = f' _import_structure["{fast_image_processing_module_file[:-5]}"] = ["{fast_image_processor_name[:-4]}"]\n' + + added_import_structure_block = ( + "try:\n if not is_torchvision_available():\n" + " raise OptionalDependencyNotAvailable()\n" + "except OptionalDependencyNotAvailable:\n" + " pass\n" + "else:\n" + f' _import_structure["{fast_image_processing_module_file}"] = ["{fast_image_processor_name}"]\n' + ) + + if vision_import_structure_block not in updated_content: + raise ValueError("Couldn't find the 'vision _import_structure block' block.") + + if added_import_structure_block not in updated_content: + updated_content = updated_content.replace( + vision_import_structure_block, vision_import_structure_block + "\n" + added_import_structure_block + ) + + vision_import_statement_block = ( + f" from .{fast_image_processing_module_file[:-5]} import {fast_image_processor_name[:-4]}\n" + ) + + added_import_statement_block = ( + " try:\n if not is_torchvision_available():\n" + " raise OptionalDependencyNotAvailable()\n" + " except OptionalDependencyNotAvailable:\n" + " pass\n" + " else:\n" + f" from .{fast_image_processing_module_file} import {fast_image_processor_name}\n" + ) + + if vision_import_statement_block not in updated_content: + raise ValueError("Couldn't find the 'vision _import_structure block' block.") + + if added_import_statement_block not in updated_content: + updated_content = updated_content.replace( + vision_import_statement_block, vision_import_statement_block + "\n" + added_import_statement_block + ) + + # write the updated content + with open(TRANSFORMERS_PATH / "models" / model_name / "__init__.py", "w", encoding="utf-8") as f: + f.write(updated_content) + + +def add_fast_image_processor_to_auto(image_processor_name: str, fast_image_processor_name: str): + """ + Add the fast image processor to the auto module. + """ + with open(TRANSFORMERS_PATH / "models" / "auto" / "image_processing_auto.py", "r", encoding="utf-8") as f: + content = f.read() + + # get all lines containing the image processor name + updated_content = content.replace( + f'("{image_processor_name}",)', f'("{image_processor_name}", "{fast_image_processor_name}")' + ) + + # write the updated content + with open(TRANSFORMERS_PATH / "models" / "auto" / "image_processing_auto.py", "w", encoding="utf-8") as f: + f.write(updated_content) + + +def add_fast_image_processor_to_dummy(fast_image_processor_name: str): + """ + Add the fast image processor to the dummy torchvision objects file. 
+ """ + dummy_torchvision_objects_file = TRANSFORMERS_PATH / "utils" / "dummy_torchvision_objects.py" + with open(dummy_torchvision_objects_file, "r", encoding="utf-8") as f: + content = f.read() + + # regex to find objects starting with "class " and ending with "ImageProcessorFast", including "ImageProcessorFast" in the match + image_processor_names = re.findall(r"class (\w*ImageProcessorFast)", content) + image_processor_names.append(fast_image_processor_name) + image_processor_names.sort() + index_new = image_processor_names.index(fast_image_processor_name) + + new_dummy_object = ( + f"class {fast_image_processor_name}(metaclass=DummyObject):\n" + ' _backends = ["torchvision"]\n\n' + " def __init__(self, *args, **kwargs):\n" + ' requires_backends(self, ["torchvision"])\n' + ) + if new_dummy_object not in content: + if index_new != len(image_processor_names) - 1: + # add the dummy object just before the next ImageProcessorFast + first_line = f"class {image_processor_names[index_new+1]}(metaclass=DummyObject):" + updated_content = content.replace(first_line, new_dummy_object + "\n\n" + first_line) + else: + # add the dummy object at the very end + updated_content = content + "\n\n" + new_dummy_object + + # write the updated content + with open(dummy_torchvision_objects_file, "w", encoding="utf-8") as f: + f.write(updated_content) + + +def add_fast_image_processor_to_doc(fast_image_processor_name: str, model_name: str): + """ + Add the fast image processor to the model's doc file. + """ + doc_source = REPO_PATH / "docs" / "source" + # find the doc files + doc_files = list(doc_source.glob(f"*/model_doc/{model_name}.md")) + if not doc_files: + # try again with "-" + doc_files = list(doc_source.glob(f"*/model_doc/{model_name.replace('_', '-')}.md")) + if not doc_files: + raise ValueError(f"No doc files found for {model_name}") + + base_doc_string = ( + f"## {fast_image_processor_name[:-4]}\n\n" f"[[autodoc]] {fast_image_processor_name[:-4]}\n" " - preprocess" + ) + fast_doc_string = ( + f"## {fast_image_processor_name}\n\n" f"[[autodoc]] {fast_image_processor_name}\n" " - preprocess" + ) + + for doc_file in doc_files: + with open(doc_file, "r", encoding="utf-8") as f: + content = f.read() + + if fast_doc_string not in content: + # add the fast image processor to the doc + updated_content = content.replace( + base_doc_string, + base_doc_string + "\n\n" + fast_doc_string, + ) + + # write the updated content + with open(doc_file, "w", encoding="utf-8") as f: + f.write(updated_content) + + +def add_fast_image_processor_to_tests(fast_image_processor_name: str, model_name: str): + """ + Add the fast image processor to the image processing tests. + """ + tests_path = REPO_PATH / "tests" / "models" / model_name + test_file = tests_path / f"test_image_processing_{model_name}.py" + if not os.path.exists(test_file): + logger.warning(f"No test file found for {model_name}. Skipping.") + return + + with open(test_file, "r", encoding="utf-8") as f: + content = f.read() + + # add is_torchvision_available import to the imports + # Regex to match import statements from transformers.utils + pattern = r""" + from\s+transformers\.utils\s+import\s+ + (?: # Non-capturing group for either: + ([\w, ]+) # 1. Single-line imports (e.g., 'a, b') + | # OR + \((.*?)\) # 2. Multi-line imports (e.g., '(a, ... 
b)') + ) + """ + regex = re.compile(pattern, re.VERBOSE | re.DOTALL) + + def replacement_function(match): + # Extract existing imports + existing_imports = (match.group(1) or match.group(2)).split(",") + existing_imports = existing_imports[:-1] if existing_imports[-1] == "\n" else existing_imports + existing_imports = [imp.strip() for imp in existing_imports] + + # Add the new import if not already present + if "is_torchvision_available" not in existing_imports: + existing_imports.append("is_torchvision_available") + existing_imports.sort() + + # Rebuild the import statement + if match.group(1): # Single-line import + updated_imports = ", ".join(existing_imports) + else: # Multi-line import + updated_imports = "(\n " + ",\n ".join(existing_imports) + ",\n)" + + return f"from transformers.utils import {updated_imports}" + + # Replace all matches in the file content + updated_content = regex.sub(replacement_function, content) + + # add the fast image processor to the imports + base_import_string = f" from transformers import {fast_image_processor_name[:-4]}" + fast_import_string = ( + " if is_torchvision_available():\n" f" from transformers import {fast_image_processor_name}" + ) + if fast_import_string not in updated_content: + updated_content = updated_content.replace(base_import_string, base_import_string + "\n\n" + fast_import_string) + + # get line starting with " image_processing_class = " and add a line after it starting with " fast_image_processing_class = " + image_processing_class_line = re.search(r" image_processing_class = .*", updated_content) + if not image_processing_class_line: + logger.warning(f"Couldn't find the 'image_processing_class' line in {test_file}. Skipping.") + return + + fast_image_processing_class_line = ( + f" fast_image_processing_class = {fast_image_processor_name} if is_torchvision_available() else None" + ) + if " fast_image_processing_class = " not in updated_content: + updated_content = updated_content.replace( + image_processing_class_line.group(0), + image_processing_class_line.group(0) + "\n" + fast_image_processing_class_line, + ) + + # write the updated content + with open(test_file, "w", encoding="utf-8") as f: + f.write(updated_content) + + +def get_fast_image_processing_content_header(content: str) -> str: + """ + Get the header of the slow image processor file. + """ + # get all the commented lines at the beginning of the file + content_header = re.search(r"^# coding=utf-8\n(#[^\n]*\n)*", content, re.MULTILINE) + if not content_header: + logger.warning("Couldn't find the content header in the slow image processor file. Using a default header.") + return ( + f"# coding=utf-8\n" + f"# Copyright {CURRENT_YEAR} The HuggingFace Team. 
All rights reserved.\n" + f"#\n" + f'# Licensed under the Apache License, Version 2.0 (the "License");\n' + f"# you may not use this file except in compliance with the License.\n" + f"# You may obtain a copy of the License at\n" + f"#\n" + f"# http://www.apache.org/licenses/LICENSE-2.0\n" + f"#\n" + f"# Unless required by applicable law or agreed to in writing, software\n" + f'# distributed under the License is distributed on an "AS IS" BASIS,\n' + f"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" + f"# See the License for the specific language governing permissions and\n" + f"# limitations under the License.\n" + f"\n" + ) + content_header = content_header.group(0) + # replace the year in the copyright + content_header = re.sub(r"# Copyright (\d+)\s", f"# Copyright {CURRENT_YEAR} ", content_header) + # get the line starting with """Image processor in content if it exists + match = re.search(r'^"""Image processor.*$', content, re.MULTILINE) + if match: + content_header += match.group(0).replace("Image processor", "Fast Image processor") + + return content_header + + +def write_default_fast_image_processor_file( + fast_image_processing_module_file: str, fast_image_processor_name: str, content_base_file: str +): + """ + Write a default fast image processor file. Used when encountering a problem while parsing the slow image processor file. + """ + imports = "\n\nfrom ...image_processing_utils_fast import BaseImageProcessorFast\n\n\n" + content_header = get_fast_image_processing_content_header(content_base_file) + content_base_file = ( + f"class {fast_image_processor_name}(BaseImageProcessorFast):\n" + " # To be implemented\n" + " resample = None\n" + " image_mean = None\n" + " image_std = None\n" + " size = None\n" + " default_to_square = None\n" + " crop_size = None\n" + " do_resize = None\n" + " do_center_crop = None\n" + " do_rescale = None\n" + " do_normalize = None\n" + " do_convert_rgb = None\n\n\n" + f'__all__ = ["{fast_image_processor_name}"]\n' + ) + + content = content_header + imports + content_base_file + + with open(fast_image_processing_module_file, "w", encoding="utf-8") as f: + f.write(content) + + +def add_fast_image_processor_file( + fast_image_processing_module_file: str, fast_image_processor_name: str, content_base_file: str +): + """ + Add the fast image processor file to the model's folder. + """ + # if the file already exists, do nothing + if os.path.exists(fast_image_processing_module_file): + print(f"{fast_image_processing_module_file} already exists. 
Skipping.") + return + + regex = rf"class {fast_image_processor_name[:-4]}.*?(\n\S|$)" + match = re.search(regex, content_base_file, re.DOTALL) + if not match: + print(f"Couldn't find the {fast_image_processor_name[:-4]} class in {fast_image_processing_module_file}") + print("Creating a new file with the default content.") + return write_default_fast_image_processor_file( + fast_image_processing_module_file, fast_image_processor_name, content_base_file + ) + # Exclude the last unindented line + slow_class_content = match.group(0).rstrip() + # get default args: + # find the __init__ block which start with def __init__ and ends with def + match = re.search(r"def __init__.*?def ", slow_class_content, re.DOTALL) + if not match: + print( + f"Couldn't find the __init__ block for {fast_image_processor_name[:-4]} in {fast_image_processing_module_file}" + ) + print("Creating a new file with the default content.") + return write_default_fast_image_processor_file( + fast_image_processing_module_file, fast_image_processor_name, content_base_file + ) + init = match.group(0) + init_signature_block = init.split(")")[0] + arg_names = init_signature_block.split(":") + arg_names = [arg_name.split("\n")[-1].strip() for arg_name in arg_names] + # get the default values + default_args = re.findall(r"= (.*?)(?:,|\))", init_signature_block) + + # build default args dict + default_args_dict = dict(zip(arg_names, default_args)) + pattern_default_size = r"size = size if size is not None else\s+(.*)" + match_default_size = re.findall(pattern_default_size, init) + default_args_dict["size"] = match_default_size[0] if match_default_size else None + pattern_default_crop_size = r"crop_size = crop_size if crop_size is not None else\s+(.*)" + match_default_crop_size = re.findall(pattern_default_crop_size, init) + default_args_dict["crop_size"] = match_default_crop_size[0] if match_default_crop_size else None + pattern_default_image_mean = r"self.image_mean = image_mean if image_mean is not None else\s+(.*)" + match_default_image_mean = re.findall(pattern_default_image_mean, init) + default_args_dict["image_mean"] = match_default_image_mean[0] if match_default_image_mean else None + pattern_default_image_std = r"self.image_std = image_std if image_std is not None else\s+(.*)" + match_default_image_std = re.findall(pattern_default_image_std, init) + default_args_dict["image_std"] = match_default_image_std[0] if match_default_image_std else None + default_args_dict["default_to_square"] = False if "(size, default_to_square=False" in init else None + + content_header = get_fast_image_processing_content_header(content_base_file) + content_base_file = ( + f"@add_start_docstrings(\n" + f' "Constructs a fast {fast_image_processor_name.replace("ImageProcessorFast", "")} image processor.",\n' + f" BASE_IMAGE_PROCESSOR_FAST_DOCSTRING,\n)\n" + f"class {fast_image_processor_name}(BaseImageProcessorFast):\n" + " # This generated class can be used as a starting point for the fast image processor.\n" + " # if the image processor is only used for simple augmentations, such as resizing, center cropping, rescaling, or normalizing,\n" + " # only the default values should be set in the class.\n" + " # If the image processor requires more complex augmentations, methods from BaseImageProcessorFast can be overridden.\n" + " # In most cases, only the `_preprocess` method should be overridden.\n\n" + " # For an example of a fast image processor requiring more complex augmentations, see `LlavaNextImageProcessorFast`.\n\n" + " # Default values should 
be checked against the slow image processor\n" + " # None values left after checking can be removed\n" + f' resample = {default_args_dict.get("resample")}\n' + f' image_mean = {default_args_dict.get("image_mean")}\n' + f' image_std = {default_args_dict.get("image_std")}\n' + f' size = {default_args_dict.get("size")}\n' + f' default_to_square = {default_args_dict.get("default_to_square")}\n' + f' crop_size = {default_args_dict.get("crop_size")}\n' + f' do_resize = {default_args_dict.get("do_resize")}\n' + f' do_center_crop = {default_args_dict.get("do_center_crop")}\n' + f' do_rescale = {default_args_dict.get("do_rescale")}\n' + f' do_normalize = {default_args_dict.get("do_normalize")}\n' + f' do_convert_rgb = {default_args_dict.get("do_convert_rgb")}\n\n\n' + f'__all__ = ["{fast_image_processor_name}"]\n' + ) + + imports = ( + "\n\nfrom ...image_processing_utils_fast import BASE_IMAGE_PROCESSOR_FAST_DOCSTRING, BaseImageProcessorFast\n" + ) + image_utils_imports = [] + if default_args_dict.get("resample") is not None and "PILImageResampling" in default_args_dict.get("resample"): + image_utils_imports.append("PILImageResampling") + if default_args_dict.get("image_mean") is not None and not any( + char.isdigit() for char in default_args_dict.get("image_mean") + ): + image_utils_imports.append(default_args_dict.get("image_mean")) + if default_args_dict.get("image_std") is not None and not any( + char.isdigit() for char in default_args_dict.get("image_std") + ): + image_utils_imports.append(default_args_dict.get("image_std")) + + if image_utils_imports: + # sort imports + image_utils_imports.sort() + imports += f"from ...image_utils import {', '.join(image_utils_imports)}\n" + + imports += "from ...utils import add_start_docstrings\n" + + content = content_header + imports + "\n\n" + content_base_file + + with open(fast_image_processing_module_file, "w", encoding="utf-8") as f: + f.write(content) + + +def add_fast_image_processor(model_name: str): + """ + Add the necessary references to the fast image processor in the transformers package, + and create the fast image processor file in the model's folder. 
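+
+ For example, a hypothetical run on `model_name="beit"` would create `image_processing_beit_fast.py` next to the slow
+ `image_processing_beit.py` and register a `BeitImageProcessorFast` class in the inits, auto mappings, dummy objects,
+ docs and tests (the model name here is only an illustration).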
+ """ + model_module = TRANSFORMERS_PATH / "models" / model_name + image_processing_module_file = list(model_module.glob("image_processing*.py")) + if not image_processing_module_file: + raise ValueError(f"No image processing module found in {model_module}") + elif len(image_processing_module_file) > 1: + for file_name in image_processing_module_file: + if not str(file_name).endswith("_fast.py"): + image_processing_module_file = str(file_name) + break + else: + image_processing_module_file = str(image_processing_module_file[0]) + + with open(image_processing_module_file, "r", encoding="utf-8") as f: + content_base_file = f.read() + + # regex to find object starting with "class " and ending with "ImageProcessor", including "ImageProcessor" in the match + image_processor_name = re.findall(r"class (\w*ImageProcessor)", content_base_file) + if not image_processor_name: + raise ValueError(f"No ImageProcessor class found in {image_processing_module_file}") + elif len(image_processor_name) > 1: + raise ValueError(f"Multiple ImageProcessor classes found in {image_processing_module_file}") + + image_processor_name = image_processor_name[0] + fast_image_processor_name = image_processor_name + "Fast" + fast_image_processing_module_file = image_processing_module_file.replace(".py", "_fast.py") + + print(f"Adding {fast_image_processor_name} to {fast_image_processing_module_file}") + + add_fast_image_processor_to_main_init( + fast_image_processor_name=fast_image_processor_name, + model_name=model_name, + ) + + add_fast_image_processor_to_model_init( + fast_image_processing_module_file=fast_image_processing_module_file, + fast_image_processor_name=fast_image_processor_name, + model_name=model_name, + ) + + add_fast_image_processor_to_auto( + image_processor_name=image_processor_name, + fast_image_processor_name=fast_image_processor_name, + ) + + add_fast_image_processor_to_dummy(fast_image_processor_name=fast_image_processor_name) + + add_fast_image_processor_to_doc( + fast_image_processor_name=fast_image_processor_name, + model_name=model_name, + ) + + add_fast_image_processor_to_tests( + fast_image_processor_name=fast_image_processor_name, + model_name=model_name, + ) + + add_fast_image_processor_file( + fast_image_processing_module_file=fast_image_processing_module_file, + fast_image_processor_name=fast_image_processor_name, + content_base_file=content_base_file, + ) + + +def add_new_model_like_command_factory(args: Namespace): + return AddFastImageProcessorCommand(model_name=args.model_name) + + +class AddFastImageProcessorCommand(BaseTransformersCLICommand): + @staticmethod + def register_subcommand(parser: ArgumentParser): + add_fast_image_processor_parser = parser.add_parser("add-fast-image-processor") + add_fast_image_processor_parser.add_argument( + "--model-name", + type=str, + required=True, + help="The name of the folder containing the model's implementation.", + ) + add_fast_image_processor_parser.set_defaults(func=add_new_model_like_command_factory) + + def __init__(self, model_name: str, *args): + self.model_name = model_name + + def run(self): + add_fast_image_processor(model_name=self.model_name) diff --git a/vllm/lib/python3.10/site-packages/transformers/commands/add_new_model_like.py b/vllm/lib/python3.10/site-packages/transformers/commands/add_new_model_like.py new file mode 100644 index 0000000000000000000000000000000000000000..badf6f0a4048e797230028651f9da9ef488e6df7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/transformers/commands/add_new_model_like.py @@ -0,0 +1,1791 @@ 
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import difflib
+import json
+import os
+import re
+from argparse import ArgumentParser, Namespace
+from dataclasses import dataclass
+from datetime import date
+from itertools import chain
+from pathlib import Path
+from typing import Any, Callable, Dict, List, Optional, Pattern, Tuple, Union
+
+import yaml
+
+from ..models import auto as auto_module
+from ..models.auto.configuration_auto import model_type_to_module_name
+from ..utils import is_flax_available, is_tf_available, is_torch_available, logging
+from . import BaseTransformersCLICommand
+from .add_fast_image_processor import add_fast_image_processor
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+CURRENT_YEAR = date.today().year
+TRANSFORMERS_PATH = Path(__file__).parent.parent
+REPO_PATH = TRANSFORMERS_PATH.parent.parent
+
+
+@dataclass
+class ModelPatterns:
+ """
+ Holds the basic information about a new model for the add-new-model-like command.
+
+ Args:
+ model_name (`str`): The model name.
+ checkpoint (`str`): The checkpoint to use for doc examples.
+ model_type (`str`, *optional*):
+ The model type, the identifier used internally in the library like `bert` or `xlm-roberta`. Will default to
+ `model_name` lowercased with spaces replaced with minuses (-).
+ model_lower_cased (`str`, *optional*):
+ The lowercased version of the model name, to use for the module name or function names. Will default to
+ `model_name` lowercased with spaces and minuses replaced with underscores.
+ model_camel_cased (`str`, *optional*):
+ The camel-cased version of the model name, to use for the class names. Will default to `model_name`
+ camel-cased (with spaces and minuses both considered as word separators).
+ model_upper_cased (`str`, *optional*):
+ The uppercased version of the model name, to use for the constant names. Will default to `model_name`
+ uppercased with spaces and minuses replaced with underscores.
+ config_class (`str`, *optional*):
+ The configuration class associated with this model. Will default to `"{model_camel_cased}Config"`.
+ tokenizer_class (`str`, *optional*):
+ The tokenizer class associated with this model (leave to `None` for models that don't use a tokenizer).
+ image_processor_class (`str`, *optional*):
+ The image processor class associated with this model (leave to `None` for models that don't use an image
+ processor).
+ image_processor_fast_class (`str`, *optional*):
+ The fast image processor class associated with this model (leave to `None` for models that don't use a fast
+ image processor).
+ feature_extractor_class (`str`, *optional*):
+ The feature extractor class associated with this model (leave to `None` for models that don't use a feature
+ extractor).
+ processor_class (`str`, *optional*):
+ The processor class associated with this model (leave to `None` for models that don't use a processor).
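+
+ For example, a hypothetical `ModelPatterns("New Bert", checkpoint="new-bert-base")` (the checkpoint name is chosen
+ purely for illustration) derives `model_type="new-bert"`, `model_lower_cased="new_bert"`,
+ `model_camel_cased="NewBert"`, `model_upper_cased="NEW_BERT"` and `config_class="NewBertConfig"`.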
+ """ + + model_name: str + checkpoint: str + model_type: Optional[str] = None + model_lower_cased: Optional[str] = None + model_camel_cased: Optional[str] = None + model_upper_cased: Optional[str] = None + config_class: Optional[str] = None + tokenizer_class: Optional[str] = None + image_processor_class: Optional[str] = None + image_processor_fast_class: Optional[str] = None + feature_extractor_class: Optional[str] = None + processor_class: Optional[str] = None + + def __post_init__(self): + if self.model_type is None: + self.model_type = self.model_name.lower().replace(" ", "-") + if self.model_lower_cased is None: + self.model_lower_cased = self.model_name.lower().replace(" ", "_").replace("-", "_") + if self.model_camel_cased is None: + # Split the model name on - and space + words = self.model_name.split(" ") + words = list(chain(*[w.split("-") for w in words])) + # Make sure each word is capitalized + words = [w[0].upper() + w[1:] for w in words] + self.model_camel_cased = "".join(words) + if self.model_upper_cased is None: + self.model_upper_cased = self.model_name.upper().replace(" ", "_").replace("-", "_") + if self.config_class is None: + self.config_class = f"{self.model_camel_cased}Config" + + +ATTRIBUTE_TO_PLACEHOLDER = { + "config_class": "[CONFIG_CLASS]", + "tokenizer_class": "[TOKENIZER_CLASS]", + "image_processor_class": "[IMAGE_PROCESSOR_CLASS]", + "image_processor_fast_class": "[IMAGE_PROCESSOR_FAST_CLASS]", + "feature_extractor_class": "[FEATURE_EXTRACTOR_CLASS]", + "processor_class": "[PROCESSOR_CLASS]", + "checkpoint": "[CHECKPOINT]", + "model_type": "[MODEL_TYPE]", + "model_upper_cased": "[MODEL_UPPER_CASED]", + "model_camel_cased": "[MODEL_CAMELCASED]", + "model_lower_cased": "[MODEL_LOWER_CASED]", + "model_name": "[MODEL_NAME]", +} + + +def is_empty_line(line: str) -> bool: + """ + Determines whether a line is empty or not. + """ + return len(line) == 0 or line.isspace() + + +def find_indent(line: str) -> int: + """ + Returns the number of spaces that start a line indent. + """ + search = re.search(r"^(\s*)(?:\S|$)", line) + if search is None: + return 0 + return len(search.groups()[0]) + + +def parse_module_content(content: str) -> List[str]: + """ + Parse the content of a module in the list of objects it defines. + + Args: + content (`str`): The content to parse + + Returns: + `List[str]`: The list of objects defined in the module. + """ + objects = [] + current_object = [] + lines = content.split("\n") + # Doc-styler takes everything between two triple quotes in docstrings, so we need a fake """ here to go with this. + end_markers = [")", "]", "}", '"""'] + + for line in lines: + # End of an object + is_valid_object = len(current_object) > 0 + if is_valid_object and len(current_object) == 1: + is_valid_object = not current_object[0].startswith("# Copied from") + if not is_empty_line(line) and find_indent(line) == 0 and is_valid_object: + # Closing parts should be included in current object + if line in end_markers: + current_object.append(line) + objects.append("\n".join(current_object)) + current_object = [] + else: + objects.append("\n".join(current_object)) + current_object = [line] + else: + current_object.append(line) + + # Add last object + if len(current_object) > 0: + objects.append("\n".join(current_object)) + + return objects + + +def extract_block(content: str, indent_level: int = 0) -> str: + """Return the first block in `content` with the indent level `indent_level`. 
+
+ The first line in `content` should be indented at `indent_level` level, otherwise an error will be raised.
+
+ This method will immediately stop the search when a (non-empty) line with indent level less than `indent_level` is
+ encountered.
+
+ Args:
+ content (`str`): The content to parse
+ indent_level (`int`, *optional*, defaults to 0): The indent level of the blocks to search for
+
+ Returns:
+ `str`: The first block in `content` with the indent level `indent_level`.
+ """
+ current_object = []
+ lines = content.split("\n")
+ # Doc-styler takes everything between two triple quotes in docstrings, so we need a fake """ here to go with this.
+ end_markers = [")", "]", "}", '"""']
+
+ for idx, line in enumerate(lines):
+ if idx == 0 and indent_level > 0 and not is_empty_line(line) and find_indent(line) != indent_level:
+ raise ValueError(
+ f"When `indent_level > 0`, the first line in `content` should have indent level {indent_level}. Got "
+ f"{find_indent(line)} instead."
+ )
+
+ if find_indent(line) < indent_level and not is_empty_line(line):
+ break
+
+ # End of an object
+ is_valid_object = len(current_object) > 0
+ if (
+ not is_empty_line(line)
+ and not line.endswith(":")
+ and find_indent(line) == indent_level
+ and is_valid_object
+ ):
+ # Closing parts should be included in current object
+ if line.lstrip() in end_markers:
+ current_object.append(line)
+ return "\n".join(current_object)
+ else:
+ current_object.append(line)
+
+ # Add last object
+ if len(current_object) > 0:
+ return "\n".join(current_object)
+
+
+def add_content_to_text(
+ text: str,
+ content: str,
+ add_after: Optional[Union[str, Pattern]] = None,
+ add_before: Optional[Union[str, Pattern]] = None,
+ exact_match: bool = False,
+) -> str:
+ """
+ A utility to add some content inside a given text.
+
+ Args:
+ text (`str`): The text in which we want to insert some content.
+ content (`str`): The content to add.
+ add_after (`str` or `Pattern`):
+ The pattern to test on a line of `text`, the new content is added after the first instance matching it.
+ add_before (`str` or `Pattern`):
+ The pattern to test on a line of `text`, the new content is added before the first instance matching it.
+ exact_match (`bool`, *optional*, defaults to `False`):
+ When `exact_match=True`, a line is considered a match with `add_after` or `add_before` only if it is
+ exactly equal to them; otherwise, a line matches as soon as `add_after`/`add_before` is present in it.
+
+
+
+ The arguments `add_after` and `add_before` are mutually exclusive, and exactly one of them needs to be provided.
+
+
+
+ Returns:
+ `str`: The text with the new content added if a match was found.
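+
+ For example, `add_content_to_text("a\nc", "b", add_before="c", exact_match=True)` returns `"a\nb\nc"`.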
+ """ + if add_after is None and add_before is None: + raise ValueError("You need to pass either `add_after` or `add_before`") + if add_after is not None and add_before is not None: + raise ValueError("You can't pass both `add_after` or `add_before`") + pattern = add_after if add_before is None else add_before + + def this_is_the_line(line): + if isinstance(pattern, Pattern): + return pattern.search(line) is not None + elif exact_match: + return pattern == line + else: + return pattern in line + + new_lines = [] + for line in text.split("\n"): + if this_is_the_line(line): + if add_before is not None: + new_lines.append(content) + new_lines.append(line) + if add_after is not None: + new_lines.append(content) + else: + new_lines.append(line) + + return "\n".join(new_lines) + + +def add_content_to_file( + file_name: Union[str, os.PathLike], + content: str, + add_after: Optional[Union[str, Pattern]] = None, + add_before: Optional[Union[str, Pattern]] = None, + exact_match: bool = False, +): + """ + A utility to add some content inside a given file. + + Args: + file_name (`str` or `os.PathLike`): The name of the file in which we want to insert some content. + content (`str`): The content to add. + add_after (`str` or `Pattern`): + The pattern to test on a line of `text`, the new content is added after the first instance matching it. + add_before (`str` or `Pattern`): + The pattern to test on a line of `text`, the new content is added before the first instance matching it. + exact_match (`bool`, *optional*, defaults to `False`): + A line is considered a match with `add_after` or `add_before` if it matches exactly when `exact_match=True`, + otherwise, if `add_after`/`add_before` is present in the line. + + + + The arguments `add_after` and `add_before` are mutually exclusive, and one exactly needs to be provided. + + + """ + with open(file_name, "r", encoding="utf-8") as f: + old_content = f.read() + + new_content = add_content_to_text( + old_content, content, add_after=add_after, add_before=add_before, exact_match=exact_match + ) + + with open(file_name, "w", encoding="utf-8") as f: + f.write(new_content) + + +def replace_model_patterns( + text: str, old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns +) -> Tuple[str, str]: + """ + Replace all patterns present in a given text. + + Args: + text (`str`): The text to treat. + old_model_patterns (`ModelPatterns`): The patterns for the old model. + new_model_patterns (`ModelPatterns`): The patterns for the new model. + + Returns: + `Tuple(str, str)`: A tuple of with the treated text and the replacement actually done in it. + """ + # The order is crucially important as we will check and replace in that order. For instance the config probably + # contains the camel-cased named, but will be treated before. 
+ attributes_to_check = ["config_class"]
+ # Add relevant preprocessing classes
+ for attr in [
+ "tokenizer_class",
+ "image_processor_class",
+ "image_processor_fast_class",
+ "feature_extractor_class",
+ "processor_class",
+ ]:
+ if getattr(old_model_patterns, attr) is not None and getattr(new_model_patterns, attr) is not None:
+ attributes_to_check.append(attr)
+
+ # Special cases for checkpoint and model_type
+ if old_model_patterns.checkpoint not in [old_model_patterns.model_type, old_model_patterns.model_lower_cased]:
+ attributes_to_check.append("checkpoint")
+ if old_model_patterns.model_type != old_model_patterns.model_lower_cased:
+ attributes_to_check.append("model_type")
+ else:
+ text = re.sub(
+ rf'(\s*)model_type = "{old_model_patterns.model_type}"',
+ r'\1model_type = "[MODEL_TYPE]"',
+ text,
+ )
+
+ # Special case when the model camel cased and upper cased names are the same for the old model (like for GPT2) but
+ # not the new one. We can't just do a replace in all the text and will need a special regex
+ if old_model_patterns.model_upper_cased == old_model_patterns.model_camel_cased:
+ old_model_value = old_model_patterns.model_upper_cased
+ if re.search(rf"{old_model_value}_[A-Z_]*[^A-Z_]", text) is not None:
+ text = re.sub(rf"{old_model_value}([A-Z_]*)([^a-zA-Z_])", r"[MODEL_UPPER_CASED]\1\2", text)
+ else:
+ attributes_to_check.append("model_upper_cased")
+
+ attributes_to_check.extend(["model_camel_cased", "model_lower_cased", "model_name"])
+
+ # Now let's replace every other attribute by its placeholder
+ for attr in attributes_to_check:
+ text = text.replace(getattr(old_model_patterns, attr), ATTRIBUTE_TO_PLACEHOLDER[attr])
+
+ # Finally we can replace the placeholders by the new values.
+ replacements = []
+ for attr, placeholder in ATTRIBUTE_TO_PLACEHOLDER.items():
+ if placeholder in text:
+ replacements.append((getattr(old_model_patterns, attr), getattr(new_model_patterns, attr)))
+ text = text.replace(placeholder, getattr(new_model_patterns, attr))
+
+ # If we have two inconsistent replacements, we don't return anything (ex: GPT2->GPT_NEW and GPT2->GPTNew)
+ old_replacement_values = [old for old, new in replacements]
+ if len(set(old_replacement_values)) != len(old_replacement_values):
+ return text, ""
+
+ replacements = simplify_replacements(replacements)
+ replacements = [f"{old}->{new}" for old, new in replacements]
+ return text, ",".join(replacements)
+
+
+def simplify_replacements(replacements):
+ """
+ Simplify a list of replacement patterns to make sure there are no needless ones.
+
+ For instance in the sequence "Bert->BertNew, BertConfig->BertNewConfig, bert->bert_new", the replacement
+ "BertConfig->BertNewConfig" is implied by "Bert->BertNew" so not needed.
+
+ Args:
+ replacements (`List[Tuple[str, str]]`): List of patterns (old, new)
+
+ Returns:
+ `List[Tuple[str, str]]`: The list of patterns simplified.
+ """
+ if len(replacements) <= 1:
+ # Nothing to simplify
+ return replacements
+
+ # Next let's sort replacements by length as a replacement can only "imply" another replacement if it's shorter.
+ replacements.sort(key=lambda x: len(x[0]))
+
+ idx = 0
+ while idx < len(replacements):
+ old, new = replacements[idx]
+ # Loop through all replacements after
+ j = idx + 1
+ while j < len(replacements):
+ old_2, new_2 = replacements[j]
+ # If the replacement is implied by the current one, we can drop it.
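+ # e.g. once ("Bert", "BertNew") is applied, "BertConfig".replace("Bert", "BertNew") == "BertNewConfig",
+ # so a ("BertConfig", "BertNewConfig") pair adds nothing and can be dropped.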
+ if old_2.replace(old, new) == new_2:
+ replacements.pop(j)
+ else:
+ j += 1
+ idx += 1
+
+ return replacements
+
+
+def get_module_from_file(module_file: Union[str, os.PathLike]) -> str:
+ """
+ Returns the module name corresponding to a module file.
+ """
+ full_module_path = Path(module_file).absolute()
+ module_parts = full_module_path.with_suffix("").parts
+
+ # Find the first part named transformers, starting from the end.
+ idx = len(module_parts) - 1
+ while idx >= 0 and module_parts[idx] != "transformers":
+ idx -= 1
+ if idx < 0:
+ raise ValueError(f"{module_file} is not a transformers module.")
+
+ return ".".join(module_parts[idx:])
+
+
+SPECIAL_PATTERNS = {
+ "_CHECKPOINT_FOR_DOC =": "checkpoint",
+ "_CONFIG_FOR_DOC =": "config_class",
+ "_TOKENIZER_FOR_DOC =": "tokenizer_class",
+ "_IMAGE_PROCESSOR_FOR_DOC =": "image_processor_class",
+ "_FEAT_EXTRACTOR_FOR_DOC =": "feature_extractor_class",
+ "_PROCESSOR_FOR_DOC =": "processor_class",
+}
+
+
+_re_class_func = re.compile(r"^(?:class|def)\s+([^\s:\(]+)\s*(?:\(|\:)", flags=re.MULTILINE)
+
+
+def remove_attributes(obj, target_attr):
+ """Remove `target_attr` from `obj`."""
+ lines = obj.split(os.linesep)
+
+ target_idx = None
+ for idx, line in enumerate(lines):
+ # search for assignment
+ if line.lstrip().startswith(f"{target_attr} = "):
+ target_idx = idx
+ break
+ # search for function/method definition
+ elif line.lstrip().startswith(f"def {target_attr}("):
+ target_idx = idx
+ break
+
+ # target not found
+ if target_idx is None:
+ return obj
+
+ line = lines[target_idx]
+ indent_level = find_indent(line)
+ # forward pass to find the ending of the block (including empty lines)
+ parsed = extract_block("\n".join(lines[target_idx:]), indent_level)
+ num_lines = len(parsed.split("\n"))
+ for idx in range(num_lines):
+ lines[target_idx + idx] = None
+
+ # backward pass to find comments or decorators
+ for idx in range(target_idx - 1, -1, -1):
+ line = lines[idx]
+ if (line.lstrip().startswith("#") or line.lstrip().startswith("@")) and find_indent(line) == indent_level:
+ lines[idx] = None
+ else:
+ break
+
+ new_obj = os.linesep.join([x for x in lines if x is not None])
+
+ return new_obj
+
+
+def duplicate_module(
+ module_file: Union[str, os.PathLike],
+ old_model_patterns: ModelPatterns,
+ new_model_patterns: ModelPatterns,
+ dest_file: Optional[str] = None,
+ add_copied_from: bool = True,
+ attrs_to_remove: List[str] = None,
+):
+ """
+ Create a new module from an existing one, adapting all function and class names from the old patterns to the new
+ ones.
+
+ Args:
+ module_file (`str` or `os.PathLike`): Path to the module to duplicate.
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
+ dest_file (`str` or `os.PathLike`, *optional*): Path to the new module.
+ add_copied_from (`bool`, *optional*, defaults to `True`):
+ Whether or not to add `# Copied from` statements in the duplicated module.
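+ attrs_to_remove (`List[str]`, *optional*):
+ The names of the attributes (assignments or methods) to remove from the duplicated module.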
+ """ + if dest_file is None: + dest_file = str(module_file).replace( + old_model_patterns.model_lower_cased, new_model_patterns.model_lower_cased + ) + + with open(module_file, "r", encoding="utf-8") as f: + content = f.read() + + content = re.sub(r"# Copyright (\d+)\s", f"# Copyright {CURRENT_YEAR} ", content) + objects = parse_module_content(content) + + # Loop and treat all objects + new_objects = [] + for obj in objects: + special_pattern = False + for pattern, attr in SPECIAL_PATTERNS.items(): + if pattern in obj: + obj = obj.replace(getattr(old_model_patterns, attr), getattr(new_model_patterns, attr)) + new_objects.append(obj) + special_pattern = True + break + + if special_pattern: + continue + + # Regular classes functions + old_obj = obj + obj, replacement = replace_model_patterns(obj, old_model_patterns, new_model_patterns) + has_copied_from = re.search(r"^#\s+Copied from", obj, flags=re.MULTILINE) is not None + if add_copied_from and not has_copied_from and _re_class_func.search(obj) is not None and len(replacement) > 0: + # Copied from statement must be added just before the class/function definition, which may not be the + # first line because of decorators. + module_name = get_module_from_file(module_file) + old_object_name = _re_class_func.search(old_obj).groups()[0] + obj = add_content_to_text( + obj, f"# Copied from {module_name}.{old_object_name} with {replacement}", add_before=_re_class_func + ) + # In all cases, we remove Copied from statement with indent on methods. + obj = re.sub("\n[ ]+# Copied from [^\n]*\n", "\n", obj) + + new_objects.append(obj) + + content = "\n".join(new_objects) + # Remove some attributes that we don't want to copy to the new file(s) + if attrs_to_remove is not None: + for attr in attrs_to_remove: + content = remove_attributes(content, target_attr=attr) + + with open(dest_file, "w", encoding="utf-8") as f: + f.write(content) + + +def filter_framework_files( + files: List[Union[str, os.PathLike]], frameworks: Optional[List[str]] = None +) -> List[Union[str, os.PathLike]]: + """ + Filter a list of files to only keep the ones corresponding to a list of frameworks. + + Args: + files (`List[Union[str, os.PathLike]]`): The list of files to filter. + frameworks (`List[str]`, *optional*): The list of allowed frameworks. + + Returns: + `List[Union[str, os.PathLike]]`: The list of filtered files. + """ + if frameworks is None: + frameworks = get_default_frameworks() + + framework_to_file = {} + others = [] + for f in files: + parts = Path(f).name.split("_") + if "modeling" not in parts: + others.append(f) + continue + if "tf" in parts: + framework_to_file["tf"] = f + elif "flax" in parts: + framework_to_file["flax"] = f + else: + framework_to_file["pt"] = f + + return [framework_to_file[f] for f in frameworks if f in framework_to_file] + others + + +def get_model_files(model_type: str, frameworks: Optional[List[str]] = None) -> Dict[str, Union[Path, List[Path]]]: + """ + Retrieves all the files associated to a model. + + Args: + model_type (`str`): A valid model type (like "bert" or "gpt2") + frameworks (`List[str]`, *optional*): + If passed, will only keep the model files corresponding to the passed frameworks. + + Returns: + `Dict[str, Union[Path, List[Path]]]`: A dictionary with the following keys: + - **doc_file** -- The documentation file for the model. + - **model_files** -- All the files in the model module. + - **test_files** -- The test files for the model. 
+ """ + module_name = model_type_to_module_name(model_type) + + model_module = TRANSFORMERS_PATH / "models" / module_name + model_files = list(model_module.glob("*.py")) + model_files = filter_framework_files(model_files, frameworks=frameworks) + + doc_file = REPO_PATH / "docs" / "source" / "en" / "model_doc" / f"{model_type}.md" + + # Basic pattern for test files + test_files = [ + f"test_modeling_{module_name}.py", + f"test_modeling_tf_{module_name}.py", + f"test_modeling_flax_{module_name}.py", + f"test_tokenization_{module_name}.py", + f"test_image_processing_{module_name}.py", + f"test_feature_extraction_{module_name}.py", + f"test_processor_{module_name}.py", + ] + test_files = filter_framework_files(test_files, frameworks=frameworks) + # Add the test directory + test_files = [REPO_PATH / "tests" / "models" / module_name / f for f in test_files] + # Filter by existing files + test_files = [f for f in test_files if f.exists()] + + return {"doc_file": doc_file, "model_files": model_files, "module_name": module_name, "test_files": test_files} + + +_re_checkpoint_for_doc = re.compile(r"^_CHECKPOINT_FOR_DOC\s+=\s+(\S*)\s*$", flags=re.MULTILINE) + + +def find_base_model_checkpoint( + model_type: str, model_files: Optional[Dict[str, Union[Path, List[Path]]]] = None +) -> str: + """ + Finds the model checkpoint used in the docstrings for a given model. + + Args: + model_type (`str`): A valid model type (like "bert" or "gpt2") + model_files (`Dict[str, Union[Path, List[Path]]`, *optional*): + The files associated to `model_type`. Can be passed to speed up the function, otherwise will be computed. + + Returns: + `str`: The checkpoint used. + """ + if model_files is None: + model_files = get_model_files(model_type) + module_files = model_files["model_files"] + for fname in module_files: + if "modeling" not in str(fname): + continue + + with open(fname, "r", encoding="utf-8") as f: + content = f.read() + if _re_checkpoint_for_doc.search(content) is not None: + checkpoint = _re_checkpoint_for_doc.search(content).groups()[0] + # Remove quotes + checkpoint = checkpoint.replace('"', "") + checkpoint = checkpoint.replace("'", "") + return checkpoint + + # TODO: Find some kind of fallback if there is no _CHECKPOINT_FOR_DOC in any of the modeling file. + return "" + + +def get_default_frameworks(): + """ + Returns the list of frameworks (PyTorch, TensorFlow, Flax) that are installed in the environment. + """ + frameworks = [] + if is_torch_available(): + frameworks.append("pt") + if is_tf_available(): + frameworks.append("tf") + if is_flax_available(): + frameworks.append("flax") + return frameworks + + +_re_model_mapping = re.compile("MODEL_([A-Z_]*)MAPPING_NAMES") + + +def retrieve_model_classes(model_type: str, frameworks: Optional[List[str]] = None) -> Dict[str, List[str]]: + """ + Retrieve the model classes associated to a given model. + + Args: + model_type (`str`): A valid model type (like "bert" or "gpt2") + frameworks (`List[str]`, *optional*): + The frameworks to look for. Will default to `["pt", "tf", "flax"]`, passing a smaller list will restrict + the classes returned. + + Returns: + `Dict[str, List[str]]`: A dictionary with one key per framework and the list of model classes associated to + that framework as values. 
+ """ + if frameworks is None: + frameworks = get_default_frameworks() + + modules = { + "pt": auto_module.modeling_auto if is_torch_available() else None, + "tf": auto_module.modeling_tf_auto if is_tf_available() else None, + "flax": auto_module.modeling_flax_auto if is_flax_available() else None, + } + + model_classes = {} + for framework in frameworks: + new_model_classes = [] + if modules[framework] is None: + raise ValueError(f"You selected {framework} in the frameworks, but it is not installed.") + model_mappings = [attr for attr in dir(modules[framework]) if _re_model_mapping.search(attr) is not None] + for model_mapping_name in model_mappings: + model_mapping = getattr(modules[framework], model_mapping_name) + if model_type in model_mapping: + new_model_classes.append(model_mapping[model_type]) + + if len(new_model_classes) > 0: + # Remove duplicates + model_classes[framework] = list(set(new_model_classes)) + + return model_classes + + +def retrieve_info_for_model(model_type, frameworks: Optional[List[str]] = None): + """ + Retrieves all the information from a given model_type. + + Args: + model_type (`str`): A valid model type (like "bert" or "gpt2") + frameworks (`List[str]`, *optional*): + If passed, will only keep the info corresponding to the passed frameworks. + + Returns: + `Dict`: A dictionary with the following keys: + - **frameworks** (`List[str]`): The list of frameworks that back this model type. + - **model_classes** (`Dict[str, List[str]]`): The model classes implemented for that model type. + - **model_files** (`Dict[str, Union[Path, List[Path]]]`): The files associated with that model type. + - **model_patterns** (`ModelPatterns`): The various patterns for the model. + """ + if model_type not in auto_module.MODEL_NAMES_MAPPING: + raise ValueError(f"{model_type} is not a valid model type.") + + model_name = auto_module.MODEL_NAMES_MAPPING[model_type] + config_class = auto_module.configuration_auto.CONFIG_MAPPING_NAMES[model_type] + if model_type in auto_module.tokenization_auto.TOKENIZER_MAPPING_NAMES: + tokenizer_classes = auto_module.tokenization_auto.TOKENIZER_MAPPING_NAMES[model_type] + tokenizer_class = tokenizer_classes[0] if tokenizer_classes[0] is not None else tokenizer_classes[1] + else: + tokenizer_class = None + image_processor_classes = auto_module.image_processing_auto.IMAGE_PROCESSOR_MAPPING_NAMES.get(model_type, None) + if isinstance(image_processor_classes, tuple): + image_processor_class, image_processor_fast_class = image_processor_classes + else: + image_processor_class = image_processor_classes + image_processor_fast_class = None + feature_extractor_class = auto_module.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES.get(model_type, None) + processor_class = auto_module.processing_auto.PROCESSOR_MAPPING_NAMES.get(model_type, None) + + model_files = get_model_files(model_type, frameworks=frameworks) + model_camel_cased = config_class.replace("Config", "") + + available_frameworks = [] + for fname in model_files["model_files"]: + if "modeling_tf" in str(fname): + available_frameworks.append("tf") + elif "modeling_flax" in str(fname): + available_frameworks.append("flax") + elif "modeling" in str(fname): + available_frameworks.append("pt") + + if frameworks is None: + frameworks = get_default_frameworks() + + frameworks = [f for f in frameworks if f in available_frameworks] + + model_classes = retrieve_model_classes(model_type, frameworks=frameworks) + + model_upper_cased = model_camel_cased.upper() + model_patterns = ModelPatterns( + 
model_name, + checkpoint=find_base_model_checkpoint(model_type, model_files=model_files), + model_type=model_type, + model_camel_cased=model_camel_cased, + model_lower_cased=model_files["module_name"], + model_upper_cased=model_upper_cased, + config_class=config_class, + tokenizer_class=tokenizer_class, + image_processor_class=image_processor_class, + image_processor_fast_class=image_processor_fast_class, + feature_extractor_class=feature_extractor_class, + processor_class=processor_class, + ) + + return { + "frameworks": frameworks, + "model_classes": model_classes, + "model_files": model_files, + "model_patterns": model_patterns, + } + + +def clean_frameworks_in_init( + init_file: Union[str, os.PathLike], frameworks: Optional[List[str]] = None, keep_processing: bool = True +): + """ + Removes all the import lines that don't belong to a given list of frameworks or concern tokenizers/feature + extractors/image processors/processors in an init. + + Args: + init_file (`str` or `os.PathLike`): The path to the init to treat. + frameworks (`List[str]`, *optional*): + If passed, this will remove all imports that are subject to a framework not in frameworks + keep_processing (`bool`, *optional*, defaults to `True`): + Whether or not to keep the preprocessing (tokenizer, feature extractor, image processor, processor) imports + in the init. + """ + if frameworks is None: + frameworks = get_default_frameworks() + + names = {"pt": "torch"} + to_remove = [names.get(f, f) for f in ["pt", "tf", "flax"] if f not in frameworks] + if not keep_processing: + to_remove.extend(["sentencepiece", "tokenizers", "vision"]) + + if len(to_remove) == 0: + # Nothing to do + return + + remove_pattern = "|".join(to_remove) + re_conditional_imports = re.compile(rf"^\s*if not is_({remove_pattern})_available\(\):\s*$") + re_try = re.compile(r"\s*try:") + re_else = re.compile(r"\s*else:") + re_is_xxx_available = re.compile(rf"is_({remove_pattern})_available") + + with open(init_file, "r", encoding="utf-8") as f: + content = f.read() + + lines = content.split("\n") + new_lines = [] + idx = 0 + while idx < len(lines): + # Conditional imports in try-except-else blocks + if (re_conditional_imports.search(lines[idx]) is not None) and (re_try.search(lines[idx - 1]) is not None): + # Remove the preceding `try:` + new_lines.pop() + idx += 1 + # Iterate until `else:` + while is_empty_line(lines[idx]) or re_else.search(lines[idx]) is None: + idx += 1 + idx += 1 + indent = find_indent(lines[idx]) + while find_indent(lines[idx]) >= indent or is_empty_line(lines[idx]): + idx += 1 + # Remove the import from utils + elif re_is_xxx_available.search(lines[idx]) is not None: + line = lines[idx] + for framework in to_remove: + line = line.replace(f", is_{framework}_available", "") + line = line.replace(f"is_{framework}_available, ", "") + line = line.replace(f"is_{framework}_available,", "") + line = line.replace(f"is_{framework}_available", "") + + if len(line.strip()) > 0: + new_lines.append(line) + idx += 1 + # Otherwise we keep the line, except if it's a tokenizer import and we don't want to keep it. 
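+ # The regexes below match both quoted entries such as "tokenization_xxx" in import structures and direct
+ # "from .tokenization_xxx import ..." lines ("xxx" stands for any module suffix).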
+ elif keep_processing or (
+ re.search(r'^\s*"(tokenization|processing|feature_extraction|image_processing)', lines[idx]) is None
+ and re.search(r"^\s*from .(tokenization|processing|feature_extraction|image_processing)", lines[idx])
+ is None
+ ):
+ new_lines.append(lines[idx])
+ idx += 1
+ else:
+ idx += 1
+
+ with open(init_file, "w", encoding="utf-8") as f:
+ f.write("\n".join(new_lines))
+
+
+def add_model_to_main_init(
+ old_model_patterns: ModelPatterns,
+ new_model_patterns: ModelPatterns,
+ frameworks: Optional[List[str]] = None,
+ with_processing: bool = True,
+):
+ """
+ Add a model to the main init of Transformers.
+
+ Args:
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
+ frameworks (`List[str]`, *optional*):
+ If specified, only the models implemented in those frameworks will be added.
+ with_processing (`bool`, *optional*, defaults to `True`):
+ Whether the tokenizer/feature extractor/processor of the model should also be added to the init or not.
+ """
+ with open(TRANSFORMERS_PATH / "__init__.py", "r", encoding="utf-8") as f:
+ content = f.read()
+
+ lines = content.split("\n")
+ idx = 0
+ new_lines = []
+ framework = None
+ while idx < len(lines):
+ new_framework = False
+ if not is_empty_line(lines[idx]) and find_indent(lines[idx]) == 0:
+ framework = None
+ elif lines[idx].lstrip().startswith("if not is_torch_available"):
+ framework = "pt"
+ new_framework = True
+ elif lines[idx].lstrip().startswith("if not is_tf_available"):
+ framework = "tf"
+ new_framework = True
+ elif lines[idx].lstrip().startswith("if not is_flax_available"):
+ framework = "flax"
+ new_framework = True
+
+ if new_framework:
+ # For a new framework, we need to skip until the else: block to get where the imports are.
+ while lines[idx].strip() != "else:":
+ new_lines.append(lines[idx])
+ idx += 1
+
+ # Skip modifications if we are in a framework that is not wanted.
+ if framework is not None and frameworks is not None and framework not in frameworks: + new_lines.append(lines[idx]) + idx += 1 + elif re.search(rf'models.{old_model_patterns.model_lower_cased}( |")', lines[idx]) is not None: + block = [lines[idx]] + indent = find_indent(lines[idx]) + idx += 1 + while find_indent(lines[idx]) > indent: + block.append(lines[idx]) + idx += 1 + if lines[idx].strip() in [")", "]", "],"]: + block.append(lines[idx]) + idx += 1 + block = "\n".join(block) + new_lines.append(block) + + add_block = True + if not with_processing: + processing_classes = [ + old_model_patterns.tokenizer_class, + old_model_patterns.image_processor_class, + old_model_patterns.image_processor_fast_class, + old_model_patterns.feature_extractor_class, + old_model_patterns.processor_class, + ] + # Only keep the ones that are not None + processing_classes = [c for c in processing_classes if c is not None] + for processing_class in processing_classes: + block = block.replace(f' "{processing_class}",', "") + block = block.replace(f', "{processing_class}"', "") + block = block.replace(f" {processing_class},", "") + block = block.replace(f", {processing_class}", "") + + if processing_class in block: + add_block = False + if add_block: + new_lines.append(replace_model_patterns(block, old_model_patterns, new_model_patterns)[0]) + else: + new_lines.append(lines[idx]) + idx += 1 + + with open(TRANSFORMERS_PATH / "__init__.py", "w", encoding="utf-8") as f: + f.write("\n".join(new_lines)) + + +def insert_tokenizer_in_auto_module(old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns): + """ + Add a tokenizer to the relevant mappings in the auto module. + + Args: + old_model_patterns (`ModelPatterns`): The patterns for the old model. + new_model_patterns (`ModelPatterns`): The patterns for the new model. + """ + if old_model_patterns.tokenizer_class is None or new_model_patterns.tokenizer_class is None: + return + + with open(TRANSFORMERS_PATH / "models" / "auto" / "tokenization_auto.py", "r", encoding="utf-8") as f: + content = f.read() + + lines = content.split("\n") + idx = 0 + # First we get to the TOKENIZER_MAPPING_NAMES block. 
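+ # The block being scanned looks roughly like (illustrative shape of tokenization_auto.py):
+ # TOKENIZER_MAPPING_NAMES = OrderedDict(
+ #     [
+ #         ("albert", ("AlbertTokenizer", "AlbertTokenizerFast")),
+ #         ...
+ #     ]
+ # )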
+ while not lines[idx].startswith(" TOKENIZER_MAPPING_NAMES = OrderedDict("):
+ idx += 1
+ idx += 1
+
+ # That block ends when we reach this line:
+ while not lines[idx].startswith("TOKENIZER_MAPPING = _LazyAutoMapping"):
+ # Either the whole tokenizer block is defined on one line, in which case it ends with ","
+ if lines[idx].endswith(","):
+ block = lines[idx]
+ # Otherwise it takes several lines until we get to a "),"
+ else:
+ block = []
+ while not lines[idx].startswith(" ),"):
+ block.append(lines[idx])
+ idx += 1
+ block = "\n".join(block)
+ idx += 1
+
+ # If we find the model type and tokenizer class in that block, we have the old model tokenizer block
+ if f'"{old_model_patterns.model_type}"' in block and old_model_patterns.tokenizer_class in block:
+ break
+
+ new_block = block.replace(old_model_patterns.model_type, new_model_patterns.model_type)
+ new_block = new_block.replace(old_model_patterns.tokenizer_class, new_model_patterns.tokenizer_class)
+
+ new_lines = lines[:idx] + [new_block] + lines[idx:]
+ with open(TRANSFORMERS_PATH / "models" / "auto" / "tokenization_auto.py", "w", encoding="utf-8") as f:
+ f.write("\n".join(new_lines))
+
+
+AUTO_CLASSES_PATTERNS = {
+ "configuration_auto.py": [
+ ' ("{model_type}", "{model_name}"),',
+ ' ("{model_type}", "{config_class}"),',
+ ' ("{model_type}", "{pretrained_archive_map}"),',
+ ],
+ "feature_extraction_auto.py": [' ("{model_type}", "{feature_extractor_class}"),'],
+ "image_processing_auto.py": [' ("{model_type}", "{image_processor_classes}"),'],
+ "modeling_auto.py": [' ("{model_type}", "{any_pt_class}"),'],
+ "modeling_tf_auto.py": [' ("{model_type}", "{any_tf_class}"),'],
+ "modeling_flax_auto.py": [' ("{model_type}", "{any_flax_class}"),'],
+ "processing_auto.py": [' ("{model_type}", "{processor_class}"),'],
+}
+
+
+def add_model_to_auto_classes(
+ old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns, model_classes: Dict[str, List[str]]
+):
+ """
+ Add a model to the relevant mappings in the auto module.
+
+ Args:
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
+ model_classes (`Dict[str, List[str]]`): A dictionary mapping each framework to the list of model classes
+ implemented for it.
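+ For example: `{"pt": ["BertForMaskedLM"], "tf": ["TFBertForMaskedLM"]}` (illustrative values).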
+ """ + for filename in AUTO_CLASSES_PATTERNS: + # Extend patterns with all model classes if necessary + new_patterns = [] + for pattern in AUTO_CLASSES_PATTERNS[filename]: + if re.search("any_([a-z]*)_class", pattern) is not None: + framework = re.search("any_([a-z]*)_class", pattern).groups()[0] + if framework in model_classes: + new_patterns.extend( + [ + pattern.replace("{" + f"any_{framework}_class" + "}", cls) + for cls in model_classes[framework] + ] + ) + elif "{config_class}" in pattern: + new_patterns.append(pattern.replace("{config_class}", old_model_patterns.config_class)) + elif "{image_processor_classes}" in pattern: + if ( + old_model_patterns.image_processor_class is not None + and new_model_patterns.image_processor_class is not None + ): + if ( + old_model_patterns.image_processor_fast_class is not None + and new_model_patterns.image_processor_fast_class is not None + ): + new_patterns.append( + pattern.replace( + '"{image_processor_classes}"', + f'("{old_model_patterns.image_processor_class}", "{old_model_patterns.image_processor_fast_class}")', + ) + ) + else: + new_patterns.append( + pattern.replace( + '"{image_processor_classes}"', f'("{old_model_patterns.image_processor_class}",)' + ) + ) + elif "{feature_extractor_class}" in pattern: + if ( + old_model_patterns.feature_extractor_class is not None + and new_model_patterns.feature_extractor_class is not None + ): + new_patterns.append( + pattern.replace("{feature_extractor_class}", old_model_patterns.feature_extractor_class) + ) + elif "{processor_class}" in pattern: + if old_model_patterns.processor_class is not None and new_model_patterns.processor_class is not None: + new_patterns.append(pattern.replace("{processor_class}", old_model_patterns.processor_class)) + else: + new_patterns.append(pattern) + + # Loop through all patterns. + for pattern in new_patterns: + full_name = TRANSFORMERS_PATH / "models" / "auto" / filename + old_model_line = pattern + new_model_line = pattern + for attr in ["model_type", "model_name"]: + old_model_line = old_model_line.replace("{" + attr + "}", getattr(old_model_patterns, attr)) + new_model_line = new_model_line.replace("{" + attr + "}", getattr(new_model_patterns, attr)) + new_model_line = new_model_line.replace( + old_model_patterns.model_camel_cased, new_model_patterns.model_camel_cased + ) + add_content_to_file(full_name, new_model_line, add_after=old_model_line) + + # Tokenizers require special handling + insert_tokenizer_in_auto_module(old_model_patterns, new_model_patterns) + + +DOC_OVERVIEW_TEMPLATE = """## Overview + +The {model_name} model was proposed in []() by . + + +The abstract from the paper is the following: + +** + +Tips: + + + +This model was contributed by [INSERT YOUR HF USERNAME HERE](https://huggingface.co/). +The original code can be found [here](). + +""" + + +def duplicate_doc_file( + doc_file: Union[str, os.PathLike], + old_model_patterns: ModelPatterns, + new_model_patterns: ModelPatterns, + dest_file: Optional[Union[str, os.PathLike]] = None, + frameworks: Optional[List[str]] = None, +): + """ + Duplicate a documentation file and adapts it for a new model. + + Args: + module_file (`str` or `os.PathLike`): Path to the doc file to duplicate. + old_model_patterns (`ModelPatterns`): The patterns for the old model. + new_model_patterns (`ModelPatterns`): The patterns for the new model. + dest_file (`str` or `os.PathLike`, *optional*): Path to the new doc file. 
+ Will default to a file named `{new_model_patterns.model_type}.md` in the same folder as `doc_file`.
+ frameworks (`List[str]`, *optional*):
+ If passed, will only keep the model classes corresponding to this list of frameworks in the new doc file.
+ """
+ with open(doc_file, "r", encoding="utf-8") as f:
+ content = f.read()
+
+ content = re.sub(r"