Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_min_native.h +35 -0
- infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/arctan_compositeimplicitautograd_dispatch.h +26 -0
- infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/cumulative_trapezoid.h +35 -0
- infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/exp2_meta.h +27 -0
- infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/gcd.h +44 -0
- infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_ops.h +39 -0
- infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/nextafter_cpu_dispatch.h +26 -0
- infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss2d_forward_native.h +24 -0
- infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/nonzero_static.h +39 -0
- infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/pow_cpu_dispatch.h +33 -0
- infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/pow_ops.h +105 -0
- infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_csc_tensor.h +43 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/deta/__init__.py +71 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/deta/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/deta/__pycache__/configuration_deta.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/deta/__pycache__/image_processing_deta.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/deta/__pycache__/modeling_deta.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/deta/configuration_deta.py +267 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/deta/image_processing_deta.py +1224 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/deta/modeling_deta.py +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/efficientformer/__pycache__/image_processing_efficientformer.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/ernie_m/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/ernie_m/__pycache__/configuration_ernie_m.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/ernie_m/__pycache__/modeling_ernie_m.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/ernie_m/modeling_ernie_m.py +1047 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/ernie_m/tokenization_ernie_m.py +405 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/graphormer/__init__.py +55 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/graphormer/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/graphormer/__pycache__/collating_graphormer.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/graphormer/__pycache__/configuration_graphormer.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/graphormer/__pycache__/modeling_graphormer.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/graphormer/algos_graphormer.pyx +107 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/graphormer/collating_graphormer.py +134 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/graphormer/configuration_graphormer.py +215 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/graphormer/modeling_graphormer.py +908 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/jukebox/__init__.py +66 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/jukebox/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/jukebox/__pycache__/configuration_jukebox.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/jukebox/__pycache__/modeling_jukebox.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/jukebox/__pycache__/tokenization_jukebox.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/jukebox/configuration_jukebox.py +610 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/jukebox/modeling_jukebox.py +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/jukebox/tokenization_jukebox.py +404 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/nat/__init__.py +54 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/nat/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/nat/__pycache__/configuration_nat.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/nat/__pycache__/modeling_nat.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/nat/configuration_nat.py +145 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/nat/modeling_nat.py +950 -0
- janus/lib/python3.10/site-packages/transformers/models/deprecated/qdqbert/__init__.py +69 -0
infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_min_native.h
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_scalar_kernel_slow(at::TensorList self, const at::Scalar & scalar);
|
| 20 |
+
TORCH_API void _foreach_clamp_min_Scalar_out(at::TensorList self, const at::Scalar & scalar, at::TensorList out);
|
| 21 |
+
TORCH_API void foreach_tensor_clamp_min_scalar_kernel_slow_(at::TensorList self, const at::Scalar & scalar);
|
| 22 |
+
TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_scalar_kernel_cuda(at::TensorList self, const at::Scalar & scalar);
|
| 23 |
+
TORCH_API void foreach_tensor_clamp_min_scalar_kernel_cuda_(at::TensorList self, const at::Scalar & scalar);
|
| 24 |
+
TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_list_kernel_slow(at::TensorList self, at::TensorList other);
|
| 25 |
+
TORCH_API void _foreach_clamp_min_List_out(at::TensorList self, at::TensorList other, at::TensorList out);
|
| 26 |
+
TORCH_API void foreach_tensor_clamp_min_list_kernel_slow_(at::TensorList self, at::TensorList other);
|
| 27 |
+
TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_list_kernel_cuda(at::TensorList self, at::TensorList other);
|
| 28 |
+
TORCH_API void foreach_tensor_clamp_min_list_kernel_cuda_(at::TensorList self, at::TensorList other);
|
| 29 |
+
TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_scalarlist_kernel_slow(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
|
| 30 |
+
TORCH_API void _foreach_clamp_min_ScalarList_out(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
|
| 31 |
+
TORCH_API void foreach_tensor_clamp_min_scalarlist_kernel_slow_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
|
| 32 |
+
TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_scalarlist_kernel_cuda(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
|
| 33 |
+
TORCH_API void foreach_tensor_clamp_min_scalarlist_kernel_cuda_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
|
| 34 |
+
} // namespace native
|
| 35 |
+
} // namespace at
|
infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/arctan_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor arctan(const at::Tensor & self);
|
| 21 |
+
TORCH_API at::Tensor & arctan_out(at::Tensor & out, const at::Tensor & self);
|
| 22 |
+
TORCH_API at::Tensor & arctan_outf(const at::Tensor & self, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & arctan_(at::Tensor & self);
|
| 24 |
+
|
| 25 |
+
} // namespace compositeimplicitautograd
|
| 26 |
+
} // namespace at
|
infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/cumulative_trapezoid.h
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/cumulative_trapezoid_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
|
| 26 |
+
inline at::Tensor cumulative_trapezoid(const at::Tensor & y, const at::Tensor & x, int64_t dim=-1) {
|
| 27 |
+
return at::_ops::cumulative_trapezoid_x::call(y, x, dim);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor
|
| 31 |
+
inline at::Tensor cumulative_trapezoid(const at::Tensor & y, const at::Scalar & dx=1, int64_t dim=-1) {
|
| 32 |
+
return at::_ops::cumulative_trapezoid_dx::call(y, dx, dim);
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
}
|
infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/exp2_meta.h
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeMetaFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/TensorIterator.h>
|
| 13 |
+
#include <ATen/TensorMeta.h>
|
| 14 |
+
#include <tuple>
|
| 15 |
+
#include <vector>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
struct TORCH_API structured_exp2 : public TensorIteratorBase {
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
void meta(const at::Tensor & self);
|
| 24 |
+
};
|
| 25 |
+
|
| 26 |
+
} // namespace native
|
| 27 |
+
} // namespace at
|
infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/gcd.h
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/gcd_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
|
| 26 |
+
inline at::Tensor & gcd_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
|
| 27 |
+
return at::_ops::gcd_out::call(self, other, out);
|
| 28 |
+
}
|
| 29 |
+
// aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
|
| 30 |
+
inline at::Tensor & gcd_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
|
| 31 |
+
return at::_ops::gcd_out::call(self, other, out);
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
// aten::gcd(Tensor self, Tensor other) -> Tensor
|
| 35 |
+
inline at::Tensor gcd(const at::Tensor & self, const at::Tensor & other) {
|
| 36 |
+
return at::_ops::gcd::call(self, other);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
// aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)
|
| 40 |
+
inline at::Tensor & gcd_(at::Tensor & self, const at::Tensor & other) {
|
| 41 |
+
return at::_ops::gcd_::call(self, other);
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
}
|
infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API maximum {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::maximum")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "maximum(Tensor self, Tensor other) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API maximum_out {
|
| 29 |
+
using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::maximum")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
|
| 35 |
+
static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
|
| 36 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|
infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/nextafter_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor nextafter(const at::Tensor & self, const at::Tensor & other);
|
| 21 |
+
TORCH_API at::Tensor & nextafter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
|
| 22 |
+
TORCH_API at::Tensor & nextafter_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & nextafter_(at::Tensor & self, const at::Tensor & other);
|
| 24 |
+
|
| 25 |
+
} // namespace cpu
|
| 26 |
+
} // namespace at
|
infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss2d_forward_native.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward_cpu(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index);
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_out_cpu(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight);
|
| 21 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward_cuda(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index);
|
| 22 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_out_cuda(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight);
|
| 23 |
+
} // namespace native
|
| 24 |
+
} // namespace at
|
infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/nonzero_static.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/nonzero_static_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::nonzero_static.out(Tensor self, *, int size, int fill_value=-1, Tensor(a!) out) -> Tensor(a!)
|
| 26 |
+
inline at::Tensor & nonzero_static_out(at::Tensor & out, const at::Tensor & self, int64_t size, int64_t fill_value=-1) {
|
| 27 |
+
return at::_ops::nonzero_static_out::call(self, size, fill_value, out);
|
| 28 |
+
}
|
| 29 |
+
// aten::nonzero_static.out(Tensor self, *, int size, int fill_value=-1, Tensor(a!) out) -> Tensor(a!)
|
| 30 |
+
inline at::Tensor & nonzero_static_outf(const at::Tensor & self, int64_t size, int64_t fill_value, at::Tensor & out) {
|
| 31 |
+
return at::_ops::nonzero_static_out::call(self, size, fill_value, out);
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
// aten::nonzero_static(Tensor self, *, int size, int fill_value=-1) -> Tensor
|
| 35 |
+
inline at::Tensor nonzero_static(const at::Tensor & self, int64_t size, int64_t fill_value=-1) {
|
| 36 |
+
return at::_ops::nonzero_static::call(self, size, fill_value);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/pow_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor pow(const at::Tensor & self, const at::Tensor & exponent);
|
| 21 |
+
TORCH_API at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & exponent);
|
| 22 |
+
TORCH_API at::Tensor & pow_outf(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & pow_(at::Tensor & self, const at::Tensor & exponent);
|
| 24 |
+
TORCH_API at::Tensor pow(const at::Scalar & self, const at::Tensor & exponent);
|
| 25 |
+
TORCH_API at::Tensor & pow_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & exponent);
|
| 26 |
+
TORCH_API at::Tensor & pow_outf(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out);
|
| 27 |
+
TORCH_API at::Tensor pow(const at::Tensor & self, const at::Scalar & exponent);
|
| 28 |
+
TORCH_API at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent);
|
| 29 |
+
TORCH_API at::Tensor & pow_outf(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out);
|
| 30 |
+
TORCH_API at::Tensor & pow_(at::Tensor & self, const at::Scalar & exponent);
|
| 31 |
+
|
| 32 |
+
} // namespace cpu
|
| 33 |
+
} // namespace at
|
infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/pow_ops.h
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API pow_Tensor_Tensor_out {
|
| 18 |
+
using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::pow")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Tensor_out")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)")
|
| 24 |
+
static at::Tensor & call(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out);
|
| 25 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API pow_Tensor_Tensor {
|
| 29 |
+
using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::pow")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Tensor")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor")
|
| 35 |
+
static at::Tensor call(const at::Tensor & self, const at::Tensor & exponent);
|
| 36 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
struct TORCH_API pow_Scalar_out {
|
| 40 |
+
using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &);
|
| 41 |
+
using ptr_schema = schema*;
|
| 42 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 43 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::pow")
|
| 44 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
|
| 45 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)")
|
| 46 |
+
static at::Tensor & call(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out);
|
| 47 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out);
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
struct TORCH_API pow_Scalar {
|
| 51 |
+
using schema = at::Tensor (const at::Scalar &, const at::Tensor &);
|
| 52 |
+
using ptr_schema = schema*;
|
| 53 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 54 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::pow")
|
| 55 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
|
| 56 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "pow.Scalar(Scalar self, Tensor exponent) -> Tensor")
|
| 57 |
+
static at::Tensor call(const at::Scalar & self, const at::Tensor & exponent);
|
| 58 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent);
|
| 59 |
+
};
|
| 60 |
+
|
| 61 |
+
struct TORCH_API pow_Tensor_Scalar_out {
|
| 62 |
+
using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
|
| 63 |
+
using ptr_schema = schema*;
|
| 64 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 65 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::pow")
|
| 66 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar_out")
|
| 67 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)")
|
| 68 |
+
static at::Tensor & call(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out);
|
| 69 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out);
|
| 70 |
+
};
|
| 71 |
+
|
| 72 |
+
struct TORCH_API pow_Tensor_Scalar {
|
| 73 |
+
using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
|
| 74 |
+
using ptr_schema = schema*;
|
| 75 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 76 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::pow")
|
| 77 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar")
|
| 78 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor")
|
| 79 |
+
static at::Tensor call(const at::Tensor & self, const at::Scalar & exponent);
|
| 80 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent);
|
| 81 |
+
};
|
| 82 |
+
|
| 83 |
+
struct TORCH_API pow__Scalar {
|
| 84 |
+
using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
|
| 85 |
+
using ptr_schema = schema*;
|
| 86 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 87 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::pow_")
|
| 88 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
|
| 89 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)")
|
| 90 |
+
static at::Tensor & call(at::Tensor & self, const at::Scalar & exponent);
|
| 91 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & exponent);
|
| 92 |
+
};
|
| 93 |
+
|
| 94 |
+
struct TORCH_API pow__Tensor {
|
| 95 |
+
using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
|
| 96 |
+
using ptr_schema = schema*;
|
| 97 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 98 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::pow_")
|
| 99 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
|
| 100 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)")
|
| 101 |
+
static at::Tensor & call(at::Tensor & self, const at::Tensor & exponent);
|
| 102 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & exponent);
|
| 103 |
+
};
|
| 104 |
+
|
| 105 |
+
}} // namespace at::_ops
|
infer_4_37_2/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_csc_tensor.h
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/sparse_csc_tensor_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
|
| 26 |
+
inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
|
| 27 |
+
return at::_ops::sparse_csc_tensor_ccol_row_value_size::call(ccol_indices, row_indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
|
| 28 |
+
}
|
| 29 |
+
// aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
|
| 30 |
+
inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
|
| 31 |
+
return at::_ops::sparse_csc_tensor_ccol_row_value_size::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
// aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
|
| 35 |
+
inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) {
|
| 36 |
+
return at::_ops::sparse_csc_tensor_ccol_row_value::call(ccol_indices, row_indices, values, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
|
| 37 |
+
}
|
| 38 |
+
// aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
|
| 39 |
+
inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
|
| 40 |
+
return at::_ops::sparse_csc_tensor_ccol_row_value::call(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
|
| 41 |
+
}
|
| 42 |
+
|
| 43 |
+
}
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/deta/__init__.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
from typing import TYPE_CHECKING
|
| 16 |
+
|
| 17 |
+
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
_import_structure = {
|
| 21 |
+
"configuration_deta": ["DetaConfig"],
|
| 22 |
+
}
|
| 23 |
+
|
| 24 |
+
try:
|
| 25 |
+
if not is_vision_available():
|
| 26 |
+
raise OptionalDependencyNotAvailable()
|
| 27 |
+
except OptionalDependencyNotAvailable:
|
| 28 |
+
pass
|
| 29 |
+
else:
|
| 30 |
+
_import_structure["image_processing_deta"] = ["DetaImageProcessor"]
|
| 31 |
+
|
| 32 |
+
try:
|
| 33 |
+
if not is_torch_available():
|
| 34 |
+
raise OptionalDependencyNotAvailable()
|
| 35 |
+
except OptionalDependencyNotAvailable:
|
| 36 |
+
pass
|
| 37 |
+
else:
|
| 38 |
+
_import_structure["modeling_deta"] = [
|
| 39 |
+
"DetaForObjectDetection",
|
| 40 |
+
"DetaModel",
|
| 41 |
+
"DetaPreTrainedModel",
|
| 42 |
+
]
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
if TYPE_CHECKING:
|
| 46 |
+
from .configuration_deta import DetaConfig
|
| 47 |
+
|
| 48 |
+
try:
|
| 49 |
+
if not is_vision_available():
|
| 50 |
+
raise OptionalDependencyNotAvailable()
|
| 51 |
+
except OptionalDependencyNotAvailable:
|
| 52 |
+
pass
|
| 53 |
+
else:
|
| 54 |
+
from .image_processing_deta import DetaImageProcessor
|
| 55 |
+
|
| 56 |
+
try:
|
| 57 |
+
if not is_torch_available():
|
| 58 |
+
raise OptionalDependencyNotAvailable()
|
| 59 |
+
except OptionalDependencyNotAvailable:
|
| 60 |
+
pass
|
| 61 |
+
else:
|
| 62 |
+
from .modeling_deta import (
|
| 63 |
+
DetaForObjectDetection,
|
| 64 |
+
DetaModel,
|
| 65 |
+
DetaPreTrainedModel,
|
| 66 |
+
)
|
| 67 |
+
|
| 68 |
+
else:
|
| 69 |
+
import sys
|
| 70 |
+
|
| 71 |
+
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/deta/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.01 kB). View file
|
|
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/deta/__pycache__/configuration_deta.cpython-310.pyc
ADDED
|
Binary file (11.7 kB). View file
|
|
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/deta/__pycache__/image_processing_deta.cpython-310.pyc
ADDED
|
Binary file (43.4 kB). View file
|
|
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/deta/__pycache__/modeling_deta.cpython-310.pyc
ADDED
|
Binary file (99.5 kB). View file
|
|
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/deta/configuration_deta.py
ADDED
|
@@ -0,0 +1,267 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2022 SenseTime and The HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""DETA model configuration"""
|
| 16 |
+
|
| 17 |
+
from ....configuration_utils import PretrainedConfig
|
| 18 |
+
from ....utils import logging
|
| 19 |
+
from ...auto import CONFIG_MAPPING
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
logger = logging.get_logger(__name__)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class DetaConfig(PretrainedConfig):
|
| 26 |
+
r"""
|
| 27 |
+
This is the configuration class to store the configuration of a [`DetaModel`]. It is used to instantiate a DETA
|
| 28 |
+
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
|
| 29 |
+
defaults will yield a similar configuration to that of the DETA
|
| 30 |
+
[SenseTime/deformable-detr](https://huggingface.co/SenseTime/deformable-detr) architecture.
|
| 31 |
+
|
| 32 |
+
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
| 33 |
+
documentation from [`PretrainedConfig`] for more information.
|
| 34 |
+
|
| 35 |
+
Args:
|
| 36 |
+
backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `ResNetConfig()`):
|
| 37 |
+
The configuration of the backbone model.
|
| 38 |
+
backbone (`str`, *optional*):
|
| 39 |
+
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
|
| 40 |
+
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
|
| 41 |
+
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
|
| 42 |
+
use_pretrained_backbone (`bool`, *optional*, `False`):
|
| 43 |
+
Whether to use pretrained weights for the backbone.
|
| 44 |
+
use_timm_backbone (`bool`, *optional*, `False`):
|
| 45 |
+
Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
|
| 46 |
+
library.
|
| 47 |
+
backbone_kwargs (`dict`, *optional*):
|
| 48 |
+
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
|
| 49 |
+
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
|
| 50 |
+
num_queries (`int`, *optional*, defaults to 900):
|
| 51 |
+
Number of object queries, i.e. detection slots. This is the maximal number of objects [`DetaModel`] can
|
| 52 |
+
detect in a single image. In case `two_stage` is set to `True`, we use `two_stage_num_proposals` instead.
|
| 53 |
+
d_model (`int`, *optional*, defaults to 256):
|
| 54 |
+
Dimension of the layers.
|
| 55 |
+
encoder_layers (`int`, *optional*, defaults to 6):
|
| 56 |
+
Number of encoder layers.
|
| 57 |
+
decoder_layers (`int`, *optional*, defaults to 6):
|
| 58 |
+
Number of decoder layers.
|
| 59 |
+
encoder_attention_heads (`int`, *optional*, defaults to 8):
|
| 60 |
+
Number of attention heads for each attention layer in the Transformer encoder.
|
| 61 |
+
decoder_attention_heads (`int`, *optional*, defaults to 8):
|
| 62 |
+
Number of attention heads for each attention layer in the Transformer decoder.
|
| 63 |
+
decoder_ffn_dim (`int`, *optional*, defaults to 2048):
|
| 64 |
+
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
|
| 65 |
+
encoder_ffn_dim (`int`, *optional*, defaults to 2048):
|
| 66 |
+
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
|
| 67 |
+
activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
|
| 68 |
+
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
|
| 69 |
+
`"relu"`, `"silu"` and `"gelu_new"` are supported.
|
| 70 |
+
dropout (`float`, *optional*, defaults to 0.1):
|
| 71 |
+
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
|
| 72 |
+
attention_dropout (`float`, *optional*, defaults to 0.0):
|
| 73 |
+
The dropout ratio for the attention probabilities.
|
| 74 |
+
activation_dropout (`float`, *optional*, defaults to 0.0):
|
| 75 |
+
The dropout ratio for activations inside the fully connected layer.
|
| 76 |
+
init_std (`float`, *optional*, defaults to 0.02):
|
| 77 |
+
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
|
| 78 |
+
init_xavier_std (`float`, *optional*, defaults to 1):
|
| 79 |
+
The scaling factor used for the Xavier initialization gain in the HM Attention map module.
|
| 80 |
+
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
|
| 81 |
+
The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
|
| 82 |
+
for more details.
|
| 83 |
+
auxiliary_loss (`bool`, *optional*, defaults to `False`):
|
| 84 |
+
Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
|
| 85 |
+
position_embedding_type (`str`, *optional*, defaults to `"sine"`):
|
| 86 |
+
Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
|
| 87 |
+
class_cost (`float`, *optional*, defaults to 1):
|
| 88 |
+
Relative weight of the classification error in the Hungarian matching cost.
|
| 89 |
+
bbox_cost (`float`, *optional*, defaults to 5):
|
| 90 |
+
Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
|
| 91 |
+
giou_cost (`float`, *optional*, defaults to 2):
|
| 92 |
+
Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
|
| 93 |
+
mask_loss_coefficient (`float`, *optional*, defaults to 1):
|
| 94 |
+
Relative weight of the Focal loss in the panoptic segmentation loss.
|
| 95 |
+
dice_loss_coefficient (`float`, *optional*, defaults to 1):
|
| 96 |
+
Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
|
| 97 |
+
bbox_loss_coefficient (`float`, *optional*, defaults to 5):
|
| 98 |
+
Relative weight of the L1 bounding box loss in the object detection loss.
|
| 99 |
+
giou_loss_coefficient (`float`, *optional*, defaults to 2):
|
| 100 |
+
Relative weight of the generalized IoU loss in the object detection loss.
|
| 101 |
+
eos_coefficient (`float`, *optional*, defaults to 0.1):
|
| 102 |
+
Relative classification weight of the 'no-object' class in the object detection loss.
|
| 103 |
+
num_feature_levels (`int`, *optional*, defaults to 5):
|
| 104 |
+
The number of input feature levels.
|
| 105 |
+
encoder_n_points (`int`, *optional*, defaults to 4):
|
| 106 |
+
The number of sampled keys in each feature level for each attention head in the encoder.
|
| 107 |
+
decoder_n_points (`int`, *optional*, defaults to 4):
|
| 108 |
+
The number of sampled keys in each feature level for each attention head in the decoder.
|
| 109 |
+
two_stage (`bool`, *optional*, defaults to `True`):
|
| 110 |
+
Whether to apply a two-stage deformable DETR, where the region proposals are also generated by a variant of
|
| 111 |
+
DETA, which are further fed into the decoder for iterative bounding box refinement.
|
| 112 |
+
two_stage_num_proposals (`int`, *optional*, defaults to 300):
|
| 113 |
+
The number of region proposals to be generated, in case `two_stage` is set to `True`.
|
| 114 |
+
with_box_refine (`bool`, *optional*, defaults to `True`):
|
| 115 |
+
Whether to apply iterative bounding box refinement, where each decoder layer refines the bounding boxes
|
| 116 |
+
based on the predictions from the previous layer.
|
| 117 |
+
focal_alpha (`float`, *optional*, defaults to 0.25):
|
| 118 |
+
Alpha parameter in the focal loss.
|
| 119 |
+
assign_first_stage (`bool`, *optional*, defaults to `True`):
|
| 120 |
+
Whether to assign each prediction i to the highest overlapping ground truth object if the overlap is larger than a threshold 0.7.
|
| 121 |
+
assign_second_stage (`bool`, *optional*, defaults to `True`):
|
| 122 |
+
Whether to assign second assignment procedure in the second stage closely follows the first stage assignment procedure.
|
| 123 |
+
disable_custom_kernels (`bool`, *optional*, defaults to `True`):
|
| 124 |
+
Disable the use of custom CUDA and CPU kernels. This option is necessary for the ONNX export, as custom
|
| 125 |
+
kernels are not supported by PyTorch ONNX export.
|
| 126 |
+
|
| 127 |
+
Examples:
|
| 128 |
+
|
| 129 |
+
```python
|
| 130 |
+
>>> from transformers import DetaConfig, DetaModel
|
| 131 |
+
|
| 132 |
+
>>> # Initializing a DETA SenseTime/deformable-detr style configuration
|
| 133 |
+
>>> configuration = DetaConfig()
|
| 134 |
+
|
| 135 |
+
>>> # Initializing a model (with random weights) from the SenseTime/deformable-detr style configuration
|
| 136 |
+
>>> model = DetaModel(configuration)
|
| 137 |
+
|
| 138 |
+
>>> # Accessing the model configuration
|
| 139 |
+
>>> configuration = model.config
|
| 140 |
+
```"""
|
| 141 |
+
|
| 142 |
+
model_type = "deta"
|
| 143 |
+
attribute_map = {
|
| 144 |
+
"hidden_size": "d_model",
|
| 145 |
+
"num_attention_heads": "encoder_attention_heads",
|
| 146 |
+
}
|
| 147 |
+
|
| 148 |
+
def __init__(
|
| 149 |
+
self,
|
| 150 |
+
backbone_config=None,
|
| 151 |
+
backbone=None,
|
| 152 |
+
use_pretrained_backbone=False,
|
| 153 |
+
use_timm_backbone=False,
|
| 154 |
+
backbone_kwargs=None,
|
| 155 |
+
num_queries=900,
|
| 156 |
+
max_position_embeddings=2048,
|
| 157 |
+
encoder_layers=6,
|
| 158 |
+
encoder_ffn_dim=2048,
|
| 159 |
+
encoder_attention_heads=8,
|
| 160 |
+
decoder_layers=6,
|
| 161 |
+
decoder_ffn_dim=1024,
|
| 162 |
+
decoder_attention_heads=8,
|
| 163 |
+
encoder_layerdrop=0.0,
|
| 164 |
+
is_encoder_decoder=True,
|
| 165 |
+
activation_function="relu",
|
| 166 |
+
d_model=256,
|
| 167 |
+
dropout=0.1,
|
| 168 |
+
attention_dropout=0.0,
|
| 169 |
+
activation_dropout=0.0,
|
| 170 |
+
init_std=0.02,
|
| 171 |
+
init_xavier_std=1.0,
|
| 172 |
+
return_intermediate=True,
|
| 173 |
+
auxiliary_loss=False,
|
| 174 |
+
position_embedding_type="sine",
|
| 175 |
+
num_feature_levels=5,
|
| 176 |
+
encoder_n_points=4,
|
| 177 |
+
decoder_n_points=4,
|
| 178 |
+
two_stage=True,
|
| 179 |
+
two_stage_num_proposals=300,
|
| 180 |
+
with_box_refine=True,
|
| 181 |
+
assign_first_stage=True,
|
| 182 |
+
assign_second_stage=True,
|
| 183 |
+
class_cost=1,
|
| 184 |
+
bbox_cost=5,
|
| 185 |
+
giou_cost=2,
|
| 186 |
+
mask_loss_coefficient=1,
|
| 187 |
+
dice_loss_coefficient=1,
|
| 188 |
+
bbox_loss_coefficient=5,
|
| 189 |
+
giou_loss_coefficient=2,
|
| 190 |
+
eos_coefficient=0.1,
|
| 191 |
+
focal_alpha=0.25,
|
| 192 |
+
disable_custom_kernels=True,
|
| 193 |
+
**kwargs,
|
| 194 |
+
):
|
| 195 |
+
if use_pretrained_backbone:
|
| 196 |
+
raise ValueError("Pretrained backbones are not supported yet.")
|
| 197 |
+
|
| 198 |
+
if backbone_config is not None and backbone is not None:
|
| 199 |
+
raise ValueError("You can't specify both `backbone` and `backbone_config`.")
|
| 200 |
+
|
| 201 |
+
if backbone_config is None and backbone is None:
|
| 202 |
+
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
|
| 203 |
+
backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
|
| 204 |
+
else:
|
| 205 |
+
if isinstance(backbone_config, dict):
|
| 206 |
+
backbone_model_type = backbone_config.pop("model_type")
|
| 207 |
+
config_class = CONFIG_MAPPING[backbone_model_type]
|
| 208 |
+
backbone_config = config_class.from_dict(backbone_config)
|
| 209 |
+
|
| 210 |
+
if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None:
|
| 211 |
+
raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
|
| 212 |
+
|
| 213 |
+
self.backbone_config = backbone_config
|
| 214 |
+
self.backbone = backbone
|
| 215 |
+
self.use_pretrained_backbone = use_pretrained_backbone
|
| 216 |
+
self.use_timm_backbone = use_timm_backbone
|
| 217 |
+
self.backbone_kwargs = backbone_kwargs
|
| 218 |
+
self.num_queries = num_queries
|
| 219 |
+
self.max_position_embeddings = max_position_embeddings
|
| 220 |
+
self.d_model = d_model
|
| 221 |
+
self.encoder_ffn_dim = encoder_ffn_dim
|
| 222 |
+
self.encoder_layers = encoder_layers
|
| 223 |
+
self.encoder_attention_heads = encoder_attention_heads
|
| 224 |
+
self.decoder_ffn_dim = decoder_ffn_dim
|
| 225 |
+
self.decoder_layers = decoder_layers
|
| 226 |
+
self.decoder_attention_heads = decoder_attention_heads
|
| 227 |
+
self.dropout = dropout
|
| 228 |
+
self.attention_dropout = attention_dropout
|
| 229 |
+
self.activation_dropout = activation_dropout
|
| 230 |
+
self.activation_function = activation_function
|
| 231 |
+
self.init_std = init_std
|
| 232 |
+
self.init_xavier_std = init_xavier_std
|
| 233 |
+
self.encoder_layerdrop = encoder_layerdrop
|
| 234 |
+
self.auxiliary_loss = auxiliary_loss
|
| 235 |
+
self.position_embedding_type = position_embedding_type
|
| 236 |
+
# deformable attributes
|
| 237 |
+
self.num_feature_levels = num_feature_levels
|
| 238 |
+
self.encoder_n_points = encoder_n_points
|
| 239 |
+
self.decoder_n_points = decoder_n_points
|
| 240 |
+
self.two_stage = two_stage
|
| 241 |
+
self.two_stage_num_proposals = two_stage_num_proposals
|
| 242 |
+
self.with_box_refine = with_box_refine
|
| 243 |
+
self.assign_first_stage = assign_first_stage
|
| 244 |
+
self.assign_second_stage = assign_second_stage
|
| 245 |
+
if two_stage is True and with_box_refine is False:
|
| 246 |
+
raise ValueError("If two_stage is True, with_box_refine must be True.")
|
| 247 |
+
# Hungarian matcher
|
| 248 |
+
self.class_cost = class_cost
|
| 249 |
+
self.bbox_cost = bbox_cost
|
| 250 |
+
self.giou_cost = giou_cost
|
| 251 |
+
# Loss coefficients
|
| 252 |
+
self.mask_loss_coefficient = mask_loss_coefficient
|
| 253 |
+
self.dice_loss_coefficient = dice_loss_coefficient
|
| 254 |
+
self.bbox_loss_coefficient = bbox_loss_coefficient
|
| 255 |
+
self.giou_loss_coefficient = giou_loss_coefficient
|
| 256 |
+
self.eos_coefficient = eos_coefficient
|
| 257 |
+
self.focal_alpha = focal_alpha
|
| 258 |
+
self.disable_custom_kernels = disable_custom_kernels
|
| 259 |
+
super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
|
| 260 |
+
|
| 261 |
+
@property
|
| 262 |
+
def num_attention_heads(self) -> int:
|
| 263 |
+
return self.encoder_attention_heads
|
| 264 |
+
|
| 265 |
+
@property
|
| 266 |
+
def hidden_size(self) -> int:
|
| 267 |
+
return self.d_model
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/deta/image_processing_deta.py
ADDED
|
@@ -0,0 +1,1224 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""Image processor class for Deformable DETR."""
|
| 16 |
+
|
| 17 |
+
import pathlib
|
| 18 |
+
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
|
| 19 |
+
|
| 20 |
+
import numpy as np
|
| 21 |
+
|
| 22 |
+
from ....feature_extraction_utils import BatchFeature
|
| 23 |
+
from ....image_processing_utils import BaseImageProcessor, get_size_dict
|
| 24 |
+
from ....image_transforms import (
|
| 25 |
+
PaddingMode,
|
| 26 |
+
center_to_corners_format,
|
| 27 |
+
corners_to_center_format,
|
| 28 |
+
pad,
|
| 29 |
+
rescale,
|
| 30 |
+
resize,
|
| 31 |
+
rgb_to_id,
|
| 32 |
+
to_channel_dimension_format,
|
| 33 |
+
)
|
| 34 |
+
from ....image_utils import (
|
| 35 |
+
IMAGENET_DEFAULT_MEAN,
|
| 36 |
+
IMAGENET_DEFAULT_STD,
|
| 37 |
+
AnnotationFormat,
|
| 38 |
+
AnnotationType,
|
| 39 |
+
ChannelDimension,
|
| 40 |
+
ImageInput,
|
| 41 |
+
PILImageResampling,
|
| 42 |
+
get_image_size,
|
| 43 |
+
infer_channel_dimension_format,
|
| 44 |
+
is_batched,
|
| 45 |
+
is_scaled_image,
|
| 46 |
+
to_numpy_array,
|
| 47 |
+
valid_images,
|
| 48 |
+
validate_annotations,
|
| 49 |
+
validate_preprocess_arguments,
|
| 50 |
+
)
|
| 51 |
+
from ....utils import (
|
| 52 |
+
is_flax_available,
|
| 53 |
+
is_jax_tensor,
|
| 54 |
+
is_tf_available,
|
| 55 |
+
is_tf_tensor,
|
| 56 |
+
is_torch_available,
|
| 57 |
+
is_torch_tensor,
|
| 58 |
+
is_torchvision_available,
|
| 59 |
+
is_vision_available,
|
| 60 |
+
logging,
|
| 61 |
+
)
|
| 62 |
+
from ....utils.generic import TensorType
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
if is_torch_available():
|
| 66 |
+
import torch
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
if is_torchvision_available():
|
| 70 |
+
from torchvision.ops.boxes import batched_nms
|
| 71 |
+
|
| 72 |
+
if is_vision_available():
|
| 73 |
+
import PIL
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 77 |
+
|
| 78 |
+
SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]:
|
| 82 |
+
"""
|
| 83 |
+
Computes the output image size given the input image size and the desired output size.
|
| 84 |
+
|
| 85 |
+
Args:
|
| 86 |
+
image_size (`Tuple[int, int]`):
|
| 87 |
+
The input image size.
|
| 88 |
+
size (`int`):
|
| 89 |
+
The desired output size.
|
| 90 |
+
max_size (`int`, *optional*):
|
| 91 |
+
The maximum allowed output size.
|
| 92 |
+
"""
|
| 93 |
+
height, width = image_size
|
| 94 |
+
raw_size = None
|
| 95 |
+
if max_size is not None:
|
| 96 |
+
min_original_size = float(min((height, width)))
|
| 97 |
+
max_original_size = float(max((height, width)))
|
| 98 |
+
if max_original_size / min_original_size * size > max_size:
|
| 99 |
+
raw_size = max_size * min_original_size / max_original_size
|
| 100 |
+
size = int(round(raw_size))
|
| 101 |
+
|
| 102 |
+
if (height <= width and height == size) or (width <= height and width == size):
|
| 103 |
+
oh, ow = height, width
|
| 104 |
+
elif width < height:
|
| 105 |
+
ow = size
|
| 106 |
+
if max_size is not None and raw_size is not None:
|
| 107 |
+
oh = int(raw_size * height / width)
|
| 108 |
+
else:
|
| 109 |
+
oh = int(size * height / width)
|
| 110 |
+
else:
|
| 111 |
+
oh = size
|
| 112 |
+
if max_size is not None and raw_size is not None:
|
| 113 |
+
ow = int(raw_size * width / height)
|
| 114 |
+
else:
|
| 115 |
+
ow = int(size * width / height)
|
| 116 |
+
|
| 117 |
+
return (oh, ow)
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def get_resize_output_image_size(
|
| 121 |
+
input_image: np.ndarray,
|
| 122 |
+
size: Union[int, Tuple[int, int], List[int]],
|
| 123 |
+
max_size: Optional[int] = None,
|
| 124 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
| 125 |
+
) -> Tuple[int, int]:
|
| 126 |
+
"""
|
| 127 |
+
Computes the output image size given the input image size and the desired output size. If the desired output size
|
| 128 |
+
is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output
|
| 129 |
+
image size is computed by keeping the aspect ratio of the input image size.
|
| 130 |
+
|
| 131 |
+
Args:
|
| 132 |
+
input_image (`np.ndarray`):
|
| 133 |
+
The image to resize.
|
| 134 |
+
size (`int` or `Tuple[int, int]` or `List[int]`):
|
| 135 |
+
The desired output size.
|
| 136 |
+
max_size (`int`, *optional*):
|
| 137 |
+
The maximum allowed output size.
|
| 138 |
+
input_data_format (`ChannelDimension` or `str`, *optional*):
|
| 139 |
+
The channel dimension format of the input image. If not provided, it will be inferred from the input image.
|
| 140 |
+
"""
|
| 141 |
+
image_size = get_image_size(input_image, input_data_format)
|
| 142 |
+
if isinstance(size, (list, tuple)):
|
| 143 |
+
return size
|
| 144 |
+
|
| 145 |
+
return get_size_with_aspect_ratio(image_size, size, max_size)
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
def get_image_size_for_max_height_width(
|
| 149 |
+
input_image: np.ndarray,
|
| 150 |
+
max_height: int,
|
| 151 |
+
max_width: int,
|
| 152 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
| 153 |
+
) -> Tuple[int, int]:
|
| 154 |
+
"""
|
| 155 |
+
Computes the output image size given the input image and the maximum allowed height and width. Keep aspect ratio.
|
| 156 |
+
Important, even if image_height < max_height and image_width < max_width, the image will be resized
|
| 157 |
+
to at least one of the edges be equal to max_height or max_width.
|
| 158 |
+
|
| 159 |
+
For example:
|
| 160 |
+
- input_size: (100, 200), max_height: 50, max_width: 50 -> output_size: (25, 50)
|
| 161 |
+
- input_size: (100, 200), max_height: 200, max_width: 500 -> output_size: (200, 400)
|
| 162 |
+
|
| 163 |
+
Args:
|
| 164 |
+
input_image (`np.ndarray`):
|
| 165 |
+
The image to resize.
|
| 166 |
+
max_height (`int`):
|
| 167 |
+
The maximum allowed height.
|
| 168 |
+
max_width (`int`):
|
| 169 |
+
The maximum allowed width.
|
| 170 |
+
input_data_format (`ChannelDimension` or `str`, *optional*):
|
| 171 |
+
The channel dimension format of the input image. If not provided, it will be inferred from the input image.
|
| 172 |
+
"""
|
| 173 |
+
image_size = get_image_size(input_image, input_data_format)
|
| 174 |
+
height, width = image_size
|
| 175 |
+
height_scale = max_height / height
|
| 176 |
+
width_scale = max_width / width
|
| 177 |
+
min_scale = min(height_scale, width_scale)
|
| 178 |
+
new_height = int(height * min_scale)
|
| 179 |
+
new_width = int(width * min_scale)
|
| 180 |
+
return new_height, new_width
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
def get_numpy_to_framework_fn(arr) -> Callable:
|
| 184 |
+
"""
|
| 185 |
+
Returns a function that converts a numpy array to the framework of the input array.
|
| 186 |
+
|
| 187 |
+
Args:
|
| 188 |
+
arr (`np.ndarray`): The array to convert.
|
| 189 |
+
"""
|
| 190 |
+
if isinstance(arr, np.ndarray):
|
| 191 |
+
return np.array
|
| 192 |
+
if is_tf_available() and is_tf_tensor(arr):
|
| 193 |
+
import tensorflow as tf
|
| 194 |
+
|
| 195 |
+
return tf.convert_to_tensor
|
| 196 |
+
if is_torch_available() and is_torch_tensor(arr):
|
| 197 |
+
import torch
|
| 198 |
+
|
| 199 |
+
return torch.tensor
|
| 200 |
+
if is_flax_available() and is_jax_tensor(arr):
|
| 201 |
+
import jax.numpy as jnp
|
| 202 |
+
|
| 203 |
+
return jnp.array
|
| 204 |
+
raise ValueError(f"Cannot convert arrays of type {type(arr)}")
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray:
|
| 208 |
+
"""
|
| 209 |
+
Squeezes an array, but only if the axis specified has dim 1.
|
| 210 |
+
"""
|
| 211 |
+
if axis is None:
|
| 212 |
+
return arr.squeeze()
|
| 213 |
+
|
| 214 |
+
try:
|
| 215 |
+
return arr.squeeze(axis=axis)
|
| 216 |
+
except ValueError:
|
| 217 |
+
return arr
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict:
|
| 221 |
+
image_height, image_width = image_size
|
| 222 |
+
norm_annotation = {}
|
| 223 |
+
for key, value in annotation.items():
|
| 224 |
+
if key == "boxes":
|
| 225 |
+
boxes = value
|
| 226 |
+
boxes = corners_to_center_format(boxes)
|
| 227 |
+
boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32)
|
| 228 |
+
norm_annotation[key] = boxes
|
| 229 |
+
else:
|
| 230 |
+
norm_annotation[key] = value
|
| 231 |
+
return norm_annotation
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def max_across_indices(values: Iterable[Any]) -> List[Any]:
|
| 235 |
+
"""
|
| 236 |
+
Return the maximum value across all indices of an iterable of values.
|
| 237 |
+
"""
|
| 238 |
+
return [max(values_i) for values_i in zip(*values)]
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
def get_max_height_width(
|
| 242 |
+
images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None
|
| 243 |
+
) -> List[int]:
|
| 244 |
+
"""
|
| 245 |
+
Get the maximum height and width across all images in a batch.
|
| 246 |
+
"""
|
| 247 |
+
if input_data_format is None:
|
| 248 |
+
input_data_format = infer_channel_dimension_format(images[0])
|
| 249 |
+
|
| 250 |
+
if input_data_format == ChannelDimension.FIRST:
|
| 251 |
+
_, max_height, max_width = max_across_indices([img.shape for img in images])
|
| 252 |
+
elif input_data_format == ChannelDimension.LAST:
|
| 253 |
+
max_height, max_width, _ = max_across_indices([img.shape for img in images])
|
| 254 |
+
else:
|
| 255 |
+
raise ValueError(f"Invalid channel dimension format: {input_data_format}")
|
| 256 |
+
return (max_height, max_width)
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
def make_pixel_mask(
|
| 260 |
+
image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
|
| 261 |
+
) -> np.ndarray:
|
| 262 |
+
"""
|
| 263 |
+
Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
|
| 264 |
+
|
| 265 |
+
Args:
|
| 266 |
+
image (`np.ndarray`):
|
| 267 |
+
Image to make the pixel mask for.
|
| 268 |
+
output_size (`Tuple[int, int]`):
|
| 269 |
+
Output size of the mask.
|
| 270 |
+
"""
|
| 271 |
+
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
|
| 272 |
+
mask = np.zeros(output_size, dtype=np.int64)
|
| 273 |
+
mask[:input_height, :input_width] = 1
|
| 274 |
+
return mask
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray:
|
| 278 |
+
"""
|
| 279 |
+
Convert a COCO polygon annotation to a mask.
|
| 280 |
+
|
| 281 |
+
Args:
|
| 282 |
+
segmentations (`List[List[float]]`):
|
| 283 |
+
List of polygons, each polygon represented by a list of x-y coordinates.
|
| 284 |
+
height (`int`):
|
| 285 |
+
Height of the mask.
|
| 286 |
+
width (`int`):
|
| 287 |
+
Width of the mask.
|
| 288 |
+
"""
|
| 289 |
+
try:
|
| 290 |
+
from pycocotools import mask as coco_mask
|
| 291 |
+
except ImportError:
|
| 292 |
+
raise ImportError("Pycocotools is not installed in your environment.")
|
| 293 |
+
|
| 294 |
+
masks = []
|
| 295 |
+
for polygons in segmentations:
|
| 296 |
+
rles = coco_mask.frPyObjects(polygons, height, width)
|
| 297 |
+
mask = coco_mask.decode(rles)
|
| 298 |
+
if len(mask.shape) < 3:
|
| 299 |
+
mask = mask[..., None]
|
| 300 |
+
mask = np.asarray(mask, dtype=np.uint8)
|
| 301 |
+
mask = np.any(mask, axis=2)
|
| 302 |
+
masks.append(mask)
|
| 303 |
+
if masks:
|
| 304 |
+
masks = np.stack(masks, axis=0)
|
| 305 |
+
else:
|
| 306 |
+
masks = np.zeros((0, height, width), dtype=np.uint8)
|
| 307 |
+
|
| 308 |
+
return masks
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def prepare_coco_detection_annotation(
|
| 312 |
+
image,
|
| 313 |
+
target,
|
| 314 |
+
return_segmentation_masks: bool = False,
|
| 315 |
+
input_data_format: Optional[Union[ChannelDimension, str]] = None,
|
| 316 |
+
):
|
| 317 |
+
"""
|
| 318 |
+
Convert the target in COCO format into the format expected by DETA.
|
| 319 |
+
"""
|
| 320 |
+
image_height, image_width = get_image_size(image, channel_dim=input_data_format)
|
| 321 |
+
|
| 322 |
+
image_id = target["image_id"]
|
| 323 |
+
image_id = np.asarray([image_id], dtype=np.int64)
|
| 324 |
+
|
| 325 |
+
# Get all COCO annotations for the given image.
|
| 326 |
+
annotations = target["annotations"]
|
| 327 |
+
annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0]
|
| 328 |
+
|
| 329 |
+
classes = [obj["category_id"] for obj in annotations]
|
| 330 |
+
classes = np.asarray(classes, dtype=np.int64)
|
| 331 |
+
|
| 332 |
+
# for conversion to coco api
|
| 333 |
+
area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32)
|
| 334 |
+
iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64)
|
| 335 |
+
|
| 336 |
+
boxes = [obj["bbox"] for obj in annotations]
|
| 337 |
+
# guard against no boxes via resizing
|
| 338 |
+
boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
|
| 339 |
+
boxes[:, 2:] += boxes[:, :2]
|
| 340 |
+
boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width)
|
| 341 |
+
boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height)
|
| 342 |
+
|
| 343 |
+
keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
|
| 344 |
+
|
| 345 |
+
new_target = {}
|
| 346 |
+
new_target["image_id"] = image_id
|
| 347 |
+
new_target["class_labels"] = classes[keep]
|
| 348 |
+
new_target["boxes"] = boxes[keep]
|
| 349 |
+
new_target["area"] = area[keep]
|
| 350 |
+
new_target["iscrowd"] = iscrowd[keep]
|
| 351 |
+
new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64)
|
| 352 |
+
|
| 353 |
+
if annotations and "keypoints" in annotations[0]:
|
| 354 |
+
keypoints = [obj["keypoints"] for obj in annotations]
|
| 355 |
+
# Converting the filtered keypoints list to a numpy array
|
| 356 |
+
keypoints = np.asarray(keypoints, dtype=np.float32)
|
| 357 |
+
# Apply the keep mask here to filter the relevant annotations
|
| 358 |
+
keypoints = keypoints[keep]
|
| 359 |
+
num_keypoints = keypoints.shape[0]
|
| 360 |
+
keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints
|
| 361 |
+
new_target["keypoints"] = keypoints
|
| 362 |
+
|
| 363 |
+
if return_segmentation_masks:
|
| 364 |
+
segmentation_masks = [obj["segmentation"] for obj in annotations]
|
| 365 |
+
masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width)
|
| 366 |
+
new_target["masks"] = masks[keep]
|
| 367 |
+
|
| 368 |
+
return new_target
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
def masks_to_boxes(masks: np.ndarray) -> np.ndarray:
|
| 372 |
+
"""
|
| 373 |
+
Compute the bounding boxes around the provided panoptic segmentation masks.
|
| 374 |
+
|
| 375 |
+
Args:
|
| 376 |
+
masks: masks in format `[number_masks, height, width]` where N is the number of masks
|
| 377 |
+
|
| 378 |
+
Returns:
|
| 379 |
+
boxes: bounding boxes in format `[number_masks, 4]` in xyxy format
|
| 380 |
+
"""
|
| 381 |
+
if masks.size == 0:
|
| 382 |
+
return np.zeros((0, 4))
|
| 383 |
+
|
| 384 |
+
h, w = masks.shape[-2:]
|
| 385 |
+
y = np.arange(0, h, dtype=np.float32)
|
| 386 |
+
x = np.arange(0, w, dtype=np.float32)
|
| 387 |
+
# see https://github.com/pytorch/pytorch/issues/50276
|
| 388 |
+
y, x = np.meshgrid(y, x, indexing="ij")
|
| 389 |
+
|
| 390 |
+
x_mask = masks * np.expand_dims(x, axis=0)
|
| 391 |
+
x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1)
|
| 392 |
+
x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool)))
|
| 393 |
+
x_min = x.filled(fill_value=1e8)
|
| 394 |
+
x_min = x_min.reshape(x_min.shape[0], -1).min(-1)
|
| 395 |
+
|
| 396 |
+
y_mask = masks * np.expand_dims(y, axis=0)
|
| 397 |
+
y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1)
|
| 398 |
+
y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool)))
|
| 399 |
+
y_min = y.filled(fill_value=1e8)
|
| 400 |
+
y_min = y_min.reshape(y_min.shape[0], -1).min(-1)
|
| 401 |
+
|
| 402 |
+
return np.stack([x_min, y_min, x_max, y_max], 1)
|
| 403 |
+
|
| 404 |
+
|
| 405 |
+
def prepare_coco_panoptic_annotation(
|
| 406 |
+
image: np.ndarray,
|
| 407 |
+
target: Dict,
|
| 408 |
+
masks_path: Union[str, pathlib.Path],
|
| 409 |
+
return_masks: bool = True,
|
| 410 |
+
input_data_format: Union[ChannelDimension, str] = None,
|
| 411 |
+
) -> Dict:
|
| 412 |
+
"""
|
| 413 |
+
Prepare a coco panoptic annotation for DETA.
|
| 414 |
+
"""
|
| 415 |
+
image_height, image_width = get_image_size(image, channel_dim=input_data_format)
|
| 416 |
+
annotation_path = pathlib.Path(masks_path) / target["file_name"]
|
| 417 |
+
|
| 418 |
+
new_target = {}
|
| 419 |
+
new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64)
|
| 420 |
+
new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64)
|
| 421 |
+
new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64)
|
| 422 |
+
|
| 423 |
+
if "segments_info" in target:
|
| 424 |
+
masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32)
|
| 425 |
+
masks = rgb_to_id(masks)
|
| 426 |
+
|
| 427 |
+
ids = np.array([segment_info["id"] for segment_info in target["segments_info"]])
|
| 428 |
+
masks = masks == ids[:, None, None]
|
| 429 |
+
masks = masks.astype(np.uint8)
|
| 430 |
+
if return_masks:
|
| 431 |
+
new_target["masks"] = masks
|
| 432 |
+
new_target["boxes"] = masks_to_boxes(masks)
|
| 433 |
+
new_target["class_labels"] = np.array(
|
| 434 |
+
[segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64
|
| 435 |
+
)
|
| 436 |
+
new_target["iscrowd"] = np.asarray(
|
| 437 |
+
[segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64
|
| 438 |
+
)
|
| 439 |
+
new_target["area"] = np.asarray(
|
| 440 |
+
[segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32
|
| 441 |
+
)
|
| 442 |
+
|
| 443 |
+
return new_target
|
| 444 |
+
|
| 445 |
+
|
| 446 |
+
def resize_annotation(
|
| 447 |
+
annotation: Dict[str, Any],
|
| 448 |
+
orig_size: Tuple[int, int],
|
| 449 |
+
target_size: Tuple[int, int],
|
| 450 |
+
threshold: float = 0.5,
|
| 451 |
+
resample: PILImageResampling = PILImageResampling.NEAREST,
|
| 452 |
+
):
|
| 453 |
+
"""
|
| 454 |
+
Resizes an annotation to a target size.
|
| 455 |
+
|
| 456 |
+
Args:
|
| 457 |
+
annotation (`Dict[str, Any]`):
|
| 458 |
+
The annotation dictionary.
|
| 459 |
+
orig_size (`Tuple[int, int]`):
|
| 460 |
+
The original size of the input image.
|
| 461 |
+
target_size (`Tuple[int, int]`):
|
| 462 |
+
The target size of the image, as returned by the preprocessing `resize` step.
|
| 463 |
+
threshold (`float`, *optional*, defaults to 0.5):
|
| 464 |
+
The threshold used to binarize the segmentation masks.
|
| 465 |
+
resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`):
|
| 466 |
+
The resampling filter to use when resizing the masks.
|
| 467 |
+
"""
|
| 468 |
+
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size))
|
| 469 |
+
ratio_height, ratio_width = ratios
|
| 470 |
+
|
| 471 |
+
new_annotation = {}
|
| 472 |
+
new_annotation["size"] = target_size
|
| 473 |
+
|
| 474 |
+
for key, value in annotation.items():
|
| 475 |
+
if key == "boxes":
|
| 476 |
+
boxes = value
|
| 477 |
+
scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32)
|
| 478 |
+
new_annotation["boxes"] = scaled_boxes
|
| 479 |
+
elif key == "area":
|
| 480 |
+
area = value
|
| 481 |
+
scaled_area = area * (ratio_width * ratio_height)
|
| 482 |
+
new_annotation["area"] = scaled_area
|
| 483 |
+
elif key == "masks":
|
| 484 |
+
masks = value[:, None]
|
| 485 |
+
masks = np.array([resize(mask, target_size, resample=resample) for mask in masks])
|
| 486 |
+
masks = masks.astype(np.float32)
|
| 487 |
+
masks = masks[:, 0] > threshold
|
| 488 |
+
new_annotation["masks"] = masks
|
| 489 |
+
elif key == "size":
|
| 490 |
+
new_annotation["size"] = target_size
|
| 491 |
+
else:
|
| 492 |
+
new_annotation[key] = value
|
| 493 |
+
|
| 494 |
+
return new_annotation
|
| 495 |
+
|
| 496 |
+
|
| 497 |
+
class DetaImageProcessor(BaseImageProcessor):
|
| 498 |
+
r"""
|
| 499 |
+
Constructs a Deformable DETR image processor.
|
| 500 |
+
|
| 501 |
+
Args:
|
| 502 |
+
format (`str`, *optional*, defaults to `"coco_detection"`):
|
| 503 |
+
Data format of the annotations. One of "coco_detection" or "coco_panoptic".
|
| 504 |
+
do_resize (`bool`, *optional*, defaults to `True`):
|
| 505 |
+
Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be
|
| 506 |
+
overridden by the `do_resize` parameter in the `preprocess` method.
|
| 507 |
+
size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`):
|
| 508 |
+
Size of the image's `(height, width)` dimensions after resizing. Can be overridden by the `size` parameter
|
| 509 |
+
in the `preprocess` method. Available options are:
|
| 510 |
+
- `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`.
|
| 511 |
+
Do NOT keep the aspect ratio.
|
| 512 |
+
- `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting
|
| 513 |
+
the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge
|
| 514 |
+
less or equal to `longest_edge`.
|
| 515 |
+
- `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the
|
| 516 |
+
aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to
|
| 517 |
+
`max_width`.
|
| 518 |
+
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
|
| 519 |
+
Resampling filter to use if resizing the image.
|
| 520 |
+
do_rescale (`bool`, *optional*, defaults to `True`):
|
| 521 |
+
Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
|
| 522 |
+
`do_rescale` parameter in the `preprocess` method.
|
| 523 |
+
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
|
| 524 |
+
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
|
| 525 |
+
`preprocess` method.
|
| 526 |
+
do_normalize:
|
| 527 |
+
Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
|
| 528 |
+
`preprocess` method.
|
| 529 |
+
image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
|
| 530 |
+
Mean values to use when normalizing the image. Can be a single value or a list of values, one for each
|
| 531 |
+
channel. Can be overridden by the `image_mean` parameter in the `preprocess` method.
|
| 532 |
+
image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
|
| 533 |
+
Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one
|
| 534 |
+
for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method.
|
| 535 |
+
do_convert_annotations (`bool`, *optional*, defaults to `True`):
|
| 536 |
+
Controls whether to convert the annotations to the format expected by the DETR model. Converts the
|
| 537 |
+
bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`.
|
| 538 |
+
Can be overridden by the `do_convert_annotations` parameter in the `preprocess` method.
|
| 539 |
+
do_pad (`bool`, *optional*, defaults to `True`):
|
| 540 |
+
Controls whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess`
|
| 541 |
+
method. If `True`, padding will be applied to the bottom and right of the image with zeros.
|
| 542 |
+
If `pad_size` is provided, the image will be padded to the specified dimensions.
|
| 543 |
+
Otherwise, the image will be padded to the maximum height and width of the batch.
|
| 544 |
+
pad_size (`Dict[str, int]`, *optional*):
|
| 545 |
+
The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size
|
| 546 |
+
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
|
| 547 |
+
height and width in the batch.
|
| 548 |
+
"""
|
| 549 |
+
|
| 550 |
+
model_input_names = ["pixel_values", "pixel_mask"]
|
| 551 |
+
|
| 552 |
+
def __init__(
|
| 553 |
+
self,
|
| 554 |
+
format: Union[str, AnnotationFormat] = AnnotationFormat.COCO_DETECTION,
|
| 555 |
+
do_resize: bool = True,
|
| 556 |
+
size: Dict[str, int] = None,
|
| 557 |
+
resample: PILImageResampling = PILImageResampling.BILINEAR,
|
| 558 |
+
do_rescale: bool = True,
|
| 559 |
+
rescale_factor: Union[int, float] = 1 / 255,
|
| 560 |
+
do_normalize: bool = True,
|
| 561 |
+
image_mean: Union[float, List[float]] = None,
|
| 562 |
+
image_std: Union[float, List[float]] = None,
|
| 563 |
+
do_convert_annotations: bool = True,
|
| 564 |
+
do_pad: bool = True,
|
| 565 |
+
pad_size: Optional[Dict[str, int]] = None,
|
| 566 |
+
**kwargs,
|
| 567 |
+
) -> None:
|
| 568 |
+
if "pad_and_return_pixel_mask" in kwargs:
|
| 569 |
+
do_pad = kwargs.pop("pad_and_return_pixel_mask")
|
| 570 |
+
|
| 571 |
+
size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333}
|
| 572 |
+
size = get_size_dict(size, default_to_square=False)
|
| 573 |
+
|
| 574 |
+
if do_convert_annotations is None:
|
| 575 |
+
do_convert_annotations = do_normalize
|
| 576 |
+
|
| 577 |
+
super().__init__(**kwargs)
|
| 578 |
+
self.format = format
|
| 579 |
+
self.do_resize = do_resize
|
| 580 |
+
self.size = size
|
| 581 |
+
self.resample = resample
|
| 582 |
+
self.do_rescale = do_rescale
|
| 583 |
+
self.rescale_factor = rescale_factor
|
| 584 |
+
self.do_normalize = do_normalize
|
| 585 |
+
self.do_convert_annotations = do_convert_annotations
|
| 586 |
+
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
|
| 587 |
+
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
|
| 588 |
+
self.do_pad = do_pad
|
| 589 |
+
self.pad_size = pad_size
|
| 590 |
+
|
| 591 |
+
def prepare_annotation(
|
| 592 |
+
self,
|
| 593 |
+
image: np.ndarray,
|
| 594 |
+
target: Dict,
|
| 595 |
+
format: Optional[AnnotationFormat] = None,
|
| 596 |
+
return_segmentation_masks: bool = None,
|
| 597 |
+
masks_path: Optional[Union[str, pathlib.Path]] = None,
|
| 598 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
| 599 |
+
) -> Dict:
|
| 600 |
+
"""
|
| 601 |
+
Prepare an annotation for feeding into DETA model.
|
| 602 |
+
"""
|
| 603 |
+
format = format if format is not None else self.format
|
| 604 |
+
|
| 605 |
+
if format == AnnotationFormat.COCO_DETECTION:
|
| 606 |
+
return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks
|
| 607 |
+
target = prepare_coco_detection_annotation(
|
| 608 |
+
image, target, return_segmentation_masks, input_data_format=input_data_format
|
| 609 |
+
)
|
| 610 |
+
elif format == AnnotationFormat.COCO_PANOPTIC:
|
| 611 |
+
return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks
|
| 612 |
+
target = prepare_coco_panoptic_annotation(
|
| 613 |
+
image,
|
| 614 |
+
target,
|
| 615 |
+
masks_path=masks_path,
|
| 616 |
+
return_masks=return_segmentation_masks,
|
| 617 |
+
input_data_format=input_data_format,
|
| 618 |
+
)
|
| 619 |
+
else:
|
| 620 |
+
raise ValueError(f"Format {format} is not supported.")
|
| 621 |
+
return target
|
| 622 |
+
|
| 623 |
+
def resize(
|
| 624 |
+
self,
|
| 625 |
+
image: np.ndarray,
|
| 626 |
+
size: Dict[str, int],
|
| 627 |
+
resample: PILImageResampling = PILImageResampling.BILINEAR,
|
| 628 |
+
data_format: Optional[ChannelDimension] = None,
|
| 629 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
| 630 |
+
**kwargs,
|
| 631 |
+
) -> np.ndarray:
|
| 632 |
+
"""
|
| 633 |
+
Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an
|
| 634 |
+
int, smaller edge of the image will be matched to this number.
|
| 635 |
+
|
| 636 |
+
Args:
|
| 637 |
+
image (`np.ndarray`):
|
| 638 |
+
Image to resize.
|
| 639 |
+
size (`Dict[str, int]`):
|
| 640 |
+
Size of the image's `(height, width)` dimensions after resizing. Available options are:
|
| 641 |
+
- `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`.
|
| 642 |
+
Do NOT keep the aspect ratio.
|
| 643 |
+
- `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting
|
| 644 |
+
the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge
|
| 645 |
+
less or equal to `longest_edge`.
|
| 646 |
+
- `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the
|
| 647 |
+
aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to
|
| 648 |
+
`max_width`.
|
| 649 |
+
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
|
| 650 |
+
Resampling filter to use if resizing the image.
|
| 651 |
+
data_format (`ChannelDimension`, *optional*):
|
| 652 |
+
The channel dimension format for the output image. If unset, the channel dimension format of the input
|
| 653 |
+
image is used.
|
| 654 |
+
input_data_format (`ChannelDimension` or `str`, *optional*):
|
| 655 |
+
The channel dimension format of the input image. If not provided, it will be inferred from the input
|
| 656 |
+
image.
|
| 657 |
+
"""
|
| 658 |
+
size = get_size_dict(size, default_to_square=False)
|
| 659 |
+
if "shortest_edge" in size and "longest_edge" in size:
|
| 660 |
+
new_size = get_resize_output_image_size(
|
| 661 |
+
image, size["shortest_edge"], size["longest_edge"], input_data_format=input_data_format
|
| 662 |
+
)
|
| 663 |
+
elif "height" in size and "width" in size:
|
| 664 |
+
new_size = (size["height"], size["width"])
|
| 665 |
+
elif "max_height" in size and "max_width" in size:
|
| 666 |
+
new_size = get_image_size_for_max_height_width(
|
| 667 |
+
image, size["max_height"], size["max_width"], input_data_format=input_data_format
|
| 668 |
+
)
|
| 669 |
+
else:
|
| 670 |
+
raise ValueError(
|
| 671 |
+
"Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got"
|
| 672 |
+
f" {size.keys()}."
|
| 673 |
+
)
|
| 674 |
+
image = resize(
|
| 675 |
+
image, size=new_size, resample=resample, data_format=data_format, input_data_format=input_data_format
|
| 676 |
+
)
|
| 677 |
+
return image
|
| 678 |
+
|
| 679 |
+
def resize_annotation(
|
| 680 |
+
self,
|
| 681 |
+
annotation,
|
| 682 |
+
orig_size,
|
| 683 |
+
size,
|
| 684 |
+
resample: PILImageResampling = PILImageResampling.NEAREST,
|
| 685 |
+
) -> Dict:
|
| 686 |
+
"""
|
| 687 |
+
Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched
|
| 688 |
+
to this number.
|
| 689 |
+
"""
|
| 690 |
+
return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample)
|
| 691 |
+
|
| 692 |
+
def rescale(
|
| 693 |
+
self,
|
| 694 |
+
image: np.ndarray,
|
| 695 |
+
rescale_factor: float,
|
| 696 |
+
data_format: Optional[Union[str, ChannelDimension]] = None,
|
| 697 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
| 698 |
+
) -> np.ndarray:
|
| 699 |
+
"""
|
| 700 |
+
Rescale the image by the given factor. image = image * rescale_factor.
|
| 701 |
+
|
| 702 |
+
Args:
|
| 703 |
+
image (`np.ndarray`):
|
| 704 |
+
Image to rescale.
|
| 705 |
+
rescale_factor (`float`):
|
| 706 |
+
The value to use for rescaling.
|
| 707 |
+
data_format (`str` or `ChannelDimension`, *optional*):
|
| 708 |
+
The channel dimension format for the output image. If unset, the channel dimension format of the input
|
| 709 |
+
image is used. Can be one of:
|
| 710 |
+
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
| 711 |
+
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
| 712 |
+
input_data_format (`str` or `ChannelDimension`, *optional*):
|
| 713 |
+
The channel dimension format for the input image. If unset, is inferred from the input image. Can be
|
| 714 |
+
one of:
|
| 715 |
+
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
| 716 |
+
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
| 717 |
+
"""
|
| 718 |
+
return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)
|
| 719 |
+
|
| 720 |
+
def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict:
|
| 721 |
+
"""
|
| 722 |
+
Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to
|
| 723 |
+
`[center_x, center_y, width, height]` format and from absolute to relative pixel values.
|
| 724 |
+
"""
|
| 725 |
+
return normalize_annotation(annotation, image_size=image_size)
|
| 726 |
+
|
| 727 |
+
def _update_annotation_for_padded_image(
|
| 728 |
+
self,
|
| 729 |
+
annotation: Dict,
|
| 730 |
+
input_image_size: Tuple[int, int],
|
| 731 |
+
output_image_size: Tuple[int, int],
|
| 732 |
+
padding,
|
| 733 |
+
update_bboxes,
|
| 734 |
+
) -> Dict:
|
| 735 |
+
"""
|
| 736 |
+
Update the annotation for a padded image.
|
| 737 |
+
"""
|
| 738 |
+
new_annotation = {}
|
| 739 |
+
new_annotation["size"] = output_image_size
|
| 740 |
+
|
| 741 |
+
for key, value in annotation.items():
|
| 742 |
+
if key == "masks":
|
| 743 |
+
masks = value
|
| 744 |
+
masks = pad(
|
| 745 |
+
masks,
|
| 746 |
+
padding,
|
| 747 |
+
mode=PaddingMode.CONSTANT,
|
| 748 |
+
constant_values=0,
|
| 749 |
+
input_data_format=ChannelDimension.FIRST,
|
| 750 |
+
)
|
| 751 |
+
masks = safe_squeeze(masks, 1)
|
| 752 |
+
new_annotation["masks"] = masks
|
| 753 |
+
elif key == "boxes" and update_bboxes:
|
| 754 |
+
boxes = value
|
| 755 |
+
boxes *= np.asarray(
|
| 756 |
+
[
|
| 757 |
+
input_image_size[1] / output_image_size[1],
|
| 758 |
+
input_image_size[0] / output_image_size[0],
|
| 759 |
+
input_image_size[1] / output_image_size[1],
|
| 760 |
+
input_image_size[0] / output_image_size[0],
|
| 761 |
+
]
|
| 762 |
+
)
|
| 763 |
+
new_annotation["boxes"] = boxes
|
| 764 |
+
elif key == "size":
|
| 765 |
+
new_annotation["size"] = output_image_size
|
| 766 |
+
else:
|
| 767 |
+
new_annotation[key] = value
|
| 768 |
+
return new_annotation
|
| 769 |
+
|
| 770 |
+
def _pad_image(
|
| 771 |
+
self,
|
| 772 |
+
image: np.ndarray,
|
| 773 |
+
output_size: Tuple[int, int],
|
| 774 |
+
annotation: Optional[Dict[str, Any]] = None,
|
| 775 |
+
constant_values: Union[float, Iterable[float]] = 0,
|
| 776 |
+
data_format: Optional[ChannelDimension] = None,
|
| 777 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
| 778 |
+
update_bboxes: bool = True,
|
| 779 |
+
) -> np.ndarray:
|
| 780 |
+
"""
|
| 781 |
+
Pad an image with zeros to the given size.
|
| 782 |
+
"""
|
| 783 |
+
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
|
| 784 |
+
output_height, output_width = output_size
|
| 785 |
+
|
| 786 |
+
pad_bottom = output_height - input_height
|
| 787 |
+
pad_right = output_width - input_width
|
| 788 |
+
padding = ((0, pad_bottom), (0, pad_right))
|
| 789 |
+
padded_image = pad(
|
| 790 |
+
image,
|
| 791 |
+
padding,
|
| 792 |
+
mode=PaddingMode.CONSTANT,
|
| 793 |
+
constant_values=constant_values,
|
| 794 |
+
data_format=data_format,
|
| 795 |
+
input_data_format=input_data_format,
|
| 796 |
+
)
|
| 797 |
+
if annotation is not None:
|
| 798 |
+
annotation = self._update_annotation_for_padded_image(
|
| 799 |
+
annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes
|
| 800 |
+
)
|
| 801 |
+
return padded_image, annotation
|
| 802 |
+
|
| 803 |
+
def pad(
|
| 804 |
+
self,
|
| 805 |
+
images: List[np.ndarray],
|
| 806 |
+
annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None,
|
| 807 |
+
constant_values: Union[float, Iterable[float]] = 0,
|
| 808 |
+
return_pixel_mask: bool = True,
|
| 809 |
+
return_tensors: Optional[Union[str, TensorType]] = None,
|
| 810 |
+
data_format: Optional[ChannelDimension] = None,
|
| 811 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
| 812 |
+
update_bboxes: bool = True,
|
| 813 |
+
pad_size: Optional[Dict[str, int]] = None,
|
| 814 |
+
) -> BatchFeature:
|
| 815 |
+
"""
|
| 816 |
+
Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
|
| 817 |
+
in the batch and optionally returns their corresponding pixel mask.
|
| 818 |
+
|
| 819 |
+
Args:
|
| 820 |
+
images (List[`np.ndarray`]):
|
| 821 |
+
Images to pad.
|
| 822 |
+
annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
|
| 823 |
+
Annotations to transform according to the padding that is applied to the images.
|
| 824 |
+
constant_values (`float` or `Iterable[float]`, *optional*):
|
| 825 |
+
The value to use for the padding if `mode` is `"constant"`.
|
| 826 |
+
return_pixel_mask (`bool`, *optional*, defaults to `True`):
|
| 827 |
+
Whether to return a pixel mask.
|
| 828 |
+
return_tensors (`str` or `TensorType`, *optional*):
|
| 829 |
+
The type of tensors to return. Can be one of:
|
| 830 |
+
- Unset: Return a list of `np.ndarray`.
|
| 831 |
+
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
|
| 832 |
+
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
|
| 833 |
+
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
|
| 834 |
+
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
|
| 835 |
+
data_format (`str` or `ChannelDimension`, *optional*):
|
| 836 |
+
The channel dimension format of the image. If not provided, it will be the same as the input image.
|
| 837 |
+
input_data_format (`ChannelDimension` or `str`, *optional*):
|
| 838 |
+
The channel dimension format of the input image. If not provided, it will be inferred.
|
| 839 |
+
update_bboxes (`bool`, *optional*, defaults to `True`):
|
| 840 |
+
Whether to update the bounding boxes in the annotations to match the padded images. If the
|
| 841 |
+
bounding boxes have not been converted to relative coordinates and `(centre_x, centre_y, width, height)`
|
| 842 |
+
format, the bounding boxes will not be updated.
|
| 843 |
+
pad_size (`Dict[str, int]`, *optional*):
|
| 844 |
+
The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size
|
| 845 |
+
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
|
| 846 |
+
height and width in the batch.
|
| 847 |
+
"""
|
| 848 |
+
pad_size = pad_size if pad_size is not None else self.pad_size
|
| 849 |
+
if pad_size is not None:
|
| 850 |
+
padded_size = (pad_size["height"], pad_size["width"])
|
| 851 |
+
else:
|
| 852 |
+
padded_size = get_max_height_width(images, input_data_format=input_data_format)
|
| 853 |
+
|
| 854 |
+
annotation_list = annotations if annotations is not None else [None] * len(images)
|
| 855 |
+
padded_images = []
|
| 856 |
+
padded_annotations = []
|
| 857 |
+
for image, annotation in zip(images, annotation_list):
|
| 858 |
+
padded_image, padded_annotation = self._pad_image(
|
| 859 |
+
image,
|
| 860 |
+
padded_size,
|
| 861 |
+
annotation,
|
| 862 |
+
constant_values=constant_values,
|
| 863 |
+
data_format=data_format,
|
| 864 |
+
input_data_format=input_data_format,
|
| 865 |
+
update_bboxes=update_bboxes,
|
| 866 |
+
)
|
| 867 |
+
padded_images.append(padded_image)
|
| 868 |
+
padded_annotations.append(padded_annotation)
|
| 869 |
+
|
| 870 |
+
data = {"pixel_values": padded_images}
|
| 871 |
+
|
| 872 |
+
if return_pixel_mask:
|
| 873 |
+
masks = [
|
| 874 |
+
make_pixel_mask(image=image, output_size=padded_size, input_data_format=input_data_format)
|
| 875 |
+
for image in images
|
| 876 |
+
]
|
| 877 |
+
data["pixel_mask"] = masks
|
| 878 |
+
|
| 879 |
+
encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
|
| 880 |
+
|
| 881 |
+
if annotations is not None:
|
| 882 |
+
encoded_inputs["labels"] = [
|
| 883 |
+
BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations
|
| 884 |
+
]
|
| 885 |
+
|
| 886 |
+
return encoded_inputs
|
| 887 |
+
|
| 888 |
+
def preprocess(
|
| 889 |
+
self,
|
| 890 |
+
images: ImageInput,
|
| 891 |
+
annotations: Optional[Union[List[Dict], List[List[Dict]]]] = None,
|
| 892 |
+
return_segmentation_masks: bool = None,
|
| 893 |
+
masks_path: Optional[Union[str, pathlib.Path]] = None,
|
| 894 |
+
do_resize: Optional[bool] = None,
|
| 895 |
+
size: Optional[Dict[str, int]] = None,
|
| 896 |
+
resample=None, # PILImageResampling
|
| 897 |
+
do_rescale: Optional[bool] = None,
|
| 898 |
+
rescale_factor: Optional[Union[int, float]] = None,
|
| 899 |
+
do_normalize: Optional[bool] = None,
|
| 900 |
+
image_mean: Optional[Union[float, List[float]]] = None,
|
| 901 |
+
image_std: Optional[Union[float, List[float]]] = None,
|
| 902 |
+
do_convert_annotations: Optional[bool] = None,
|
| 903 |
+
do_pad: Optional[bool] = None,
|
| 904 |
+
format: Optional[Union[str, AnnotationFormat]] = None,
|
| 905 |
+
return_tensors: Optional[Union[TensorType, str]] = None,
|
| 906 |
+
data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
|
| 907 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
| 908 |
+
pad_size: Optional[Dict[str, int]] = None,
|
| 909 |
+
**kwargs,
|
| 910 |
+
) -> BatchFeature:
|
| 911 |
+
"""
|
| 912 |
+
Preprocess an image or a batch of images so that it can be used by the model.
|
| 913 |
+
|
| 914 |
+
Args:
|
| 915 |
+
images (`ImageInput`):
|
| 916 |
+
Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging
|
| 917 |
+
from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
|
| 918 |
+
annotations (`List[Dict]` or `List[List[Dict]]`, *optional*):
|
| 919 |
+
List of annotations associated with the image or batch of images. If annotation is for object
|
| 920 |
+
detection, the annotations should be a dictionary with the following keys:
|
| 921 |
+
- "image_id" (`int`): The image id.
|
| 922 |
+
- "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a
|
| 923 |
+
dictionary. An image can have no annotations, in which case the list should be empty.
|
| 924 |
+
If annotation is for segmentation, the annotations should be a dictionary with the following keys:
|
| 925 |
+
- "image_id" (`int`): The image id.
|
| 926 |
+
- "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary.
|
| 927 |
+
An image can have no segments, in which case the list should be empty.
|
| 928 |
+
- "file_name" (`str`): The file name of the image.
|
| 929 |
+
return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks):
|
| 930 |
+
Whether to return segmentation masks.
|
| 931 |
+
masks_path (`str` or `pathlib.Path`, *optional*):
|
| 932 |
+
Path to the directory containing the segmentation masks.
|
| 933 |
+
do_resize (`bool`, *optional*, defaults to self.do_resize):
|
| 934 |
+
Whether to resize the image.
|
| 935 |
+
size (`Dict[str, int]`, *optional*, defaults to self.size):
|
| 936 |
+
Size of the image's `(height, width)` dimensions after resizing. Available options are:
|
| 937 |
+
- `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`.
|
| 938 |
+
Do NOT keep the aspect ratio.
|
| 939 |
+
- `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting
|
| 940 |
+
the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge
|
| 941 |
+
less or equal to `longest_edge`.
|
| 942 |
+
- `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the
|
| 943 |
+
aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to
|
| 944 |
+
`max_width`.
|
| 945 |
+
resample (`PILImageResampling`, *optional*, defaults to self.resample):
|
| 946 |
+
Resampling filter to use when resizing the image.
|
| 947 |
+
do_rescale (`bool`, *optional*, defaults to self.do_rescale):
|
| 948 |
+
Whether to rescale the image.
|
| 949 |
+
rescale_factor (`float`, *optional*, defaults to self.rescale_factor):
|
| 950 |
+
Rescale factor to use when rescaling the image.
|
| 951 |
+
do_normalize (`bool`, *optional*, defaults to self.do_normalize):
|
| 952 |
+
Whether to normalize the image.
|
| 953 |
+
image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean):
|
| 954 |
+
Mean to use when normalizing the image.
|
| 955 |
+
image_std (`float` or `List[float]`, *optional*, defaults to self.image_std):
|
| 956 |
+
Standard deviation to use when normalizing the image.
|
| 957 |
+
do_convert_annotations (`bool`, *optional*, defaults to self.do_convert_annotations):
|
| 958 |
+
Whether to convert the annotations to the format expected by the model. Converts the bounding
|
| 959 |
+
boxes from the format `(top_left_x, top_left_y, width, height)` to `(center_x, center_y, width, height)`
|
| 960 |
+
and in relative coordinates.
|
| 961 |
+
do_pad (`bool`, *optional*, defaults to self.do_pad):
|
| 962 |
+
Whether to pad the image. If `True`, padding will be applied to the bottom and right of
|
| 963 |
+
the image with zeros. If `pad_size` is provided, the image will be padded to the specified
|
| 964 |
+
dimensions. Otherwise, the image will be padded to the maximum height and width of the batch.
|
| 965 |
+
format (`str` or `AnnotationFormat`, *optional*, defaults to self.format):
|
| 966 |
+
Format of the annotations.
|
| 967 |
+
return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors):
|
| 968 |
+
Type of tensors to return. If `None`, will return the list of images.
|
| 969 |
+
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
|
| 970 |
+
The channel dimension format for the output image. Can be one of:
|
| 971 |
+
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
| 972 |
+
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
| 973 |
+
- Unset: Use the channel dimension format of the input image.
|
| 974 |
+
input_data_format (`ChannelDimension` or `str`, *optional*):
|
| 975 |
+
The channel dimension format for the input image. If unset, the channel dimension format is inferred
|
| 976 |
+
from the input image. Can be one of:
|
| 977 |
+
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
| 978 |
+
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
| 979 |
+
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
|
| 980 |
+
pad_size (`Dict[str, int]`, *optional*):
|
| 981 |
+
The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size
|
| 982 |
+
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
|
| 983 |
+
height and width in the batch.
|
| 984 |
+
"""
|
| 985 |
+
if "pad_and_return_pixel_mask" in kwargs:
|
| 986 |
+
logger.warning_once(
|
| 987 |
+
"The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, "
|
| 988 |
+
"use `do_pad` instead.",
|
| 989 |
+
)
|
| 990 |
+
do_pad = kwargs.pop("pad_and_return_pixel_mask")
|
| 991 |
+
|
| 992 |
+
do_resize = self.do_resize if do_resize is None else do_resize
|
| 993 |
+
size = self.size if size is None else size
|
| 994 |
+
size = get_size_dict(size=size, default_to_square=False)
|
| 995 |
+
resample = self.resample if resample is None else resample
|
| 996 |
+
do_rescale = self.do_rescale if do_rescale is None else do_rescale
|
| 997 |
+
rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor
|
| 998 |
+
do_normalize = self.do_normalize if do_normalize is None else do_normalize
|
| 999 |
+
image_mean = self.image_mean if image_mean is None else image_mean
|
| 1000 |
+
image_std = self.image_std if image_std is None else image_std
|
| 1001 |
+
do_convert_annotations = (
|
| 1002 |
+
self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations
|
| 1003 |
+
)
|
| 1004 |
+
do_pad = self.do_pad if do_pad is None else do_pad
|
| 1005 |
+
pad_size = self.pad_size if pad_size is None else pad_size
|
| 1006 |
+
format = self.format if format is None else format
|
| 1007 |
+
|
| 1008 |
+
# Here, the pad() method pads to the maximum of (width, height). It does not need to be validated.
|
| 1009 |
+
|
| 1010 |
+
validate_preprocess_arguments(
|
| 1011 |
+
do_rescale=do_rescale,
|
| 1012 |
+
rescale_factor=rescale_factor,
|
| 1013 |
+
do_normalize=do_normalize,
|
| 1014 |
+
image_mean=image_mean,
|
| 1015 |
+
image_std=image_std,
|
| 1016 |
+
do_resize=do_resize,
|
| 1017 |
+
size=size,
|
| 1018 |
+
resample=resample,
|
| 1019 |
+
)
|
| 1020 |
+
|
| 1021 |
+
if not is_batched(images):
|
| 1022 |
+
images = [images]
|
| 1023 |
+
annotations = [annotations] if annotations is not None else None
|
| 1024 |
+
|
| 1025 |
+
if not valid_images(images):
|
| 1026 |
+
raise ValueError(
|
| 1027 |
+
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
|
| 1028 |
+
"torch.Tensor, tf.Tensor or jax.ndarray."
|
| 1029 |
+
)
|
| 1030 |
+
if annotations is not None and len(images) != len(annotations):
|
| 1031 |
+
raise ValueError(
|
| 1032 |
+
f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match."
|
| 1033 |
+
)
|
| 1034 |
+
|
| 1035 |
+
format = AnnotationFormat(format)
|
| 1036 |
+
if annotations is not None:
|
| 1037 |
+
validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)
|
| 1038 |
+
|
| 1039 |
+
if (
|
| 1040 |
+
masks_path is not None
|
| 1041 |
+
and format == AnnotationFormat.COCO_PANOPTIC
|
| 1042 |
+
and not isinstance(masks_path, (pathlib.Path, str))
|
| 1043 |
+
):
|
| 1044 |
+
raise ValueError(
|
| 1045 |
+
"The path to the directory containing the mask PNG files should be provided as a"
|
| 1046 |
+
f" `pathlib.Path` or string object, but is {type(masks_path)} instead."
|
| 1047 |
+
)
|
| 1048 |
+
|
| 1049 |
+
# All transformations expect numpy arrays
|
| 1050 |
+
images = [to_numpy_array(image) for image in images]
|
| 1051 |
+
|
| 1052 |
+
if do_rescale and is_scaled_image(images[0]):
|
| 1053 |
+
logger.warning_once(
|
| 1054 |
+
"It looks like you are trying to rescale already rescaled images. If the input"
|
| 1055 |
+
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
|
| 1056 |
+
)
|
| 1057 |
+
|
| 1058 |
+
if input_data_format is None:
|
| 1059 |
+
# We assume that all images have the same channel dimension format.
|
| 1060 |
+
input_data_format = infer_channel_dimension_format(images[0])
|
| 1061 |
+
|
| 1062 |
+
# prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image)
|
| 1063 |
+
if annotations is not None:
|
| 1064 |
+
prepared_images = []
|
| 1065 |
+
prepared_annotations = []
|
| 1066 |
+
for image, target in zip(images, annotations):
|
| 1067 |
+
target = self.prepare_annotation(
|
| 1068 |
+
image,
|
| 1069 |
+
target,
|
| 1070 |
+
format,
|
| 1071 |
+
return_segmentation_masks=return_segmentation_masks,
|
| 1072 |
+
masks_path=masks_path,
|
| 1073 |
+
input_data_format=input_data_format,
|
| 1074 |
+
)
|
| 1075 |
+
prepared_images.append(image)
|
| 1076 |
+
prepared_annotations.append(target)
|
| 1077 |
+
images = prepared_images
|
| 1078 |
+
annotations = prepared_annotations
|
| 1079 |
+
del prepared_images, prepared_annotations
|
| 1080 |
+
|
| 1081 |
+
# transformations
|
| 1082 |
+
if do_resize:
|
| 1083 |
+
if annotations is not None:
|
| 1084 |
+
resized_images, resized_annotations = [], []
|
| 1085 |
+
for image, target in zip(images, annotations):
|
| 1086 |
+
orig_size = get_image_size(image, input_data_format)
|
| 1087 |
+
resized_image = self.resize(
|
| 1088 |
+
image, size=size, resample=resample, input_data_format=input_data_format
|
| 1089 |
+
)
|
| 1090 |
+
resized_annotation = self.resize_annotation(
|
| 1091 |
+
target, orig_size, get_image_size(resized_image, input_data_format)
|
| 1092 |
+
)
|
| 1093 |
+
resized_images.append(resized_image)
|
| 1094 |
+
resized_annotations.append(resized_annotation)
|
| 1095 |
+
images = resized_images
|
| 1096 |
+
annotations = resized_annotations
|
| 1097 |
+
del resized_images, resized_annotations
|
| 1098 |
+
else:
|
| 1099 |
+
images = [
|
| 1100 |
+
self.resize(image, size=size, resample=resample, input_data_format=input_data_format)
|
| 1101 |
+
for image in images
|
| 1102 |
+
]
|
| 1103 |
+
|
| 1104 |
+
if do_rescale:
|
| 1105 |
+
images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]
|
| 1106 |
+
|
| 1107 |
+
if do_normalize:
|
| 1108 |
+
images = [
|
| 1109 |
+
self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images
|
| 1110 |
+
]
|
| 1111 |
+
|
| 1112 |
+
if do_convert_annotations and annotations is not None:
|
| 1113 |
+
annotations = [
|
| 1114 |
+
self.normalize_annotation(annotation, get_image_size(image, input_data_format))
|
| 1115 |
+
for annotation, image in zip(annotations, images)
|
| 1116 |
+
]
|
| 1117 |
+
|
| 1118 |
+
if do_pad:
|
| 1119 |
+
# Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...}
|
| 1120 |
+
encoded_inputs = self.pad(
|
| 1121 |
+
images,
|
| 1122 |
+
annotations=annotations,
|
| 1123 |
+
return_pixel_mask=True,
|
| 1124 |
+
data_format=data_format,
|
| 1125 |
+
input_data_format=input_data_format,
|
| 1126 |
+
return_tensors=return_tensors,
|
| 1127 |
+
update_bboxes=do_convert_annotations,
|
| 1128 |
+
pad_size=pad_size,
|
| 1129 |
+
)
|
| 1130 |
+
else:
|
| 1131 |
+
images = [
|
| 1132 |
+
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
|
| 1133 |
+
for image in images
|
| 1134 |
+
]
|
| 1135 |
+
encoded_inputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
|
| 1136 |
+
if annotations is not None:
|
| 1137 |
+
encoded_inputs["labels"] = [
|
| 1138 |
+
BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations
|
| 1139 |
+
]
|
| 1140 |
+
|
| 1141 |
+
return encoded_inputs
|
| 1142 |
+
|
| 1143 |
+
def post_process_object_detection(
|
| 1144 |
+
self,
|
| 1145 |
+
outputs,
|
| 1146 |
+
threshold: float = 0.5,
|
| 1147 |
+
target_sizes: Union[TensorType, List[Tuple]] = None,
|
| 1148 |
+
nms_threshold: float = 0.7,
|
| 1149 |
+
):
|
| 1150 |
+
"""
|
| 1151 |
+
Converts the output of [`DetaForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
|
| 1152 |
+
bottom_right_x, bottom_right_y) format. Only supports PyTorch.
|
| 1153 |
+
|
| 1154 |
+
Args:
|
| 1155 |
+
outputs ([`DetrObjectDetectionOutput`]):
|
| 1156 |
+
Raw outputs of the model.
|
| 1157 |
+
threshold (`float`, *optional*, defaults to 0.5):
|
| 1158 |
+
Score threshold to keep object detection predictions.
|
| 1159 |
+
target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
|
| 1160 |
+
Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
|
| 1161 |
+
(height, width) of each image in the batch. If left to None, predictions will not be resized.
|
| 1162 |
+
nms_threshold (`float`, *optional*, defaults to 0.7):
|
| 1163 |
+
NMS threshold.
|
| 1164 |
+
|
| 1165 |
+
Returns:
|
| 1166 |
+
`List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
|
| 1167 |
+
in the batch as predicted by the model.
|
| 1168 |
+
"""
|
| 1169 |
+
out_logits, out_bbox = outputs.logits, outputs.pred_boxes
|
| 1170 |
+
batch_size, num_queries, num_labels = out_logits.shape
|
| 1171 |
+
|
| 1172 |
+
if target_sizes is not None:
|
| 1173 |
+
if len(out_logits) != len(target_sizes):
|
| 1174 |
+
raise ValueError(
|
| 1175 |
+
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
|
| 1176 |
+
)
|
| 1177 |
+
|
| 1178 |
+
prob = out_logits.sigmoid()
|
| 1179 |
+
|
| 1180 |
+
all_scores = prob.view(batch_size, num_queries * num_labels).to(out_logits.device)
|
| 1181 |
+
all_indexes = torch.arange(num_queries * num_labels)[None].repeat(batch_size, 1).to(out_logits.device)
|
| 1182 |
+
all_boxes = torch.div(all_indexes, out_logits.shape[2], rounding_mode="floor")
|
| 1183 |
+
all_labels = all_indexes % out_logits.shape[2]
|
| 1184 |
+
|
| 1185 |
+
boxes = center_to_corners_format(out_bbox)
|
| 1186 |
+
boxes = torch.gather(boxes, 1, all_boxes.unsqueeze(-1).repeat(1, 1, 4))
|
| 1187 |
+
|
| 1188 |
+
# and from relative [0, 1] to absolute [0, height] coordinates
|
| 1189 |
+
if target_sizes is not None:
|
| 1190 |
+
if isinstance(target_sizes, List):
|
| 1191 |
+
img_h = torch.Tensor([i[0] for i in target_sizes])
|
| 1192 |
+
img_w = torch.Tensor([i[1] for i in target_sizes])
|
| 1193 |
+
else:
|
| 1194 |
+
img_h, img_w = target_sizes.unbind(1)
|
| 1195 |
+
|
| 1196 |
+
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
|
| 1197 |
+
boxes = boxes * scale_fct[:, None, :]
|
| 1198 |
+
|
| 1199 |
+
results = []
|
| 1200 |
+
for b in range(batch_size):
|
| 1201 |
+
box = boxes[b]
|
| 1202 |
+
score = all_scores[b]
|
| 1203 |
+
lbls = all_labels[b]
|
| 1204 |
+
|
| 1205 |
+
pre_topk = score.topk(min(10000, num_queries * num_labels)).indices
|
| 1206 |
+
box = box[pre_topk]
|
| 1207 |
+
score = score[pre_topk]
|
| 1208 |
+
lbls = lbls[pre_topk]
|
| 1209 |
+
|
| 1210 |
+
# apply NMS
|
| 1211 |
+
keep_inds = batched_nms(box, score, lbls, nms_threshold)[:100]
|
| 1212 |
+
score = score[keep_inds]
|
| 1213 |
+
lbls = lbls[keep_inds]
|
| 1214 |
+
box = box[keep_inds]
|
| 1215 |
+
|
| 1216 |
+
results.append(
|
| 1217 |
+
{
|
| 1218 |
+
"scores": score[score > threshold],
|
| 1219 |
+
"labels": lbls[score > threshold],
|
| 1220 |
+
"boxes": box[score > threshold],
|
| 1221 |
+
}
|
| 1222 |
+
)
|
| 1223 |
+
|
| 1224 |
+
return results
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/deta/modeling_deta.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/efficientformer/__pycache__/image_processing_efficientformer.cpython-310.pyc
ADDED
|
Binary file (12.9 kB). View file
|
|
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/ernie_m/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.23 kB). View file
|
|
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/ernie_m/__pycache__/configuration_ernie_m.cpython-310.pyc
ADDED
|
Binary file (5.15 kB). View file
|
|
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/ernie_m/__pycache__/modeling_ernie_m.cpython-310.pyc
ADDED
|
Binary file (29.4 kB). View file
|
|
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/ernie_m/modeling_ernie_m.py
ADDED
|
@@ -0,0 +1,1047 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2023 Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang The HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""PyTorch ErnieM model."""
|
| 16 |
+
|
| 17 |
+
import math
|
| 18 |
+
from typing import List, Optional, Tuple, Union
|
| 19 |
+
|
| 20 |
+
import torch
|
| 21 |
+
import torch.utils.checkpoint
|
| 22 |
+
from torch import nn, tensor
|
| 23 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
| 24 |
+
|
| 25 |
+
from ....activations import ACT2FN
|
| 26 |
+
from ....modeling_outputs import (
|
| 27 |
+
BaseModelOutputWithPastAndCrossAttentions,
|
| 28 |
+
BaseModelOutputWithPoolingAndCrossAttentions,
|
| 29 |
+
MultipleChoiceModelOutput,
|
| 30 |
+
QuestionAnsweringModelOutput,
|
| 31 |
+
SequenceClassifierOutput,
|
| 32 |
+
TokenClassifierOutput,
|
| 33 |
+
)
|
| 34 |
+
from ....modeling_utils import PreTrainedModel
|
| 35 |
+
from ....pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
|
| 36 |
+
from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
|
| 37 |
+
from .configuration_ernie_m import ErnieMConfig
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
logger = logging.get_logger(__name__)
|
| 41 |
+
|
| 42 |
+
_CHECKPOINT_FOR_DOC = "susnato/ernie-m-base_pytorch"
|
| 43 |
+
_CONFIG_FOR_DOC = "ErnieMConfig"
|
| 44 |
+
_TOKENIZER_FOR_DOC = "ErnieMTokenizer"
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
# Adapted from paddlenlp.transformers.ernie_m.modeling.ErnieEmbeddings
class ErnieMEmbeddings(nn.Module):
    """Construct the embeddings from word and position embeddings.

    Unlike BERT-style models there are no token-type embeddings: the sum is
    word embeddings + position embeddings, followed by LayerNorm and dropout.
    """

    def __init__(self, config):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size, padding_idx=config.pad_token_id
        )
        self.layer_norm = nn.LayerNorm(normalized_shape=config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
        self.padding_idx = config.pad_token_id

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.LongTensor] = None,
        past_key_values_length: int = 0,
    ) -> torch.Tensor:
        """Embed `input_ids` (or use `inputs_embeds` directly) and add position embeddings.

        Args:
            input_ids: Token indices of shape `(batch, seq_len)`; ignored when `inputs_embeds` is given.
            position_ids: Optional explicit position indices; derived from the sequence length when `None`.
            inputs_embeds: Pre-computed token embeddings of shape `(batch, seq_len, hidden_size)`.
            past_key_values_length: Offset applied to the generated positions when a cache is used.

        Returns:
            Embeddings of shape `(batch, seq_len, hidden_size)`.
        """
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        if position_ids is None:
            input_shape = inputs_embeds.size()[:-1]
            ones = torch.ones(input_shape, dtype=torch.int64, device=inputs_embeds.device)
            seq_length = torch.cumsum(ones, dim=1)
            position_ids = seq_length - ones

            if past_key_values_length > 0:
                position_ids = position_ids + past_key_values_length
        # to mimic paddlenlp implementation, positions start at 2. Use an
        # out-of-place add so a caller-supplied `position_ids` tensor is never mutated.
        position_ids = position_ids + 2
        position_embeddings = self.position_embeddings(position_ids)
        embeddings = inputs_embeds + position_embeddings
        embeddings = self.layer_norm(embeddings)
        embeddings = self.dropout(embeddings)

        return embeddings
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
class ErnieMSelfAttention(nn.Module):
    """Multi-head (self- or cross-) attention with optional relative position embeddings.

    Supports decoder-style caching via `past_key_value` and masking of whole
    heads via `head_mask`. The projection layers are named `q_proj`/`k_proj`/
    `v_proj` to match the paddlenlp checkpoint layout.
    """

    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.q_proj = nn.Linear(config.hidden_size, self.all_head_size)
        self.k_proj = nn.Linear(config.hidden_size, self.all_head_size)
        self.v_proj = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = position_embedding_type or getattr(
            config, "position_embedding_type", "absolute"
        )
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)

        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        """Reshape `(batch, seq, all_head_size)` to `(batch, heads, seq, head_size)`."""
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        """Run attention; returns `(context,)`, plus attention probs and/or the
        updated `past_key_value` tuple depending on flags."""
        mixed_query_layer = self.q_proj(hidden_states)

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.k_proj(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.v_proj(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.k_proj(hidden_states))
            value_layer = self.transpose_for_scores(self.v_proj(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.k_proj(hidden_states))
            value_layer = self.transpose_for_scores(self.v_proj(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        use_cache = past_key_value is not None
        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            query_length, key_length = query_layer.shape[2], key_layer.shape[2]
            if use_cache:
                # With a cache only the newest query position exists; its index is key_length - 1.
                position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
                    -1, 1
                )
            else:
                position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r

            # Shift the signed distance into the [0, 2*max_pos-2] embedding index range.
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in ErnieMModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        # (batch, heads, seq, head_size) -> (batch, seq, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
class ErnieMAttention(nn.Module):
    """Self-attention plus its output projection, with support for head pruning."""

    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        self.self_attn = ErnieMSelfAttention(config, position_embedding_type=position_embedding_type)
        self.out_proj = nn.Linear(config.hidden_size, config.hidden_size)
        # Heads removed so far; kept so repeated pruning calls stay consistent.
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove `heads` from the attention module, shrinking q/k/v/out projections in place."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self_attn.num_attention_heads, self.self_attn.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self_attn.q_proj = prune_linear_layer(self.self_attn.q_proj, index)
        self.self_attn.k_proj = prune_linear_layer(self.self_attn.k_proj, index)
        self.self_attn.v_proj = prune_linear_layer(self.self_attn.v_proj, index)
        # Output projection is pruned along its input dimension (dim=1).
        self.out_proj = prune_linear_layer(self.out_proj, index, dim=1)

        # Update hyper params and store pruned heads
        self.self_attn.num_attention_heads = self.self_attn.num_attention_heads - len(heads)
        self.self_attn.all_head_size = self.self_attn.attention_head_size * self.self_attn.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        """Apply self-attention, then project; forwards extra outputs (attentions/cache) unchanged."""
        self_outputs = self.self_attn(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        attention_output = self.out_proj(self_outputs[0])
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
class ErnieMEncoderLayer(nn.Module):
    """One post-LayerNorm transformer block: attention + feed-forward, each with a residual.

    NOTE: `output_attentions` defaults to `True` (unlike most HF layers) because
    `ErnieMEncoder` calls this layer without the flag and always unpacks two values.
    """

    def __init__(self, config):
        super().__init__()
        # to mimic paddlenlp implementation
        dropout = 0.1 if config.hidden_dropout_prob is None else config.hidden_dropout_prob
        act_dropout = config.hidden_dropout_prob if config.act_dropout is None else config.act_dropout

        self.self_attn = ErnieMAttention(config)
        self.linear1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.dropout = nn.Dropout(act_dropout)
        self.linear2 = nn.Linear(config.intermediate_size, config.hidden_size)
        self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        if isinstance(config.hidden_act, str):
            self.activation = ACT2FN[config.hidden_act]
        else:
            self.activation = config.hidden_act

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = True,
    ):
        """Returns `(hidden_states, attention_weights)` when `output_attentions`, else `hidden_states`."""
        residual = hidden_states
        if output_attentions:
            hidden_states, attention_opt_weights = self.self_attn(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                head_mask=head_mask,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
            )

        else:
            hidden_states = self.self_attn(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                head_mask=head_mask,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
            )
        # Attention sub-block: residual -> dropout -> LayerNorm (post-norm).
        hidden_states = residual + self.dropout1(hidden_states)
        hidden_states = self.norm1(hidden_states)
        residual = hidden_states

        # Feed-forward sub-block.
        hidden_states = self.linear1(hidden_states)
        hidden_states = self.activation(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.linear2(hidden_states)
        hidden_states = residual + self.dropout2(hidden_states)
        hidden_states = self.norm2(hidden_states)

        if output_attentions:
            return hidden_states, attention_opt_weights
        else:
            return hidden_states
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
class ErnieMEncoder(nn.Module):
    """Stack of `ErnieMEncoderLayer`s that optionally collects hidden states and attentions."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([ErnieMEncoderLayer(config) for _ in range(config.num_hidden_layers)])

    def forward(
        self,
        input_embeds: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        """Run every layer over `input_embeds`.

        `hidden_states` (when requested) includes the embedding output plus each layer's output.
        Each layer is called without `output_attentions`, relying on its `True` default so the
        call always returns `(output, attention_weights)`.
        """
        hidden_states = () if output_hidden_states else None
        attentions = () if output_attentions else None

        output = input_embeds
        if output_hidden_states:
            hidden_states = hidden_states + (output,)
        for i, layer in enumerate(self.layers):
            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            output, opt_attn_weights = layer(
                hidden_states=output,
                attention_mask=attention_mask,
                head_mask=layer_head_mask,
                past_key_value=past_key_value,
            )

            if output_hidden_states:
                hidden_states = hidden_states + (output,)
            if output_attentions:
                attentions = attentions + (opt_attn_weights,)

        last_hidden_state = output
        if not return_dict:
            return tuple(v for v in [last_hidden_state, hidden_states, attentions] if v is not None)

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=last_hidden_state, hidden_states=hidden_states, attentions=attentions
        )
|
| 380 |
+
|
| 381 |
+
|
| 382 |
+
class ErnieMPooler(nn.Module):
    """Pool a sequence to a single vector: first-token hidden state -> Linear -> Tanh."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output
|
| 395 |
+
|
| 396 |
+
|
| 397 |
+
class ErnieMPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ErnieMConfig
    base_model_prefix = "ernie_m"

    def _init_weights(self, module):
        """Initialize the weights of a single submodule (invoked by `post_init`)."""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                # Keep the padding-token embedding at exactly zero.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
|
| 421 |
+
|
| 422 |
+
|
| 423 |
+
# Shared docstring fragments injected into the model classes via the
# `add_start_docstrings*` decorators; `{0}` is filled with the input shape.
ERNIE_M_START_DOCSTRING = r"""

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ErnieMConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

ERNIE_M_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`ErnieMTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
|
| 479 |
+
|
| 480 |
+
|
| 481 |
+
@add_start_docstrings(
    "The bare ErnieM Model transformer outputting raw hidden-states without any specific head on top.",
    ERNIE_M_START_DOCSTRING,
)
class ErnieMModel(ErnieMPreTrainedModel):
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.initializer_range = config.initializer_range
        self.embeddings = ErnieMEmbeddings(config)
        self.encoder = ErnieMEncoder(config)
        self.pooler = ErnieMPooler(config) if add_pooling_layer else None
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layers[layer].self_attn.prune_heads(heads)

    @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPastAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[tensor] = None,
        position_ids: Optional[tensor] = None,
        attention_mask: Optional[tensor] = None,
        head_mask: Optional[tensor] = None,
        inputs_embeds: Optional[tensor] = None,
        past_key_values: Optional[Tuple[Tuple[tensor]]] = None,
        use_cache: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        # NOTE: `use_cache` is accepted for API compatibility but not used below.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time.")

        # init the default bool value
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        # `use_return_dict` (not the raw `return_dict` attribute) also accounts for torchscript mode.
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        past_key_values_length = 0
        if past_key_values is not None:
            past_key_values_length = past_key_values[0][0].shape[2]

        # Adapted from paddlenlp.transformers.ernie_m.ErnieMModel
        if attention_mask is None:
            # Build an additive mask: pad positions get a large negative value, others 0.
            attention_mask = (input_ids == self.config.pad_token_id).to(torch.float32)
            attention_mask *= torch.finfo(attention_mask.dtype).min
            if past_key_values is not None:
                batch_size = past_key_values[0][0].shape[0]
                # Cached positions are always attendable (mask value 0). The mask built above is
                # 2-D `(batch, seq)`, so the prefix must be 2-D as well and live on the same device.
                past_mask = torch.zeros(
                    [batch_size, past_key_values_length], dtype=attention_mask.dtype, device=attention_mask.device
                )
                attention_mask = torch.concat([past_mask, attention_mask], dim=-1)
        # For 2D attention_mask from tokenizer
        elif attention_mask.ndim == 2:
            attention_mask = attention_mask.to(torch.float32)
            attention_mask = 1.0 - attention_mask
            attention_mask *= torch.finfo(attention_mask.dtype).min

        # Broadcast to `(batch, 1, 1, key_len)` for per-head addition to the attention scores.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(1)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if not return_dict:
            sequence_output = encoder_outputs[0]
            pooler_output = self.pooler(sequence_output) if self.pooler is not None else None
            return (sequence_output, pooler_output) + encoder_outputs[1:]

        sequence_output = encoder_outputs["last_hidden_state"]
        pooler_output = self.pooler(sequence_output) if self.pooler is not None else None
        hidden_states = None if not output_hidden_states else encoder_outputs["hidden_states"]
        attentions = None if not output_attentions else encoder_outputs["attentions"]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooler_output,
            hidden_states=hidden_states,
            attentions=attentions,
        )
|
| 592 |
+
|
| 593 |
+
|
| 594 |
+
@add_start_docstrings(
    """ErnieM Model transformer with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks.""",
    ERNIE_M_START_DOCSTRING,
)
class ErnieMForSequenceClassification(ErnieMPreTrainedModel):
    """ErnieM backbone followed by a dropout + linear head over the pooled output."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.ernie_m = ErnieMModel(config)
        # Fall back to the generic hidden dropout when no classifier-specific value is configured.
        if config.classifier_dropout is not None:
            classifier_dropout = config.classifier_dropout
        else:
            classifier_dropout = config.hidden_dropout_prob
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.Tensor]] = None,
        use_cache: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = True,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[Tuple[torch.FloatTensor], SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        outputs = self.ernie_m(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            return_dict=return_dict,
        )

        # Classify from the pooled representation (second element of the backbone output).
        pooled = self.dropout(outputs[1])
        logits = self.classifier(pooled)

        loss = None
        if labels is not None:
            # Infer the problem type once from label dtype / label count and cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and labels.dtype in (torch.long, torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            problem_type = self.config.problem_type
            if problem_type == "regression":
                criterion = MSELoss()
                if self.num_labels == 1:
                    loss = criterion(logits.squeeze(), labels.squeeze())
                else:
                    loss = criterion(logits, labels)
            elif problem_type == "single_label_classification":
                criterion = CrossEntropyLoss()
                loss = criterion(logits.view(-1, self.num_labels), labels.view(-1))
            elif problem_type == "multi_label_classification":
                criterion = BCEWithLogitsLoss()
                loss = criterion(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
|
| 693 |
+
|
| 694 |
+
|
| 695 |
+
@add_start_docstrings(
    """ErnieM Model with a multiple choice classification head on top (a linear layer on top of
    the pooled output and a softmax) e.g. for RocStories/SWAG tasks.""",
    ERNIE_M_START_DOCSTRING,
)
class ErnieMForMultipleChoice(ErnieMPreTrainedModel):
    """ErnieM backbone with a single-logit head scored independently for each candidate choice."""

    def __init__(self, config):
        super().__init__(config)

        self.ernie_m = ErnieMModel(config)
        # Fall back to the generic hidden dropout when no classifier-specific value is configured.
        if config.classifier_dropout is not None:
            classifier_dropout = config.classifier_dropout
        else:
            classifier_dropout = config.hidden_dropout_prob
        self.dropout = nn.Dropout(classifier_dropout)
        # One score per choice; the scores are reshaped to (batch, num_choices) before the loss.
        self.classifier = nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.FloatTensor], MultipleChoiceModelOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        # Fold the choice dimension into the batch dimension so the backbone sees 2-D token inputs.
        def _flatten(tensor):
            return tensor.view(-1, tensor.size(-1)) if tensor is not None else None

        input_ids = _flatten(input_ids)
        attention_mask = _flatten(attention_mask)
        position_ids = _flatten(position_ids)
        if inputs_embeds is not None:
            inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))

        outputs = self.ernie_m(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled = self.dropout(outputs[1])
        logits = self.classifier(pooled)
        # Un-fold the per-choice scores so each row holds the logits for one example's choices.
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss = CrossEntropyLoss()(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
|
| 782 |
+
|
| 783 |
+
|
| 784 |
+
@add_start_docstrings(
    """ErnieM Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.""",
    ERNIE_M_START_DOCSTRING,
)
class ErnieMForTokenClassification(ErnieMPreTrainedModel):
    """ErnieM backbone (pooler disabled) with a per-token linear classification head."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        # The pooler is unnecessary for per-token predictions.
        self.ernie_m = ErnieMModel(config, add_pooling_layer=False)
        # Fall back to the generic hidden dropout when no classifier-specific value is configured.
        if config.classifier_dropout is not None:
            classifier_dropout = config.classifier_dropout
        else:
            classifier_dropout = config.hidden_dropout_prob
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.Tensor]] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = True,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[Tuple[torch.FloatTensor], TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        outputs = self.ernie_m(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Per-token hidden states (first element of the backbone output) feed the classifier.
        token_states = self.dropout(outputs[0])
        logits = self.classifier(token_states)

        loss = None
        if labels is not None:
            loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
|
| 862 |
+
|
| 863 |
+
|
| 864 |
+
@add_start_docstrings(
    """ErnieM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).""",
    ERNIE_M_START_DOCSTRING,
)
class ErnieMForQuestionAnswering(ErnieMPreTrainedModel):
    """ErnieM backbone (pooler disabled) with a 2-way per-token head predicting span start/end."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.ernie_m = ErnieMModel(config, add_pooling_layer=False)
        # Two outputs per token: start and end logits, split apart in `forward`.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.FloatTensor], QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        outputs = self.ernie_m(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Split the 2-channel per-token logits into separate start/end tensors.
        span_logits = self.qa_outputs(outputs[0])
        start_logits, end_logits = span_logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if start_positions.dim() > 1:
                start_positions = start_positions.squeeze(-1)
            if end_positions.dim() > 1:
                end_positions = end_positions.squeeze(-1)
            # Positions outside the sequence are clamped onto `ignored_index` and excluded from the loss.
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            criterion = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = criterion(start_logits, start_positions)
            end_loss = criterion(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
|
| 958 |
+
|
| 959 |
+
|
| 960 |
+
@add_start_docstrings(
    """ErnieMForInformationExtraction is a Ernie-M Model with two linear layer on top of the hidden-states output to
    compute `start_prob` and `end_prob`, designed for Universal Information Extraction.""",
    ERNIE_M_START_DOCSTRING,
)
class ErnieMForInformationExtraction(ErnieMPreTrainedModel):
    """ErnieM backbone with two per-token linear heads producing start/end logits for UIE-style extraction."""

    def __init__(self, config):
        super().__init__(config)
        self.ernie_m = ErnieMModel(config)
        # One logit per token for each boundary; trained with BCEWithLogitsLoss below.
        self.linear_start = nn.Linear(config.hidden_size, 1)
        self.linear_end = nn.Linear(config.hidden_size, 1)
        self.sigmoid = nn.Sigmoid()
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.FloatTensor], QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for position (index) for computing the start_positions loss. Position outside of the sequence are
            not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) for computing the end_positions loss. Position outside of the sequence are not
            taken into account for computing the loss.
        """
        # NOTE(review): unlike the other heads, `return_dict` is not normalized against
        # `config.use_return_dict` here; preserved as-is for backward compatibility.
        result = self.ernie_m(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # `result` is a ModelOutput when return_dict is truthy, otherwise a plain tuple
        # whose first element is the sequence output.
        sequence_output = result.last_hidden_state if return_dict else result[0]

        start_logits = self.linear_start(sequence_output).squeeze(-1)
        end_logits = self.linear_end(sequence_output).squeeze(-1)

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if start_positions.dim() > 1:
                start_positions = start_positions.squeeze(-1)
            if end_positions.dim() > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = BCEWithLogitsLoss()
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            # Bug fix: the original read `result.hidden_states` / `result.attentions` here, but
            # with return_dict falsy `result` is a plain tuple and has no such attributes, so
            # this branch always raised AttributeError. Fall back to None via getattr instead.
            hidden_states = getattr(result, "hidden_states", None)
            attentions = getattr(result, "attentions", None)
            return tuple(
                item
                for item in (total_loss, start_logits, end_logits, hidden_states, attentions)
                if item is not None
            )

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=result.hidden_states,
            attentions=result.attentions,
        )
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/ernie_m/tokenization_ernie_m.py
ADDED
|
@@ -0,0 +1,405 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2023 Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang and The HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""Tokenization classes for Ernie-M."""
|
| 16 |
+
|
| 17 |
+
import io
|
| 18 |
+
import os
|
| 19 |
+
import unicodedata
|
| 20 |
+
from typing import Any, Dict, List, Optional, Tuple
|
| 21 |
+
|
| 22 |
+
import sentencepiece as spm
|
| 23 |
+
|
| 24 |
+
from ....tokenization_utils import PreTrainedTokenizer
|
| 25 |
+
from ....utils import logging
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
logger = logging.get_logger(__name__)
|
| 29 |
+
|
| 30 |
+
SPIECE_UNDERLINE = "▁"
|
| 31 |
+
|
| 32 |
+
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
|
| 33 |
+
|
| 34 |
+
RESOURCE_FILES_NAMES = {
|
| 35 |
+
"sentencepiece_model_file": "sentencepiece.bpe.model",
|
| 36 |
+
"vocab_file": "vocab.txt",
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
# Adapted from paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer
|
| 41 |
+
class ErnieMTokenizer(PreTrainedTokenizer):
|
| 42 |
+
r"""
|
| 43 |
+
Constructs a Ernie-M tokenizer. It uses the `sentencepiece` tools to cut the words to sub-words.
|
| 44 |
+
|
| 45 |
+
Args:
|
| 46 |
+
sentencepiece_model_file (`str`):
|
| 47 |
+
The file path of sentencepiece model.
|
| 48 |
+
vocab_file (`str`, *optional*):
|
| 49 |
+
The file path of the vocabulary.
|
| 50 |
+
do_lower_case (`str`, *optional*, defaults to `True`):
|
| 51 |
+
Whether or not to lowercase the input when tokenizing.
|
| 52 |
+
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
|
| 53 |
+
A special token representing the `unknown (out-of-vocabulary)` token. An unknown token is set to be
|
| 54 |
+
`unk_token` inorder to be converted to an ID.
|
| 55 |
+
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
|
| 56 |
+
A special token separating two different sentences in the same input.
|
| 57 |
+
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
|
| 58 |
+
A special token used to make arrays of tokens the same size for batching purposes.
|
| 59 |
+
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
|
| 60 |
+
A special token used for sequence classification. It is the last token of the sequence when built with
|
| 61 |
+
special tokens.
|
| 62 |
+
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
|
| 63 |
+
A special token representing a masked token. This is the token used in the masked language modeling task
|
| 64 |
+
which the model tries to predict the original unmasked ones.
|
| 65 |
+
"""
|
| 66 |
+
|
| 67 |
+
# Ernie-M model doesn't have token_type embedding.
|
| 68 |
+
model_input_names: List[str] = ["input_ids"]
|
| 69 |
+
|
| 70 |
+
vocab_files_names = VOCAB_FILES_NAMES
|
| 71 |
+
resource_files_names = RESOURCE_FILES_NAMES
|
| 72 |
+
|
| 73 |
+
def __init__(
|
| 74 |
+
self,
|
| 75 |
+
sentencepiece_model_ckpt,
|
| 76 |
+
vocab_file=None,
|
| 77 |
+
do_lower_case=False,
|
| 78 |
+
encoding="utf8",
|
| 79 |
+
unk_token="[UNK]",
|
| 80 |
+
sep_token="[SEP]",
|
| 81 |
+
pad_token="[PAD]",
|
| 82 |
+
cls_token="[CLS]",
|
| 83 |
+
mask_token="[MASK]",
|
| 84 |
+
sp_model_kwargs: Optional[Dict[str, Any]] = None,
|
| 85 |
+
**kwargs,
|
| 86 |
+
) -> None:
|
| 87 |
+
# Mask token behave like a normal word, i.e. include the space before it and
|
| 88 |
+
# is included in the raw text, there should be a match in a non-normalized sentence.
|
| 89 |
+
|
| 90 |
+
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
|
| 91 |
+
|
| 92 |
+
self.do_lower_case = do_lower_case
|
| 93 |
+
self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
|
| 94 |
+
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
|
| 95 |
+
self.sp_model.Load(sentencepiece_model_ckpt)
|
| 96 |
+
|
| 97 |
+
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
|
| 98 |
+
if vocab_file is not None:
|
| 99 |
+
self.vocab = self.load_vocab(filepath=vocab_file)
|
| 100 |
+
else:
|
| 101 |
+
self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
|
| 102 |
+
self.reverse_vocab = {v: k for k, v in self.vocab.items()}
|
| 103 |
+
|
| 104 |
+
super().__init__(
|
| 105 |
+
do_lower_case=do_lower_case,
|
| 106 |
+
unk_token=unk_token,
|
| 107 |
+
sep_token=sep_token,
|
| 108 |
+
pad_token=pad_token,
|
| 109 |
+
cls_token=cls_token,
|
| 110 |
+
mask_token=mask_token,
|
| 111 |
+
vocab_file=vocab_file,
|
| 112 |
+
encoding=encoding,
|
| 113 |
+
sp_model_kwargs=self.sp_model_kwargs,
|
| 114 |
+
**kwargs,
|
| 115 |
+
)
|
| 116 |
+
|
| 117 |
+
def get_offset_mapping(self, text):
|
| 118 |
+
if text is None:
|
| 119 |
+
return None
|
| 120 |
+
|
| 121 |
+
split_tokens = self.tokenize(text)
|
| 122 |
+
normalized_text, char_mapping = "", []
|
| 123 |
+
|
| 124 |
+
for i, ch in enumerate(text):
|
| 125 |
+
if ch in self.SP_CHAR_MAPPING:
|
| 126 |
+
ch = self.SP_CHAR_MAPPING.get(ch)
|
| 127 |
+
else:
|
| 128 |
+
ch = unicodedata.normalize("NFKC", ch)
|
| 129 |
+
if self.is_whitespace(ch):
|
| 130 |
+
continue
|
| 131 |
+
normalized_text += ch
|
| 132 |
+
char_mapping.extend([i] * len(ch))
|
| 133 |
+
|
| 134 |
+
text, token_mapping, offset = normalized_text, [], 0
|
| 135 |
+
|
| 136 |
+
if self.do_lower_case:
|
| 137 |
+
text = text.lower()
|
| 138 |
+
|
| 139 |
+
for token in split_tokens:
|
| 140 |
+
if token[:1] == "▁":
|
| 141 |
+
token = token[1:]
|
| 142 |
+
start = text[offset:].index(token) + offset
|
| 143 |
+
end = start + len(token)
|
| 144 |
+
|
| 145 |
+
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
|
| 146 |
+
offset = end
|
| 147 |
+
return token_mapping
|
| 148 |
+
|
| 149 |
+
@property
|
| 150 |
+
def vocab_size(self):
|
| 151 |
+
return len(self.vocab)
|
| 152 |
+
|
| 153 |
+
def get_vocab(self):
|
| 154 |
+
return dict(self.vocab, **self.added_tokens_encoder)
|
| 155 |
+
|
| 156 |
+
def __getstate__(self):
|
| 157 |
+
state = self.__dict__.copy()
|
| 158 |
+
state["sp_model"] = None
|
| 159 |
+
return state
|
| 160 |
+
|
| 161 |
+
def __setstate__(self, d):
|
| 162 |
+
self.__dict__ = d
|
| 163 |
+
|
| 164 |
+
# for backward compatibility
|
| 165 |
+
if not hasattr(self, "sp_model_kwargs"):
|
| 166 |
+
self.sp_model_kwargs = {}
|
| 167 |
+
|
| 168 |
+
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
|
| 169 |
+
self.sp_model.Load(self.sentencepiece_model_ckpt)
|
| 170 |
+
|
| 171 |
+
def clean_text(self, text):
|
| 172 |
+
"""Performs invalid character removal and whitespace cleanup on text."""
|
| 173 |
+
return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
|
| 174 |
+
|
| 175 |
+
def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
|
| 176 |
+
"""Tokenize a string."""
|
| 177 |
+
|
| 178 |
+
if self.sp_model_kwargs.get("enable_sampling") is True:
|
| 179 |
+
enable_sampling = True
|
| 180 |
+
if self.sp_model_kwargs.get("alpha") is not None:
|
| 181 |
+
alpha = self.sp_model_kwargs.get("alpha")
|
| 182 |
+
if self.sp_model_kwargs.get("nbest_size") is not None:
|
| 183 |
+
nbest_size = self.sp_model_kwargs.get("nbest_size")
|
| 184 |
+
|
| 185 |
+
if not enable_sampling:
|
| 186 |
+
pieces = self.sp_model.EncodeAsPieces(text)
|
| 187 |
+
else:
|
| 188 |
+
pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
|
| 189 |
+
new_pieces = []
|
| 190 |
+
for pi, piece in enumerate(pieces):
|
| 191 |
+
if piece == SPIECE_UNDERLINE:
|
| 192 |
+
if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
|
| 193 |
+
new_pieces.append(SPIECE_UNDERLINE)
|
| 194 |
+
continue
|
| 195 |
+
else:
|
| 196 |
+
continue
|
| 197 |
+
lst_i = 0
|
| 198 |
+
for i, chunk in enumerate(piece):
|
| 199 |
+
if chunk == SPIECE_UNDERLINE:
|
| 200 |
+
continue
|
| 201 |
+
if self.is_ch_char(chunk) or self.is_punct(chunk):
|
| 202 |
+
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
|
| 203 |
+
new_pieces.append(piece[lst_i:i])
|
| 204 |
+
new_pieces.append(chunk)
|
| 205 |
+
lst_i = i + 1
|
| 206 |
+
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
|
| 207 |
+
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
|
| 208 |
+
new_pieces.append(piece[lst_i:i])
|
| 209 |
+
lst_i = i
|
| 210 |
+
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
|
| 211 |
+
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
|
| 212 |
+
new_pieces.append(piece[lst_i:i])
|
| 213 |
+
lst_i = i
|
| 214 |
+
if len(piece) > lst_i:
|
| 215 |
+
new_pieces.append(piece[lst_i:])
|
| 216 |
+
return new_pieces
|
| 217 |
+
|
| 218 |
+
def convert_tokens_to_string(self, tokens):
    """Join sub-word tokens into a single string, mapping the sentencepiece
    underline marker back to a plain space."""
    joined = "".join(tokens)
    return joined.replace(SPIECE_UNDERLINE, " ").strip()
|
| 222 |
+
|
| 223 |
+
def convert_ids_to_string(self, ids):
    """Decode a sequence of token ids into a single string by converting the
    ids to tokens and joining them, restoring spaces from the sentencepiece
    underline marker."""
    pieces = self.convert_ids_to_tokens(ids)
    return "".join(pieces).replace(SPIECE_UNDERLINE, " ").strip()
|
| 230 |
+
|
| 231 |
+
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
|
| 232 |
+
def _convert_token_to_id(self, token):
|
| 233 |
+
return self.vocab.get(token, self.vocab.get(self.unk_token))
|
| 234 |
+
|
| 235 |
+
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
|
| 236 |
+
def _convert_id_to_token(self, index):
|
| 237 |
+
"""Converts an index (integer) in a token (str) using the vocab."""
|
| 238 |
+
return self.reverse_vocab.get(index, self.unk_token)
|
| 239 |
+
|
| 240 |
+
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
    r"""
    Build model inputs by wrapping one or two id sequences with the ErnieM
    special tokens:

    - single sequence: `[CLS] X [SEP]`
    - pair of sequences: `[CLS] A [SEP] [SEP] B [SEP]`

    Args:
        token_ids_0 (`List[int]`):
            List of IDs to which the special tokens will be added.
        token_ids_1 (`List[int]`, *optional*):
            Optional second list of IDs for sequence pairs.
    Returns:
        `List[int]`: List of input_id with the appropriate special tokens.
    """
    cls = [self.cls_token_id]
    sep = [self.sep_token_id]
    if token_ids_1 is None:
        return cls + token_ids_0 + sep
    # Pair case: the two segments are separated by a double [SEP].
    return cls + token_ids_0 + sep + sep + token_ids_1 + sep
|
| 261 |
+
|
| 262 |
+
def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
    r"""
    Build an offset map from one or two offset maps by inserting `(0, 0)`
    placeholders where the special tokens sit:

    - single sequence: `(0,0) X (0,0)`
    - pair of sequences: `(0,0) A (0,0) (0,0) B (0,0)`

    Args:
        offset_mapping_0 (`List[tuple]`):
            List of char offsets to which the special tokens will be added.
        offset_mapping_1 (`List[tuple]`, *optional*):
            Optional second list of wordpiece offsets for offset mapping pairs.
    Returns:
        `List[tuple]`: List of wordpiece offsets with the appropriate offsets of special tokens.
    """
    special = [(0, 0)]
    if offset_mapping_1 is None:
        return special + offset_mapping_0 + special
    return special + offset_mapping_0 + special + special + offset_mapping_1 + special
|
| 282 |
+
|
| 283 |
+
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
    r"""
    Compute a 0/1 mask marking which positions hold special tokens once the
    sequence(s) are formatted for the model. Called by the tokenizer's
    `encode` method when adding special tokens.

    Args:
        token_ids_0 (`List[int]`):
            List of ids of the first sequence.
        token_ids_1 (`List[int]`, *optional*):
            Optional second list of IDs for sequence pairs.
        already_has_special_tokens (`str`, *optional*, defaults to `False`):
            Whether or not the token list is already formatted with special tokens for the model.
    Returns:
        `List[int]`:
            The list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
    """
    if already_has_special_tokens:
        if token_ids_1 is not None:
            raise ValueError(
                "You should not supply a second sequence if the provided sequence of "
                "ids is already formatted with special tokens for the model."
            )
        special_ids = [self.sep_token_id, self.cls_token_id]
        return [1 if token in special_ids else 0 for token in token_ids_0]

    # [CLS] A [SEP] (+ [SEP] B [SEP] for pairs).
    mask = [1] + [0] * len(token_ids_0) + [1]
    if token_ids_1 is not None:
        mask += [1] + [0] * len(token_ids_1) + [1]
    return mask
|
| 311 |
+
|
| 312 |
+
def create_token_type_ids_from_sequences(
    self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
    """
    Create the token type IDs for the sequences passed, aligned with
    `build_inputs_with_special_tokens` (i.e. including the special-token
    positions). [What are token type IDs?](../glossary#token-type-ids)

    Args:
        token_ids_0 (`List[int]`):
            The first tokenized sequence.
        token_ids_1 (`List[int]`, *optional*):
            The second tokenized sequence.
    Returns:
        `List[int]`: The token type ids.
    """
    if token_ids_1 is None:
        # [CLS] X [SEP] -> all segment 0
        return [0] * (len(token_ids_0) + 2)
    # [CLS] A [SEP] [SEP] B [SEP] -> segment 0 covers [CLS] A, segment 1 the rest
    return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
|
| 335 |
+
|
| 336 |
+
def is_ch_char(self, char):
    """Return True if `char` lies in the CJK Unified Ideographs range
    (U+4E00..U+9FFF)."""
    return "\u4e00" <= char <= "\u9fff"
|
| 343 |
+
|
| 344 |
+
def is_alpha(self, char):
    """Return True if `char` is an ASCII letter (a-z or A-Z)."""
    return ("a" <= char <= "z") or ("A" <= char <= "Z")
|
| 351 |
+
|
| 352 |
+
def is_punct(self, char):
    """Return True if `char` is one of the recognised ASCII or CJK
    punctuation marks."""
    return char in ",;:.?!~，；：。？！《》【】"
|
| 359 |
+
|
| 360 |
+
def is_whitespace(self, char):
    """Return True for the common ASCII whitespace characters, or for any
    single character in Unicode category Zs (space separators)."""
    if char in (" ", "\t", "\n", "\r"):
        return True
    return len(char) == 1 and unicodedata.category(char) == "Zs"
|
| 371 |
+
|
| 372 |
+
def load_vocab(self, filepath):
    """Read a vocabulary file (one token per line) into a token -> index
    dict, where the index is the token's zero-based line number."""
    with io.open(filepath, "r", encoding="utf-8") as reader:
        return {line.rstrip("\n"): idx for idx, line in enumerate(reader)}
|
| 380 |
+
|
| 381 |
+
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
    """Write the vocabulary (one token per line, ordered by index) and the
    serialized sentencepiece model, returning the vocab file path.

    Args:
        save_directory: Directory (or, if not a directory, filename suffix)
            to write the vocab file to.
        filename_prefix: Optional prefix prepended to the vocab file name.

    Returns:
        A 1-tuple with the path of the written vocabulary file.
    """
    prefix = filename_prefix + "-" if filename_prefix else ""
    if os.path.isdir(save_directory):
        vocab_file = os.path.join(save_directory, prefix + VOCAB_FILES_NAMES["vocab_file"])
    else:
        # `save_directory` is treated as a file name in this case.
        vocab_file = prefix + save_directory

    expected_index = 0
    with open(vocab_file, "w", encoding="utf-8") as writer:
        for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
            # Warn on gaps: line number would no longer match the token's index.
            if expected_index != token_index:
                logger.warning(
                    f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                    " Please check that the vocabulary is not corrupted!"
                )
                expected_index = token_index
            writer.write(token + "\n")
            expected_index += 1

    # The sentencepiece model is always written inside `save_directory`.
    tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
    with open(tokenizer_model_file, "wb") as fi:
        fi.write(self.sp_model.serialized_model_proto())

    return (vocab_file,)
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/graphormer/__init__.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import structure: maps submodule name -> public names it provides.
# The configuration is always importable.
_import_structure = {
    "configuration_graphormer": ["GraphormerConfig"],
}

# The modeling submodule requires torch; register it only when torch is
# available, otherwise silently leave it out of the import structure.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_graphormer"] = [
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_graphormer import GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )


else:
    import sys

    # At runtime, replace this module object with a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/graphormer/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (876 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/graphormer/__pycache__/collating_graphormer.cpython-310.pyc
ADDED
|
Binary file (4.74 kB). View file
|
|
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/graphormer/__pycache__/configuration_graphormer.cpython-310.pyc
ADDED
|
Binary file (9.03 kB). View file
|
|
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/graphormer/__pycache__/modeling_graphormer.cpython-310.pyc
ADDED
|
Binary file (25.3 kB). View file
|
|
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/graphormer/algos_graphormer.pyx
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation and HuggingFace
|
| 2 |
+
# Licensed under the MIT License.
|
| 3 |
+
|
| 4 |
+
import cython
|
| 5 |
+
|
| 6 |
+
cimport numpy
|
| 7 |
+
from cython.parallel cimport parallel, prange
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# Reduce this number if matrices are too big for large graphs
|
| 13 |
+
UNREACHABLE_NODE_DISTANCE = 510
|
| 14 |
+
|
| 15 |
+
def floyd_warshall(adjacency_matrix):
    """
    Applies the Floyd-Warshall algorithm to the adjacency matrix, to compute the
    shortest paths distance between all nodes, up to UNREACHABLE_NODE_DISTANCE.

    Returns (M, path): M is the n x n int32 distance matrix; path[i][j] is an
    intermediate node on the shortest i->j path (-1 means a direct edge), or
    UNREACHABLE_NODE_DISTANCE when j is unreachable from i.
    """
    (nrows, ncols) = adjacency_matrix.shape
    assert nrows == ncols
    cdef unsigned int n = nrows

    # Work on a C-contiguous int32 copy so the raw pointer arithmetic below is valid.
    adj_mat_copy = adjacency_matrix.astype(np.int32, order='C', casting='safe', copy=True)
    assert adj_mat_copy.flags['C_CONTIGUOUS']
    cdef numpy.ndarray[numpy.int32_t, ndim=2, mode='c'] M = adj_mat_copy
    cdef numpy.ndarray[numpy.int32_t, ndim=2, mode='c'] path = -1 * np.ones([n, n], dtype=np.int32)

    cdef unsigned int i, j, k
    cdef numpy.int32_t M_ij, M_ik, cost_ikkj
    # M_ptr points at row-major data; row r starts at M_ptr + n*r.
    cdef numpy.int32_t* M_ptr = &M[0,0]
    cdef numpy.int32_t* M_i_ptr
    cdef numpy.int32_t* M_k_ptr

    # set unreachable nodes distance to UNREACHABLE_NODE_DISTANCE
    # (input is an adjacency matrix: 0 means "no edge" except on the diagonal)
    for i in range(n):
        for j in range(n):
            if i == j:
                M[i][j] = 0
            elif M[i][j] == 0:
                M[i][j] = UNREACHABLE_NODE_DISTANCE

    # floyed algo
    # Standard relaxation: try routing i->j through k; record k when it improves.
    for k in range(n):
        M_k_ptr = M_ptr + n*k
        for i in range(n):
            M_i_ptr = M_ptr + n*i
            M_ik = M_i_ptr[k]
            for j in range(n):
                cost_ikkj = M_ik + M_k_ptr[j]
                M_ij = M_i_ptr[j]
                if M_ij > cost_ikkj:
                    M_i_ptr[j] = cost_ikkj
                    path[i][j] = k

    # set unreachable path to UNREACHABLE_NODE_DISTANCE
    # (clamp distances that only exist via "unreachable" pseudo-edges)
    for i in range(n):
        for j in range(n):
            if M[i][j] >= UNREACHABLE_NODE_DISTANCE:
                path[i][j] = UNREACHABLE_NODE_DISTANCE
                M[i][j] = UNREACHABLE_NODE_DISTANCE

    return M, path
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def get_all_edges(path, i, j):
    """
    Recursively collect the intermediate nodes on the shortest path between
    nodes `i` and `j`, using the predecessor matrix produced by floyd_warshall.
    """
    cdef int k = path[i][j]
    # -1 marks a direct edge: there is no intermediate node between i and j.
    if k == -1:
        return []
    left = get_all_edges(path, i, k)
    right = get_all_edges(path, k, j)
    return left + [k] + right
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def gen_edge_input(max_dist, path, edge_feat):
    """
    Generates the full edge feature and adjacency matrix.
    Shape: num_nodes * num_nodes * max_distance_between_nodes * num_edge_features
    Dim 1 is the input node, dim 2 the output node of the edge, dim 3 the depth of the edge, dim 4 the feature
    """
    (nrows, ncols) = path.shape
    assert nrows == ncols
    cdef unsigned int n = nrows
    cdef unsigned int max_dist_copy = max_dist

    # C-contiguous copies so typed-buffer indexing below is valid.
    path_copy = path.astype(long, order='C', casting='safe', copy=True)
    edge_feat_copy = edge_feat.astype(long, order='C', casting='safe', copy=True)
    assert path_copy.flags['C_CONTIGUOUS']
    assert edge_feat_copy.flags['C_CONTIGUOUS']

    # -1 padding marks depths beyond the actual path length between two nodes.
    cdef numpy.ndarray[numpy.int32_t, ndim=4, mode='c'] edge_fea_all = -1 * np.ones([n, n, max_dist_copy, edge_feat.shape[-1]], dtype=np.int32)
    cdef unsigned int i, j, k, num_path, cur

    for i in range(n):
        for j in range(n):
            if i == j:
                continue
            # NOTE(review): path_copy holds predecessor entries; the
            # UNREACHABLE_NODE_DISTANCE sentinel was written into it by
            # floyd_warshall for unreachable pairs.
            if path_copy[i][j] == UNREACHABLE_NODE_DISTANCE:
                continue
            # `path` is deliberately rebound here from the predecessor matrix
            # to the concrete node list i -> ... -> j; the original matrix is
            # still available as path_copy.
            path = [i] + get_all_edges(path_copy, i, j) + [j]
            num_path = len(path) - 1
            # Copy the features of each edge along the path at its depth k.
            for k in range(num_path):
                edge_fea_all[i, j, k, :] = edge_feat_copy[path[k], path[k+1], :]

    return edge_fea_all
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/graphormer/collating_graphormer.py
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation and HuggingFace
|
| 2 |
+
# Licensed under the MIT License.
|
| 3 |
+
|
| 4 |
+
from typing import Any, Dict, List, Mapping
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
import torch
|
| 8 |
+
|
| 9 |
+
from ....utils import is_cython_available, requires_backends
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
if is_cython_available():
|
| 13 |
+
import pyximport
|
| 14 |
+
|
| 15 |
+
pyximport.install(setup_args={"include_dirs": np.get_include()})
|
| 16 |
+
from . import algos_graphormer # noqa E402
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def convert_to_single_emb(x, offset: int = 512):
    """Shift each feature column of `x` into its own disjoint id range so all
    features can share a single embedding table.

    Column c is mapped into [1 + c*offset, ...); a 1-D input is treated as a
    single feature column.
    """
    num_features = x.shape[1] if len(x.shape) > 1 else 1
    shifts = 1 + np.arange(0, num_features * offset, offset, dtype=np.int64)
    return x + shifts
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def preprocess_item(item, keep_features=True):
    """Augment a single graph `item` (a mapping with "edge_index" and
    "num_nodes", optionally "edge_attr", "node_feat" and "y") with the dense
    inputs Graphormer expects: embedding indices, shortest-path matrices and
    the attention bias. Mutates `item` in place and returns it.

    Requires the compiled cython module `algos_graphormer`.
    """
    requires_backends(preprocess_item, ["cython"])

    if keep_features and "edge_attr" in item.keys():  # edge_attr
        edge_attr = np.asarray(item["edge_attr"], dtype=np.int64)
    else:
        edge_attr = np.ones((len(item["edge_index"][0]), 1), dtype=np.int64)  # same embedding for all

    if keep_features and "node_feat" in item.keys():  # input_nodes
        node_feature = np.asarray(item["node_feat"], dtype=np.int64)
    else:
        node_feature = np.ones((item["num_nodes"], 1), dtype=np.int64)  # same embedding for all

    edge_index = np.asarray(item["edge_index"], dtype=np.int64)

    # +1 reserves index 0 inside each feature's embedding range.
    input_nodes = convert_to_single_emb(node_feature) + 1
    num_nodes = item["num_nodes"]

    # Ensure edge features are 2-D: (num_edges, num_edge_features).
    if len(edge_attr.shape) == 1:
        edge_attr = edge_attr[:, None]
    attn_edge_type = np.zeros([num_nodes, num_nodes, edge_attr.shape[-1]], dtype=np.int64)
    attn_edge_type[edge_index[0], edge_index[1]] = convert_to_single_emb(edge_attr) + 1

    # node adj matrix [num_nodes, num_nodes] bool
    adj = np.zeros([num_nodes, num_nodes], dtype=bool)
    adj[edge_index[0], edge_index[1]] = True

    shortest_path_result, path = algos_graphormer.floyd_warshall(adj)
    max_dist = np.amax(shortest_path_result)

    input_edges = algos_graphormer.gen_edge_input(max_dist, path, attn_edge_type)
    attn_bias = np.zeros([num_nodes + 1, num_nodes + 1], dtype=np.single)  # with graph token

    # combine
    item["input_nodes"] = input_nodes + 1  # we shift all indices by one for padding
    item["attn_bias"] = attn_bias
    item["attn_edge_type"] = attn_edge_type
    item["spatial_pos"] = shortest_path_result.astype(np.int64) + 1  # we shift all indices by one for padding
    item["in_degree"] = np.sum(adj, axis=1).reshape(-1) + 1  # we shift all indices by one for padding
    item["out_degree"] = item["in_degree"]  # for undirected graph
    item["input_edges"] = input_edges + 1  # we shift all indices by one for padding
    if "labels" not in item:
        item["labels"] = item["y"]

    return item
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class GraphormerDataCollator:
    """Collates preprocessed graph items into padded batch tensors for
    Graphormer. Requires the cython preprocessing module."""

    def __init__(self, spatial_pos_max=20, on_the_fly_processing=False):
        # spatial_pos_max: node pairs farther apart than this get their
        # attention bias masked to -inf.
        # on_the_fly_processing: run preprocess_item on each feature at
        # collation time instead of expecting pre-processed inputs.
        if not is_cython_available():
            raise ImportError("Graphormer preprocessing needs Cython (pyximport)")

        self.spatial_pos_max = spatial_pos_max
        self.on_the_fly_processing = on_the_fly_processing

    def __call__(self, features: List[dict]) -> Dict[str, Any]:
        """Pad every graph in `features` to the largest node count in the
        batch and stack the results into a dict of tensors."""
        if self.on_the_fly_processing:
            features = [preprocess_item(i) for i in features]

        # Accept objects with attributes instead of mappings.
        if not isinstance(features[0], Mapping):
            features = [vars(f) for f in features]
        batch = {}

        # Batch-wide maximum sizes used for padding.
        max_node_num = max(len(i["input_nodes"]) for i in features)
        node_feat_size = len(features[0]["input_nodes"][0])
        edge_feat_size = len(features[0]["attn_edge_type"][0][0])
        max_dist = max(len(i["input_edges"][0][0]) for i in features)
        edge_input_size = len(features[0]["input_edges"][0][0][0])
        batch_size = len(features)

        # +1 on the node dimension accounts for the virtual graph token.
        batch["attn_bias"] = torch.zeros(batch_size, max_node_num + 1, max_node_num + 1, dtype=torch.float)
        batch["attn_edge_type"] = torch.zeros(batch_size, max_node_num, max_node_num, edge_feat_size, dtype=torch.long)
        batch["spatial_pos"] = torch.zeros(batch_size, max_node_num, max_node_num, dtype=torch.long)
        batch["in_degree"] = torch.zeros(batch_size, max_node_num, dtype=torch.long)
        batch["input_nodes"] = torch.zeros(batch_size, max_node_num, node_feat_size, dtype=torch.long)
        batch["input_edges"] = torch.zeros(
            batch_size, max_node_num, max_node_num, max_dist, edge_input_size, dtype=torch.long
        )

        for ix, f in enumerate(features):
            for k in ["attn_bias", "attn_edge_type", "spatial_pos", "in_degree", "input_nodes", "input_edges"]:
                f[k] = torch.tensor(f[k])

            # Mask attention between node pairs farther than spatial_pos_max
            # (row/col 0 of attn_bias belongs to the graph token, hence [1:, 1:]).
            if len(f["attn_bias"][1:, 1:][f["spatial_pos"] >= self.spatial_pos_max]) > 0:
                f["attn_bias"][1:, 1:][f["spatial_pos"] >= self.spatial_pos_max] = float("-inf")

            # Copy each graph's tensors into the top-left corner of its padded slot.
            batch["attn_bias"][ix, : f["attn_bias"].shape[0], : f["attn_bias"].shape[1]] = f["attn_bias"]
            batch["attn_edge_type"][ix, : f["attn_edge_type"].shape[0], : f["attn_edge_type"].shape[1], :] = f[
                "attn_edge_type"
            ]
            batch["spatial_pos"][ix, : f["spatial_pos"].shape[0], : f["spatial_pos"].shape[1]] = f["spatial_pos"]
            batch["in_degree"][ix, : f["in_degree"].shape[0]] = f["in_degree"]
            batch["input_nodes"][ix, : f["input_nodes"].shape[0], :] = f["input_nodes"]
            batch["input_edges"][
                ix, : f["input_edges"].shape[0], : f["input_edges"].shape[1], : f["input_edges"].shape[2], :
            ] = f["input_edges"]

        batch["out_degree"] = batch["in_degree"]

        sample = features[0]["labels"]
        if len(sample) == 1:  # one task
            # NOTE(review): both single-task branches are currently identical;
            # the split is kept for readability / future divergence.
            if isinstance(sample[0], float):  # regression
                batch["labels"] = torch.from_numpy(np.concatenate([i["labels"] for i in features]))
            else:  # binary classification
                batch["labels"] = torch.from_numpy(np.concatenate([i["labels"] for i in features]))
        else:  # multi task classification, left to float to keep the NaNs
            batch["labels"] = torch.from_numpy(np.stack([i["labels"] for i in features], axis=0))

        return batch
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/graphormer/configuration_graphormer.py
ADDED
|
@@ -0,0 +1,215 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2022 Microsoft, clefourrier and The HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""Graphormer model configuration"""
|
| 16 |
+
|
| 17 |
+
from ....configuration_utils import PretrainedConfig
|
| 18 |
+
from ....utils import logging
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
logger = logging.get_logger(__name__)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class GraphormerConfig(PretrainedConfig):
|
| 25 |
+
r"""
|
| 26 |
+
This is the configuration class to store the configuration of a [`~GraphormerModel`]. It is used to instantiate an
|
| 27 |
+
Graphormer model according to the specified arguments, defining the model architecture. Instantiating a
|
| 28 |
+
configuration with the defaults will yield a similar configuration to that of the Graphormer
|
| 29 |
+
[graphormer-base-pcqm4mv1](https://huggingface.co/graphormer-base-pcqm4mv1) architecture.
|
| 30 |
+
|
| 31 |
+
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
| 32 |
+
documentation from [`PretrainedConfig`] for more information.
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
Args:
|
| 36 |
+
num_classes (`int`, *optional*, defaults to 1):
|
| 37 |
+
Number of target classes or labels, set to n for binary classification of n tasks.
|
| 38 |
+
num_atoms (`int`, *optional*, defaults to 512*9):
|
| 39 |
+
Number of node types in the graphs.
|
| 40 |
+
num_edges (`int`, *optional*, defaults to 512*3):
|
| 41 |
+
Number of edges types in the graph.
|
| 42 |
+
num_in_degree (`int`, *optional*, defaults to 512):
|
| 43 |
+
Number of in degrees types in the input graphs.
|
| 44 |
+
num_out_degree (`int`, *optional*, defaults to 512):
|
| 45 |
+
Number of out degrees types in the input graphs.
|
| 46 |
+
num_edge_dis (`int`, *optional*, defaults to 128):
|
| 47 |
+
Number of edge dis in the input graphs.
|
| 48 |
+
multi_hop_max_dist (`int`, *optional*, defaults to 20):
|
| 49 |
+
Maximum distance of multi hop edges between two nodes.
|
| 50 |
+
spatial_pos_max (`int`, *optional*, defaults to 1024):
|
| 51 |
+
Maximum distance between nodes in the graph attention bias matrices, used during preprocessing and
|
| 52 |
+
collation.
|
| 53 |
+
edge_type (`str`, *optional*, defaults to multihop):
|
| 54 |
+
Type of edge relation chosen.
|
| 55 |
+
max_nodes (`int`, *optional*, defaults to 512):
|
| 56 |
+
Maximum number of nodes which can be parsed for the input graphs.
|
| 57 |
+
share_input_output_embed (`bool`, *optional*, defaults to `False`):
|
| 58 |
+
Shares the embedding layer between encoder and decoder - careful, True is not implemented.
|
| 59 |
+
num_layers (`int`, *optional*, defaults to 12):
|
| 60 |
+
Number of layers.
|
| 61 |
+
embedding_dim (`int`, *optional*, defaults to 768):
|
| 62 |
+
Dimension of the embedding layer in encoder.
|
| 63 |
+
ffn_embedding_dim (`int`, *optional*, defaults to 768):
|
| 64 |
+
Dimension of the "intermediate" (often named feed-forward) layer in encoder.
|
| 65 |
+
num_attention_heads (`int`, *optional*, defaults to 32):
|
| 66 |
+
Number of attention heads in the encoder.
|
| 67 |
+
self_attention (`bool`, *optional*, defaults to `True`):
|
| 68 |
+
Model is self attentive (False not implemented).
|
| 69 |
+
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
|
| 70 |
+
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
|
| 71 |
+
`"relu"`, `"silu"` and `"gelu_new"` are supported.
|
| 72 |
+
dropout (`float`, *optional*, defaults to 0.1):
|
| 73 |
+
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
|
| 74 |
+
attention_dropout (`float`, *optional*, defaults to 0.1):
|
| 75 |
+
The dropout probability for the attention weights.
|
| 76 |
+
activation_dropout (`float`, *optional*, defaults to 0.1):
|
| 77 |
+
The dropout probability for the activation of the linear transformer layer.
|
| 78 |
+
layerdrop (`float`, *optional*, defaults to 0.0):
|
| 79 |
+
The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
|
| 80 |
+
for more details.
|
| 81 |
+
bias (`bool`, *optional*, defaults to `True`):
|
| 82 |
+
Uses bias in the attention module - unsupported at the moment.
|
| 83 |
+
embed_scale(`float`, *optional*, defaults to None):
|
| 84 |
+
Scaling factor for the node embeddings.
|
| 85 |
+
num_trans_layers_to_freeze (`int`, *optional*, defaults to 0):
|
| 86 |
+
Number of transformer layers to freeze.
|
| 87 |
+
encoder_normalize_before (`bool`, *optional*, defaults to `False`):
|
| 88 |
+
Normalize features before encoding the graph.
|
| 89 |
+
pre_layernorm (`bool`, *optional*, defaults to `False`):
|
| 90 |
+
Apply layernorm before self attention and the feed forward network. Without this, post layernorm will be
|
| 91 |
+
used.
|
| 92 |
+
apply_graphormer_init (`bool`, *optional*, defaults to `False`):
|
| 93 |
+
Apply a custom graphormer initialisation to the model before training.
|
| 94 |
+
freeze_embeddings (`bool`, *optional*, defaults to `False`):
|
| 95 |
+
Freeze the embedding layer, or train it along the model.
|
| 96 |
+
encoder_normalize_before (`bool`, *optional*, defaults to `False`):
|
| 97 |
+
Apply the layer norm before each encoder block.
|
| 98 |
+
q_noise (`float`, *optional*, defaults to 0.0):
|
| 99 |
+
Amount of quantization noise (see "Training with Quantization Noise for Extreme Model Compression"). (For
|
| 100 |
+
more detail, see fairseq's documentation on quant_noise).
|
| 101 |
+
qn_block_size (`int`, *optional*, defaults to 8):
|
| 102 |
+
Size of the blocks for subsequent quantization with iPQ (see q_noise).
|
| 103 |
+
kdim (`int`, *optional*, defaults to None):
|
| 104 |
+
Dimension of the key in the attention, if different from the other values.
|
| 105 |
+
vdim (`int`, *optional*, defaults to None):
|
| 106 |
+
Dimension of the value in the attention, if different from the other values.
|
| 107 |
+
use_cache (`bool`, *optional*, defaults to `True`):
|
| 108 |
+
Whether or not the model should return the last key/values attentions (not used by all models).
|
| 109 |
+
traceable (`bool`, *optional*, defaults to `False`):
|
| 110 |
+
Changes return value of the encoder's inner_state to stacked tensors.
|
| 111 |
+
|
| 112 |
+
Example:
|
| 113 |
+
```python
|
| 114 |
+
>>> from transformers import GraphormerForGraphClassification, GraphormerConfig
|
| 115 |
+
|
| 116 |
+
>>> # Initializing a Graphormer graphormer-base-pcqm4mv2 style configuration
|
| 117 |
+
>>> configuration = GraphormerConfig()
|
| 118 |
+
|
| 119 |
+
>>> # Initializing a model from the graphormer-base-pcqm4mv1 style configuration
|
| 120 |
+
>>> model = GraphormerForGraphClassification(configuration)
|
| 121 |
+
|
| 122 |
+
>>> # Accessing the model configuration
|
| 123 |
+
>>> configuration = model.config
|
| 124 |
+
```
|
| 125 |
+
"""
|
| 126 |
+
|
| 127 |
+
model_type = "graphormer"
|
| 128 |
+
keys_to_ignore_at_inference = ["past_key_values"]
|
| 129 |
+
|
| 130 |
+
def __init__(
|
| 131 |
+
self,
|
| 132 |
+
num_classes: int = 1,
|
| 133 |
+
num_atoms: int = 512 * 9,
|
| 134 |
+
num_edges: int = 512 * 3,
|
| 135 |
+
num_in_degree: int = 512,
|
| 136 |
+
num_out_degree: int = 512,
|
| 137 |
+
num_spatial: int = 512,
|
| 138 |
+
num_edge_dis: int = 128,
|
| 139 |
+
multi_hop_max_dist: int = 5, # sometimes is 20
|
| 140 |
+
spatial_pos_max: int = 1024,
|
| 141 |
+
edge_type: str = "multi_hop",
|
| 142 |
+
max_nodes: int = 512,
|
| 143 |
+
share_input_output_embed: bool = False,
|
| 144 |
+
num_hidden_layers: int = 12,
|
| 145 |
+
embedding_dim: int = 768,
|
| 146 |
+
ffn_embedding_dim: int = 768,
|
| 147 |
+
num_attention_heads: int = 32,
|
| 148 |
+
dropout: float = 0.1,
|
| 149 |
+
attention_dropout: float = 0.1,
|
| 150 |
+
activation_dropout: float = 0.1,
|
| 151 |
+
layerdrop: float = 0.0,
|
| 152 |
+
encoder_normalize_before: bool = False,
|
| 153 |
+
pre_layernorm: bool = False,
|
| 154 |
+
apply_graphormer_init: bool = False,
|
| 155 |
+
activation_fn: str = "gelu",
|
| 156 |
+
embed_scale: float = None,
|
| 157 |
+
freeze_embeddings: bool = False,
|
| 158 |
+
num_trans_layers_to_freeze: int = 0,
|
| 159 |
+
traceable: bool = False,
|
| 160 |
+
q_noise: float = 0.0,
|
| 161 |
+
qn_block_size: int = 8,
|
| 162 |
+
kdim: int = None,
|
| 163 |
+
vdim: int = None,
|
| 164 |
+
bias: bool = True,
|
| 165 |
+
self_attention: bool = True,
|
| 166 |
+
pad_token_id=0,
|
| 167 |
+
bos_token_id=1,
|
| 168 |
+
eos_token_id=2,
|
| 169 |
+
**kwargs,
|
| 170 |
+
):
|
| 171 |
+
self.num_classes = num_classes
|
| 172 |
+
self.num_atoms = num_atoms
|
| 173 |
+
self.num_in_degree = num_in_degree
|
| 174 |
+
self.num_out_degree = num_out_degree
|
| 175 |
+
self.num_edges = num_edges
|
| 176 |
+
self.num_spatial = num_spatial
|
| 177 |
+
self.num_edge_dis = num_edge_dis
|
| 178 |
+
self.edge_type = edge_type
|
| 179 |
+
self.multi_hop_max_dist = multi_hop_max_dist
|
| 180 |
+
self.spatial_pos_max = spatial_pos_max
|
| 181 |
+
self.max_nodes = max_nodes
|
| 182 |
+
self.num_hidden_layers = num_hidden_layers
|
| 183 |
+
self.embedding_dim = embedding_dim
|
| 184 |
+
self.hidden_size = embedding_dim
|
| 185 |
+
self.ffn_embedding_dim = ffn_embedding_dim
|
| 186 |
+
self.num_attention_heads = num_attention_heads
|
| 187 |
+
self.dropout = dropout
|
| 188 |
+
self.attention_dropout = attention_dropout
|
| 189 |
+
self.activation_dropout = activation_dropout
|
| 190 |
+
self.layerdrop = layerdrop
|
| 191 |
+
self.encoder_normalize_before = encoder_normalize_before
|
| 192 |
+
self.pre_layernorm = pre_layernorm
|
| 193 |
+
self.apply_graphormer_init = apply_graphormer_init
|
| 194 |
+
self.activation_fn = activation_fn
|
| 195 |
+
self.embed_scale = embed_scale
|
| 196 |
+
self.freeze_embeddings = freeze_embeddings
|
| 197 |
+
self.num_trans_layers_to_freeze = num_trans_layers_to_freeze
|
| 198 |
+
self.share_input_output_embed = share_input_output_embed
|
| 199 |
+
self.traceable = traceable
|
| 200 |
+
self.q_noise = q_noise
|
| 201 |
+
self.qn_block_size = qn_block_size
|
| 202 |
+
|
| 203 |
+
# These parameters are here for future extensions
|
| 204 |
+
# atm, the model only supports self attention
|
| 205 |
+
self.kdim = kdim
|
| 206 |
+
self.vdim = vdim
|
| 207 |
+
self.self_attention = self_attention
|
| 208 |
+
self.bias = bias
|
| 209 |
+
|
| 210 |
+
super().__init__(
|
| 211 |
+
pad_token_id=pad_token_id,
|
| 212 |
+
bos_token_id=bos_token_id,
|
| 213 |
+
eos_token_id=eos_token_id,
|
| 214 |
+
**kwargs,
|
| 215 |
+
)
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/graphormer/modeling_graphormer.py
ADDED
|
@@ -0,0 +1,908 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2022 Microsoft, clefourrier The HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""PyTorch Graphormer model."""
|
| 16 |
+
|
| 17 |
+
import math
|
| 18 |
+
from typing import Iterable, Iterator, List, Optional, Tuple, Union
|
| 19 |
+
|
| 20 |
+
import torch
|
| 21 |
+
import torch.nn as nn
|
| 22 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
| 23 |
+
|
| 24 |
+
from ....activations import ACT2FN
|
| 25 |
+
from ....modeling_outputs import (
|
| 26 |
+
BaseModelOutputWithNoAttention,
|
| 27 |
+
SequenceClassifierOutput,
|
| 28 |
+
)
|
| 29 |
+
from ....modeling_utils import PreTrainedModel
|
| 30 |
+
from ....utils import logging
|
| 31 |
+
from .configuration_graphormer import GraphormerConfig
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
logger = logging.get_logger(__name__)
|
| 35 |
+
|
| 36 |
+
_CHECKPOINT_FOR_DOC = "graphormer-base-pcqm4mv1"
|
| 37 |
+
_CONFIG_FOR_DOC = "GraphormerConfig"
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def quant_noise(module: nn.Module, p: float, block_size: int):
|
| 41 |
+
"""
|
| 42 |
+
From:
|
| 43 |
+
https://github.com/facebookresearch/fairseq/blob/dd0079bde7f678b0cd0715cbd0ae68d661b7226d/fairseq/modules/quant_noise.py
|
| 44 |
+
|
| 45 |
+
Wraps modules and applies quantization noise to the weights for subsequent quantization with Iterative Product
|
| 46 |
+
Quantization as described in "Training with Quantization Noise for Extreme Model Compression"
|
| 47 |
+
|
| 48 |
+
Args:
|
| 49 |
+
- module: nn.Module
|
| 50 |
+
- p: amount of Quantization Noise
|
| 51 |
+
- block_size: size of the blocks for subsequent quantization with iPQ
|
| 52 |
+
|
| 53 |
+
Remarks:
|
| 54 |
+
- Module weights must have the right sizes wrt the block size
|
| 55 |
+
- Only Linear, Embedding and Conv2d modules are supported for the moment
|
| 56 |
+
- For more detail on how to quantize by blocks with convolutional weights, see "And the Bit Goes Down:
|
| 57 |
+
Revisiting the Quantization of Neural Networks"
|
| 58 |
+
- We implement the simplest form of noise here as stated in the paper which consists in randomly dropping
|
| 59 |
+
blocks
|
| 60 |
+
"""
|
| 61 |
+
|
| 62 |
+
# if no quantization noise, don't register hook
|
| 63 |
+
if p <= 0:
|
| 64 |
+
return module
|
| 65 |
+
|
| 66 |
+
# supported modules
|
| 67 |
+
if not isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d)):
|
| 68 |
+
raise NotImplementedError("Module unsupported for quant_noise.")
|
| 69 |
+
|
| 70 |
+
# test whether module.weight has the right sizes wrt block_size
|
| 71 |
+
is_conv = module.weight.ndim == 4
|
| 72 |
+
|
| 73 |
+
# 2D matrix
|
| 74 |
+
if not is_conv:
|
| 75 |
+
if module.weight.size(1) % block_size != 0:
|
| 76 |
+
raise AssertionError("Input features must be a multiple of block sizes")
|
| 77 |
+
|
| 78 |
+
# 4D matrix
|
| 79 |
+
else:
|
| 80 |
+
# 1x1 convolutions
|
| 81 |
+
if module.kernel_size == (1, 1):
|
| 82 |
+
if module.in_channels % block_size != 0:
|
| 83 |
+
raise AssertionError("Input channels must be a multiple of block sizes")
|
| 84 |
+
# regular convolutions
|
| 85 |
+
else:
|
| 86 |
+
k = module.kernel_size[0] * module.kernel_size[1]
|
| 87 |
+
if k % block_size != 0:
|
| 88 |
+
raise AssertionError("Kernel size must be a multiple of block size")
|
| 89 |
+
|
| 90 |
+
def _forward_pre_hook(mod, input):
|
| 91 |
+
# no noise for evaluation
|
| 92 |
+
if mod.training:
|
| 93 |
+
if not is_conv:
|
| 94 |
+
# gather weight and sizes
|
| 95 |
+
weight = mod.weight
|
| 96 |
+
in_features = weight.size(1)
|
| 97 |
+
out_features = weight.size(0)
|
| 98 |
+
|
| 99 |
+
# split weight matrix into blocks and randomly drop selected blocks
|
| 100 |
+
mask = torch.zeros(in_features // block_size * out_features, device=weight.device)
|
| 101 |
+
mask.bernoulli_(p)
|
| 102 |
+
mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)
|
| 103 |
+
|
| 104 |
+
else:
|
| 105 |
+
# gather weight and sizes
|
| 106 |
+
weight = mod.weight
|
| 107 |
+
in_channels = mod.in_channels
|
| 108 |
+
out_channels = mod.out_channels
|
| 109 |
+
|
| 110 |
+
# split weight matrix into blocks and randomly drop selected blocks
|
| 111 |
+
if mod.kernel_size == (1, 1):
|
| 112 |
+
mask = torch.zeros(
|
| 113 |
+
int(in_channels // block_size * out_channels),
|
| 114 |
+
device=weight.device,
|
| 115 |
+
)
|
| 116 |
+
mask.bernoulli_(p)
|
| 117 |
+
mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)
|
| 118 |
+
else:
|
| 119 |
+
mask = torch.zeros(weight.size(0), weight.size(1), device=weight.device)
|
| 120 |
+
mask.bernoulli_(p)
|
| 121 |
+
mask = mask.unsqueeze(2).unsqueeze(3).repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])
|
| 122 |
+
|
| 123 |
+
# scale weights and apply mask
|
| 124 |
+
mask = mask.to(torch.bool) # x.bool() is not currently supported in TorchScript
|
| 125 |
+
s = 1 / (1 - p)
|
| 126 |
+
mod.weight.data = s * weight.masked_fill(mask, 0)
|
| 127 |
+
|
| 128 |
+
module.register_forward_pre_hook(_forward_pre_hook)
|
| 129 |
+
return module
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
class LayerDropModuleList(nn.ModuleList):
|
| 133 |
+
"""
|
| 134 |
+
From:
|
| 135 |
+
https://github.com/facebookresearch/fairseq/blob/dd0079bde7f678b0cd0715cbd0ae68d661b7226d/fairseq/modules/layer_drop.py
|
| 136 |
+
A LayerDrop implementation based on [`torch.nn.ModuleList`]. LayerDrop as described in
|
| 137 |
+
https://arxiv.org/abs/1909.11556.
|
| 138 |
+
|
| 139 |
+
We refresh the choice of which layers to drop every time we iterate over the LayerDropModuleList instance. During
|
| 140 |
+
evaluation we always iterate over all layers.
|
| 141 |
+
|
| 142 |
+
Usage:
|
| 143 |
+
|
| 144 |
+
```python
|
| 145 |
+
layers = LayerDropList(p=0.5, modules=[layer1, layer2, layer3])
|
| 146 |
+
for layer in layers: # this might iterate over layers 1 and 3
|
| 147 |
+
x = layer(x)
|
| 148 |
+
for layer in layers: # this might iterate over all layers
|
| 149 |
+
x = layer(x)
|
| 150 |
+
for layer in layers: # this might not iterate over any layers
|
| 151 |
+
x = layer(x)
|
| 152 |
+
```
|
| 153 |
+
|
| 154 |
+
Args:
|
| 155 |
+
p (float): probability of dropping out each layer
|
| 156 |
+
modules (iterable, optional): an iterable of modules to add
|
| 157 |
+
"""
|
| 158 |
+
|
| 159 |
+
def __init__(self, p: float, modules: Optional[Iterable[nn.Module]] = None):
|
| 160 |
+
super().__init__(modules)
|
| 161 |
+
self.p = p
|
| 162 |
+
|
| 163 |
+
def __iter__(self) -> Iterator[nn.Module]:
|
| 164 |
+
dropout_probs = torch.empty(len(self)).uniform_()
|
| 165 |
+
for i, m in enumerate(super().__iter__()):
|
| 166 |
+
if not self.training or (dropout_probs[i] > self.p):
|
| 167 |
+
yield m
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
class GraphormerGraphNodeFeature(nn.Module):
|
| 171 |
+
"""
|
| 172 |
+
Compute node features for each node in the graph.
|
| 173 |
+
"""
|
| 174 |
+
|
| 175 |
+
def __init__(self, config: GraphormerConfig):
|
| 176 |
+
super().__init__()
|
| 177 |
+
self.num_heads = config.num_attention_heads
|
| 178 |
+
self.num_atoms = config.num_atoms
|
| 179 |
+
|
| 180 |
+
self.atom_encoder = nn.Embedding(config.num_atoms + 1, config.hidden_size, padding_idx=config.pad_token_id)
|
| 181 |
+
self.in_degree_encoder = nn.Embedding(
|
| 182 |
+
config.num_in_degree, config.hidden_size, padding_idx=config.pad_token_id
|
| 183 |
+
)
|
| 184 |
+
self.out_degree_encoder = nn.Embedding(
|
| 185 |
+
config.num_out_degree, config.hidden_size, padding_idx=config.pad_token_id
|
| 186 |
+
)
|
| 187 |
+
|
| 188 |
+
self.graph_token = nn.Embedding(1, config.hidden_size)
|
| 189 |
+
|
| 190 |
+
def forward(
|
| 191 |
+
self,
|
| 192 |
+
input_nodes: torch.LongTensor,
|
| 193 |
+
in_degree: torch.LongTensor,
|
| 194 |
+
out_degree: torch.LongTensor,
|
| 195 |
+
) -> torch.Tensor:
|
| 196 |
+
n_graph, n_node = input_nodes.size()[:2]
|
| 197 |
+
|
| 198 |
+
node_feature = ( # node feature + graph token
|
| 199 |
+
self.atom_encoder(input_nodes).sum(dim=-2) # [n_graph, n_node, n_hidden]
|
| 200 |
+
+ self.in_degree_encoder(in_degree)
|
| 201 |
+
+ self.out_degree_encoder(out_degree)
|
| 202 |
+
)
|
| 203 |
+
|
| 204 |
+
graph_token_feature = self.graph_token.weight.unsqueeze(0).repeat(n_graph, 1, 1)
|
| 205 |
+
|
| 206 |
+
graph_node_feature = torch.cat([graph_token_feature, node_feature], dim=1)
|
| 207 |
+
|
| 208 |
+
return graph_node_feature
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
class GraphormerGraphAttnBias(nn.Module):
|
| 212 |
+
"""
|
| 213 |
+
Compute attention bias for each head.
|
| 214 |
+
"""
|
| 215 |
+
|
| 216 |
+
def __init__(self, config: GraphormerConfig):
|
| 217 |
+
super().__init__()
|
| 218 |
+
self.num_heads = config.num_attention_heads
|
| 219 |
+
self.multi_hop_max_dist = config.multi_hop_max_dist
|
| 220 |
+
|
| 221 |
+
# We do not change edge feature embedding learning, as edge embeddings are represented as a combination of the original features
|
| 222 |
+
# + shortest path
|
| 223 |
+
self.edge_encoder = nn.Embedding(config.num_edges + 1, config.num_attention_heads, padding_idx=0)
|
| 224 |
+
|
| 225 |
+
self.edge_type = config.edge_type
|
| 226 |
+
if self.edge_type == "multi_hop":
|
| 227 |
+
self.edge_dis_encoder = nn.Embedding(
|
| 228 |
+
config.num_edge_dis * config.num_attention_heads * config.num_attention_heads,
|
| 229 |
+
1,
|
| 230 |
+
)
|
| 231 |
+
|
| 232 |
+
self.spatial_pos_encoder = nn.Embedding(config.num_spatial, config.num_attention_heads, padding_idx=0)
|
| 233 |
+
|
| 234 |
+
self.graph_token_virtual_distance = nn.Embedding(1, config.num_attention_heads)
|
| 235 |
+
|
| 236 |
+
def forward(
|
| 237 |
+
self,
|
| 238 |
+
input_nodes: torch.LongTensor,
|
| 239 |
+
attn_bias: torch.Tensor,
|
| 240 |
+
spatial_pos: torch.LongTensor,
|
| 241 |
+
input_edges: torch.LongTensor,
|
| 242 |
+
attn_edge_type: torch.LongTensor,
|
| 243 |
+
) -> torch.Tensor:
|
| 244 |
+
n_graph, n_node = input_nodes.size()[:2]
|
| 245 |
+
graph_attn_bias = attn_bias.clone()
|
| 246 |
+
graph_attn_bias = graph_attn_bias.unsqueeze(1).repeat(
|
| 247 |
+
1, self.num_heads, 1, 1
|
| 248 |
+
) # [n_graph, n_head, n_node+1, n_node+1]
|
| 249 |
+
|
| 250 |
+
# spatial pos
|
| 251 |
+
# [n_graph, n_node, n_node, n_head] -> [n_graph, n_head, n_node, n_node]
|
| 252 |
+
spatial_pos_bias = self.spatial_pos_encoder(spatial_pos).permute(0, 3, 1, 2)
|
| 253 |
+
graph_attn_bias[:, :, 1:, 1:] = graph_attn_bias[:, :, 1:, 1:] + spatial_pos_bias
|
| 254 |
+
|
| 255 |
+
# reset spatial pos here
|
| 256 |
+
t = self.graph_token_virtual_distance.weight.view(1, self.num_heads, 1)
|
| 257 |
+
graph_attn_bias[:, :, 1:, 0] = graph_attn_bias[:, :, 1:, 0] + t
|
| 258 |
+
graph_attn_bias[:, :, 0, :] = graph_attn_bias[:, :, 0, :] + t
|
| 259 |
+
|
| 260 |
+
# edge feature
|
| 261 |
+
if self.edge_type == "multi_hop":
|
| 262 |
+
spatial_pos_ = spatial_pos.clone()
|
| 263 |
+
|
| 264 |
+
spatial_pos_[spatial_pos_ == 0] = 1 # set pad to 1
|
| 265 |
+
# set 1 to 1, input_nodes > 1 to input_nodes - 1
|
| 266 |
+
spatial_pos_ = torch.where(spatial_pos_ > 1, spatial_pos_ - 1, spatial_pos_)
|
| 267 |
+
if self.multi_hop_max_dist > 0:
|
| 268 |
+
spatial_pos_ = spatial_pos_.clamp(0, self.multi_hop_max_dist)
|
| 269 |
+
input_edges = input_edges[:, :, :, : self.multi_hop_max_dist, :]
|
| 270 |
+
# [n_graph, n_node, n_node, max_dist, n_head]
|
| 271 |
+
|
| 272 |
+
input_edges = self.edge_encoder(input_edges).mean(-2)
|
| 273 |
+
max_dist = input_edges.size(-2)
|
| 274 |
+
edge_input_flat = input_edges.permute(3, 0, 1, 2, 4).reshape(max_dist, -1, self.num_heads)
|
| 275 |
+
edge_input_flat = torch.bmm(
|
| 276 |
+
edge_input_flat,
|
| 277 |
+
self.edge_dis_encoder.weight.reshape(-1, self.num_heads, self.num_heads)[:max_dist, :, :],
|
| 278 |
+
)
|
| 279 |
+
input_edges = edge_input_flat.reshape(max_dist, n_graph, n_node, n_node, self.num_heads).permute(
|
| 280 |
+
1, 2, 3, 0, 4
|
| 281 |
+
)
|
| 282 |
+
input_edges = (input_edges.sum(-2) / (spatial_pos_.float().unsqueeze(-1))).permute(0, 3, 1, 2)
|
| 283 |
+
else:
|
| 284 |
+
# [n_graph, n_node, n_node, n_head] -> [n_graph, n_head, n_node, n_node]
|
| 285 |
+
input_edges = self.edge_encoder(attn_edge_type).mean(-2).permute(0, 3, 1, 2)
|
| 286 |
+
|
| 287 |
+
graph_attn_bias[:, :, 1:, 1:] = graph_attn_bias[:, :, 1:, 1:] + input_edges
|
| 288 |
+
graph_attn_bias = graph_attn_bias + attn_bias.unsqueeze(1) # reset
|
| 289 |
+
|
| 290 |
+
return graph_attn_bias
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
class GraphormerMultiheadAttention(nn.Module):
|
| 294 |
+
"""Multi-headed attention.
|
| 295 |
+
|
| 296 |
+
See "Attention Is All You Need" for more details.
|
| 297 |
+
"""
|
| 298 |
+
|
| 299 |
+
def __init__(self, config: GraphormerConfig):
|
| 300 |
+
super().__init__()
|
| 301 |
+
self.embedding_dim = config.embedding_dim
|
| 302 |
+
self.kdim = config.kdim if config.kdim is not None else config.embedding_dim
|
| 303 |
+
self.vdim = config.vdim if config.vdim is not None else config.embedding_dim
|
| 304 |
+
self.qkv_same_dim = self.kdim == config.embedding_dim and self.vdim == config.embedding_dim
|
| 305 |
+
|
| 306 |
+
self.num_heads = config.num_attention_heads
|
| 307 |
+
self.attention_dropout_module = torch.nn.Dropout(p=config.attention_dropout, inplace=False)
|
| 308 |
+
|
| 309 |
+
self.head_dim = config.embedding_dim // config.num_attention_heads
|
| 310 |
+
if not (self.head_dim * config.num_attention_heads == self.embedding_dim):
|
| 311 |
+
raise AssertionError("The embedding_dim must be divisible by num_heads.")
|
| 312 |
+
self.scaling = self.head_dim**-0.5
|
| 313 |
+
|
| 314 |
+
self.self_attention = True # config.self_attention
|
| 315 |
+
if not (self.self_attention):
|
| 316 |
+
raise NotImplementedError("The Graphormer model only supports self attention for now.")
|
| 317 |
+
if self.self_attention and not self.qkv_same_dim:
|
| 318 |
+
raise AssertionError("Self-attention requires query, key and value to be of the same size.")
|
| 319 |
+
|
| 320 |
+
self.k_proj = quant_noise(
|
| 321 |
+
nn.Linear(self.kdim, config.embedding_dim, bias=config.bias),
|
| 322 |
+
config.q_noise,
|
| 323 |
+
config.qn_block_size,
|
| 324 |
+
)
|
| 325 |
+
self.v_proj = quant_noise(
|
| 326 |
+
nn.Linear(self.vdim, config.embedding_dim, bias=config.bias),
|
| 327 |
+
config.q_noise,
|
| 328 |
+
config.qn_block_size,
|
| 329 |
+
)
|
| 330 |
+
self.q_proj = quant_noise(
|
| 331 |
+
nn.Linear(config.embedding_dim, config.embedding_dim, bias=config.bias),
|
| 332 |
+
config.q_noise,
|
| 333 |
+
config.qn_block_size,
|
| 334 |
+
)
|
| 335 |
+
|
| 336 |
+
self.out_proj = quant_noise(
|
| 337 |
+
nn.Linear(config.embedding_dim, config.embedding_dim, bias=config.bias),
|
| 338 |
+
config.q_noise,
|
| 339 |
+
config.qn_block_size,
|
| 340 |
+
)
|
| 341 |
+
|
| 342 |
+
self.onnx_trace = False
|
| 343 |
+
|
| 344 |
+
def reset_parameters(self):
|
| 345 |
+
if self.qkv_same_dim:
|
| 346 |
+
# Empirically observed the convergence to be much better with
|
| 347 |
+
# the scaled initialization
|
| 348 |
+
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
|
| 349 |
+
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
|
| 350 |
+
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
|
| 351 |
+
else:
|
| 352 |
+
nn.init.xavier_uniform_(self.k_proj.weight)
|
| 353 |
+
nn.init.xavier_uniform_(self.v_proj.weight)
|
| 354 |
+
nn.init.xavier_uniform_(self.q_proj.weight)
|
| 355 |
+
|
| 356 |
+
nn.init.xavier_uniform_(self.out_proj.weight)
|
| 357 |
+
if self.out_proj.bias is not None:
|
| 358 |
+
nn.init.constant_(self.out_proj.bias, 0.0)
|
| 359 |
+
|
| 360 |
+
def forward(
|
| 361 |
+
self,
|
| 362 |
+
query: torch.LongTensor,
|
| 363 |
+
key: Optional[torch.Tensor],
|
| 364 |
+
value: Optional[torch.Tensor],
|
| 365 |
+
attn_bias: Optional[torch.Tensor],
|
| 366 |
+
key_padding_mask: Optional[torch.Tensor] = None,
|
| 367 |
+
need_weights: bool = True,
|
| 368 |
+
attn_mask: Optional[torch.Tensor] = None,
|
| 369 |
+
before_softmax: bool = False,
|
| 370 |
+
need_head_weights: bool = False,
|
| 371 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
|
| 372 |
+
"""
|
| 373 |
+
Args:
|
| 374 |
+
key_padding_mask (Bytetorch.Tensor, optional): mask to exclude
|
| 375 |
+
keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s.
|
| 376 |
+
need_weights (bool, optional): return the attention weights,
|
| 377 |
+
averaged over heads (default: False).
|
| 378 |
+
attn_mask (Bytetorch.Tensor, optional): typically used to
|
| 379 |
+
implement causal attention, where the mask prevents the attention from looking forward in time
|
| 380 |
+
(default: None).
|
| 381 |
+
before_softmax (bool, optional): return the raw attention
|
| 382 |
+
weights and values before the attention softmax.
|
| 383 |
+
need_head_weights (bool, optional): return the attention
|
| 384 |
+
weights for each head. Implies *need_weights*. Default: return the average attention weights over all
|
| 385 |
+
heads.
|
| 386 |
+
"""
|
| 387 |
+
if need_head_weights:
|
| 388 |
+
need_weights = True
|
| 389 |
+
|
| 390 |
+
tgt_len, bsz, embedding_dim = query.size()
|
| 391 |
+
src_len = tgt_len
|
| 392 |
+
if not (embedding_dim == self.embedding_dim):
|
| 393 |
+
raise AssertionError(
|
| 394 |
+
f"The query embedding dimension {embedding_dim} is not equal to the expected embedding_dim"
|
| 395 |
+
f" {self.embedding_dim}."
|
| 396 |
+
)
|
| 397 |
+
if not (list(query.size()) == [tgt_len, bsz, embedding_dim]):
|
| 398 |
+
raise AssertionError("Query size incorrect in Graphormer, compared to model dimensions.")
|
| 399 |
+
|
| 400 |
+
if key is not None:
|
| 401 |
+
src_len, key_bsz, _ = key.size()
|
| 402 |
+
if not torch.jit.is_scripting():
|
| 403 |
+
if (key_bsz != bsz) or (value is None) or not (src_len, bsz == value.shape[:2]):
|
| 404 |
+
raise AssertionError(
|
| 405 |
+
"The batch shape does not match the key or value shapes provided to the attention."
|
| 406 |
+
)
|
| 407 |
+
|
| 408 |
+
q = self.q_proj(query)
|
| 409 |
+
k = self.k_proj(query)
|
| 410 |
+
v = self.v_proj(query)
|
| 411 |
+
|
| 412 |
+
q *= self.scaling
|
| 413 |
+
|
| 414 |
+
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
|
| 415 |
+
if k is not None:
|
| 416 |
+
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
|
| 417 |
+
if v is not None:
|
| 418 |
+
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
|
| 419 |
+
|
| 420 |
+
if (k is None) or not (k.size(1) == src_len):
|
| 421 |
+
raise AssertionError("The shape of the key generated in the attention is incorrect")
|
| 422 |
+
|
| 423 |
+
# This is part of a workaround to get around fork/join parallelism
|
| 424 |
+
# not supporting Optional types.
|
| 425 |
+
if key_padding_mask is not None and key_padding_mask.dim() == 0:
|
| 426 |
+
key_padding_mask = None
|
| 427 |
+
|
| 428 |
+
if key_padding_mask is not None:
|
| 429 |
+
if key_padding_mask.size(0) != bsz or key_padding_mask.size(1) != src_len:
|
| 430 |
+
raise AssertionError(
|
| 431 |
+
"The shape of the generated padding mask for the key does not match expected dimensions."
|
| 432 |
+
)
|
| 433 |
+
attn_weights = torch.bmm(q, k.transpose(1, 2))
|
| 434 |
+
attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
|
| 435 |
+
|
| 436 |
+
if list(attn_weights.size()) != [bsz * self.num_heads, tgt_len, src_len]:
|
| 437 |
+
raise AssertionError("The attention weights generated do not match the expected dimensions.")
|
| 438 |
+
|
| 439 |
+
if attn_bias is not None:
|
| 440 |
+
attn_weights += attn_bias.view(bsz * self.num_heads, tgt_len, src_len)
|
| 441 |
+
|
| 442 |
+
if attn_mask is not None:
|
| 443 |
+
attn_mask = attn_mask.unsqueeze(0)
|
| 444 |
+
attn_weights += attn_mask
|
| 445 |
+
|
| 446 |
+
if key_padding_mask is not None:
|
| 447 |
+
# don't attend to padding symbols
|
| 448 |
+
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
|
| 449 |
+
attn_weights = attn_weights.masked_fill(
|
| 450 |
+
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf")
|
| 451 |
+
)
|
| 452 |
+
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
|
| 453 |
+
|
| 454 |
+
if before_softmax:
|
| 455 |
+
return attn_weights, v
|
| 456 |
+
|
| 457 |
+
attn_weights_float = torch.nn.functional.softmax(attn_weights, dim=-1)
|
| 458 |
+
attn_weights = attn_weights_float.type_as(attn_weights)
|
| 459 |
+
attn_probs = self.attention_dropout_module(attn_weights)
|
| 460 |
+
|
| 461 |
+
if v is None:
|
| 462 |
+
raise AssertionError("No value generated")
|
| 463 |
+
attn = torch.bmm(attn_probs, v)
|
| 464 |
+
if list(attn.size()) != [bsz * self.num_heads, tgt_len, self.head_dim]:
|
| 465 |
+
raise AssertionError("The attention generated do not match the expected dimensions.")
|
| 466 |
+
|
| 467 |
+
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embedding_dim)
|
| 468 |
+
attn: torch.Tensor = self.out_proj(attn)
|
| 469 |
+
|
| 470 |
+
attn_weights = None
|
| 471 |
+
if need_weights:
|
| 472 |
+
attn_weights = attn_weights_float.contiguous().view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0)
|
| 473 |
+
if not need_head_weights:
|
| 474 |
+
# average attention weights over heads
|
| 475 |
+
attn_weights = attn_weights.mean(dim=0)
|
| 476 |
+
|
| 477 |
+
return attn, attn_weights
|
| 478 |
+
|
| 479 |
+
def apply_sparse_mask(self, attn_weights: torch.Tensor, tgt_len: int, src_len: int, bsz: int) -> torch.Tensor:
|
| 480 |
+
return attn_weights
|
| 481 |
+
|
| 482 |
+
|
| 483 |
+
class GraphormerGraphEncoderLayer(nn.Module):
    """One Graphormer transformer layer: multi-head self-attention followed by a
    two-layer feed-forward network, each wrapped with a residual connection and
    a LayerNorm applied either before (pre-layernorm) or after (post-layernorm)
    the sub-module, depending on ``config.pre_layernorm``.
    """

    def __init__(self, config: GraphormerConfig) -> None:
        super().__init__()

        # Initialize parameters
        self.embedding_dim = config.embedding_dim
        self.num_attention_heads = config.num_attention_heads
        self.q_noise = config.q_noise
        self.qn_block_size = config.qn_block_size
        # Selects pre- vs post-layernorm ordering in forward().
        self.pre_layernorm = config.pre_layernorm

        self.dropout_module = torch.nn.Dropout(p=config.dropout, inplace=False)

        self.activation_dropout_module = torch.nn.Dropout(p=config.activation_dropout, inplace=False)

        # Initialize blocks
        self.activation_fn = ACT2FN[config.activation_fn]
        self.self_attn = GraphormerMultiheadAttention(config)

        # layer norm associated with the self attention layer
        self.self_attn_layer_norm = nn.LayerNorm(self.embedding_dim)

        # Position-wise feed-forward: embedding_dim -> ffn_embedding_dim -> embedding_dim,
        # each linear optionally wrapped with quantization noise (see build_fc).
        self.fc1 = self.build_fc(
            self.embedding_dim,
            config.ffn_embedding_dim,
            q_noise=config.q_noise,
            qn_block_size=config.qn_block_size,
        )
        self.fc2 = self.build_fc(
            config.ffn_embedding_dim,
            self.embedding_dim,
            q_noise=config.q_noise,
            qn_block_size=config.qn_block_size,
        )

        # layer norm associated with the position wise feed-forward NN
        self.final_layer_norm = nn.LayerNorm(self.embedding_dim)

    def build_fc(
        self, input_dim: int, output_dim: int, q_noise: float, qn_block_size: int
    ) -> Union[nn.Module, nn.Linear, nn.Embedding, nn.Conv2d]:
        # Linear layer wrapped with quantization noise (quant_noise is a no-op
        # wrapper when q_noise is 0 — presumably; confirm against its definition).
        return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)

    def forward(
        self,
        input_nodes: torch.Tensor,
        self_attn_bias: Optional[torch.Tensor] = None,
        self_attn_mask: Optional[torch.Tensor] = None,
        self_attn_padding_mask: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """
        nn.LayerNorm is applied either before or after the self-attention/ffn modules similar to the original
        Transformer implementation.
        """
        # --- self-attention sub-block with residual connection ---
        residual = input_nodes
        if self.pre_layernorm:
            input_nodes = self.self_attn_layer_norm(input_nodes)

        # Self-attention: query, key and value are all the node representations.
        input_nodes, attn = self.self_attn(
            query=input_nodes,
            key=input_nodes,
            value=input_nodes,
            attn_bias=self_attn_bias,
            key_padding_mask=self_attn_padding_mask,
            need_weights=False,
            attn_mask=self_attn_mask,
        )
        input_nodes = self.dropout_module(input_nodes)
        input_nodes = residual + input_nodes
        if not self.pre_layernorm:
            input_nodes = self.self_attn_layer_norm(input_nodes)

        # --- feed-forward sub-block with residual connection ---
        residual = input_nodes
        if self.pre_layernorm:
            input_nodes = self.final_layer_norm(input_nodes)
        input_nodes = self.activation_fn(self.fc1(input_nodes))
        input_nodes = self.activation_dropout_module(input_nodes)
        input_nodes = self.fc2(input_nodes)
        input_nodes = self.dropout_module(input_nodes)
        input_nodes = residual + input_nodes
        if not self.pre_layernorm:
            input_nodes = self.final_layer_norm(input_nodes)

        # attn is None here since need_weights=False was passed above.
        return input_nodes, attn
|
| 567 |
+
|
| 568 |
+
|
| 569 |
+
class GraphormerGraphEncoder(nn.Module):
    """Stack of Graphormer encoder layers with graph-specific input features
    (node/degree embeddings and structural attention biases)."""

    def __init__(self, config: GraphormerConfig):
        super().__init__()

        self.dropout_module = torch.nn.Dropout(p=config.dropout, inplace=False)
        self.layerdrop = config.layerdrop
        self.embedding_dim = config.embedding_dim
        self.apply_graphormer_init = config.apply_graphormer_init
        # When True, forward() stacks the inner states into a single tensor.
        self.traceable = config.traceable

        # Graph-specific input modules: node feature embedding and attention bias.
        self.graph_node_feature = GraphormerGraphNodeFeature(config)
        self.graph_attn_bias = GraphormerGraphAttnBias(config)

        self.embed_scale = config.embed_scale

        # Optional quantization-noise projection applied to the embeddings.
        if config.q_noise > 0:
            self.quant_noise = quant_noise(
                nn.Linear(self.embedding_dim, self.embedding_dim, bias=False),
                config.q_noise,
                config.qn_block_size,
            )
        else:
            self.quant_noise = None

        if config.encoder_normalize_before:
            self.emb_layer_norm = nn.LayerNorm(self.embedding_dim)
        else:
            self.emb_layer_norm = None

        # NOTE(review): final_layer_norm is only created when pre_layernorm is
        # set and is not referenced in this class's forward() — presumably used
        # elsewhere or vestigial; confirm.
        if config.pre_layernorm:
            self.final_layer_norm = nn.LayerNorm(self.embedding_dim)

        # LayerDrop randomly skips whole layers during training when layerdrop > 0.
        if self.layerdrop > 0.0:
            self.layers = LayerDropModuleList(p=self.layerdrop)
        else:
            self.layers = nn.ModuleList([])
        self.layers.extend([GraphormerGraphEncoderLayer(config) for _ in range(config.num_hidden_layers)])

        # Apply initialization of model params after building the model
        if config.freeze_embeddings:
            raise NotImplementedError("Freezing embeddings is not implemented yet.")

        # Freeze the first num_trans_layers_to_freeze transformer layers.
        for layer in range(config.num_trans_layers_to_freeze):
            m = self.layers[layer]
            if m is not None:
                for p in m.parameters():
                    p.requires_grad = False

    def forward(
        self,
        input_nodes: torch.LongTensor,
        input_edges: torch.LongTensor,
        attn_bias: torch.Tensor,
        in_degree: torch.LongTensor,
        out_degree: torch.LongTensor,
        spatial_pos: torch.LongTensor,
        attn_edge_type: torch.LongTensor,
        perturb=None,
        last_state_only: bool = False,
        token_embeddings: Optional[torch.Tensor] = None,
        attn_mask: Optional[torch.Tensor] = None,
    ) -> Tuple[Union[torch.Tensor, List[torch.LongTensor]], torch.Tensor]:
        """Encode a batch of graphs.

        Returns a tuple ``(inner_states, graph_rep)`` where ``inner_states`` is
        the list of per-layer hidden states (or a stacked tensor when
        ``self.traceable``) and ``graph_rep`` is the representation of the
        first (graph-level) token of the final layer.
        """
        # compute padding mask. This is needed for multi-head attention
        data_x = input_nodes
        n_graph, n_node = data_x.size()[:2]
        # A node is padding when its first feature is 0 — assumes feature id 0
        # is reserved for padding; TODO confirm against the featurizer.
        padding_mask = (data_x[:, :, 0]).eq(0)
        # The graph-level (CLS-like) token prepended below is never padding.
        padding_mask_cls = torch.zeros(n_graph, 1, device=padding_mask.device, dtype=padding_mask.dtype)
        padding_mask = torch.cat((padding_mask_cls, padding_mask), dim=1)

        attn_bias = self.graph_attn_bias(input_nodes, attn_bias, spatial_pos, input_edges, attn_edge_type)

        if token_embeddings is not None:
            input_nodes = token_embeddings
        else:
            input_nodes = self.graph_node_feature(input_nodes, in_degree, out_degree)

        # Optional adversarial/noise perturbation on node tokens (skips token 0).
        if perturb is not None:
            input_nodes[:, 1:, :] += perturb

        if self.embed_scale is not None:
            input_nodes = input_nodes * self.embed_scale

        if self.quant_noise is not None:
            input_nodes = self.quant_noise(input_nodes)

        if self.emb_layer_norm is not None:
            input_nodes = self.emb_layer_norm(input_nodes)

        input_nodes = self.dropout_module(input_nodes)

        # Layers operate in (seq, batch, dim) order.
        input_nodes = input_nodes.transpose(0, 1)

        inner_states = []
        if not last_state_only:
            inner_states.append(input_nodes)

        for layer in self.layers:
            input_nodes, _ = layer(
                input_nodes,
                self_attn_padding_mask=padding_mask,
                self_attn_mask=attn_mask,
                self_attn_bias=attn_bias,
            )
            if not last_state_only:
                inner_states.append(input_nodes)

        # Representation of the first (graph-level) token for every graph.
        graph_rep = input_nodes[0, :, :]

        if last_state_only:
            inner_states = [input_nodes]

        if self.traceable:
            return torch.stack(inner_states), graph_rep
        else:
            return inner_states, graph_rep
|
| 684 |
+
|
| 685 |
+
|
| 686 |
+
class GraphormerDecoderHead(nn.Module):
    """Output head projecting node embeddings to class logits, plus a learned
    scalar output bias.

    num_classes should be 1 for regression, or the number of classes for classification.
    """

    def __init__(self, embedding_dim: int, num_classes: int):
        super().__init__()
        # NOTE: the explanatory string used to sit after super().__init__() as a
        # bare statement (a no-op, never exposed as __doc__); it now lives in the
        # class docstring above.
        # Learned bias added to every logit; initialized to zero so the head
        # initially behaves like the bias-free linear projection alone.
        self.lm_output_learned_bias = nn.Parameter(torch.zeros(1))
        self.classifier = nn.Linear(embedding_dim, num_classes, bias=False)
        self.num_classes = num_classes

    def forward(self, input_nodes: torch.Tensor, **unused) -> torch.Tensor:
        """Project ``input_nodes`` (..., embedding_dim) to (..., num_classes) logits
        and add the learned output bias."""
        input_nodes = self.classifier(input_nodes)
        input_nodes = input_nodes + self.lm_output_learned_bias
        return input_nodes
|
| 698 |
+
|
| 699 |
+
|
| 700 |
+
class GraphormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = GraphormerConfig
    base_model_prefix = "graphormer"
    main_input_name_nodes = "input_nodes"
    main_input_name_edges = "input_edges"

    def normal_(self, data: torch.Tensor):
        """In-place N(0, 0.02) initialization of ``data``."""
        # with FSDP, module params will be on CUDA, so we cast them back to CPU
        # so that the RNG is consistent with and without FSDP
        data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device))

    def init_graphormer_params(self, module: Union[nn.Linear, nn.Embedding, GraphormerMultiheadAttention]):
        """
        Initialize the weights specific to the Graphormer Model.
        """
        if isinstance(module, nn.Linear):
            self.normal_(module.weight.data)
            if module.bias is not None:
                module.bias.data.zero_()
        if isinstance(module, nn.Embedding):
            self.normal_(module.weight.data)
            if module.padding_idx is not None:
                # Keep the padding embedding at zero.
                module.weight.data[module.padding_idx].zero_()
        if isinstance(module, GraphormerMultiheadAttention):
            self.normal_(module.q_proj.weight.data)
            self.normal_(module.k_proj.weight.data)
            self.normal_(module.v_proj.weight.data)

    def _init_weights(
        self,
        module: Union[
            nn.Linear, nn.Conv2d, nn.Embedding, nn.LayerNorm, GraphormerMultiheadAttention, GraphormerGraphEncoder
        ],
    ):
        """
        Initialize the weights
        """
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # We might be missing part of the Linear init, dependant on the layer num
            module.weight.data.normal_(mean=0.0, std=0.02)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=0.02)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, GraphormerMultiheadAttention):
            module.q_proj.weight.data.normal_(mean=0.0, std=0.02)
            module.k_proj.weight.data.normal_(mean=0.0, std=0.02)
            module.v_proj.weight.data.normal_(mean=0.0, std=0.02)
            module.reset_parameters()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, GraphormerGraphEncoder):
            if module.apply_graphormer_init:
                module.apply(self.init_graphormer_params)
        # A second, identical `elif isinstance(module, nn.LayerNorm)` branch that
        # used to follow here was removed: it was unreachable dead code because
        # the LayerNorm check above already captures every LayerNorm instance.
|
| 766 |
+
|
| 767 |
+
|
| 768 |
+
class GraphormerModel(GraphormerPreTrainedModel):
    """The Graphormer model is a graph-encoder model.

    It goes from a graph to its representation. If you want to use the model for a downstream classification task, use
    GraphormerForGraphClassification instead. For any other downstream task, feel free to add a new class, or combine
    this model with a downstream model of your choice, following the example in GraphormerForGraphClassification.
    """

    def __init__(self, config: GraphormerConfig):
        super().__init__(config)
        # NOTE(review): this instance attribute shadows the max_nodes() method
        # defined below — on instances, `self.max_nodes` is the int, and the
        # method is unreachable; confirm which one is intended.
        self.max_nodes = config.max_nodes

        self.graph_encoder = GraphormerGraphEncoder(config)

        self.share_input_output_embed = config.share_input_output_embed
        self.lm_output_learned_bias = None

        # Remove head is set to true during fine-tuning
        self.load_softmax = not getattr(config, "remove_head", False)

        # Output transform: linear -> activation -> layernorm applied in forward().
        self.lm_head_transform_weight = nn.Linear(config.embedding_dim, config.embedding_dim)
        self.activation_fn = ACT2FN[config.activation_fn]
        self.layer_norm = nn.LayerNorm(config.embedding_dim)

        self.post_init()

    def reset_output_layer_parameters(self):
        # Re-create the learned output bias (a fresh zero scalar parameter).
        self.lm_output_learned_bias = nn.Parameter(torch.zeros(1))

    def forward(
        self,
        input_nodes: torch.LongTensor,
        input_edges: torch.LongTensor,
        attn_bias: torch.Tensor,
        in_degree: torch.LongTensor,
        out_degree: torch.LongTensor,
        spatial_pos: torch.LongTensor,
        attn_edge_type: torch.LongTensor,
        perturb: Optional[torch.FloatTensor] = None,
        masked_tokens: None = None,
        return_dict: Optional[bool] = None,
        **unused,
    ) -> Union[Tuple[torch.LongTensor], BaseModelOutputWithNoAttention]:
        """Encode the input graphs and return transformed hidden states.

        Returns a `BaseModelOutputWithNoAttention` (or a tuple when
        `return_dict` is falsy) with the transformed last hidden state and the
        per-layer inner states from the graph encoder.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        inner_states, graph_rep = self.graph_encoder(
            input_nodes, input_edges, attn_bias, in_degree, out_degree, spatial_pos, attn_edge_type, perturb=perturb
        )

        # last inner state, then revert Batch and Graph len
        input_nodes = inner_states[-1].transpose(0, 1)

        # project masked tokens only
        if masked_tokens is not None:
            raise NotImplementedError

        input_nodes = self.layer_norm(self.activation_fn(self.lm_head_transform_weight(input_nodes)))

        # project back to size of vocabulary
        if self.share_input_output_embed and hasattr(self.graph_encoder.embed_tokens, "weight"):
            input_nodes = torch.nn.functional.linear(input_nodes, self.graph_encoder.embed_tokens.weight)

        if not return_dict:
            return tuple(x for x in [input_nodes, inner_states] if x is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=input_nodes, hidden_states=inner_states)

    def max_nodes(self):
        """Maximum output length supported by the encoder."""
        # NOTE(review): shadowed by the self.max_nodes attribute set in
        # __init__; as written, instance lookups never reach this method.
        return self.max_nodes
|
| 837 |
+
|
| 838 |
+
|
| 839 |
+
class GraphormerForGraphClassification(GraphormerPreTrainedModel):
    """
    This model can be used for graph-level classification or regression tasks.

    It can be trained on
    - regression (by setting config.num_classes to 1); there should be one float-type label per graph
    - one task classification (by setting config.num_classes to the number of classes); there should be one integer
      label per graph
    - binary multi-task classification (by setting config.num_classes to the number of labels); there should be a list
      of integer labels for each graph.
    """

    def __init__(self, config: GraphormerConfig):
        super().__init__(config)
        self.encoder = GraphormerModel(config)
        self.embedding_dim = config.embedding_dim
        self.num_classes = config.num_classes
        self.classifier = GraphormerDecoderHead(self.embedding_dim, self.num_classes)
        self.is_encoder_decoder = True

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_nodes: torch.LongTensor,
        input_edges: torch.LongTensor,
        attn_bias: torch.Tensor,
        in_degree: torch.LongTensor,
        out_degree: torch.LongTensor,
        spatial_pos: torch.LongTensor,
        attn_edge_type: torch.LongTensor,
        labels: Optional[torch.LongTensor] = None,
        return_dict: Optional[bool] = None,
        **unused,
    ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
        """Classify (or regress on) whole graphs.

        The loss used depends on ``self.num_classes`` and the label shape:
        MSE for regression (num_classes == 1), cross-entropy for single-task
        classification, BCE-with-logits otherwise (binary multi-task).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_outputs = self.encoder(
            input_nodes,
            input_edges,
            attn_bias,
            in_degree,
            out_degree,
            spatial_pos,
            attn_edge_type,
            return_dict=True,
        )
        outputs, hidden_states = encoder_outputs["last_hidden_state"], encoder_outputs["hidden_states"]

        head_outputs = self.classifier(outputs)
        # Graph-level logits are read from the first token's position.
        logits = head_outputs[:, 0, :].contiguous()

        loss = None
        if labels is not None:
            # Ignore NaN labels (e.g. missing targets in multi-task setups).
            mask = ~torch.isnan(labels)

            if self.num_classes == 1:  # regression
                loss_fct = MSELoss()
                loss = loss_fct(logits[mask].squeeze(), labels[mask].squeeze().float())
            elif self.num_classes > 1 and len(labels.shape) == 1:  # One task classification
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits[mask].view(-1, self.num_classes), labels[mask].view(-1))
            else:  # Binary multi-task classification
                loss_fct = BCEWithLogitsLoss(reduction="sum")
                loss = loss_fct(logits[mask], labels[mask])

        if not return_dict:
            return tuple(x for x in [loss, logits, hidden_states] if x is not None)
        return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=hidden_states, attentions=None)
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/jukebox/__init__.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: configuration and tokenizer are always importable;
# the torch-backed modeling classes are added below only when torch is present.
_import_structure = {
    "configuration_jukebox": [
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: modeling classes are simply not registered.
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    # Static-analysis path: real imports so type checkers see the symbols.
    from .configuration_jukebox import (
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    # Runtime path: replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/jukebox/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (983 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/jukebox/__pycache__/configuration_jukebox.cpython-310.pyc
ADDED
|
Binary file (21.7 kB). View file
|
|
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/jukebox/__pycache__/modeling_jukebox.cpython-310.pyc
ADDED
|
Binary file (81.1 kB). View file
|
|
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/jukebox/__pycache__/tokenization_jukebox.cpython-310.pyc
ADDED
|
Binary file (16.3 kB). View file
|
|
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/jukebox/configuration_jukebox.py
ADDED
|
@@ -0,0 +1,610 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2022 The OpenAI Team Authors and HuggingFace Inc. team.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""Jukebox configuration"""
|
| 16 |
+
|
| 17 |
+
import os
|
| 18 |
+
from typing import List, Union
|
| 19 |
+
|
| 20 |
+
from ....configuration_utils import PretrainedConfig
|
| 21 |
+
from ....utils import logging
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
logger = logging.get_logger(__name__)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
_LARGE_ATTENTION = [
|
| 28 |
+
"block_attn",
|
| 29 |
+
"transpose_block_attn",
|
| 30 |
+
"prev_block_attn",
|
| 31 |
+
"block_attn",
|
| 32 |
+
"transpose_block_attn",
|
| 33 |
+
"prev_block_attn",
|
| 34 |
+
"block_attn",
|
| 35 |
+
"transpose_block_attn",
|
| 36 |
+
"prev_block_attn",
|
| 37 |
+
"block_attn",
|
| 38 |
+
"transpose_block_attn",
|
| 39 |
+
"prev_block_attn",
|
| 40 |
+
"block_attn",
|
| 41 |
+
"transpose_block_attn",
|
| 42 |
+
"prev_block_attn",
|
| 43 |
+
"block_attn",
|
| 44 |
+
"transpose_block_attn",
|
| 45 |
+
"prev_block_attn",
|
| 46 |
+
"cross_attention",
|
| 47 |
+
"block_attn",
|
| 48 |
+
"transpose_block_attn",
|
| 49 |
+
"prev_block_attn",
|
| 50 |
+
"block_attn",
|
| 51 |
+
"transpose_block_attn",
|
| 52 |
+
"prev_block_attn",
|
| 53 |
+
"block_attn",
|
| 54 |
+
"transpose_block_attn",
|
| 55 |
+
"prev_block_attn",
|
| 56 |
+
"cross_attention",
|
| 57 |
+
"block_attn",
|
| 58 |
+
"transpose_block_attn",
|
| 59 |
+
"prev_block_attn",
|
| 60 |
+
"block_attn",
|
| 61 |
+
"transpose_block_attn",
|
| 62 |
+
"prev_block_attn",
|
| 63 |
+
"block_attn",
|
| 64 |
+
"transpose_block_attn",
|
| 65 |
+
"prev_block_attn",
|
| 66 |
+
"cross_attention",
|
| 67 |
+
"block_attn",
|
| 68 |
+
"transpose_block_attn",
|
| 69 |
+
"prev_block_attn",
|
| 70 |
+
"block_attn",
|
| 71 |
+
"transpose_block_attn",
|
| 72 |
+
"prev_block_attn",
|
| 73 |
+
"block_attn",
|
| 74 |
+
"transpose_block_attn",
|
| 75 |
+
"prev_block_attn",
|
| 76 |
+
"cross_attention",
|
| 77 |
+
"block_attn",
|
| 78 |
+
"transpose_block_attn",
|
| 79 |
+
"prev_block_attn",
|
| 80 |
+
"block_attn",
|
| 81 |
+
"transpose_block_attn",
|
| 82 |
+
"prev_block_attn",
|
| 83 |
+
"block_attn",
|
| 84 |
+
"transpose_block_attn",
|
| 85 |
+
"prev_block_attn",
|
| 86 |
+
"cross_attention",
|
| 87 |
+
"block_attn",
|
| 88 |
+
"transpose_block_attn",
|
| 89 |
+
"prev_block_attn",
|
| 90 |
+
"block_attn",
|
| 91 |
+
"transpose_block_attn",
|
| 92 |
+
"prev_block_attn",
|
| 93 |
+
"block_attn",
|
| 94 |
+
"transpose_block_attn",
|
| 95 |
+
"prev_block_attn",
|
| 96 |
+
"cross_attention",
|
| 97 |
+
"block_attn",
|
| 98 |
+
"transpose_block_attn",
|
| 99 |
+
"prev_block_attn",
|
| 100 |
+
"block_attn",
|
| 101 |
+
"transpose_block_attn",
|
| 102 |
+
"prev_block_attn",
|
| 103 |
+
"block_attn",
|
| 104 |
+
"transpose_block_attn",
|
| 105 |
+
"prev_block_attn",
|
| 106 |
+
"cross_attention",
|
| 107 |
+
]
|
| 108 |
+
_RawColumnPreviousRowAttention = ["block_attn", "transpose_block_attn", "prev_block_attn"]
|
| 109 |
+
_FullDenseAttention = ["dense_attention"]
|
| 110 |
+
_PrimePrimeDenseAttention = ["prime_attn", "prime_attn", "dense_attn"]
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def full_dense_attention(layer):
|
| 114 |
+
return _FullDenseAttention[0]
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def raw_column_previous_row_attention(layer):
|
| 118 |
+
return _RawColumnPreviousRowAttention[layer % 3]
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def large_separated_enc_dec_w_lyrics(layer):
|
| 122 |
+
return _LARGE_ATTENTION[layer % 79]
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def enc_dec_with_lyrics(layer):
|
| 126 |
+
if layer % 16 == 15:
|
| 127 |
+
return _PrimePrimeDenseAttention[layer % 3]
|
| 128 |
+
return _RawColumnPreviousRowAttention[layer % 3]
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
ATTENTION_PATTERNS = {
|
| 132 |
+
"full_dense_attention": full_dense_attention,
|
| 133 |
+
"raw_column_previous_row_attention": raw_column_previous_row_attention, # Alternate row, column and previous row attn
|
| 134 |
+
"large_separated_enc_dec_w_lyrics": large_separated_enc_dec_w_lyrics, # Used by large separated_enc_dec model with lyrics
|
| 135 |
+
"enc_dec_with_lyrics": enc_dec_with_lyrics, # Used by encoder_decoder model with lyrics
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
class JukeboxPriorConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`JukeboxPrior`]. It is used to instantiate a
    `JukeboxPrior` according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the top level prior from the
    [openai/jukebox-1b-lyrics](https://huggingface.co/openai/jukebox-1b-lyrics) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        act_fn (`str`, *optional*, defaults to `"quick_gelu"`):
            Activation function.
        alignment_head (`int`, *optional*, defaults to 2):
            Head that is responsible of the alignment between lyrics and music. Only used to compute the lyric to audio
            alignment
        alignment_layer (`int`, *optional*, defaults to 68):
            Index of the layer that is responsible of the alignment between lyrics and music. Only used to compute the
            lyric to audio alignment
        attention_multiplier (`float`, *optional*, defaults to 0.25):
            Multiplier coefficient used to define the hidden dimension of the attention layers. 0.25 means that
            0.25*width of the model will be used.
        attention_pattern (`str`, *optional*, defaults to `"enc_dec_with_lyrics"`):
            Which attention pattern to use for the decoder.
        attn_dropout (`int`, *optional*, defaults to 0):
            Dropout probability for the post-attention layer dropout in the decoder.
        attn_res_scale (`bool`, *optional*, defaults to `False`):
            Whether or not to scale the residuals in the attention conditioner block.
        blocks (`int`, *optional*, defaults to 64):
            Number of blocks used in the `block_attn`. A sequence of length seq_len is factored as `[blocks, seq_len //
            blocks]` in the `JukeboxAttention` layer.
        conv_res_scale (`int`, *optional*):
            Whether or not to scale the residuals in the conditioner block. Since the top level prior does not have a
            conditioner, the default value is to None and should not be modified.
        num_layers (`int`, *optional*, defaults to 72):
            Number of layers of the transformer architecture.
        emb_dropout (`int`, *optional*, defaults to 0):
            Embedding dropout used in the lyric decoder.
        encoder_config (`JukeboxPriorConfig`, *optional*) :
            Configuration of the encoder which models the prior on the lyrics.
        encoder_loss_fraction (`float`, *optional*, defaults to 0.4):
            Multiplication factor used in front of the lyric encoder loss.
        hidden_size (`int`, *optional*, defaults to 2048):
            Hidden dimension of the attention layers.
        init_scale (`float`, *optional*, defaults to 0.2):
            Initialization scales for the prior modules.
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Whether or not the prior is an encoder-decoder model. In case it is not, and `nb_relevant_lyric_tokens` is
            greater than 0, the `encoder` args should be specified for the lyric encoding.
        level (`int`, *optional*, defaults to 0):
            Index of the prior in the hierarchy; used to pick the `prior_{level}` sub-dict when loading from a full
            `JukeboxConfig` (see `from_pretrained` below).
        lyric_vocab_size (`int`, *optional*, defaults to 80):
            Size of the vocabulary used to encode the lyric tokens.
        mask (`bool`, *optional*, defaults to `False`):
            Whether or not to mask the previous positions in the attention.
        max_duration (`int`, *optional*, defaults to 600):
            Maximum supported duration of the generated song in seconds.
        max_nb_genres (`int`, *optional*, defaults to 1):
            Maximum number of genres that can be used to condition the model.
        merged_decoder (`bool`, *optional*, defaults to `True`):
            Whether or not the decoder and the encoder inputs are merged. This is used for the separated
            encoder-decoder architecture
        metadata_conditioning (`bool`, *optional*, defaults to `True`):
            Whether or not to condition on the artist and genre metadata.
        metadata_dims (`List[int]`, *optional*, defaults to `[604, 7898]`):
            Number of genres and the number of artists that were used to train the embedding layers of the prior
            models.
        min_duration (`int`, *optional*, defaults to 0):
            Minimum duration of the generated audio on which the model was trained.
        mlp_multiplier (`float`, *optional*, defaults to 1.0):
            Multiplier coefficient used to define the hidden dimension of the MLP layers. 0.25 means that 0.25*width of
            the model will be used.
        music_vocab_size (`int`, *optional*, defaults to 2048):
            Number of different music tokens. Should be similar to the `JukeboxVQVAEConfig.nb_discrete_codes`.
        n_ctx (`int`, *optional*, defaults to 6144):
            Number of context tokens for each prior. The context tokens are the music tokens that are attended to when
            generating music tokens.
        n_heads (`int`, *optional*, defaults to 2):
            Number of attention heads.
        nb_relevant_lyric_tokens (`int`, *optional*, defaults to 384):
            Number of lyric tokens that are used when sampling a single window of length `n_ctx`
        res_conv_depth (`int`, *optional*, defaults to 3):
            Depth of the `JukeboxDecoderConvBock` used to upsample the previously sampled audio in the
            `JukeboxMusicTokenConditioner`.
        res_conv_width (`int`, *optional*, defaults to 128):
            Width of the `JukeboxDecoderConvBock` used to upsample the previously sampled audio in the
            `JukeboxMusicTokenConditioner`.
        res_convolution_multiplier (`int`, *optional*, defaults to 1):
            Multiplier used to scale the `hidden_dim` of the `JukeboxResConv1DBlock`.
        res_dilation_cycle (`int`, *optional*):
            Dilation cycle used to define the `JukeboxMusicTokenConditioner`. Usually similar to the ones used in the
            corresponding level of the VQVAE. The first prior does not use it as it is not conditioned on upper level
            tokens.
        res_dilation_growth_rate (`int`, *optional*, defaults to 1):
            Dilation grow rate used between each convolutionnal block of the `JukeboxMusicTokenConditioner`
        res_downs_t (`List[int]`, *optional*, defaults to `[3, 2, 2]`):
            Downsampling rates used in the audio conditioning network
        res_strides_t (`List[int]`, *optional*, defaults to `[2, 2, 2]`):
            Striding used in the audio conditioning network
        resid_dropout (`int`, *optional*, defaults to 0):
            Residual dropout used in the attention pattern.
        sampling_rate (`int`, *optional*, defaults to 44100):
            Sampling rate used for training.
        spread (`int`, *optional*):
            Spread used in the `summary_spread_attention` pattern
        timing_dims (`int`, *optional*, defaults to 64):
            Dimension of the timing embedding.
        zero_out (`bool`, *optional*, defaults to `False`):
            Whether or not to zero out convolution weights when initializing.
    """

    model_type = "jukebox_prior"
    # NOTE(review): the mapped targets "n_positions" and "n_head" are never set by
    # __init__ (which defines `n_ctx` and `n_heads` instead), so accessing
    # `config.max_position_embeddings` or `config.num_attention_heads` would raise
    # AttributeError -- confirm against upstream before relying on these aliases.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        act_fn="quick_gelu",
        level=0,
        alignment_head=2,
        alignment_layer=68,
        attention_multiplier=0.25,
        attention_pattern="enc_dec_with_lyrics",
        attn_dropout=0,
        attn_res_scale=False,
        blocks=64,
        conv_res_scale=None,
        num_layers=72,
        emb_dropout=0,
        encoder_config=None,
        encoder_loss_fraction=0.4,
        hidden_size=2048,
        init_scale=0.2,
        is_encoder_decoder=True,
        lyric_vocab_size=80,
        mask=False,
        max_duration=600,
        max_nb_genres=1,
        merged_decoder=True,
        metadata_conditioning=True,
        metadata_dims=[604, 7898],
        min_duration=0,
        mlp_multiplier=1.0,
        music_vocab_size=2048,
        n_ctx=6144,
        n_heads=2,
        nb_relevant_lyric_tokens=384,
        res_conv_depth=3,
        res_conv_width=128,
        res_convolution_multiplier=1,
        res_dilation_cycle=None,
        res_dilation_growth_rate=1,
        res_downs_t=[3, 2, 2],
        res_strides_t=[2, 2, 2],
        resid_dropout=0,
        sampling_rate=44100,
        spread=None,
        timing_dims=64,
        zero_out=False,
        **kwargs,
    ):
        # Plain attribute assignments; the attribute names must stay in sync with
        # the keys serialized in shipped checkpoint configs.
        self.act_fn = act_fn
        self.alignment_head = alignment_head
        self.alignment_layer = alignment_layer
        self.attention_multiplier = attention_multiplier
        self.attention_pattern = attention_pattern
        self.attn_dropout = attn_dropout
        self.attn_res_scale = attn_res_scale
        self.blocks = blocks
        self.conv_res_scale = conv_res_scale
        self.num_layers = num_layers
        self.emb_dropout = emb_dropout
        self.music_vocab_size = music_vocab_size
        # The lyric encoder (if any) is itself described by a nested JukeboxPriorConfig,
        # deserialized here from its dict form.
        if encoder_config is not None:
            self.encoder_config = JukeboxPriorConfig(**encoder_config)
        else:
            self.encoder_config = None
        self.encoder_loss_fraction = encoder_loss_fraction
        self.init_scale = init_scale
        self.is_encoder_decoder = is_encoder_decoder
        self.lyric_vocab_size = lyric_vocab_size
        self.level = level
        self.mask = mask
        self.max_duration = max_duration
        self.max_nb_genres = max_nb_genres
        self.merged_decoder = merged_decoder
        self.metadata_conditioning = metadata_conditioning
        self.metadata_dims = metadata_dims
        self.min_duration = min_duration
        self.mlp_multiplier = mlp_multiplier
        self.n_ctx = n_ctx
        self.n_heads = n_heads
        self.nb_relevant_lyric_tokens = nb_relevant_lyric_tokens
        self.res_conv_depth = res_conv_depth
        self.res_conv_width = res_conv_width
        self.res_convolution_multiplier = res_convolution_multiplier
        self.res_dilation_cycle = res_dilation_cycle
        self.res_dilation_growth_rate = res_dilation_growth_rate
        self.res_downs_t = res_downs_t
        self.res_strides_t = res_strides_t
        self.resid_dropout = resid_dropout
        self.sampling_rate = sampling_rate
        self.spread = spread
        self.timing_dims = timing_dims
        self.hidden_size = hidden_size
        self.zero_out = zero_out
        # NOTE(review): super().__init__(**kwargs) is never called here, so
        # PretrainedConfig-level defaults are not populated and extra kwargs are
        # dropped -- presumably intentional for nested prior configs; verify.

    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path: Union[str, os.PathLike], level=0, **kwargs
    ) -> "PretrainedConfig":
        """
        Load a prior config from `pretrained_model_name_or_path`. When the stored config
        is a full `JukeboxConfig`, the sub-config of the prior at index `level` is used.
        """
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the prior config dict if we are loading from JukeboxConfig
        if config_dict.get("model_type") == "jukebox":
            config_dict = config_dict[f"prior_{level}"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
|
| 366 |
+
|
| 367 |
+
|
| 368 |
+
class JukeboxVQVAEConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`JukeboxVQVAE`]. It is used to instantiate a
    `JukeboxVQVAE` according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the VQVAE from
    [openai/jukebox-1b-lyrics](https://huggingface.co/openai/jukebox-1b-lyrics) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        act_fn (`str`, *optional*, defaults to `"relu"`):
            Activation function of the model.
        nb_discrete_codes (`int`, *optional*, defaults to 2048):
            Number of codes of the VQVAE.
        commit (`float`, *optional*, defaults to 0.02):
            Commit loss multiplier.
        conv_input_shape (`int`, *optional*, defaults to 1):
            Number of audio channels.
        conv_res_scale (`bool`, *optional*, defaults to `False`):
            Whether or not to scale the residuals of the `JukeboxResConv1DBlock`.
        embed_dim (`int`, *optional*, defaults to 64):
            Embedding dimension of the codebook vectors.
        hop_fraction (`List[int]`, *optional*, defaults to `[0.125, 0.5, 0.5]`):
            Fraction of non-intersecting window used when continuing the sampling process.
        levels (`int`, *optional*, defaults to 3):
            Number of hierarchical levels that used in the VQVAE.
        lmu (`float`, *optional*, defaults to 0.99):
            Used in the codebook update, exponential moving average coefficient. For more detail refer to Appendix A.1
            of the original [VQVAE paper](https://arxiv.org/pdf/1711.00937v2.pdf)
        multipliers (`List[int]`, *optional*, defaults to `[2, 1, 1]`):
            Depth and width multipliers used for each level. Used on the `res_conv_width` and `res_conv_depth`
        res_conv_depth (`int`, *optional*, defaults to 4):
            Depth of the encoder and decoder block. If no `multipliers` are used, this is the same for each level.
        res_conv_width (`int`, *optional*, defaults to 32):
            Width of the encoder and decoder block. If no `multipliers` are used, this is the same for each level.
        res_convolution_multiplier (`int`, *optional*, defaults to 1):
            Scaling factor of the hidden dimension used in the `JukeboxResConv1DBlock`.
        res_dilation_cycle (`int`, *optional*):
            Dilation cycle value used in the `JukeboxResnet`. If an int is used, each new Conv1 block will have a depth
            reduced by a power of `res_dilation_cycle`.
        res_dilation_growth_rate (`int`, *optional*, defaults to 3):
            Resnet dilation growth rate used in the VQVAE (dilation_growth_rate ** depth)
        res_downs_t (`List[int]`, *optional*, defaults to `[3, 2, 2]`):
            Downsampling rate for each level of the hierarchical VQ-VAE.
        res_strides_t (`List[int]`, *optional*, defaults to `[2, 2, 2]`):
            Stride used for each level of the hierarchical VQ-VAE.
        sample_length (`int`, *optional*, defaults to 1058304):
            Provides the max input shape of the VQVAE. Is used to compute the input shape of each level.
        init_scale (`float`, *optional*, defaults to 0.2):
            Initialization scale.
        zero_out (`bool`, *optional*, defaults to `False`):
            Whether or not to zero out convolution weights when initializing.
    """

    model_type = "jukebox_vqvae"

    def __init__(
        self,
        act_fn="relu",
        nb_discrete_codes=2048,
        commit=0.02,
        conv_input_shape=1,
        conv_res_scale=False,
        embed_dim=64,
        hop_fraction=[0.125, 0.5, 0.5],
        levels=3,
        lmu=0.99,
        multipliers=[2, 1, 1],
        res_conv_depth=4,
        res_conv_width=32,
        res_convolution_multiplier=1,
        res_dilation_cycle=None,
        res_dilation_growth_rate=3,
        res_downs_t=[3, 2, 2],
        res_strides_t=[2, 2, 2],
        sample_length=1058304,
        init_scale=0.2,
        zero_out=False,
        **kwargs,
    ):
        self.hop_fraction = hop_fraction
        self.conv_input_shape = conv_input_shape
        self.sample_length = sample_length

        # VQVAE parameters (all used)
        self.levels = levels
        self.embed_dim = embed_dim
        self.nb_discrete_codes = nb_discrete_codes
        self.res_conv_width = res_conv_width
        self.res_conv_depth = res_conv_depth
        self.res_convolution_multiplier = res_convolution_multiplier
        self.res_dilation_growth_rate = res_dilation_growth_rate
        self.res_dilation_cycle = res_dilation_cycle
        self.multipliers = multipliers
        self.res_downs_t = res_downs_t
        self.res_strides_t = res_strides_t
        self.lmu = lmu
        self.commit = commit
        self.conv_res_scale = conv_res_scale
        self.act_fn = act_fn
        self.init_scale = init_scale
        self.zero_out = zero_out
        # NOTE(review): no super().__init__(**kwargs) call here, so extra kwargs
        # are dropped -- mirrors JukeboxPriorConfig; verify against upstream.

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """
        Load a VQVAE config from `pretrained_model_name_or_path`. When the stored config
        is a full `JukeboxConfig`, its nested `vqvae_config` sub-dict is used.
        """
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vqvae config dict if we are loading from JukeboxConfig
        if config_dict.get("model_type") == "jukebox":
            config_dict = config_dict["vqvae_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
|
| 489 |
+
|
| 490 |
+
|
| 491 |
+
class JukeboxConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`JukeboxModel`].

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information. Instantiating a configuration with the defaults will
    yield a similar configuration to that of
    [openai/jukebox-1b-lyrics](https://huggingface.co/openai/jukebox-1b-lyrics) architecture.

    The downsampling and stride are used to determine downsampling of the input sequence. For example, downsampling =
    (5,3), and strides = (2, 2) will downsample the audio by 2^5 = 32 to get the first level of codes, and 2**8 = 256
    to get the second level codes. This is mostly true for training the top level prior and the upsamplers.

    Args:
        vqvae_config (`JukeboxVQVAEConfig`, *optional*):
            Configuration for the `JukeboxVQVAE` model.
        prior_config_list (`List[JukeboxPriorConfig]`, *optional*):
            List of the configs for each of the `JukeboxPrior` of the model. The original architecture uses 3 priors.
        nb_priors (`int`, *optional*, defaults to 3):
            Number of prior models that will sequentially sample tokens. Each prior is conditional auto regressive
            (decoder) model, apart from the top prior, which can include a lyric encoder. The available models were
            trained using a top prior and 2 upsampler priors.
        sampling_rate (`int`, *optional*, defaults to 44100):
            Sampling rate of the raw audio.
        timing_dims (`int`, *optional*, defaults to 64):
            Dimensions of the JukeboxRangeEmbedding layer which is equivalent to traditional positional embedding
            layer. The timing embedding layer converts the absolute and relative position in the currently sampled
            audio to a tensor of length `timing_dims` that will be added to the music tokens.
        min_duration (`int`, *optional*, defaults to 0):
            Minimum duration of the audios to generate
        max_duration (`float`, *optional*, defaults to 600.0):
            Maximum duration of the audios to generate
        max_nb_genres (`int`, *optional*, defaults to 5):
            Maximum number of genres that can be used to condition a single sample.
        metadata_conditioning (`bool`, *optional*, defaults to `True`):
            Whether or not to use metadata conditioning, corresponding to the artist, the genre and the min/maximum
            duration.

    Example:

    ```python
    >>> from transformers import JukeboxModel, JukeboxConfig

    >>> # Initializing a Jukebox configuration
    >>> configuration = JukeboxConfig()

    >>> # Initializing a model from the configuration
    >>> model = JukeboxModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    model_type = "jukebox"

    def __init__(
        self,
        vqvae_config=None,
        prior_config_list=None,
        nb_priors=3,
        sampling_rate=44100,
        timing_dims=64,
        min_duration=0,
        max_duration=600.0,
        max_nb_genres=5,
        metadata_conditioning=True,
        **kwargs,
    ):
        if vqvae_config is None:
            vqvae_config = {}
            logger.info("vqvae_config is None. initializing the JukeboxVQVAE with default values.")

        self.vqvae_config = JukeboxVQVAEConfig(**vqvae_config)
        if prior_config_list is not None:
            self.prior_configs = [JukeboxPriorConfig(**prior_config) for prior_config in prior_config_list]
        else:
            # Legacy layout: prior configs may also be given as `prior_0`, `prior_1`, ...
            # keyword arguments (the format emitted by JukeboxPriorConfig.from_pretrained).
            self.prior_configs = []
            for prior_idx in range(nb_priors):
                prior_config = kwargs.pop(f"prior_{prior_idx}", None)
                if prior_config is None:
                    prior_config = {}
                    logger.info(
                        f"prior_{prior_idx}'s config is None. Initializing the JukeboxPriorConfig list with default"
                        " values."
                    )
                self.prior_configs.append(JukeboxPriorConfig(**prior_config))

        self.hop_fraction = self.vqvae_config.hop_fraction

        self.nb_priors = nb_priors

        # Metadata conditioning
        self.max_nb_genres = max_nb_genres
        self.sampling_rate = sampling_rate
        self.timing_dims = timing_dims
        self.min_duration = min_duration
        self.max_duration = max_duration
        self.metadata_conditioning = metadata_conditioning

        super().__init__(**kwargs)

    @classmethod
    def from_configs(cls, prior_configs: List[JukeboxPriorConfig], vqvae_config: JukeboxVQVAEConfig, **kwargs):
        r"""
        Instantiate a [`JukeboxConfig`] (or a derived class) from a list of prior model configurations and a VQVAE
        model configuration.

        Returns:
            [`JukeboxConfig`]: An instance of a configuration object
        """
        prior_config_list = [config.to_dict() for config in prior_configs]
        # Fix: pass `vqvae_config=` (the actual __init__ parameter name). The previous
        # `vqvae_config_dict=` keyword was unknown to __init__, so it fell into **kwargs
        # and the provided VQVAE configuration was silently replaced by defaults.
        return cls(prior_config_list=prior_config_list, vqvae_config=vqvae_config.to_dict(), **kwargs)

    def to_dict(self):
        # Override the default to_dict to apply to_dict to the list of prior configs.
        result = super().to_dict()
        result["prior_config_list"] = [config.to_dict() for config in result.pop("prior_configs")]
        return result
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/jukebox/modeling_jukebox.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/jukebox/tokenization_jukebox.py
ADDED
|
@@ -0,0 +1,404 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2022 The Open AI Team Authors and The HuggingFace Inc. team.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""Tokenization classes for OpenAI Jukebox."""
|
| 16 |
+
|
| 17 |
+
import json
|
| 18 |
+
import os
|
| 19 |
+
import re
|
| 20 |
+
import unicodedata
|
| 21 |
+
from json.encoder import INFINITY
|
| 22 |
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
| 23 |
+
|
| 24 |
+
import numpy as np
|
| 25 |
+
import regex
|
| 26 |
+
|
| 27 |
+
from ....tokenization_utils import AddedToken, PreTrainedTokenizer
|
| 28 |
+
from ....tokenization_utils_base import BatchEncoding
|
| 29 |
+
from ....utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
|
| 30 |
+
from ....utils.generic import _is_jax, _is_numpy
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
logger = logging.get_logger(__name__)

# Filenames (relative to the tokenizer directory) for the three JSON vocabulary
# files: artist->id, lyric-character->id and genre->id mappings.
VOCAB_FILES_NAMES = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class JukeboxTokenizer(PreTrainedTokenizer):
|
| 43 |
+
"""
|
| 44 |
+
Constructs a Jukebox tokenizer. Jukebox can be conditioned on 3 different inputs :
|
| 45 |
+
- Artists, unique ids are associated to each artist from the provided dictionary.
|
| 46 |
+
- Genres, unique ids are associated to each genre from the provided dictionary.
|
| 47 |
+
- Lyrics, character based tokenization. Must be initialized with the list of characters that are inside the
|
| 48 |
+
vocabulary.
|
| 49 |
+
|
| 50 |
+
This tokenizer does not require training. It should be able to process a different number of inputs:
|
| 51 |
+
as the conditioning of the model can be done on the three different queries. If None is provided, defaults values will be used.:
|
| 52 |
+
|
| 53 |
+
Depending on the number of genres on which the model should be conditioned (`n_genres`).
|
| 54 |
+
```python
|
| 55 |
+
>>> from transformers import JukeboxTokenizer
|
| 56 |
+
|
| 57 |
+
>>> tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
|
| 58 |
+
>>> tokenizer("Alan Jackson", "Country Rock", "old town road")["input_ids"]
|
| 59 |
+
[tensor([[ 0, 0, 0, 6785, 546, 41, 38, 30, 76, 46, 41, 49,
|
| 60 |
+
40, 76, 44, 41, 27, 30]]), tensor([[ 0, 0, 0, 145, 0]]), tensor([[ 0, 0, 0, 145, 0]])]
|
| 61 |
+
```
|
| 62 |
+
|
| 63 |
+
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
|
| 64 |
+
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
|
| 65 |
+
|
| 66 |
+
<Tip>
|
| 67 |
+
|
| 68 |
+
If nothing is provided, the genres and the artist will either be selected randomly or set to None
|
| 69 |
+
|
| 70 |
+
</Tip>
|
| 71 |
+
|
| 72 |
+
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to:
|
| 73 |
+
this superclass for more information regarding those methods.
|
| 74 |
+
|
| 75 |
+
However the code does not allow that and only supports composing from various genres.
|
| 76 |
+
|
| 77 |
+
Args:
|
| 78 |
+
artists_file (`str`):
|
| 79 |
+
Path to the vocabulary file which contains a mapping between artists and ids. The default file supports
|
| 80 |
+
both "v2" and "v3"
|
| 81 |
+
genres_file (`str`):
|
| 82 |
+
Path to the vocabulary file which contain a mapping between genres and ids.
|
| 83 |
+
lyrics_file (`str`):
|
| 84 |
+
Path to the vocabulary file which contains the accepted characters for the lyrics tokenization.
|
| 85 |
+
version (`List[str]`, `optional`, default to `["v3", "v2", "v2"]`) :
|
| 86 |
+
List of the tokenizer versions. The `5b-lyrics`'s top level prior model was trained using `v3` instead of
|
| 87 |
+
`v2`.
|
| 88 |
+
        n_genres (`int`, `optional`, defaults to 5):
|
| 89 |
+
Maximum number of genres to use for composition.
|
| 90 |
+
max_n_lyric_tokens (`int`, `optional`, defaults to 512):
|
| 91 |
+
Maximum number of lyric tokens to keep.
|
| 92 |
+
unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
|
| 93 |
+
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
|
| 94 |
+
token instead.
|
| 95 |
+
"""
|
| 96 |
+
|
| 97 |
+
vocab_files_names = VOCAB_FILES_NAMES
|
| 98 |
+
model_input_names = ["input_ids", "attention_mask"]
|
| 99 |
+
|
| 100 |
+
def __init__(
|
| 101 |
+
self,
|
| 102 |
+
artists_file,
|
| 103 |
+
genres_file,
|
| 104 |
+
lyrics_file,
|
| 105 |
+
version=["v3", "v2", "v2"],
|
| 106 |
+
max_n_lyric_tokens=512,
|
| 107 |
+
n_genres=5,
|
| 108 |
+
unk_token="<|endoftext|>",
|
| 109 |
+
**kwargs,
|
| 110 |
+
):
|
| 111 |
+
unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
|
| 112 |
+
self.version = version
|
| 113 |
+
self.max_n_lyric_tokens = max_n_lyric_tokens
|
| 114 |
+
self.n_genres = n_genres
|
| 115 |
+
self._added_tokens_decoder = {0: unk_token}
|
| 116 |
+
|
| 117 |
+
with open(artists_file, encoding="utf-8") as vocab_handle:
|
| 118 |
+
self.artists_encoder = json.load(vocab_handle)
|
| 119 |
+
|
| 120 |
+
with open(genres_file, encoding="utf-8") as vocab_handle:
|
| 121 |
+
self.genres_encoder = json.load(vocab_handle)
|
| 122 |
+
|
| 123 |
+
with open(lyrics_file, encoding="utf-8") as vocab_handle:
|
| 124 |
+
self.lyrics_encoder = json.load(vocab_handle)
|
| 125 |
+
|
| 126 |
+
oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
|
| 127 |
+
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
|
| 128 |
+
if len(self.lyrics_encoder) == 79:
|
| 129 |
+
oov = oov.replace(r"\-'", r"\-+'")
|
| 130 |
+
|
| 131 |
+
self.out_of_vocab = regex.compile(oov)
|
| 132 |
+
self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
|
| 133 |
+
self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
|
| 134 |
+
self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
|
| 135 |
+
super().__init__(
|
| 136 |
+
unk_token=unk_token,
|
| 137 |
+
n_genres=n_genres,
|
| 138 |
+
version=version,
|
| 139 |
+
max_n_lyric_tokens=max_n_lyric_tokens,
|
| 140 |
+
**kwargs,
|
| 141 |
+
)
|
| 142 |
+
|
| 143 |
+
@property
|
| 144 |
+
def vocab_size(self):
|
| 145 |
+
return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)
|
| 146 |
+
|
| 147 |
+
def get_vocab(self):
|
| 148 |
+
return {
|
| 149 |
+
"artists_encoder": self.artists_encoder,
|
| 150 |
+
"genres_encoder": self.genres_encoder,
|
| 151 |
+
"lyrics_encoder": self.lyrics_encoder,
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
|
| 155 |
+
"""Converts the artist, genre and lyrics tokens to their index using the vocabulary.
|
| 156 |
+
The total_length, offset and duration have to be provided in order to select relevant lyrics and add padding to
|
| 157 |
+
the lyrics token sequence.
|
| 158 |
+
"""
|
| 159 |
+
artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
|
| 160 |
+
for genres in range(len(list_genres)):
|
| 161 |
+
list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
|
| 162 |
+
list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
|
| 163 |
+
|
| 164 |
+
lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
|
| 165 |
+
return artists_id, list_genres, lyric_ids
|
| 166 |
+
|
| 167 |
+
def _tokenize(self, lyrics):
|
| 168 |
+
"""
|
| 169 |
+
Converts a string into a sequence of tokens (string), using the tokenizer. Split in words for word-based
|
| 170 |
+
vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces).
|
| 171 |
+
|
| 172 |
+
Do NOT take care of added tokens. Only the lyrics are split into character for the character-based vocabulary.
|
| 173 |
+
"""
|
| 174 |
+
# only lyrics are not tokenized, but character based is easily handled
|
| 175 |
+
return list(lyrics)
|
| 176 |
+
|
| 177 |
+
def tokenize(self, artist, genre, lyrics, **kwargs):
|
| 178 |
+
"""
|
| 179 |
+
Converts three strings in a 3 sequence of tokens using the tokenizer
|
| 180 |
+
"""
|
| 181 |
+
artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
|
| 182 |
+
lyrics = self._tokenize(lyrics)
|
| 183 |
+
return artist, genre, lyrics
|
| 184 |
+
|
| 185 |
+
    def prepare_for_tokenization(
        self, artists: List[str], genres: List[str], lyrics: str, is_split_into_words: bool = False
    ) -> Tuple[List[str], List[List[str]], Tuple[str, list, list]]:
        """
        Performs any necessary transformations before tokenization.

        Args:
            artists (`List[str]`):
                The artist names to prepare, one entry per prior level. This will mostly lower the strings.
            genres (`List[str]`):
                The genre names to prepare, one entry per prior level. This will mostly lower the strings.
            lyrics (`str`):
                The lyrics to prepare.
            is_split_into_words (`bool`, *optional*, defaults to `False`):
                Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
                tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
                which it will tokenize. This is useful for NER or token classification.
        """
        # Per-level normalization: "v3" only lower-cases; any other version
        # ("v2") additionally runs `_normalize` and appends a ".v2" suffix.
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres

        # The top-level prior's version decides the lyric character handling:
        # "v2" rebuilds the full character vocabulary on this instance, while
        # "v3" only installs the wider out-of-vocab filter (which keeps "+").
        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0  # index 0 is reserved for unknown characters
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""  # unknown characters decode to the empty string
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")  # backslashes in the raw lyrics mark line breaks
        # Only the first element carries text; the two empty lists mirror the
        # three-level structure that `_convert_token_to_id` expects.
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics
|
| 229 |
+
|
| 230 |
+
def _run_strip_accents(self, text):
|
| 231 |
+
"""Strips accents from a piece of text."""
|
| 232 |
+
text = unicodedata.normalize("NFD", text)
|
| 233 |
+
output = []
|
| 234 |
+
for char in text:
|
| 235 |
+
cat = unicodedata.category(char)
|
| 236 |
+
if cat == "Mn":
|
| 237 |
+
continue
|
| 238 |
+
output.append(char)
|
| 239 |
+
return "".join(output)
|
| 240 |
+
|
| 241 |
+
def _normalize(self, text: str) -> str:
|
| 242 |
+
"""
|
| 243 |
+
Normalizes the input text. This process is for the genres and the artist
|
| 244 |
+
|
| 245 |
+
Args:
|
| 246 |
+
text (`str`):
|
| 247 |
+
Artist or Genre string to normalize
|
| 248 |
+
"""
|
| 249 |
+
|
| 250 |
+
accepted = (
|
| 251 |
+
[chr(i) for i in range(ord("a"), ord("z") + 1)]
|
| 252 |
+
+ [chr(i) for i in range(ord("A"), ord("Z") + 1)]
|
| 253 |
+
+ [chr(i) for i in range(ord("0"), ord("9") + 1)]
|
| 254 |
+
+ ["."]
|
| 255 |
+
)
|
| 256 |
+
accepted = frozenset(accepted)
|
| 257 |
+
pattern = re.compile(r"_+")
|
| 258 |
+
text = "".join([c if c in accepted else "_" for c in text.lower()])
|
| 259 |
+
text = pattern.sub("_", text).strip("_")
|
| 260 |
+
return text
|
| 261 |
+
|
| 262 |
+
def convert_lyric_tokens_to_string(self, lyrics: List[str]) -> str:
|
| 263 |
+
return " ".join(lyrics)
|
| 264 |
+
|
| 265 |
+
def convert_to_tensors(
|
| 266 |
+
self, inputs, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False
|
| 267 |
+
):
|
| 268 |
+
"""
|
| 269 |
+
Convert the inner content to tensors.
|
| 270 |
+
|
| 271 |
+
Args:
|
| 272 |
+
tensor_type (`str` or [`~utils.TensorType`], *optional*):
|
| 273 |
+
The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If
|
| 274 |
+
unset, no modification is done.
|
| 275 |
+
prepend_batch_axis (`int`, *optional*, defaults to `False`):
|
| 276 |
+
Whether or not to add the batch dimension during the conversion.
|
| 277 |
+
"""
|
| 278 |
+
# Convert to TensorType
|
| 279 |
+
if not isinstance(tensor_type, TensorType):
|
| 280 |
+
tensor_type = TensorType(tensor_type)
|
| 281 |
+
|
| 282 |
+
# Get a function reference for the correct framework
|
| 283 |
+
if tensor_type == TensorType.TENSORFLOW:
|
| 284 |
+
if not is_tf_available():
|
| 285 |
+
raise ImportError(
|
| 286 |
+
"Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
|
| 287 |
+
)
|
| 288 |
+
import tensorflow as tf
|
| 289 |
+
|
| 290 |
+
as_tensor = tf.constant
|
| 291 |
+
is_tensor = tf.is_tensor
|
| 292 |
+
elif tensor_type == TensorType.PYTORCH:
|
| 293 |
+
if not is_torch_available():
|
| 294 |
+
raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
|
| 295 |
+
import torch
|
| 296 |
+
|
| 297 |
+
as_tensor = torch.tensor
|
| 298 |
+
is_tensor = torch.is_tensor
|
| 299 |
+
elif tensor_type == TensorType.JAX:
|
| 300 |
+
if not is_flax_available():
|
| 301 |
+
raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
|
| 302 |
+
import jax.numpy as jnp # noqa: F811
|
| 303 |
+
|
| 304 |
+
as_tensor = jnp.array
|
| 305 |
+
is_tensor = _is_jax
|
| 306 |
+
else:
|
| 307 |
+
as_tensor = np.asarray
|
| 308 |
+
is_tensor = _is_numpy
|
| 309 |
+
|
| 310 |
+
# Do the tensor conversion in batch
|
| 311 |
+
|
| 312 |
+
try:
|
| 313 |
+
if prepend_batch_axis:
|
| 314 |
+
inputs = [inputs]
|
| 315 |
+
|
| 316 |
+
if not is_tensor(inputs):
|
| 317 |
+
inputs = as_tensor(inputs)
|
| 318 |
+
except: # noqa E722
|
| 319 |
+
raise ValueError(
|
| 320 |
+
"Unable to create tensor, you should probably activate truncation and/or padding "
|
| 321 |
+
"with 'padding=True' 'truncation=True' to have batched tensors with the same length."
|
| 322 |
+
)
|
| 323 |
+
|
| 324 |
+
return inputs
|
| 325 |
+
|
| 326 |
+
    def __call__(self, artist, genres, lyrics="", return_tensors="pt") -> BatchEncoding:
        """Convert the raw string to a list of token ids

        Args:
            artist (`str`):
                Name of the artist.
            genres (`str`):
                List of genres that will be mixed to condition the audio
            lyrics (`str`, *optional*, defaults to `""`):
                Lyrics used to condition the generation
        """
        # Three placeholder ids prepended before each level's artist/genre/lyric ids.
        input_ids = [0, 0, 0]
        # Duplicate the conditioning once per prior level (one per `version` entry).
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)

        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)

        # NOTE(review): the mask is sized from the *last* level's lyric tokens,
        # which `_convert_token_to_id` always leaves empty — presumably
        # intentional (only the top-level prior is lyric-conditioned); confirm.
        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
            )
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})
|
| 352 |
+
|
| 353 |
+
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
|
| 354 |
+
"""
|
| 355 |
+
Saves the tokenizer's vocabulary dictionary to the provided save_directory.
|
| 356 |
+
|
| 357 |
+
Args:
|
| 358 |
+
save_directory (`str`):
|
| 359 |
+
A path to the directory where to saved. It will be created if it doesn't exist.
|
| 360 |
+
|
| 361 |
+
filename_prefix (`Optional[str]`, *optional*):
|
| 362 |
+
A prefix to add to the names of the files saved by the tokenizer.
|
| 363 |
+
|
| 364 |
+
"""
|
| 365 |
+
if not os.path.isdir(save_directory):
|
| 366 |
+
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
|
| 367 |
+
return
|
| 368 |
+
|
| 369 |
+
artists_file = os.path.join(
|
| 370 |
+
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
|
| 371 |
+
)
|
| 372 |
+
with open(artists_file, "w", encoding="utf-8") as f:
|
| 373 |
+
f.write(json.dumps(self.artists_encoder, ensure_ascii=False))
|
| 374 |
+
|
| 375 |
+
genres_file = os.path.join(
|
| 376 |
+
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
|
| 377 |
+
)
|
| 378 |
+
with open(genres_file, "w", encoding="utf-8") as f:
|
| 379 |
+
f.write(json.dumps(self.genres_encoder, ensure_ascii=False))
|
| 380 |
+
|
| 381 |
+
lyrics_file = os.path.join(
|
| 382 |
+
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
|
| 383 |
+
)
|
| 384 |
+
with open(lyrics_file, "w", encoding="utf-8") as f:
|
| 385 |
+
f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))
|
| 386 |
+
|
| 387 |
+
return (artists_file, genres_file, lyrics_file)
|
| 388 |
+
|
| 389 |
+
def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
|
| 390 |
+
"""
|
| 391 |
+
Converts an index (integer) in a token (str) using the vocab.
|
| 392 |
+
|
| 393 |
+
Args:
|
| 394 |
+
artists_index (`int`):
|
| 395 |
+
Index of the artist in its corresponding dictionary.
|
| 396 |
+
genres_index (`Union[List[int], int]`):
|
| 397 |
+
Index of the genre in its corresponding dictionary.
|
| 398 |
+
lyric_index (`List[int]`):
|
| 399 |
+
List of character indices, which each correspond to a character.
|
| 400 |
+
"""
|
| 401 |
+
artist = self.artists_decoder.get(artists_index)
|
| 402 |
+
genres = [self.genres_decoder.get(genre) for genre in genres_index]
|
| 403 |
+
lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
|
| 404 |
+
return artist, genres, lyrics
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/nat/__init__.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure consumed by `_LazyModule`: submodule name -> public names.
_import_structure = {"configuration_nat": ["NatConfig"]}


# The modeling submodule requires PyTorch; register it only when available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nat"] = [
        "NatForImageClassification",
        "NatModel",
        "NatPreTrainedModel",
        "NatBackbone",
    ]

# Static type checkers see the real imports below; at runtime the module is
# replaced by a `_LazyModule` that resolves attributes on first access.
if TYPE_CHECKING:
    from .configuration_nat import NatConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nat import (
            NatBackbone,
            NatForImageClassification,
            NatModel,
            NatPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/nat/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (835 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/nat/__pycache__/configuration_nat.cpython-310.pyc
ADDED
|
Binary file (6.08 kB). View file
|
|
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/nat/__pycache__/modeling_nat.cpython-310.pyc
ADDED
|
Binary file (32.2 kB). View file
|
|
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/nat/configuration_nat.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""Neighborhood Attention Transformer model configuration"""
|
| 16 |
+
|
| 17 |
+
from ....configuration_utils import PretrainedConfig
|
| 18 |
+
from ....utils import logging
|
| 19 |
+
from ....utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
logger = logging.get_logger(__name__)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`NatModel`]. It is used to instantiate a Nat model
    according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the Nat
    [shi-labs/nat-mini-in1k-224](https://huggingface.co/shi-labs/nat-mini-in1k-224) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        patch_size (`int`, *optional*, defaults to 4):
            The size (resolution) of each patch. NOTE: Only patch size of 4 is supported at the moment.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        embed_dim (`int`, *optional*, defaults to 64):
            Dimensionality of patch embedding.
        depths (`List[int]`, *optional*, defaults to `[3, 4, 6, 5]`):
            Number of layers in each level of the encoder.
        num_heads (`List[int]`, *optional*, defaults to `[2, 4, 8, 16]`):
            Number of attention heads in each layer of the Transformer encoder.
        kernel_size (`int`, *optional*, defaults to 7):
            Neighborhood Attention kernel size.
        mlp_ratio (`float`, *optional*, defaults to 3.0):
            Ratio of MLP hidden dimensionality to embedding dimensionality.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether or not a learnable bias should be added to the queries, keys and values.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings and encoder.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        drop_path_rate (`float`, *optional*, defaults to 0.1):
            Stochastic depth rate.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
            `"selu"` and `"gelu_new"` are supported.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        layer_scale_init_value (`float`, *optional*, defaults to 0.0):
            The initial value for the layer scale. Disabled if <=0.
        out_features (`List[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        out_indices (`List[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
            If unset and `out_features` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.

    Example:

    ```python
    >>> from transformers import NatConfig, NatModel

    >>> # Initializing a Nat shi-labs/nat-mini-in1k-224 style configuration
    >>> configuration = NatConfig()

    >>> # Initializing a model (with random weights) from the shi-labs/nat-mini-in1k-224 style configuration
    >>> model = NatModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "nat"

    # Map the generic `PretrainedConfig` attribute names onto the Nat-specific ones.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        # NOTE(review): `depths`/`num_heads` use mutable list defaults shared
        # across calls; they are only read here, but callers should not mutate
        # the config's lists in place.
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        # One encoder level per entry in `depths`.
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        # Backbone stage naming: "stem" followed by "stage1".."stageN".
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/nat/modeling_nat.py
ADDED
|
@@ -0,0 +1,950 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2022 SHI Labs and The HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""PyTorch Neighborhood Attention Transformer model."""
|
| 16 |
+
|
| 17 |
+
import math
|
| 18 |
+
from dataclasses import dataclass
|
| 19 |
+
from typing import Optional, Tuple, Union
|
| 20 |
+
|
| 21 |
+
import torch
|
| 22 |
+
import torch.utils.checkpoint
|
| 23 |
+
from torch import nn
|
| 24 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
| 25 |
+
|
| 26 |
+
from ....activations import ACT2FN
|
| 27 |
+
from ....modeling_outputs import BackboneOutput
|
| 28 |
+
from ....modeling_utils import PreTrainedModel
|
| 29 |
+
from ....pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
|
| 30 |
+
from ....utils import (
|
| 31 |
+
ModelOutput,
|
| 32 |
+
OptionalDependencyNotAvailable,
|
| 33 |
+
add_code_sample_docstrings,
|
| 34 |
+
add_start_docstrings,
|
| 35 |
+
add_start_docstrings_to_model_forward,
|
| 36 |
+
is_natten_available,
|
| 37 |
+
logging,
|
| 38 |
+
replace_return_docstrings,
|
| 39 |
+
requires_backends,
|
| 40 |
+
)
|
| 41 |
+
from ....utils.backbone_utils import BackboneMixin
|
| 42 |
+
from .configuration_nat import NatConfig
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
# Import the real neighborhood-attention kernels when the optional `natten`
# package is installed; otherwise install stub functions that raise, so the
# failure happens at call time rather than at import time.
if is_natten_available():
    from natten.functional import natten2dav, natten2dqkrpb
else:

    def natten2dqkrpb(*args, **kwargs):
        raise OptionalDependencyNotAvailable()

    def natten2dav(*args, **kwargs):
        raise OptionalDependencyNotAvailable()
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
# Module-level logger following the Transformers convention.
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "NatConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "shi-labs/nat-mini-in1k-224"
_EXPECTED_OUTPUT_SHAPE = [1, 7, 7, 512]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "shi-labs/nat-mini-in1k-224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"


# drop_path and NatDropPath are from the timm library.
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
@dataclass
class NatEncoderOutput(ModelOutput):
    """
    Nat encoder's outputs, with potential hidden states and attentions.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    # All fields default to None so the encoder can populate only what the
    # caller asked for (see NatEncoder.forward).
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
@dataclass
class NatModelOutput(ModelOutput):
    """
    Nat model's outputs that also contains a pooling of the last hidden states.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
            Average pooling of the last layer hidden-state.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    # pooler_output is None when the model was built with add_pooling_layer=False.
    last_hidden_state: torch.FloatTensor = None
    pooler_output: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
@dataclass
class NatImageClassifierOutput(ModelOutput):
    """
    Nat outputs for image classification.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    # loss is only populated when labels are supplied to the classification head.
    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
class NatEmbeddings(nn.Module):
    """
    Patch-embed the input image, then layer-normalize and apply dropout.

    NOTE(review): no explicit position embeddings are added here; positional
    information appears to come from the relative positional biases inside the
    attention layers — confirm against `NeighborhoodAttention.rpb`.
    """

    def __init__(self, config):
        super().__init__()
        self.patch_embeddings = NatPatchEmbeddings(config)
        self.norm = nn.LayerNorm(config.embed_dim)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor]:
        patched = self.patch_embeddings(pixel_values)
        normalized = self.norm(patched)
        return self.dropout(normalized)
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
class NatPatchEmbeddings(nn.Module):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, height, width, hidden_size)` to be consumed by a
    Transformer.

    Raises:
        ValueError: at construction when `config.patch_size != 4`, and at forward
            time when the input channel count does not match the configuration.
    """

    def __init__(self, config):
        super().__init__()
        patch_size = config.patch_size
        num_channels, hidden_size = config.num_channels, config.embed_dim
        self.num_channels = num_channels

        # TODO: Support arbitrary patch sizes. The two stride-2 convolutions
        # below hard-code an overall downsampling factor of 4.
        # (Fixed: the original message said "Dinat", a copy/paste from the
        # DiNAT port — this is the Nat model.)
        if patch_size != 4:
            raise ValueError("Nat only supports patch size of 4 at the moment.")

        self.projection = nn.Sequential(
            nn.Conv2d(self.num_channels, hidden_size // 2, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
            nn.Conv2d(hidden_size // 2, hidden_size, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
        )

    def forward(self, pixel_values: Optional[torch.FloatTensor]) -> torch.Tensor:
        _, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        # (B, C, H, W) -> (B, hidden, H/4, W/4), then permute to channels-last
        # for the neighborhood-attention layers.
        embeddings = self.projection(pixel_values)
        embeddings = embeddings.permute(0, 2, 3, 1)

        return embeddings
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
class NatDownsampler(nn.Module):
    """
    Convolutional Downsampling Layer.

    Halves the spatial resolution and doubles the channel count of a
    channels-last feature map, then normalizes the result.

    Args:
        dim (`int`):
            Number of input channels.
        norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
            Normalization layer class.
    """

    def __init__(self, dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
        super().__init__()
        self.dim = dim
        self.reduction = nn.Conv2d(dim, 2 * dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
        self.norm = norm_layer(2 * dim)

    def forward(self, input_feature: torch.Tensor) -> torch.Tensor:
        # Conv2d expects channels-first: (B, H, W, C) -> (B, C, H, W),
        # reduce, then return to channels-last before normalizing.
        channels_first = input_feature.permute(0, 3, 1, 2)
        reduced = self.reduction(channels_first).permute(0, 2, 3, 1)
        return self.norm(reduced)
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
    however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
    layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
    argument.
    """
    if not training or drop_prob == 0.0:
        return input
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over every remaining dimension
    # (works with tensors of any rank, not just 2D ConvNets).
    mask_shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    mask = keep_prob + torch.rand(mask_shape, dtype=input.dtype, device=input.device)
    mask.floor_()  # binarize
    # Scale by 1/keep_prob so the expected activation is unchanged.
    return input.div(keep_prob) * mask
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
class NatDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Delegate to the functional form; `self.training` toggles the drop.
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        # Shown in the module's repr, e.g. "NatDropPath(p=0.1)".
        return f"p={self.drop_prob}"
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
class NeighborhoodAttention(nn.Module):
    """
    Neighborhood Attention: each spatial position attends only to a local
    `kernel_size` x `kernel_size` window of neighbors, with learnable relative
    positional biases. Computation is delegated to the `natten` kernels.
    """

    def __init__(self, config, dim, num_heads, kernel_size):
        super().__init__()
        if dim % num_heads != 0:
            raise ValueError(
                f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
            )

        self.num_attention_heads = num_heads
        self.attention_head_size = int(dim / num_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.kernel_size = kernel_size

        # rpb is learnable relative positional biases; same concept as is used in Swin.
        # One (2k-1) x (2k-1) table per head covers every relative offset in the window.
        self.rpb = nn.Parameter(torch.zeros(num_heads, (2 * self.kernel_size - 1), (2 * self.kernel_size - 1)))

        self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # Split the hidden dimension into heads and bring the head axis forward:
        # (batch, H, W, all_head_size) -> (batch, heads, H, W, head_size).
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 3, 1, 2, 4)

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        query_layer = self.transpose_for_scores(self.query(hidden_states))
        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))

        # Apply the scale factor before computing attention weights. It's usually more efficient because
        # attention weights are typically a bigger tensor compared to query.
        # It gives identical results because scalars are commutable in matrix multiplication.
        query_layer = query_layer / math.sqrt(self.attention_head_size)

        # Compute NA between "query" and "key" to get the raw attention scores, and add relative positional biases.
        attention_scores = natten2dqkrpb(query_layer, key_layer, self.rpb, self.kernel_size, 1)

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Apply attention to values, then merge the heads back into one hidden dimension.
        context_layer = natten2dav(attention_probs, value_layer, self.kernel_size, 1)
        context_layer = context_layer.permute(0, 2, 3, 1, 4).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
class NeighborhoodAttentionOutput(nn.Module):
    """
    Output projection applied after neighborhood attention.

    `input_tensor` is accepted for interface parity with other attention
    output modules but is not used here — no residual is added inside
    this module (the residual lives in `NatLayer`).
    """

    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(dim, dim)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        projected = self.dense(hidden_states)
        return self.dropout(projected)
|
| 365 |
+
|
| 366 |
+
|
| 367 |
+
class NeighborhoodAttentionModule(nn.Module):
    """
    Wraps the neighborhood self-attention and its output projection, and
    implements head pruning over the combined q/k/v/output projections.
    """

    def __init__(self, config, dim, num_heads, kernel_size):
        super().__init__()
        self.self = NeighborhoodAttention(config, dim, num_heads, kernel_size)
        self.output = NeighborhoodAttentionOutput(config, dim)
        # Indices of heads already removed, so repeated pruning stays consistent.
        self.pruned_heads = set()

    def prune_heads(self, heads):
        # Standard Transformers head-pruning recipe: shrink the q/k/v rows and
        # the output-projection columns for the removed heads.
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.self(hidden_states, output_attentions)
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
class NatIntermediate(nn.Module):
    """First (expanding) linear layer of the MLP block plus its activation."""

    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
        # config.hidden_act is either a string key into ACT2FN or a callable.
        self.intermediate_act_fn = (
            ACT2FN[config.hidden_act] if isinstance(config.hidden_act, str) else config.hidden_act
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        expanded = self.dense(hidden_states)
        return self.intermediate_act_fn(expanded)
|
| 416 |
+
|
| 417 |
+
|
| 418 |
+
class NatOutput(nn.Module):
    """Second (contracting) linear layer of the MLP block, followed by dropout."""

    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        contracted = self.dense(hidden_states)
        return self.dropout(contracted)
|
| 428 |
+
|
| 429 |
+
|
| 430 |
+
class NatLayer(nn.Module):
    """
    A single NAT block: neighborhood attention + MLP, each with pre-LayerNorm,
    a residual connection, optional LayerScale gains and stochastic depth.
    """

    def __init__(self, config, dim, num_heads, drop_path_rate=0.0):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.kernel_size = config.kernel_size
        self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
        self.attention = NeighborhoodAttentionModule(config, dim, num_heads, kernel_size=self.kernel_size)
        self.drop_path = NatDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
        self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
        self.intermediate = NatIntermediate(config, dim)
        self.output = NatOutput(config, dim)
        # LayerScale: two per-channel learnable gains — row 0 scales the
        # attention branch, row 1 the MLP branch. Disabled when init value <= 0.
        self.layer_scale_parameters = (
            nn.Parameter(config.layer_scale_init_value * torch.ones((2, dim)), requires_grad=True)
            if config.layer_scale_init_value > 0
            else None
        )

    def maybe_pad(self, hidden_states, height, width):
        # Pad on the right/bottom so both spatial dims reach at least
        # kernel_size; returns the pad tuple so the caller can crop back.
        window_size = self.kernel_size
        pad_values = (0, 0, 0, 0, 0, 0)
        if height < window_size or width < window_size:
            pad_l = pad_t = 0
            pad_r = max(0, window_size - width)
            pad_b = max(0, window_size - height)
            pad_values = (0, 0, pad_l, pad_r, pad_t, pad_b)
            hidden_states = nn.functional.pad(hidden_states, pad_values)
        return hidden_states, pad_values

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        batch_size, height, width, channels = hidden_states.size()
        shortcut = hidden_states

        hidden_states = self.layernorm_before(hidden_states)
        # pad hidden_states if they are smaller than kernel size
        hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)

        _, height_pad, width_pad, _ = hidden_states.shape

        attention_outputs = self.attention(hidden_states, output_attentions=output_attentions)

        attention_output = attention_outputs[0]

        # Crop any padding back off before the residual add (pad_values[3]/[5]
        # are the right/bottom pad amounts).
        was_padded = pad_values[3] > 0 or pad_values[5] > 0
        if was_padded:
            attention_output = attention_output[:, :height, :width, :].contiguous()

        if self.layer_scale_parameters is not None:
            attention_output = self.layer_scale_parameters[0] * attention_output

        # Residual connection around the attention branch (with drop path).
        hidden_states = shortcut + self.drop_path(attention_output)

        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.output(self.intermediate(layer_output))

        if self.layer_scale_parameters is not None:
            layer_output = self.layer_scale_parameters[1] * layer_output

        # Residual connection around the MLP branch (with drop path).
        layer_output = hidden_states + self.drop_path(layer_output)

        layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
        return layer_outputs
|
| 495 |
+
|
| 496 |
+
|
| 497 |
+
class NatStage(nn.Module):
    """
    One resolution level of the NAT encoder: a stack of `depth` NatLayers
    followed by an optional downsampling (patch merging) layer.
    """

    def __init__(self, config, dim, depth, num_heads, drop_path_rate, downsample):
        super().__init__()
        self.config = config
        self.dim = dim
        blocks = []
        for layer_idx in range(depth):
            blocks.append(
                NatLayer(
                    config=config,
                    dim=dim,
                    num_heads=num_heads,
                    drop_path_rate=drop_path_rate[layer_idx],
                )
            )
        self.layers = nn.ModuleList(blocks)

        # patch merging layer (halves resolution, doubles channels)
        self.downsample = downsample(dim=dim, norm_layer=nn.LayerNorm) if downsample is not None else None

        self.pointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        _, height, width, _ = hidden_states.size()
        for block in self.layers:
            layer_outputs = block(hidden_states, output_attentions)
            hidden_states = layer_outputs[0]

        # Keep the pre-downsampling states as well; the encoder exposes them
        # to callers (e.g. for backbone feature maps).
        hidden_states_before_downsampling = hidden_states
        if self.downsample is not None:
            hidden_states = self.downsample(hidden_states_before_downsampling)

        stage_outputs = (hidden_states, hidden_states_before_downsampling)

        if output_attentions:
            stage_outputs += layer_outputs[1:]
        return stage_outputs
|
| 541 |
+
|
| 542 |
+
|
| 543 |
+
class NatEncoder(nn.Module):
    """
    Stack of NatStages. Each successive stage operates at half the spatial
    resolution and twice the channel width of the previous one.
    """

    def __init__(self, config):
        super().__init__()
        self.num_levels = len(config.depths)
        self.config = config
        # Stochastic-depth rate increases linearly from 0 to drop_path_rate
        # across all layers of all stages.
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
        self.levels = nn.ModuleList(
            [
                NatStage(
                    config=config,
                    dim=int(config.embed_dim * 2**i_layer),
                    depth=config.depths[i_layer],
                    num_heads=config.num_heads[i_layer],
                    drop_path_rate=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
                    downsample=NatDownsampler if (i_layer < self.num_levels - 1) else None,
                )
                for i_layer in range(self.num_levels)
            ]
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        output_hidden_states_before_downsampling: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple, NatEncoderOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_reshaped_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if output_hidden_states:
            # rearrange b h w c -> b c h w
            reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2)
            all_hidden_states += (hidden_states,)
            all_reshaped_hidden_states += (reshaped_hidden_state,)

        for i, layer_module in enumerate(self.levels):
            layer_outputs = layer_module(hidden_states, output_attentions)

            hidden_states = layer_outputs[0]
            hidden_states_before_downsampling = layer_outputs[1]

            # Record either the pre- or post-downsampling states, depending on
            # what the caller asked for.
            if output_hidden_states and output_hidden_states_before_downsampling:
                # rearrange b h w c -> b c h w
                reshaped_hidden_state = hidden_states_before_downsampling.permute(0, 3, 1, 2)
                all_hidden_states += (hidden_states_before_downsampling,)
                all_reshaped_hidden_states += (reshaped_hidden_state,)
            elif output_hidden_states and not output_hidden_states_before_downsampling:
                # rearrange b h w c -> b c h w
                reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2)
                all_hidden_states += (hidden_states,)
                all_reshaped_hidden_states += (reshaped_hidden_state,)

            if output_attentions:
                all_self_attentions += layer_outputs[2:]

        if not return_dict:
            # NOTE(review): the tuple path omits the reshaped hidden states;
            # only the dict path returns them.
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)

        return NatEncoderOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            reshaped_hidden_states=all_reshaped_hidden_states,
        )
|
| 610 |
+
|
| 611 |
+
|
| 612 |
+
class NatPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = NatConfig
    base_model_prefix = "nat"
    main_input_name = "pixel_values"

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            # LayerNorm starts as an identity transform: zero bias, unit weight.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
|
| 633 |
+
|
| 634 |
+
|
| 635 |
+
NAT_START_DOCSTRING = r"""
|
| 636 |
+
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
|
| 637 |
+
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
|
| 638 |
+
behavior.
|
| 639 |
+
|
| 640 |
+
Parameters:
|
| 641 |
+
config ([`NatConfig`]): Model configuration class with all the parameters of the model.
|
| 642 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
| 643 |
+
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
| 644 |
+
"""
|
| 645 |
+
|
| 646 |
+
|
| 647 |
+
NAT_INPUTS_DOCSTRING = r"""
|
| 648 |
+
Args:
|
| 649 |
+
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
|
| 650 |
+
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`]
|
| 651 |
+
for details.
|
| 652 |
+
|
| 653 |
+
output_attentions (`bool`, *optional*):
|
| 654 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 655 |
+
tensors for more detail.
|
| 656 |
+
output_hidden_states (`bool`, *optional*):
|
| 657 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 658 |
+
more detail.
|
| 659 |
+
return_dict (`bool`, *optional*):
|
| 660 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 661 |
+
"""
|
| 662 |
+
|
| 663 |
+
|
| 664 |
+
@add_start_docstrings(
    "The bare Nat Model transformer outputting raw hidden-states without any specific head on top.",
    NAT_START_DOCSTRING,
)
class NatModel(NatPreTrainedModel):
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)

        # Neighborhood-attention kernels come from the optional `natten` package.
        requires_backends(self, ["natten"])

        self.config = config
        num_levels = len(config.depths)
        self.num_levels = num_levels
        # Channel width doubles per level, so the final stage carries embed_dim * 2**(levels - 1) features.
        self.num_features = int(config.embed_dim * 2 ** (num_levels - 1))

        self.embeddings = NatEmbeddings(config)
        self.encoder = NatEncoder(config)

        self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
        self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        # The patch-embedding module is what turns pixels into tokens.
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer_index, head_indices in heads_to_prune.items():
            self.encoder.layer[layer_index].attention.prune_heads(head_indices)

    @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=NatModelOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, NatModelOutput]:
        # Any flag the caller left unset falls back to the config-level default.
        if output_attentions is None:
            output_attentions = self.config.output_attentions
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        if return_dict is None:
            return_dict = self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        embedding_output = self.embeddings(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = self.layernorm(encoder_outputs[0])

        pooled_output = None
        if self.pooler is not None:
            # Flatten the spatial grid to a token sequence, adaptive-pool it down to length 1,
            # then squeeze the trailing singleton dimension.
            token_sequence = sequence_output.flatten(1, 2).transpose(1, 2)
            pooled_output = torch.flatten(self.pooler(token_sequence), 1)

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return NatModelOutput(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
        )
|
| 751 |
+
|
| 752 |
+
|
| 753 |
+
@add_start_docstrings(
    """
    Nat Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
    the [CLS] token) e.g. for ImageNet.
    """,
    NAT_START_DOCSTRING,
)
class NatForImageClassification(NatPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        requires_backends(self, ["natten"])

        self.num_labels = config.num_labels
        self.nat = NatModel(config)

        # Classifier head
        if config.num_labels > 0:
            self.classifier = nn.Linear(self.nat.num_features, config.num_labels)
        else:
            # No labels declared: the head degenerates to a pass-through.
            self.classifier = nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=NatImageClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, NatImageClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        outputs = self.nat(
            pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            problem_type = self.config.problem_type
            if problem_type is None:
                # Infer the task type once from the label count/dtype and cache it on the config,
                # matching the shared Transformers classification-head convention.
                if self.num_labels == 1:
                    problem_type = "regression"
                elif self.num_labels > 1 and labels.dtype in (torch.long, torch.int):
                    problem_type = "single_label_classification"
                else:
                    problem_type = "multi_label_classification"
                self.config.problem_type = problem_type

            if problem_type == "regression":
                regression_loss = MSELoss()
                if self.num_labels == 1:
                    loss = regression_loss(logits.squeeze(), labels.squeeze())
                else:
                    loss = regression_loss(logits, labels)
            elif problem_type == "single_label_classification":
                loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
            elif problem_type == "multi_label_classification":
                loss = BCEWithLogitsLoss()(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return output if loss is None else (loss,) + output

        return NatImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            reshaped_hidden_states=outputs.reshaped_hidden_states,
        )
|
| 845 |
+
|
| 846 |
+
|
| 847 |
+
@add_start_docstrings(
    "NAT backbone, to be used with frameworks like DETR and MaskFormer.",
    NAT_START_DOCSTRING,
)
class NatBackbone(NatPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        requires_backends(self, ["natten"])

        self.embeddings = NatEmbeddings(config)
        self.encoder = NatEncoder(config)
        self.num_features = [config.embed_dim] + [int(config.embed_dim * 2**i) for i in range(len(config.depths))]

        # Add layer norms to hidden states of out_features
        self.hidden_states_norms = nn.ModuleDict(
            {stage: nn.LayerNorm(num_channels) for stage, num_channels in zip(self.out_features, self.channels)}
        )

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: torch.Tensor,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BackboneOutput:
        """
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, AutoBackbone
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224")
        >>> model = AutoBackbone.from_pretrained(
        ...     "shi-labs/nat-mini-in1k-224", out_features=["stage1", "stage2", "stage3", "stage4"]
        ... )

        >>> inputs = processor(image, return_tensors="pt")

        >>> outputs = model(**inputs)

        >>> feature_maps = outputs.feature_maps
        >>> list(feature_maps[-1].shape)
        [1, 512, 7, 7]
        ```"""
        if return_dict is None:
            return_dict = self.config.use_return_dict
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        if output_attentions is None:
            output_attentions = self.config.output_attentions

        embedding_output = self.embeddings(pixel_values)

        # Always ask the encoder for per-stage, pre-downsampling hidden states so every
        # requested out_feature has a tensor to normalize, regardless of the caller's flags.
        outputs = self.encoder(
            embedding_output,
            output_attentions=output_attentions,
            output_hidden_states=True,
            output_hidden_states_before_downsampling=True,
            return_dict=True,
        )

        hidden_states = outputs.reshaped_hidden_states

        selected_maps = []
        for stage, hidden_state in zip(self.stage_names, hidden_states):
            if stage not in self.out_features:
                continue
            # TODO can we simplify this?
            # LayerNorm runs on a channels-last token sequence; round-trip NCHW -> (B, HW, C) -> NCHW.
            batch_size, num_channels, height, width = hidden_state.shape
            tokens = hidden_state.permute(0, 2, 3, 1).contiguous()
            tokens = tokens.view(batch_size, height * width, num_channels)
            tokens = self.hidden_states_norms[stage](tokens)
            restored = tokens.view(batch_size, height, width, num_channels)
            restored = restored.permute(0, 3, 1, 2).contiguous()
            selected_maps.append(restored)
        feature_maps = tuple(selected_maps)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=outputs.attentions,
        )
|
janus/lib/python3.10/site-packages/transformers/models/deprecated/qdqbert/__init__.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 NVIDIA Corporation and The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import table: the configuration is always importable; the torch-dependent
# modeling symbols are appended below only when torch is installed.
_import_structure = {"configuration_qdqbert": ["QDQBertConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: silently omit the modeling entries from the lazy table.
    pass
else:
    _import_structure["modeling_qdqbert"] = [
        "QDQBertForMaskedLM",
        "QDQBertForMultipleChoice",
        "QDQBertForNextSentencePrediction",
        "QDQBertForQuestionAnswering",
        "QDQBertForSequenceClassification",
        "QDQBertForTokenClassification",
        "QDQBertLayer",
        "QDQBertLMHeadModel",
        "QDQBertModel",
        "QDQBertPreTrainedModel",
        "load_tf_weights_in_qdqbert",
    ]


if TYPE_CHECKING:
    # Static type checkers follow these eager imports instead of the lazy table.
    from .configuration_qdqbert import QDQBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_qdqbert import (
            QDQBertForMaskedLM,
            QDQBertForMultipleChoice,
            QDQBertForNextSentencePrediction,
            QDQBertForQuestionAnswering,
            QDQBertForSequenceClassification,
            QDQBertForTokenClassification,
            QDQBertLayer,
            QDQBertLMHeadModel,
            QDQBertModel,
            QDQBertPreTrainedModel,
            load_tf_weights_in_qdqbert,
        )


else:
    import sys

    # At runtime, replace this module object with a proxy that imports each
    # submodule on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|