Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Double.h +30 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_scramble_compositeimplicitautograd_dispatch.h +23 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_and_compositeexplicitautogradnonfunctional_dispatch.h +24 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_compositeimplicitautograd_dispatch.h +23 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ihfft2_native.h +22 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig_cpu_dispatch.h +25 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/lt_native.h +30 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_rnn.h +39 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/sin.h +44 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0_compositeexplicitautogradnonfunctional_dispatch.h +23 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/to_dense_backward_compositeimplicitautograd_dispatch.h +23 -0
- phi4/lib/python3.10/site-packages/transformers/models/ctrl/__init__.py +29 -0
- phi4/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/__init__.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/configuration_ctrl.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/modeling_ctrl.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/modeling_tf_ctrl.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/tokenization_ctrl.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/transformers/models/ctrl/configuration_ctrl.py +116 -0
- phi4/lib/python3.10/site-packages/transformers/models/ctrl/modeling_ctrl.py +844 -0
- phi4/lib/python3.10/site-packages/transformers/models/ctrl/modeling_tf_ctrl.py +931 -0
- phi4/lib/python3.10/site-packages/transformers/models/ctrl/tokenization_ctrl.py +251 -0
- phi4/lib/python3.10/site-packages/transformers/models/fsmt/__init__.py +28 -0
- phi4/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/__init__.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/configuration_fsmt.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/modeling_fsmt.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/tokenization_fsmt.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/transformers/models/fsmt/configuration_fsmt.py +218 -0
- phi4/lib/python3.10/site-packages/transformers/models/fsmt/modeling_fsmt.py +1369 -0
- phi4/lib/python3.10/site-packages/transformers/models/fsmt/tokenization_fsmt.py +521 -0
- phi4/lib/python3.10/site-packages/transformers/models/gptj/__init__.py +29 -0
- phi4/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/__init__.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/configuration_gptj.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_flax_gptj.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_gptj.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_tf_gptj.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/transformers/models/gptj/configuration_gptj.py +219 -0
- phi4/lib/python3.10/site-packages/transformers/models/gptj/modeling_flax_gptj.py +721 -0
- phi4/lib/python3.10/site-packages/transformers/models/gptj/modeling_gptj.py +1407 -0
- phi4/lib/python3.10/site-packages/transformers/models/gptj/modeling_tf_gptj.py +1107 -0
- phi4/lib/python3.10/site-packages/transformers/models/phimoe/__init__.py +28 -0
- phi4/lib/python3.10/site-packages/transformers/models/phimoe/__pycache__/__init__.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/transformers/models/phimoe/__pycache__/configuration_phimoe.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/transformers/models/phimoe/__pycache__/modeling_phimoe.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/transformers/models/phimoe/configuration_phimoe.py +203 -0
- phi4/lib/python3.10/site-packages/transformers/models/phimoe/modeling_phimoe.py +1631 -0
- phi4/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/__init__.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/transformers/utils/__init__.py +320 -0
- phi4/lib/python3.10/site-packages/transformers/utils/backbone_utils.py +377 -0
- phi4/lib/python3.10/site-packages/transformers/utils/bitsandbytes.py +28 -0
- phi4/lib/python3.10/site-packages/transformers/utils/chat_template_utils.py +430 -0
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Double.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_cast_Double_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor
|
| 26 |
+
inline at::Tensor _cast_Double(const at::Tensor & self, bool non_blocking=false) {
|
| 27 |
+
return at::_ops::_cast_Double::call(self, non_blocking);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
}
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_scramble_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor & _sobol_engine_scramble_(at::Tensor & self, const at::Tensor & ltm, int64_t dimension);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeimplicitautograd
|
| 23 |
+
} // namespace at
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_and_compositeexplicitautogradnonfunctional_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautogradnonfunctional {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor bitwise_and(const at::Tensor & self, const at::Tensor & other);
|
| 21 |
+
TORCH_API at::Tensor & bitwise_and_(at::Tensor & self, const at::Tensor & other);
|
| 22 |
+
|
| 23 |
+
} // namespace compositeexplicitautogradnonfunctional
|
| 24 |
+
} // namespace at
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor fake_quantize_per_channel_affine_cachemask_backward(const at::Tensor & grad, const at::Tensor & mask);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeimplicitautograd
|
| 23 |
+
} // namespace at
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ihfft2_native.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor fft_ihfft2(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt);
|
| 20 |
+
TORCH_API const at::Tensor & fft_ihfft2_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out);
|
| 21 |
+
} // namespace native
|
| 22 |
+
} // namespace at
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_eig(const at::Tensor & self);
|
| 21 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_out(at::Tensor & eigenvalues, at::Tensor & eigenvectors, const at::Tensor & self);
|
| 22 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_outf(const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors);
|
| 23 |
+
|
| 24 |
+
} // namespace cpu
|
| 25 |
+
} // namespace at
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/lt_native.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
#include <ATen/ops/lt_meta.h>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
struct TORCH_API structured_lt_Scalar_out : public at::meta::structured_lt_Scalar {
|
| 20 |
+
void impl(const at::Tensor & self, const at::Scalar & other, const at::Tensor & out);
|
| 21 |
+
};
|
| 22 |
+
TORCH_API at::Tensor lt_quantized_cpu(const at::Tensor & self, const at::Scalar & other);
|
| 23 |
+
TORCH_API at::Tensor & lt_out_quantized_cpu(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
|
| 24 |
+
struct TORCH_API structured_lt_Tensor_out : public at::meta::structured_lt_Tensor {
|
| 25 |
+
void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out);
|
| 26 |
+
};
|
| 27 |
+
TORCH_API at::Tensor lt_quantized_cpu(const at::Tensor & self, const at::Tensor & other);
|
| 28 |
+
TORCH_API at::Tensor & lt_out_quantized_cpu(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
|
| 29 |
+
} // namespace native
|
| 30 |
+
} // namespace at
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_rnn.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/miopen_rnn_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
|
| 26 |
+
inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> miopen_rnn(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
|
| 27 |
+
return at::_ops::miopen_rnn::call(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
|
| 31 |
+
inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> miopen_rnn_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
|
| 32 |
+
return at::_ops::miopen_rnn_out::call(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
|
| 33 |
+
}
|
| 34 |
+
// aten::miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
|
| 35 |
+
inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> miopen_rnn_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
|
| 36 |
+
return at::_ops::miopen_rnn_out::call(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/sin.h
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/sin_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::sin(Tensor self) -> Tensor
|
| 26 |
+
inline at::Tensor sin(const at::Tensor & self) {
|
| 27 |
+
return at::_ops::sin::call(self);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::sin_(Tensor(a!) self) -> Tensor(a!)
|
| 31 |
+
inline at::Tensor & sin_(at::Tensor & self) {
|
| 32 |
+
return at::_ops::sin_::call(self);
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
// aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
|
| 36 |
+
inline at::Tensor & sin_out(at::Tensor & out, const at::Tensor & self) {
|
| 37 |
+
return at::_ops::sin_out::call(self, out);
|
| 38 |
+
}
|
| 39 |
+
// aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
|
| 40 |
+
inline at::Tensor & sin_outf(const at::Tensor & self, at::Tensor & out) {
|
| 41 |
+
return at::_ops::sin_out::call(self, out);
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
}
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0_compositeexplicitautogradnonfunctional_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautogradnonfunctional {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor special_modified_bessel_i0(const at::Tensor & self);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeexplicitautogradnonfunctional
|
| 23 |
+
} // namespace at
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/to_dense_backward_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor to_dense_backward(const at::Tensor & grad, const at::Tensor & input);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeimplicitautograd
|
| 23 |
+
} // namespace at
|
phi4/lib/python3.10/site-packages/transformers/models/ctrl/__init__.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
from typing import TYPE_CHECKING
|
| 15 |
+
|
| 16 |
+
from ...utils import _LazyModule
|
| 17 |
+
from ...utils.import_utils import define_import_structure
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
if TYPE_CHECKING:
|
| 21 |
+
from .configuration_ctrl import *
|
| 22 |
+
from .modeling_ctrl import *
|
| 23 |
+
from .modeling_tf_ctrl import *
|
| 24 |
+
from .tokenization_ctrl import *
|
| 25 |
+
else:
|
| 26 |
+
import sys
|
| 27 |
+
|
| 28 |
+
_file = globals()["__file__"]
|
| 29 |
+
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
phi4/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (587 Bytes). View file
|
|
|
phi4/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/configuration_ctrl.cpython-310.pyc
ADDED
|
Binary file (4.13 kB). View file
|
|
|
phi4/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/modeling_ctrl.cpython-310.pyc
ADDED
|
Binary file (26.6 kB). View file
|
|
|
phi4/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/modeling_tf_ctrl.cpython-310.pyc
ADDED
|
Binary file (29 kB). View file
|
|
|
phi4/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/tokenization_ctrl.cpython-310.pyc
ADDED
|
Binary file (7.53 kB). View file
|
|
|
phi4/lib/python3.10/site-packages/transformers/models/ctrl/configuration_ctrl.py
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2018 Salesforce and HuggingFace Inc. team.
|
| 3 |
+
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""Salesforce CTRL configuration"""
|
| 16 |
+
|
| 17 |
+
from ...configuration_utils import PretrainedConfig
|
| 18 |
+
from ...utils import logging
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
logger = logging.get_logger(__name__)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class CTRLConfig(PretrainedConfig):
|
| 25 |
+
"""
|
| 26 |
+
This is the configuration class to store the configuration of a [`CTRLModel`] or a [`TFCTRLModel`]. It is used to
|
| 27 |
+
instantiate a CTRL model according to the specified arguments, defining the model architecture. Instantiating a
|
| 28 |
+
configuration with the defaults will yield a similar configuration to that of the
|
| 29 |
+
[Salesforce/ctrl](https://huggingface.co/Salesforce/ctrl) architecture from SalesForce.
|
| 30 |
+
|
| 31 |
+
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
| 32 |
+
documentation from [`PretrainedConfig`] for more information.
|
| 33 |
+
|
| 34 |
+
Args:
|
| 35 |
+
vocab_size (`int`, *optional*, defaults to 246534):
|
| 36 |
+
Vocabulary size of the CTRL model. Defines the number of different tokens that can be represented by the
|
| 37 |
+
`inputs_ids` passed when calling [`CTRLModel`] or [`TFCTRLModel`].
|
| 38 |
+
n_positions (`int`, *optional*, defaults to 256):
|
| 39 |
+
The maximum sequence length that this model might ever be used with. Typically set this to something large
|
| 40 |
+
just in case (e.g., 512 or 1024 or 2048).
|
| 41 |
+
n_embd (`int`, *optional*, defaults to 1280):
|
| 42 |
+
Dimensionality of the embeddings and hidden states.
|
| 43 |
+
dff (`int`, *optional*, defaults to 8192):
|
| 44 |
+
Dimensionality of the inner dimension of the feed forward networks (FFN).
|
| 45 |
+
n_layer (`int`, *optional*, defaults to 48):
|
| 46 |
+
Number of hidden layers in the Transformer encoder.
|
| 47 |
+
n_head (`int`, *optional*, defaults to 16):
|
| 48 |
+
Number of attention heads for each attention layer in the Transformer encoder.
|
| 49 |
+
resid_pdrop (`float`, *optional*, defaults to 0.1):
|
| 50 |
+
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
|
| 51 |
+
embd_pdrop (`int`, *optional*, defaults to 0.1):
|
| 52 |
+
The dropout ratio for the embeddings.
|
| 53 |
+
layer_norm_epsilon (`float`, *optional*, defaults to 1e-06):
|
| 54 |
+
The epsilon to use in the layer normalization layers
|
| 55 |
+
initializer_range (`float`, *optional*, defaults to 0.02):
|
| 56 |
+
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
|
| 57 |
+
use_cache (`bool`, *optional*, defaults to `True`):
|
| 58 |
+
Whether or not the model should return the last key/values attentions (not used by all models).
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
Examples:
|
| 62 |
+
|
| 63 |
+
```python
|
| 64 |
+
>>> from transformers import CTRLConfig, CTRLModel
|
| 65 |
+
|
| 66 |
+
>>> # Initializing a CTRL configuration
|
| 67 |
+
>>> configuration = CTRLConfig()
|
| 68 |
+
|
| 69 |
+
>>> # Initializing a model (with random weights) from the configuration
|
| 70 |
+
>>> model = CTRLModel(configuration)
|
| 71 |
+
|
| 72 |
+
>>> # Accessing the model configuration
|
| 73 |
+
>>> configuration = model.config
|
| 74 |
+
```"""
|
| 75 |
+
|
| 76 |
+
model_type = "ctrl"
|
| 77 |
+
keys_to_ignore_at_inference = ["past_key_values"]
|
| 78 |
+
attribute_map = {
|
| 79 |
+
"max_position_embeddings": "n_positions",
|
| 80 |
+
"hidden_size": "n_embd",
|
| 81 |
+
"num_attention_heads": "n_head",
|
| 82 |
+
"num_hidden_layers": "n_layer",
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        # Store the CTRL architecture hyper-parameters on the config object.
        # Any extra keyword arguments are forwarded to the base config class
        # (presumably PretrainedConfig — confirmed only by convention, the
        # class header is outside this view).
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range

        self.use_cache = use_cache

        super().__init__(**kwargs)
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
__all__ = ["CTRLConfig"]
|
phi4/lib/python3.10/site-packages/transformers/models/ctrl/modeling_ctrl.py
ADDED
|
@@ -0,0 +1,844 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2018 Salesforce and HuggingFace Inc. team.
|
| 3 |
+
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
"""PyTorch CTRL model."""
|
| 17 |
+
|
| 18 |
+
from typing import Optional, Tuple, Union
|
| 19 |
+
|
| 20 |
+
import numpy as np
|
| 21 |
+
import torch
|
| 22 |
+
from torch import nn
|
| 23 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
| 24 |
+
|
| 25 |
+
from ...generation import GenerationMixin
|
| 26 |
+
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutput
|
| 27 |
+
from ...modeling_utils import PreTrainedModel
|
| 28 |
+
from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_linear_layer
|
| 29 |
+
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
|
| 30 |
+
from .configuration_ctrl import CTRLConfig
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
logger = logging.get_logger(__name__)
|
| 34 |
+
|
| 35 |
+
_CONFIG_FOR_DOC = "CTRLConfig"
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def angle_defn(pos, i, d_model_size):
    """Return the sinusoid argument ``pos / 10000 ** (2 * (i // 2) / d_model_size)``.

    ``pos`` and ``i`` broadcast against each other; adjacent even/odd values of
    ``i`` share one frequency so that sin/cos pairs use a common rate.
    """
    inv_freq = 1 / torch.pow(10000, (2 * (i // 2)) / d_model_size)
    return pos * inv_freq


def positional_encoding(position, d_model_size, dtype):
    """Build the ``(position, d_model_size)`` sinusoidal position table.

    Even angle columns go through ``sin`` and odd columns through ``cos``; the
    two halves are concatenated (not interleaved) along the last axis.
    """
    positions = torch.arange(position, dtype=torch.int64).to(dtype).unsqueeze(1)
    dims = torch.arange(d_model_size, dtype=torch.int64).to(dtype).unsqueeze(0)
    angle_rads = angle_defn(positions, dims, d_model_size)

    return torch.cat(
        [torch.sin(angle_rads[:, 0::2]), torch.cos(angle_rads[:, 1::2])],
        dim=-1,
    )
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None):
    """Compute scaled dot-product attention over per-head tensors.

    ``q``/``k``/``v`` are ``(batch, heads, seq, depth)``. ``mask`` is a causal
    upper-triangular matrix (1 = blocked); ``attention_mask`` is an additive
    padding mask; ``head_mask`` multiplicatively zeroes whole heads.
    Returns ``(output, attention_weights)``.
    """
    # Raw attention scores, scaled by sqrt(d_k).
    scores = torch.matmul(q, k.permute(0, 1, 3, 2)) / np.sqrt(k.shape[-1])

    if mask is not None:
        # Slice the causal mask to the current window (queries may be a
        # suffix of the keys when a KV cache is in use) and push blocked
        # positions toward -inf with a large negative bias.
        nd, ns = scores.size(-2), scores.size(-1)
        scores += mask[ns - nd : ns, :ns] * -1e4

    if attention_mask is not None:
        # Additive padding mask (already scaled by the caller).
        scores = scores + attention_mask

    attention_weights = torch.softmax(scores, dim=-1)

    # Optionally silence whole heads.
    if head_mask is not None:
        attention_weights = attention_weights * head_mask

    return torch.matmul(attention_weights, v), attention_weights
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
class MultiHeadAttention(nn.Module):
    """Multi-head attention with separate Q/K/V projections.

    Splits the model dimension into `num_heads` heads of size `depth`,
    supports head pruning and an optional key/value cache (`layer_past`).
    """

    def __init__(self, d_model_size, num_heads):
        super().__init__()
        self.num_heads = num_heads
        self.d_model_size = d_model_size

        # Per-head width; assumes d_model_size is divisible by num_heads.
        self.depth = int(d_model_size / self.num_heads)

        self.Wq = nn.Linear(d_model_size, d_model_size)
        self.Wk = nn.Linear(d_model_size, d_model_size)
        self.Wv = nn.Linear(d_model_size, d_model_size)

        self.dense = nn.Linear(d_model_size, d_model_size)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given head indices from all four projection layers."""
        attention_head_size = self.d_model_size // self.num_heads
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, attention_head_size, self.pruned_heads)

        # Prune linear layers (output rows for Q/K/V, input columns for dense).
        self.Wq = prune_linear_layer(self.Wq, index)
        self.Wk = prune_linear_layer(self.Wk, index)
        self.Wv = prune_linear_layer(self.Wv, index)
        self.dense = prune_linear_layer(self.dense, index, dim=1)

        # Update hyper params; `self.depth` (per-head size) is unchanged.
        self.num_heads = self.num_heads - len(heads)
        self.d_model_size = attention_head_size * self.num_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def split_into_heads(self, x, batch_size):
        # (batch, seq, d_model) -> (batch, heads, seq, depth)
        x = x.reshape(batch_size, -1, self.num_heads, self.depth)
        return x.permute([0, 2, 1, 3])

    def forward(
        self,
        v,
        k,
        q,
        mask,
        layer_past=None,
        attention_mask=None,
        head_mask=None,
        use_cache=False,
        output_attentions=False,
    ):
        """Run attention; returns (output, present[, attention_weights])."""
        batch_size = q.shape[0]

        q = self.Wq(q)
        k = self.Wk(k)
        v = self.Wv(v)

        q = self.split_into_heads(q, batch_size)
        k = self.split_into_heads(k, batch_size)
        v = self.split_into_heads(v, batch_size)
        if layer_past is not None:
            # Prepend cached keys/values along the sequence axis.
            past_key, past_value = layer_past[0], layer_past[1]
            k = torch.cat((past_key, k), dim=-2)
            v = torch.cat((past_value, v), dim=-2)

        if use_cache is True:
            # Stack (key, value) into one tensor for the returned cache.
            present = torch.stack((k, v))
        else:
            present = (None,)

        output = scaled_dot_product_attention(q, k, v, mask, attention_mask, head_mask)
        # (batch, heads, seq, depth) -> (batch, seq, heads, depth) -> (batch, seq, d_model)
        scaled_attention = output[0].permute([0, 2, 1, 3])
        attn = output[1]
        original_size_attention = scaled_attention.reshape(batch_size, -1, self.d_model_size)
        output = self.dense(original_size_attention)

        outputs = (output, present)
        if output_attentions:
            outputs = outputs + (attn,)
        return outputs
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def point_wise_feed_forward_network(d_model_size, dff):
    """Return the position-wise MLP: ``d_model -> dff -> ReLU -> d_model``."""
    return nn.Sequential(
        nn.Linear(d_model_size, dff),
        nn.ReLU(),
        nn.Linear(dff, d_model_size),
    )
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
class EncoderLayer(nn.Module):
    """One pre-LayerNorm transformer block: self-attention then feed-forward,
    each wrapped in a residual connection with dropout on the sub-layer output.
    """

    def __init__(self, d_model_size, num_heads, dff, rate=0.1):
        super().__init__()

        self.multi_head_attention = MultiHeadAttention(d_model_size, num_heads)
        self.ffn = point_wise_feed_forward_network(d_model_size, dff)

        self.layernorm1 = nn.LayerNorm(d_model_size, eps=1e-6)
        self.layernorm2 = nn.LayerNorm(d_model_size, eps=1e-6)

        self.dropout1 = nn.Dropout(rate)
        self.dropout2 = nn.Dropout(rate)

    def forward(
        self, x, mask, layer_past=None, attention_mask=None, head_mask=None, use_cache=False, output_attentions=False
    ):
        """Apply the block; returns (hidden_states, present[, attentions])."""
        # Pre-norm self-attention with residual connection around the
        # un-normalized input `x`.
        normed = self.layernorm1(x)
        attn_outputs = self.multi_head_attention(
            normed,
            normed,
            normed,
            mask,
            layer_past=layer_past,
            attention_mask=attention_mask,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        attn_output = attn_outputs[0]
        attn_output = self.dropout1(attn_output)
        out1 = x + attn_output

        # Pre-norm feed-forward with residual connection.
        out2 = self.layernorm2(out1)
        ffn_output = self.ffn(out2)
        ffn_output = self.dropout2(ffn_output)
        out2 = out1 + ffn_output

        # Mirror MultiHeadAttention's trailing outputs (present, attentions).
        outputs = (out2,) + attn_outputs[1:]
        return outputs
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
class CTRLPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = CTRLConfig
    base_model_prefix = "transformer"

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, Conv1D)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                # Keep the padding token's embedding at exactly zero.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            # LayerNorm starts as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
CTRL_START_DOCSTRING = r"""
|
| 235 |
+
|
| 236 |
+
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
| 237 |
+
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
|
| 238 |
+
etc.)
|
| 239 |
+
|
| 240 |
+
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
|
| 241 |
+
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
|
| 242 |
+
and behavior.
|
| 243 |
+
|
| 244 |
+
Parameters:
|
| 245 |
+
config ([`CTRLConfig`]): Model configuration class with all the parameters of the model.
|
| 246 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
| 247 |
+
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
| 248 |
+
"""
|
| 249 |
+
|
| 250 |
+
CTRL_INPUTS_DOCSTRING = r"""
|
| 251 |
+
Args:
|
| 252 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
| 253 |
+
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0].shape[-2]`
|
| 254 |
+
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
|
| 255 |
+
|
| 256 |
+
If `past_key_values` is used, only input IDs that do not have their past calculated should be passed as
|
| 257 |
+
`input_ids`.
|
| 258 |
+
|
| 259 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
|
| 260 |
+
[`PreTrainedTokenizer.encode`] for details.
|
| 261 |
+
|
| 262 |
+
[What are input IDs?](../glossary#input-ids)
|
| 263 |
+
past_key_values (`Tuple[Tuple[torch.FloatTensor]]` of length `config.n_layers`):
|
| 264 |
+
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
|
| 265 |
+
`past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
|
| 266 |
+
their past given to this model should not be passed as input ids as they have already been computed.
|
| 267 |
+
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 268 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
| 269 |
+
|
| 270 |
+
- 1 for tokens that are **not masked**,
|
| 271 |
+
- 0 for tokens that are **masked**.
|
| 272 |
+
|
| 273 |
+
[What are attention masks?](../glossary#attention-mask)
|
| 274 |
+
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 275 |
+
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
|
| 276 |
+
1]`:
|
| 277 |
+
|
| 278 |
+
- 0 corresponds to a *sentence A* token,
|
| 279 |
+
- 1 corresponds to a *sentence B* token.
|
| 280 |
+
|
| 281 |
+
[What are token type IDs?](../glossary#token-type-ids)
|
| 282 |
+
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 283 |
+
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
|
| 284 |
+
config.max_position_embeddings - 1]`.
|
| 285 |
+
|
| 286 |
+
[What are position IDs?](../glossary#position-ids)
|
| 287 |
+
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
|
| 288 |
+
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
|
| 289 |
+
|
| 290 |
+
- 1 indicates the head is **not masked**,
|
| 291 |
+
- 0 indicates the head is **masked**.
|
| 292 |
+
|
| 293 |
+
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
| 294 |
+
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
|
| 295 |
+
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
|
| 296 |
+
model's internal embedding lookup matrix.
|
| 297 |
+
use_cache (`bool`, *optional*):
|
| 298 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
|
| 299 |
+
`past_key_values`).
|
| 300 |
+
output_attentions (`bool`, *optional*):
|
| 301 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 302 |
+
tensors for more detail.
|
| 303 |
+
output_hidden_states (`bool`, *optional*):
|
| 304 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 305 |
+
more detail.
|
| 306 |
+
return_dict (`bool`, *optional*):
|
| 307 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 308 |
+
"""
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
@add_start_docstrings(
    "The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.",
    CTRL_START_DOCSTRING,
)
class CTRLModel(CTRLPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.d_model_size = config.n_embd
        self.num_layers = config.n_layer

        # Fixed (non-learned) sinusoidal position table, kept as a plain tensor
        # (not a buffer), hence the manual device move in `forward`.
        self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size, torch.float)

        self.w = nn.Embedding(config.vocab_size, config.n_embd)

        self.dropout = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList(
            [EncoderLayer(config.n_embd, config.n_head, config.dff, config.resid_pdrop) for _ in range(config.n_layer)]
        )
        self.layernorm = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.w

    def set_input_embeddings(self, new_embeddings):
        self.w = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.h[layer].multi_head_attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPast]:
        r"""
        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, CTRLModel
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl")
        >>> model = CTRLModel.from_pretrained("Salesforce/ctrl")

        >>> # CTRL was trained with control codes as the first token
        >>> inputs = tokenizer("Opinion My dog is cute", return_tensors="pt")
        >>> assert inputs["input_ids"][0, 0].item() in tokenizer.control_codes.values()

        >>> outputs = model(**inputs)

        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 5, 1280]
        ```"""
        # Resolve None flags against the config defaults.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * len(self.h))
        else:
            # Cached key length gives the offset for new position ids.
            past_length = past_key_values[0][0].size(-2)
        if position_ids is None:
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0)

        # Attention mask.
        if attention_mask is not None:
            if batch_size <= 0:
                raise ValueError("batch_size has to be defined and > 0")
            attention_mask = attention_mask.view(batch_size, -1)
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)

            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and the dtype's smallest value for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min

        # Prepare head mask if needed
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
            token_type_embeds = self.w(token_type_ids)
            # Token-type embeddings are scaled like the input embeddings below.
            token_type_embeds *= np.sqrt(self.d_model_size)
        else:
            token_type_embeds = 0

        if inputs_embeds is None:
            inputs_embeds = self.w(input_ids)
        # inputs_embeds = embedded.unsqueeze(0) if len(input_ids.shape)<2 else embedded
        seq_len = input_shape[-1]
        # Causal (strict upper-triangular) mask covering cached + current positions.
        mask = torch.triu(torch.ones(seq_len + past_length, seq_len + past_length), 1).to(device)

        inputs_embeds *= np.sqrt(self.d_model_size)

        # `self.pos_encoding` won't be sent to the correct device along the model, so we do it manually.
        self.pos_encoding = self.pos_encoding.to(device)
        pos_embeds = self.pos_encoding[position_ids, :]

        hidden_states = inputs_embeds + pos_embeds + token_type_embeds

        hidden_states = self.dropout(hidden_states)

        presents = () if use_cache else None
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        for i, (h, layer_past) in enumerate(zip(self.h, past_key_values)):
            if output_hidden_states:
                # Record the hidden state entering each layer.
                all_hidden_states = all_hidden_states + (hidden_states,)
            outputs = h(
                hidden_states,
                mask,
                layer_past=layer_past,
                attention_mask=attention_mask,
                head_mask=head_mask[i],
                use_cache=use_cache,
                output_attentions=output_attentions,
            )
            hidden_states, present = outputs[:2]
            if use_cache is True:
                presents = presents + (present,)

            if output_attentions:
                all_attentions += (outputs[2],)

        # Final LayerNorm after the last block.
        hidden_states = self.layernorm(hidden_states)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_attentions,
        )
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
@add_start_docstrings(
|
| 501 |
+
"""
|
| 502 |
+
The CTRL Model transformer with a language modeling head on top (linear layer with weights tied to the input
|
| 503 |
+
embeddings).
|
| 504 |
+
""",
|
| 505 |
+
CTRL_START_DOCSTRING,
|
| 506 |
+
)
|
| 507 |
+
class CTRLLMHeadModel(CTRLPreTrainedModel, GenerationMixin):
|
| 508 |
+
_tied_weights_keys = ["lm_head.weight"]
|
| 509 |
+
|
| 510 |
+
    def __init__(self, config):
        # Base CTRL transformer plus a vocab-sized LM head. The head's weight
        # is listed in `_tied_weights_keys`, so it is tied to the input
        # embeddings by the framework's weight-tying machinery.
        super().__init__(config)
        self.transformer = CTRLModel(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=True)

        # Initialize weights and apply final processing
        self.post_init()
|
| 517 |
+
|
| 518 |
+
    def get_output_embeddings(self):
        # The LM head serves as the output embedding module (see
        # `_tied_weights_keys` on the class).
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings
|
| 523 |
+
|
| 524 |
+
@add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
|
| 525 |
+
@replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
|
| 526 |
+
def forward(
|
| 527 |
+
self,
|
| 528 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 529 |
+
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
| 530 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
| 531 |
+
token_type_ids: Optional[torch.LongTensor] = None,
|
| 532 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 533 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
| 534 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 535 |
+
labels: Optional[torch.LongTensor] = None,
|
| 536 |
+
use_cache: Optional[bool] = None,
|
| 537 |
+
output_attentions: Optional[bool] = None,
|
| 538 |
+
output_hidden_states: Optional[bool] = None,
|
| 539 |
+
return_dict: Optional[bool] = None,
|
| 540 |
+
) -> Union[Tuple[torch.Tensor], CausalLMOutputWithPast]:
|
| 541 |
+
r"""
|
| 542 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 543 |
+
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
|
| 544 |
+
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
|
| 545 |
+
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
|
| 546 |
+
|
| 547 |
+
Returns:
|
| 548 |
+
|
| 549 |
+
Example:
|
| 550 |
+
|
| 551 |
+
```python
|
| 552 |
+
>>> import torch
|
| 553 |
+
>>> from transformers import AutoTokenizer, CTRLLMHeadModel
|
| 554 |
+
|
| 555 |
+
>>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl")
|
| 556 |
+
>>> model = CTRLLMHeadModel.from_pretrained("Salesforce/ctrl")
|
| 557 |
+
|
| 558 |
+
>>> # CTRL was trained with control codes as the first token
|
| 559 |
+
>>> inputs = tokenizer("Wikipedia The llama is", return_tensors="pt")
|
| 560 |
+
>>> assert inputs["input_ids"][0, 0].item() in tokenizer.control_codes.values()
|
| 561 |
+
|
| 562 |
+
>>> sequence_ids = model.generate(inputs["input_ids"])
|
| 563 |
+
>>> sequences = tokenizer.batch_decode(sequence_ids)
|
| 564 |
+
>>> sequences
|
| 565 |
+
['Wikipedia The llama is a member of the family Bovidae. It is native to the Andes of Peru,']
|
| 566 |
+
|
| 567 |
+
>>> outputs = model(**inputs, labels=inputs["input_ids"])
|
| 568 |
+
>>> round(outputs.loss.item(), 2)
|
| 569 |
+
9.21
|
| 570 |
+
|
| 571 |
+
>>> list(outputs.logits.shape)
|
| 572 |
+
[1, 5, 246534]
|
| 573 |
+
```"""
|
| 574 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 575 |
+
|
| 576 |
+
transformer_outputs = self.transformer(
|
| 577 |
+
input_ids,
|
| 578 |
+
past_key_values=past_key_values,
|
| 579 |
+
attention_mask=attention_mask,
|
| 580 |
+
token_type_ids=token_type_ids,
|
| 581 |
+
position_ids=position_ids,
|
| 582 |
+
head_mask=head_mask,
|
| 583 |
+
inputs_embeds=inputs_embeds,
|
| 584 |
+
use_cache=use_cache,
|
| 585 |
+
output_attentions=output_attentions,
|
| 586 |
+
output_hidden_states=output_hidden_states,
|
| 587 |
+
return_dict=return_dict,
|
| 588 |
+
)
|
| 589 |
+
|
| 590 |
+
hidden_states = transformer_outputs[0]
|
| 591 |
+
|
| 592 |
+
lm_logits = self.lm_head(hidden_states)
|
| 593 |
+
|
| 594 |
+
loss = None
|
| 595 |
+
if labels is not None:
|
| 596 |
+
# Shift so that tokens < n predict n
|
| 597 |
+
shift_logits = lm_logits[..., :-1, :].contiguous()
|
| 598 |
+
shift_labels = labels[..., 1:].contiguous()
|
| 599 |
+
# Flatten the tokens
|
| 600 |
+
loss_fct = CrossEntropyLoss()
|
| 601 |
+
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
|
| 602 |
+
|
| 603 |
+
if not return_dict:
|
| 604 |
+
output = (lm_logits,) + transformer_outputs[1:]
|
| 605 |
+
return ((loss,) + output) if loss is not None else output
|
| 606 |
+
|
| 607 |
+
return CausalLMOutputWithPast(
|
| 608 |
+
loss=loss,
|
| 609 |
+
logits=lm_logits,
|
| 610 |
+
past_key_values=transformer_outputs.past_key_values,
|
| 611 |
+
hidden_states=transformer_outputs.hidden_states,
|
| 612 |
+
attentions=transformer_outputs.attentions,
|
| 613 |
+
)
|
| 614 |
+
|
| 615 |
+
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, use_cache=None, **kwargs):
|
| 616 |
+
# Overwritten -- inputs_embeds not working properly
|
| 617 |
+
|
| 618 |
+
# only last tokens for inputs_ids if past is defined in kwargs
|
| 619 |
+
if past_key_values is not None:
|
| 620 |
+
past_length = past_key_values[0][0].shape[2]
|
| 621 |
+
|
| 622 |
+
# Some generation methods already pass only the last input ID
|
| 623 |
+
if input_ids.shape[1] > past_length:
|
| 624 |
+
remove_prefix_length = past_length
|
| 625 |
+
else:
|
| 626 |
+
# Default to old behavior: keep only final ID
|
| 627 |
+
remove_prefix_length = input_ids.shape[1] - 1
|
| 628 |
+
|
| 629 |
+
input_ids = input_ids[:, remove_prefix_length:]
|
| 630 |
+
|
| 631 |
+
return {"input_ids": input_ids, "past_key_values": past_key_values, "use_cache": use_cache}
|
| 632 |
+
|
| 633 |
+
@staticmethod
|
| 634 |
+
def _reorder_cache(
|
| 635 |
+
past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
|
| 636 |
+
) -> Tuple[Tuple[torch.Tensor]]:
|
| 637 |
+
"""
|
| 638 |
+
This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
|
| 639 |
+
[`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
|
| 640 |
+
beam_idx at every generation step.
|
| 641 |
+
"""
|
| 642 |
+
return tuple(
|
| 643 |
+
tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
|
| 644 |
+
for layer_past in past_key_values
|
| 645 |
+
)
|
| 646 |
+
|
| 647 |
+
|
| 648 |
+
@add_start_docstrings(
    """
    The CTRL Model transformer with a sequence classification head on top (linear layer).
    [`CTRLForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do. Since it does classification on the last token, it requires to know the position of the last
    token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in
    each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot
    guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last
    value in each row of the batch).
    """,
    CTRL_START_DOCSTRING,
)
class CTRLForSequenceClassification(CTRLPreTrainedModel):
    def __init__(self, config):
        """Build the base CTRL transformer plus a bias-free classification head."""
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = CTRLModel(config)
        self.classifier = nn.Linear(config.n_embd, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Returns:

        Example of single-label classification:

        ```python
        >>> import torch
        >>> from transformers import AutoTokenizer, CTRLForSequenceClassification

        >>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl")
        >>> model = CTRLForSequenceClassification.from_pretrained("Salesforce/ctrl")

        >>> # CTRL was trained with control codes as the first token
        >>> inputs = tokenizer("Opinion My dog is cute", return_tensors="pt")
        >>> assert inputs["input_ids"][0, 0].item() in tokenizer.control_codes.values()

        >>> with torch.no_grad():
        ...     logits = model(**inputs).logits

        >>> predicted_class_id = logits.argmax().item()
        >>> model.config.id2label[predicted_class_id]
        'LABEL_0'
        ```

        ```python
        >>> import torch

        >>> torch.manual_seed(42)  # doctest: +IGNORE_RESULT
        >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)`
        >>> num_labels = len(model.config.id2label)
        >>> model = CTRLForSequenceClassification.from_pretrained("Salesforce/ctrl", num_labels=num_labels)

        >>> labels = torch.tensor(1)
        >>> loss = model(**inputs, labels=labels).loss
        >>> round(loss.item(), 2)
        0.93
        ```

        Example of multi-label classification:

        ```python
        >>> import torch
        >>> from transformers import AutoTokenizer, CTRLForSequenceClassification

        >>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl")
        >>> model = CTRLForSequenceClassification.from_pretrained(
        ...     "Salesforce/ctrl", problem_type="multi_label_classification"
        ... )

        >>> # CTRL was trained with control codes as the first token
        >>> inputs = tokenizer("Opinion My dog is cute", return_tensors="pt")
        >>> assert inputs["input_ids"][0, 0].item() in tokenizer.control_codes.values()

        >>> with torch.no_grad():
        ...     logits = model(**inputs).logits

        >>> predicted_class_id = logits.argmax().item()
        >>> model.config.id2label[predicted_class_id]
        'LABEL_0'
        ```

        ```python
        >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)`
        >>> num_labels = len(model.config.id2label)
        >>> model = CTRLForSequenceClassification.from_pretrained("Salesforce/ctrl", num_labels=num_labels)

        >>> num_labels = len(model.config.id2label)
        >>> labels = torch.nn.functional.one_hot(torch.tensor([predicted_class_id]), num_classes=num_labels).to(
        ...     torch.float
        ... )
        >>> loss = model(**inputs, labels=labels).loss
        >>> loss.backward()  # doctest: +IGNORE_RESULT
        ```"""

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = transformer_outputs[0]
        # Per-token logits; pooling to one logit vector per sequence happens below.
        logits = self.classifier(hidden_states)

        if input_ids is not None:
            batch_size, sequence_length = input_ids.shape[:2]
        else:
            batch_size, sequence_length = inputs_embeds.shape[:2]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")

        # Locate the last non-padding token in each row; -1 means "take the final position".
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
                sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
                sequence_lengths = sequence_lengths % input_ids.shape[-1]
                sequence_lengths = sequence_lengths.to(logits.device)
            else:
                sequence_lengths = -1
                logger.warning_once(
                    f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                    "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
                )

        # Pool: one logit vector per sequence, taken at the last (non-pad) token.
        pooled_logits = logits[range(batch_size), sequence_lengths]

        loss = None
        if labels is not None:
            # Infer problem_type once from num_labels / label dtype, then cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            # Tuple output skips past_key_values (index 1) on purpose.
            output = (pooled_logits,) + transformer_outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=pooled_logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
|
| 842 |
+
|
| 843 |
+
|
| 844 |
+
__all__ = ["CTRLForSequenceClassification", "CTRLLMHeadModel", "CTRLModel", "CTRLPreTrainedModel"]
|
phi4/lib/python3.10/site-packages/transformers/models/ctrl/modeling_tf_ctrl.py
ADDED
|
@@ -0,0 +1,931 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2018 Salesforce and HuggingFace Inc. team.
|
| 3 |
+
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
"""TF 2.0 CTRL model."""
|
| 17 |
+
|
| 18 |
+
from __future__ import annotations
|
| 19 |
+
|
| 20 |
+
from typing import Optional, Tuple, Union
|
| 21 |
+
|
| 22 |
+
import numpy as np
|
| 23 |
+
import tensorflow as tf
|
| 24 |
+
|
| 25 |
+
from ...modeling_tf_outputs import TFBaseModelOutputWithPast, TFCausalLMOutputWithPast, TFSequenceClassifierOutput
|
| 26 |
+
from ...modeling_tf_utils import (
|
| 27 |
+
TFCausalLanguageModelingLoss,
|
| 28 |
+
TFModelInputType,
|
| 29 |
+
TFPreTrainedModel,
|
| 30 |
+
TFSequenceClassificationLoss,
|
| 31 |
+
get_initializer,
|
| 32 |
+
keras,
|
| 33 |
+
keras_serializable,
|
| 34 |
+
unpack_inputs,
|
| 35 |
+
)
|
| 36 |
+
from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
|
| 37 |
+
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
|
| 38 |
+
from .configuration_ctrl import CTRLConfig
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
logger = logging.get_logger(__name__)
|
| 42 |
+
|
| 43 |
+
_CHECKPOINT_FOR_DOC = "Salesforce/ctrl"
|
| 44 |
+
_CONFIG_FOR_DOC = "CTRLConfig"
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def angle_defn(pos, i, d_model_size):
    """Sinusoidal angle for position `pos` and channel `i`: pos / 10000^(2*(i//2)/d_model_size)."""
    exponent = (2 * (i // 2)) / d_model_size
    inv_timescale = 1 / np.power(10000, exponent)
    return pos * inv_timescale
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def positional_encoding(position, d_model_size):
    """Build the (position, d_model_size) sinusoidal positional-encoding table as a tf tensor."""
    # create the sinusoidal pattern for the positional encoding
    positions = np.arange(position)[:, np.newaxis]
    channels = np.arange(d_model_size)[np.newaxis, :]
    angle_rads = angle_defn(positions, channels, d_model_size)

    # Even channels get sine, odd channels cosine; the two halves are concatenated.
    sin_part = np.sin(angle_rads[:, 0::2])
    cos_part = np.cos(angle_rads[:, 1::2])

    return tf.convert_to_tensor(np.concatenate([sin_part, cos_part], axis=-1))
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None):
    """Scaled dot-product attention with optional causal mask, additive attention mask and head mask.

    Returns a tuple `(context, attention_weights)`.
    """
    # calculate attention
    logits = tf.matmul(q, k, transpose_b=True)

    depth = tf.cast(shape_list(k)[-1], dtype=logits.dtype)
    logits = logits / tf.math.sqrt(depth)

    if mask is not None:
        # Causal mask: 1.0 at forbidden positions, scaled to a large negative additive penalty.
        logits += tf.cast(mask * -1e4, dtype=logits.dtype)

    if attention_mask is not None:
        # Apply the (already additive) attention mask supplied by the caller.
        logits = logits + tf.cast(attention_mask, dtype=logits.dtype)

    attention_weights = stable_softmax(logits, axis=-1)

    # Mask heads if we want to
    if head_mask is not None:
        attention_weights = attention_weights * head_mask

    context = tf.matmul(attention_weights, v)

    return context, attention_weights
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
class TFMultiHeadAttention(keras.layers.Layer):
    """Multi-head attention with optional key/value caching for incremental decoding."""

    def __init__(self, d_model_size, num_heads, output_attentions=False, **kwargs):
        super().__init__(**kwargs)
        self.num_heads = num_heads
        self.d_model_size = d_model_size
        self.output_attentions = output_attentions

        # Per-head feature size; assumes d_model_size is divisible by num_heads.
        self.depth = int(d_model_size / self.num_heads)

        self.Wq = keras.layers.Dense(d_model_size, name="Wq")
        self.Wk = keras.layers.Dense(d_model_size, name="Wk")
        self.Wv = keras.layers.Dense(d_model_size, name="Wv")

        self.dense = keras.layers.Dense(d_model_size, name="dense")

    def split_into_heads(self, x, batch_size):
        """Reshape (batch, seq, d_model) -> (batch, num_heads, seq, depth)."""
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def call(self, v, k, q, mask, layer_past, attention_mask, head_mask, use_cache, output_attentions, training=False):
        batch_size = shape_list(q)[0]

        q = self.Wq(q)
        k = self.Wk(k)
        v = self.Wv(v)

        q = self.split_into_heads(q, batch_size)
        k = self.split_into_heads(k, batch_size)
        v = self.split_into_heads(v, batch_size)

        # Prepend cached keys/values from previous decoding steps along the sequence axis.
        if layer_past is not None:
            past_key, past_value = tf.unstack(layer_past, axis=0)
            k = tf.concat((past_key, k), axis=-2)
            v = tf.concat((past_value, v), axis=-2)

        if use_cache:
            present = tf.stack((k, v), axis=0)
        else:
            present = (None,)

        output = scaled_dot_product_attention(q, k, v, mask, attention_mask, head_mask)
        # Merge heads back: (batch, heads, seq, depth) -> (batch, seq, d_model).
        scaled_attention = tf.transpose(output[0], perm=[0, 2, 1, 3])
        attn = output[1]
        original_size_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model_size))
        output = self.dense(original_size_attention)
        outputs = (output, present)

        if output_attentions:
            outputs = outputs + (attn,)

        return outputs

    def build(self, input_shape=None):
        # Idempotent explicit build of the four projections; all take d_model_size features.
        if self.built:
            return
        self.built = True
        if getattr(self, "Wq", None) is not None:
            with tf.name_scope(self.Wq.name):
                self.Wq.build([None, None, self.d_model_size])
        if getattr(self, "Wk", None) is not None:
            with tf.name_scope(self.Wk.name):
                self.Wk.build([None, None, self.d_model_size])
        if getattr(self, "Wv", None) is not None:
            with tf.name_scope(self.Wv.name):
                self.Wv.build([None, None, self.d_model_size])
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.d_model_size])
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
class TFPointWiseFeedForwardLayer(keras.layers.Layer):
    """Position-wise feed-forward block: Dense(dff, relu) followed by Dense(d_model_size)."""

    def __init__(self, d_model_size, dff, **kwargs):
        super().__init__(**kwargs)

        # Layer names "0"/"2" mirror the PyTorch nn.Sequential indices for checkpoint compatibility.
        self.dense_0 = keras.layers.Dense(dff, activation="relu", name="0")
        self.dense_2 = keras.layers.Dense(d_model_size, name="2")
        self.d_model_size = d_model_size
        self.dff = dff

    def call(self, inputs, trainable=False):
        hidden = self.dense_0(inputs)
        return self.dense_2(hidden)

    def build(self, input_shape=None):
        # Idempotent explicit build; each dense layer's input width differs.
        if self.built:
            return
        self.built = True
        for attr, in_width in (("dense_0", self.d_model_size), ("dense_2", self.dff)):
            layer = getattr(self, attr, None)
            if layer is not None:
                with tf.name_scope(layer.name):
                    layer.build([None, None, in_width])
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
class TFEncoderLayer(keras.layers.Layer):
    """One CTRL transformer block: pre-LayerNorm self-attention and feed-forward, each with a residual."""

    def __init__(
        self, d_model_size, num_heads, dff, rate=0.1, layer_norm_epsilon=1e-6, output_attentions=False, **kwargs
    ):
        super().__init__(**kwargs)

        self.output_attentions = output_attentions

        self.multi_head_attention = TFMultiHeadAttention(
            d_model_size, num_heads, output_attentions=self.output_attentions, name="multi_head_attention"
        )
        self.ffn = TFPointWiseFeedForwardLayer(d_model_size, dff, name="ffn")

        self.layernorm1 = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layernorm1")
        self.layernorm2 = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layernorm2")

        self.dropout1 = keras.layers.Dropout(rate)
        self.dropout2 = keras.layers.Dropout(rate)
        self.d_model_size = d_model_size

    def call(self, x, mask, layer_past, attention_mask, head_mask, use_cache, output_attentions, training=False):
        # Pre-norm self-attention sub-block with residual connection.
        normed = self.layernorm1(x)
        attn_outputs = self.multi_head_attention(
            normed,
            normed,
            normed,
            mask,
            layer_past,
            attention_mask,
            head_mask,
            use_cache,
            output_attentions,
            training=training,
        )
        attn_output = attn_outputs[0]
        attn_output = self.dropout1(attn_output, training=training)
        out1 = x + attn_output

        # Pre-norm feed-forward sub-block with residual connection.
        out2 = self.layernorm2(out1)
        ffn_output = self.ffn(out2)
        ffn_output = self.dropout2(ffn_output, training=training)
        out2 = out1 + ffn_output

        # (hidden_states, present[, attentions]) — mirrors the attention layer's outputs.
        outputs = (out2,) + attn_outputs[1:]
        return outputs

    def build(self, input_shape=None):
        # Idempotent explicit build of all sublayers.
        if self.built:
            return
        self.built = True
        if getattr(self, "multi_head_attention", None) is not None:
            with tf.name_scope(self.multi_head_attention.name):
                self.multi_head_attention.build(None)
        if getattr(self, "ffn", None) is not None:
            with tf.name_scope(self.ffn.name):
                self.ffn.build(None)
        if getattr(self, "layernorm1", None) is not None:
            with tf.name_scope(self.layernorm1.name):
                self.layernorm1.build([None, None, self.d_model_size])
        if getattr(self, "layernorm2", None) is not None:
            with tf.name_scope(self.layernorm2.name):
                self.layernorm2.build([None, None, self.d_model_size])
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
@keras_serializable
class TFCTRLMainLayer(keras.layers.Layer):
    """Core CTRL transformer stack: scaled token embeddings + sinusoidal
    positions, a stack of `TFEncoderLayer` blocks, and a final LayerNorm."""

    config_class = CTRLConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        self.config = config
        self.output_hidden_states = config.output_hidden_states
        self.output_attentions = config.output_attentions
        self.use_cache = config.use_cache
        self.return_dict = config.use_return_dict

        self.d_model_size = config.n_embd
        self.num_layers = config.n_layer

        # Precomputed sinusoidal position table, gathered by position id in call().
        self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size)

        self.w = keras.layers.Embedding(
            input_dim=config.vocab_size,
            output_dim=config.n_embd,
            embeddings_initializer=get_initializer(config.initializer_range),
            name="w",
        )

        self.dropout = keras.layers.Dropout(config.embd_pdrop)
        self.h = [
            TFEncoderLayer(
                config.n_embd,
                config.n_head,
                config.dff,
                config.resid_pdrop,
                config.layer_norm_epsilon,
                self.output_attentions,
                name=f"h_._{i}",
            )
            for i in range(config.n_layer)
        ]
        self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="layernorm")

    def get_input_embeddings(self):
        return self.w

    def set_input_embeddings(self, new_embeddings):
        self.w = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        raise NotImplementedError

    @unpack_inputs
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[Tuple, TFBaseModelOutputWithPast]:
        # With cached key/values, only the last token of each sequence needs
        # to be fed through the model.
        if past_key_values is not None:
            if input_ids is not None:
                input_ids = input_ids[:, -1:]
            if inputs_embeds is not None:
                inputs_embeds = inputs_embeds[:, -1:]
            if token_type_ids is not None:
                token_type_ids = token_type_ids[:, -1:]

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
            input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if past_key_values is None:
            past_length = 0
            past_key_values = [None] * len(self.h)
        else:
            # Key/value tensors are (..., past_seq_len, head_dim): dim -2 is the cached length.
            past_length = shape_list(past_key_values[0][0])[-2]
        if position_ids is None:
            # Positions continue from the end of the cache.
            position_ids = tf.expand_dims(tf.range(past_length, input_shape[-1] + past_length, dtype=tf.int32), axis=0)
            position_ids = tf.tile(position_ids, [input_shape[0], 1])

        # Attention mask.
        if attention_mask is not None:
            # Broadcast the 2D padding mask to [batch_size, 1, 1, to_seq_length]
            # so it applies across all heads and query positions. The causal
            # triangle is handled separately below.
            attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1] + past_length))

            # Convert 1/0 keep/mask values into additive 0 / -10000 penalties
            # applied to the pre-softmax scores, which effectively removes
            # masked positions.
            one = tf.constant(1.0)
            mask_value = tf.constant(-10000.0)
            attention_mask = tf.cast(attention_mask, dtype=one.dtype)
            attention_mask = tf.multiply(tf.subtract(one, attention_mask), mask_value)

        # Prepare head mask if needed.
        # 1.0 in head_mask indicates the head is kept; shape would be
        # n_layer x batch x n_heads x N x N — pruning is not implemented here.
        if head_mask is not None:
            raise NotImplementedError
        else:
            head_mask = [None] * self.num_layers

        if token_type_ids is not None:
            token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
            token_type_embeds = self.w(token_type_ids)
            # Token-type embeddings use the same sqrt(d_model) scaling as token embeddings.
            token_type_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, dtype=token_type_embeds.dtype))
        else:
            token_type_embeds = tf.constant(0.0)
        position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])

        if inputs_embeds is None:
            check_embeddings_within_bounds(input_ids, self.w.input_dim)
            inputs_embeds = self.w(input_ids)
        seq_len = input_shape[-1]
        # Causal mask: 1 above the diagonal (future positions), 0 elsewhere.
        mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)

        inputs_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, inputs_embeds.dtype))

        pos_embeds = tf.gather(self.pos_encoding, position_ids)
        pos_embeds = tf.cast(pos_embeds, dtype=token_type_embeds.dtype)
        hidden_states = inputs_embeds + pos_embeds + token_type_embeds

        hidden_states = self.dropout(hidden_states, training=training)

        output_shape = input_shape + [shape_list(hidden_states)[-1]]
        presents = () if use_cache else None
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
            block_outputs = block(
                hidden_states,
                mask,
                layer_past,
                attention_mask,
                head_mask[i],
                use_cache,
                output_attentions,
                training=training,
            )
            hidden_states, present = block_outputs[:2]

            if use_cache:
                presents = presents + (present,)

            if output_attentions:
                all_attentions = all_attentions + (block_outputs[2],)

        hidden_states = self.layernorm(hidden_states)
        hidden_states = tf.reshape(hidden_states, output_shape)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if output_attentions:
            # Leave the number of heads free (-1) so attention can still be
            # extracted after head pruning.
            attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
            all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)

        if not return_dict:
            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)

        return TFBaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_attentions,
        )

    def build(self, input_shape=None):
        """Build the embedding, the final LayerNorm and every encoder block (idempotent)."""
        if self.built:
            return
        self.built = True
        if getattr(self, "w", None) is not None:
            with tf.name_scope(self.w.name):
                self.w.build(None)
        if getattr(self, "layernorm", None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, None, self.config.n_embd])
        if getattr(self, "h", None) is not None:
            for layer in self.h:
                with tf.name_scope(layer.name):
                    layer.build(None)
|
| 457 |
+
|
| 458 |
+
|
| 459 |
+
class TFCTRLPreTrainedModel(TFPreTrainedModel):
    """
    Abstract base class for CTRL models: wires in the CTRL configuration and
    provides the shared weight-initialization / pretrained-loading interface.
    """

    config_class = CTRLConfig
    base_model_prefix = "transformer"
|
| 467 |
+
|
| 468 |
+
|
| 469 |
+
CTRL_START_DOCSTRING = r"""

    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
    behavior.

    <Tip>

    TensorFlow models and layers in `transformers` accept two formats as input:

    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.

    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
    positional argument:

    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
    `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated to the input names given in the docstring:
    `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`

    Note that when creating models and layers with
    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
    about any of this, as you can just pass inputs like you would to any other Python function!

    </Tip>

    Parameters:
        config ([`CTRLConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
|
| 510 |
+
|
| 511 |
+
CTRL_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`):
            `input_ids_length` = `sequence_length` if `past` is `None` else `past[0].shape[-2]` (`sequence_length` of
            input past key value states).

            Indices of input sequence tokens in the vocabulary.

            If `past` is used, only input IDs that do not have their past calculated should be passed as `input_ids`.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
            [`PreTrainedTokenizer.encode`] for details.

            [What are input IDs?](../glossary#input-ids)
        past (`List[tf.Tensor]` of length `config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
            `past` output below). Can be used to speed up sequential decoding. The token ids which have their past
            given to this model should not be passed as input ids as they have already been computed.
        attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past` key value states are returned and can be used to speed up decoding (see `past`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
            config will be used instead.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
            used instead.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
            eager mode, in graph mode the value will always be set to True.
        training (`bool`, *optional*, defaults to `False`):
            Whether or not to use the model in training mode (some modules like dropout modules have different
            behaviors between training and evaluation).
"""
|
| 576 |
+
|
| 577 |
+
|
| 578 |
+
@add_start_docstrings(
    "The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.",
    CTRL_START_DOCSTRING,
)
class TFCTRLModel(TFCTRLPreTrainedModel):
    """Headless CTRL model: a thin wrapper that delegates everything to `TFCTRLMainLayer`."""

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.transformer = TFCTRLMainLayer(config, name="transformer")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[Tuple, TFBaseModelOutputWithPast]:
        # Pure pass-through: the main layer implements the whole forward pass.
        return self.transformer(
            input_ids=input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

    def build(self, input_shape=None):
        """Build the wrapped transformer (idempotent)."""
        if self.built:
            return
        self.built = True
        if getattr(self, "transformer", None) is not None:
            with tf.name_scope(self.transformer.name):
                self.transformer.build(None)
|
| 632 |
+
|
| 633 |
+
|
| 634 |
+
class TFCTRLBiasLayer(keras.layers.Layer):
    """
    A standalone bias vector wrapped in a layer. `keras.Model.save_weights` serializes
    weights per layer, so the LM-head bias must live inside its own layer to be
    (de)serialized correctly.
    """

    def __init__(self, shape, initializer, trainable, name, **kwargs):
        super().__init__(name=name, **kwargs)
        # Stored so build() can create the variable lazily.
        self.shape = shape
        self.initializer = initializer
        self.trainable = trainable

    def build(self, input_shape):
        """Create the bias variable."""
        self.bias = self.add_weight(
            name="bias", shape=self.shape, initializer=self.initializer, trainable=self.trainable
        )
        super().build(input_shape)

    def call(self, x):
        # Broadcast-add the bias onto the incoming logits.
        return x + self.bias
|
| 654 |
+
|
| 655 |
+
|
| 656 |
+
@add_start_docstrings(
    """
    The CTRL Model transformer with a language modeling head on top (linear layer with weights tied to the input
    embeddings).
    """,
    CTRL_START_DOCSTRING,
)
class TFCTRLLMHeadModel(TFCTRLPreTrainedModel, TFCausalLanguageModelingLoss):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.transformer = TFCTRLMainLayer(config, name="transformer")
        # The LM head reuses the input embedding matrix; only the bias is an
        # independent variable, held in its own layer for serialization.
        self.bias_layer = TFCTRLBiasLayer(
            name="lm_head", shape=[1, config.vocab_size], initializer="zeros", trainable=True
        )

    def get_output_embeddings(self):
        # Output projection is tied to the input embeddings.
        return self.get_input_embeddings()

    def set_output_embeddings(self, value):
        self.set_input_embeddings(value)

    def get_bias(self):
        return {"lm_head.bias": self.bias_layer.bias}

    def set_bias(self, value):
        # Replaces the existing layers containing bias for correct (de)serialization.
        vocab_size = value["lm_head.bias"].shape[-1]
        self.bias_layer = TFCTRLBiasLayer(
            name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=True
        )
        self.bias_layer.build(None)
        self.bias_layer.bias.assign(value["lm_head.bias"])

    # Copied from transformers.models.gpt2.modeling_tf_gpt2.TFGPT2LMHeadModel.prepare_inputs_for_generation
    def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs):
        token_type_ids = kwargs.get("token_type_ids", None)
        # only last token for inputs_ids if past is defined in kwargs
        if past_key_values:
            inputs = tf.expand_dims(inputs[:, -1], -1)
            if token_type_ids is not None:
                token_type_ids = tf.expand_dims(token_type_ids[:, -1], -1)

        position_ids = kwargs.get("position_ids", None)
        attention_mask = kwargs.get("attention_mask", None)

        if attention_mask is not None and position_ids is None:
            position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True)
            if past_key_values:
                position_ids = tf.expand_dims(position_ids[:, -1], -1)

        return {
            "input_ids": inputs,
            "attention_mask": attention_mask,
            "position_ids": position_ids,
            "past_key_values": past_key_values,
            "use_cache": use_cache,
            "token_type_ids": token_type_ids,
        }

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFCausalLMOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[Tuple, TFCausalLMOutputWithPast]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
            config.vocab_size - 1]`.
        """
        transformer_outputs = self.transformer(
            input_ids=input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        hidden_states = transformer_outputs[0]
        # Project onto the vocabulary with the (tied) embedding matrix, then add the bias.
        logits = tf.matmul(hidden_states, self.transformer.w.weights, transpose_b=True)
        logits = self.bias_layer(logits)

        loss = None
        if labels is not None:
            # Standard causal-LM shift: predict token t+1 from position t, so
            # drop the last logit and the first label.
            shifted_logits = logits[:, :-1]
            labels = labels[:, 1:]
            loss = self.hf_compute_loss(labels, shifted_logits)

        if not return_dict:
            output = (logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    def build(self, input_shape=None):
        """Build the transformer and the bias layer (idempotent)."""
        if self.built:
            return
        self.built = True
        if getattr(self, "transformer", None) is not None:
            with tf.name_scope(self.transformer.name):
                self.transformer.build(None)
        if getattr(self, "bias_layer", None) is not None:
            with tf.name_scope(self.bias_layer.name):
                self.bias_layer.build(None)
|
| 790 |
+
|
| 791 |
+
|
| 792 |
+
@add_start_docstrings(
|
| 793 |
+
"""
|
| 794 |
+
The CTRL Model transformer with a sequence classification head on top (linear layer).
|
| 795 |
+
|
| 796 |
+
[`TFCTRLForSequenceClassification`] uses the last token in order to do the classification, as other causal models
|
| 797 |
+
(e.g. GPT-1, GPT-2) do.
|
| 798 |
+
|
| 799 |
+
Since it does classification on the last token, it requires to know the position of the last token. If a
|
| 800 |
+
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
|
| 801 |
+
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
|
| 802 |
+
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
|
| 803 |
+
each row of the batch).
|
| 804 |
+
""",
|
| 805 |
+
CTRL_START_DOCSTRING,
|
| 806 |
+
)
|
| 807 |
+
class TFCTRLForSequenceClassification(TFCTRLPreTrainedModel, TFSequenceClassificationLoss):
|
| 808 |
+
def __init__(self, config, *inputs, **kwargs):
|
| 809 |
+
super().__init__(config, *inputs, **kwargs)
|
| 810 |
+
self.num_labels = config.num_labels
|
| 811 |
+
self.classifier = keras.layers.Dense(
|
| 812 |
+
config.num_labels,
|
| 813 |
+
kernel_initializer=get_initializer(config.initializer_range),
|
| 814 |
+
name="classifier",
|
| 815 |
+
use_bias=False,
|
| 816 |
+
)
|
| 817 |
+
self.transformer = TFCTRLMainLayer(config, name="transformer")
|
| 818 |
+
self.config = config
|
| 819 |
+
|
| 820 |
+
def get_output_embeddings(self):
|
| 821 |
+
# Remove after transformers v4.32. Fix this model's `test_model_common_attributes` test too.
|
| 822 |
+
logger.warning(
|
| 823 |
+
"Sequence classification models do not have output embeddings. `.get_output_embeddings` will be removed "
|
| 824 |
+
"in transformers v4.32."
|
| 825 |
+
)
|
| 826 |
+
return self.transformer.w
|
| 827 |
+
|
| 828 |
+
@unpack_inputs
|
| 829 |
+
@add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
|
| 830 |
+
@add_code_sample_docstrings(
|
| 831 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 832 |
+
output_type=TFSequenceClassifierOutput,
|
| 833 |
+
config_class=_CONFIG_FOR_DOC,
|
| 834 |
+
)
|
| 835 |
+
def call(
|
| 836 |
+
self,
|
| 837 |
+
input_ids: TFModelInputType | None = None,
|
| 838 |
+
past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
|
| 839 |
+
attention_mask: np.ndarray | tf.Tensor | None = None,
|
| 840 |
+
token_type_ids: np.ndarray | tf.Tensor | None = None,
|
| 841 |
+
position_ids: np.ndarray | tf.Tensor | None = None,
|
| 842 |
+
head_mask: np.ndarray | tf.Tensor | None = None,
|
| 843 |
+
inputs_embeds: np.ndarray | tf.Tensor | None = None,
|
| 844 |
+
use_cache: Optional[bool] = None,
|
| 845 |
+
output_attentions: Optional[bool] = None,
|
| 846 |
+
output_hidden_states: Optional[bool] = None,
|
| 847 |
+
return_dict: Optional[bool] = None,
|
| 848 |
+
labels: np.ndarray | tf.Tensor | None = None,
|
| 849 |
+
training: Optional[bool] = False,
|
| 850 |
+
) -> Union[Tuple, TFSequenceClassifierOutput]:
|
| 851 |
+
r"""
|
| 852 |
+
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 853 |
+
Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
|
| 854 |
+
config.vocab_size - 1]`.
|
| 855 |
+
"""
|
| 856 |
+
|
| 857 |
+
transformer_outputs = self.transformer(
|
| 858 |
+
input_ids=input_ids,
|
| 859 |
+
past_key_values=past_key_values,
|
| 860 |
+
attention_mask=attention_mask,
|
| 861 |
+
token_type_ids=token_type_ids,
|
| 862 |
+
position_ids=position_ids,
|
| 863 |
+
head_mask=head_mask,
|
| 864 |
+
inputs_embeds=inputs_embeds,
|
| 865 |
+
use_cache=use_cache,
|
| 866 |
+
output_attentions=output_attentions,
|
| 867 |
+
output_hidden_states=output_hidden_states,
|
| 868 |
+
return_dict=return_dict,
|
| 869 |
+
training=training,
|
| 870 |
+
)
|
| 871 |
+
|
| 872 |
+
hidden_states = transformer_outputs[0]
|
| 873 |
+
logits = self.classifier(hidden_states)
|
| 874 |
+
in_logits = None
|
| 875 |
+
if self.config.pad_token_id is None:
|
| 876 |
+
sequence_lengths = -1
|
| 877 |
+
else:
|
| 878 |
+
if input_ids is not None:
|
| 879 |
+
sequence_lengths = (
|
| 880 |
+
tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1)
|
| 881 |
+
- 1
|
| 882 |
+
)
|
| 883 |
+
sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1)
|
| 884 |
+
in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)
|
| 885 |
+
else:
|
| 886 |
+
sequence_lengths = -1
|
| 887 |
+
logger.warning_once(
|
| 888 |
+
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
|
| 889 |
+
"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
|
| 890 |
+
)
|
| 891 |
+
loss = None
|
| 892 |
+
|
| 893 |
+
if labels is not None:
|
| 894 |
+
if input_ids is not None:
|
| 895 |
+
batch_size, sequence_length = shape_list(input_ids)[:2]
|
| 896 |
+
else:
|
| 897 |
+
batch_size, sequence_length = shape_list(inputs_embeds)[:2]
|
| 898 |
+
if self.config.pad_token_id is None and batch_size != 1:
|
| 899 |
+
raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
|
| 900 |
+
|
| 901 |
+
if not tf.is_tensor(sequence_lengths):
|
| 902 |
+
in_logits = logits[0:batch_size, sequence_lengths]
|
| 903 |
+
|
| 904 |
+
loss = self.hf_compute_loss(tf.reshape(labels, [-1, 1]), tf.reshape(in_logits, [-1, self.num_labels]))
|
| 905 |
+
|
| 906 |
+
pooled_logits = in_logits if in_logits is not None else logits
|
| 907 |
+
|
| 908 |
+
if not return_dict:
|
| 909 |
+
output = (pooled_logits,) + transformer_outputs[1:]
|
| 910 |
+
return ((loss,) + output) if loss is not None else output
|
| 911 |
+
|
| 912 |
+
return TFSequenceClassifierOutput(
|
| 913 |
+
loss=loss,
|
| 914 |
+
logits=pooled_logits,
|
| 915 |
+
hidden_states=transformer_outputs.hidden_states,
|
| 916 |
+
attentions=transformer_outputs.attentions,
|
| 917 |
+
)
|
| 918 |
+
|
| 919 |
+
def build(self, input_shape=None):
    """Create the layer's sublayer weights (standard Keras `build` contract).

    Idempotent: a no-op after the first call. Builds the classification
    head and the transformer body, each inside its own name scope.
    """
    if self.built:
        return
    self.built = True
    # Classification head operates on hidden states of width n_embd.
    classifier = getattr(self, "classifier", None)
    if classifier is not None:
        with tf.name_scope(classifier.name):
            classifier.build([None, None, self.config.n_embd])
    # The transformer body infers its own input shapes.
    transformer = getattr(self, "transformer", None)
    if transformer is not None:
        with tf.name_scope(transformer.name):
            transformer.build(None)
|
| 929 |
+
|
| 930 |
+
|
| 931 |
+
__all__ = ["TFCTRLForSequenceClassification", "TFCTRLLMHeadModel", "TFCTRLModel", "TFCTRLPreTrainedModel"]
|
phi4/lib/python3.10/site-packages/transformers/models/ctrl/tokenization_ctrl.py
ADDED
|
@@ -0,0 +1,251 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2018 Salesforce and The HuggingFace Inc. team.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""Tokenization classes for Salesforce CTRL."""
|
| 16 |
+
|
| 17 |
+
import json
|
| 18 |
+
import os
|
| 19 |
+
from typing import Optional, Tuple
|
| 20 |
+
|
| 21 |
+
import regex as re
|
| 22 |
+
|
| 23 |
+
from ...tokenization_utils import PreTrainedTokenizer
|
| 24 |
+
from ...utils import logging
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
logger = logging.get_logger(__name__)
|
| 28 |
+
|
| 29 |
+
VOCAB_FILES_NAMES = {
|
| 30 |
+
"vocab_file": "vocab.json",
|
| 31 |
+
"merges_file": "merges.txt",
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
CONTROL_CODES = {
|
| 36 |
+
"Pregnancy": 168629,
|
| 37 |
+
"Christianity": 7675,
|
| 38 |
+
"Explain": 106423,
|
| 39 |
+
"Fitness": 63440,
|
| 40 |
+
"Saving": 63163,
|
| 41 |
+
"Ask": 27171,
|
| 42 |
+
"Ass": 95985,
|
| 43 |
+
"Joke": 163509,
|
| 44 |
+
"Questions": 45622,
|
| 45 |
+
"Thoughts": 49605,
|
| 46 |
+
"Retail": 52342,
|
| 47 |
+
"Feminism": 164338,
|
| 48 |
+
"Writing": 11992,
|
| 49 |
+
"Atheism": 192263,
|
| 50 |
+
"Netflix": 48616,
|
| 51 |
+
"Computing": 39639,
|
| 52 |
+
"Opinion": 43213,
|
| 53 |
+
"Alone": 44967,
|
| 54 |
+
"Funny": 58917,
|
| 55 |
+
"Gaming": 40358,
|
| 56 |
+
"Human": 4088,
|
| 57 |
+
"India": 1331,
|
| 58 |
+
"Joker": 77138,
|
| 59 |
+
"Diet": 36206,
|
| 60 |
+
"Legal": 11859,
|
| 61 |
+
"Norman": 4939,
|
| 62 |
+
"Tip": 72689,
|
| 63 |
+
"Weight": 52343,
|
| 64 |
+
"Movies": 46273,
|
| 65 |
+
"Running": 23425,
|
| 66 |
+
"Science": 2090,
|
| 67 |
+
"Horror": 37793,
|
| 68 |
+
"Confession": 60572,
|
| 69 |
+
"Finance": 12250,
|
| 70 |
+
"Politics": 16360,
|
| 71 |
+
"Scary": 191985,
|
| 72 |
+
"Support": 12654,
|
| 73 |
+
"Technologies": 32516,
|
| 74 |
+
"Teenage": 66160,
|
| 75 |
+
"Event": 32769,
|
| 76 |
+
"Learned": 67460,
|
| 77 |
+
"Notion": 182770,
|
| 78 |
+
"Wikipedia": 37583,
|
| 79 |
+
"Books": 6665,
|
| 80 |
+
"Extract": 76050,
|
| 81 |
+
"Confessions": 102701,
|
| 82 |
+
"Conspiracy": 75932,
|
| 83 |
+
"Links": 63674,
|
| 84 |
+
"Narcissus": 150425,
|
| 85 |
+
"Relationship": 54766,
|
| 86 |
+
"Relationships": 134796,
|
| 87 |
+
"Reviews": 41671,
|
| 88 |
+
"News": 4256,
|
| 89 |
+
"Translation": 26820,
|
| 90 |
+
"multilingual": 128406,
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).

    Args:
        word: Tuple of symbol strings.

    Returns:
        `set` of `(symbol, symbol)` tuples, one per adjacent pair. An empty
        or single-symbol word yields an empty set (the original raised
        `IndexError` on an empty word).
    """
    # zip(word, word[1:]) walks adjacent pairs in one pass, replacing the
    # manual prev_char loop and its redundant `set(pairs)` re-copy.
    return set(zip(word, word[1:]))
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
class CTRLTokenizer(PreTrainedTokenizer):
    """
    Construct a CTRL tokenizer. Based on Byte-Pair-Encoding.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        # token -> id mapping loaded from the JSON vocab file
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        # id -> token reverse mapping
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # First line is a version header; the final split entry is the
            # empty string after the trailing newline — both are dropped.
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        # Merge priority: lower rank means the pair is merged earlier.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # Cache of token -> BPE string to avoid re-running the merge loop.
        self.cache = {}
        super().__init__(unk_token=unk_token, **kwargs)

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full vocabulary (base vocab plus added tokens) as a dict."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply BPE merges to one whitespace-split token; sub-tokens are joined by "@@ "."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        # Mark the final character as word-final so merges can distinguish
        # e.g. a word-ending "er</w>" from a word-internal "er".
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Always merge the highest-priority (lowest-rank) adjacent pair.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                # No remaining pair is a known merge — done.
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    # No further occurrence of `first`; keep the tail as-is.
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    # Merge the bigram into a single symbol.
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        # Strip the trailing "</w>" marker from the last sub-token.
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []

        # Split on whitespace, keeping a trailing newline attached to a word.
        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write `vocab.json` and `merges.txt` into `save_directory`; returns their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # Merges are written in rank order; ranks should be 0..N-1 with
            # no gaps, so a mismatch indicates a corrupted tokenizer.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
__all__ = ["CTRLTokenizer"]
|
phi4/lib/python3.10/site-packages/transformers/models/fsmt/__init__.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
from typing import TYPE_CHECKING
|
| 15 |
+
|
| 16 |
+
from ...utils import _LazyModule
|
| 17 |
+
from ...utils.import_utils import define_import_structure
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
if TYPE_CHECKING:
    # For static type checkers, expose the real symbols eagerly.
    from .configuration_fsmt import *
    from .modeling_fsmt import *
    from .tokenization_fsmt import *
else:
    import sys

    # At runtime, replace this module in sys.modules with a lazy proxy so
    # the heavy submodules are only imported on first attribute access.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
phi4/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (559 Bytes). View file
|
|
|
phi4/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/configuration_fsmt.cpython-310.pyc
ADDED
|
Binary file (8.47 kB). View file
|
|
|
phi4/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/modeling_fsmt.cpython-310.pyc
ADDED
|
Binary file (37.9 kB). View file
|
|
|
phi4/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/tokenization_fsmt.cpython-310.pyc
ADDED
|
Binary file (17 kB). View file
|
|
|
phi4/lib/python3.10/site-packages/transformers/models/fsmt/configuration_fsmt.py
ADDED
|
@@ -0,0 +1,218 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""FSMT configuration"""
|
| 16 |
+
|
| 17 |
+
from ...configuration_utils import PretrainedConfig
|
| 18 |
+
from ...utils import logging
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
logger = logging.get_logger(__name__)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class DecoderConfig(PretrainedConfig):
    r"""
    Configuration class for FSMT's decoder specific things. note: this is a private helper class
    """

    model_type = "fsmt_decoder"

    def __init__(self, vocab_size=0, bos_token_id=0):
        # Only two knobs are decoder-specific; everything else lives on
        # the parent FSMTConfig.
        super().__init__()
        self.bos_token_id = bos_token_id
        self.vocab_size = vocab_size
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class FSMTConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`FSMTModel`]. It is used to instantiate a FSMT
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the FSMT
    [facebook/wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        langs (`List[str]`, *optional*, defaults to `["en", "de"]`):
            A list with source language and target_language (e.g., ['en', 'ru']).
        src_vocab_size (`int`):
            Vocabulary size of the encoder. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed to the forward method in the encoder.
        tgt_vocab_size (`int`):
            Vocabulary size of the decoder. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed to the forward method in the decoder.
        d_model (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 12):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 12):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        activation_function (`str` or `Callable`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        max_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        scale_embedding (`bool`, *optional*, defaults to `True`):
            Scale embeddings by diving by sqrt(d_model).
        bos_token_id (`int`, *optional*, defaults to 0)
            Beginning of stream token id.
        pad_token_id (`int`, *optional*, defaults to 1)
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 2)
            End of stream token id.
        decoder_start_token_id (`int`, *optional*):
            This model starts decoding with `eos_token_id`
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            Google "layerdrop arxiv", as its not explainable in one line.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            Google "layerdrop arxiv", as its not explainable in one line.
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Whether this is an encoder/decoder model.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie input and output embeddings.
        num_beams (`int`, *optional*, defaults to 5)
            Number of beams for beam search that will be used by default in the `generate` method of the model. 1 means
            no beam search.
        length_penalty (`float`, *optional*, defaults to 1)
            Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
            the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
            likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
            `length_penalty` < 0.0 encourages shorter sequences.
        early_stopping (`bool`, *optional*, defaults to `False`)
            Flag that will be used by default in the `generate` method of the model. Whether to stop the beam search
            when at least `num_beams` sentences are finished per batch or not.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*, defaults to 2):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.

    Examples:

    ```python
    >>> from transformers import FSMTConfig, FSMTModel

    >>> # Initializing a FSMT facebook/wmt19-en-ru style configuration
    >>> config = FSMTConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = FSMTModel(config)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "fsmt"
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    # update the defaults from config file
    def __init__(
        self,
        langs=None,
        src_vocab_size=42024,
        tgt_vocab_size=42024,
        activation_function="relu",
        d_model=1024,
        max_length=200,
        max_position_embeddings=1024,
        encoder_ffn_dim=4096,
        encoder_layers=12,
        encoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_ffn_dim=4096,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_layerdrop=0.0,
        attention_dropout=0.0,
        dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        is_encoder_decoder=True,
        scale_embedding=True,
        tie_word_embeddings=False,
        num_beams=5,
        length_penalty=1.0,
        early_stopping=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        forced_eos_token_id=2,
        **common_kwargs,
    ):
        # Bug fix: the historical signature default `langs=["en", "de"]` was a
        # mutable default shared across every default-constructed config, so
        # mutating one instance's `langs` list polluted all later defaults.
        # The list is now materialized per instance; callers see the same
        # effective default.
        self.langs = ["en", "de"] if langs is None else langs
        self.src_vocab_size = src_vocab_size
        self.tgt_vocab_size = tgt_vocab_size
        self.d_model = d_model  # encoder_embed_dim and decoder_embed_dim

        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = self.num_hidden_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # The decoder sub-config is always rebuilt here; a serialized
        # "decoder" entry in kwargs would shadow it, so it is discarded.
        self.decoder = DecoderConfig(vocab_size=tgt_vocab_size, bos_token_id=eos_token_id)
        common_kwargs.pop("decoder", None)

        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            is_encoder_decoder=is_encoder_decoder,
            tie_word_embeddings=tie_word_embeddings,
            forced_eos_token_id=forced_eos_token_id,
            max_length=max_length,
            num_beams=num_beams,
            length_penalty=length_penalty,
            early_stopping=early_stopping,
            **common_kwargs,
        )
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
__all__ = ["FSMTConfig"]
|
phi4/lib/python3.10/site-packages/transformers/models/fsmt/modeling_fsmt.py
ADDED
|
@@ -0,0 +1,1369 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
#
|
| 16 |
+
# Original implementation: https://github.com/pytorch/fairseq/tree/master/examples/wmt19
|
| 17 |
+
# Authors:
|
| 18 |
+
# - @alexeib Alexei Baevski
|
| 19 |
+
# - @edunov Sergey Edunov
|
| 20 |
+
# - @michaelauli Michael Auli
|
| 21 |
+
# - @myleott Myle Ott
|
| 22 |
+
# - @nng555 Nathan Ng
|
| 23 |
+
# - David Grangier
|
| 24 |
+
# - Kyra Yee
|
| 25 |
+
#
|
| 26 |
+
# Paper: Facebook FAIR's WMT19 News Translation Task Submission https://arxiv.org/abs/1907.06616
|
| 27 |
+
#
|
| 28 |
+
"""PyTorch Fairseq model, ported from https://github.com/pytorch/fairseq/tree/master/examples/wmt19"""
|
| 29 |
+
|
| 30 |
+
import math
|
| 31 |
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
| 32 |
+
|
| 33 |
+
import torch
|
| 34 |
+
from torch import Tensor, nn
|
| 35 |
+
from torch.nn import CrossEntropyLoss, LayerNorm
|
| 36 |
+
|
| 37 |
+
from ...activations import ACT2FN
|
| 38 |
+
from ...generation import GenerationMixin
|
| 39 |
+
from ...integrations.deepspeed import is_deepspeed_zero3_enabled
|
| 40 |
+
from ...modeling_outputs import (
|
| 41 |
+
BaseModelOutput,
|
| 42 |
+
BaseModelOutputWithPastAndCrossAttentions,
|
| 43 |
+
Seq2SeqLMOutput,
|
| 44 |
+
Seq2SeqModelOutput,
|
| 45 |
+
)
|
| 46 |
+
from ...modeling_utils import PreTrainedModel
|
| 47 |
+
from ...utils import (
|
| 48 |
+
add_code_sample_docstrings,
|
| 49 |
+
add_end_docstrings,
|
| 50 |
+
add_start_docstrings,
|
| 51 |
+
add_start_docstrings_to_model_forward,
|
| 52 |
+
logging,
|
| 53 |
+
replace_return_docstrings,
|
| 54 |
+
)
|
| 55 |
+
from .configuration_fsmt import FSMTConfig
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
logger = logging.get_logger(__name__)
|
| 59 |
+
|
| 60 |
+
_CHECKPOINT_FOR_DOC = "facebook/wmt19-ru-en"
|
| 61 |
+
_CONFIG_FOR_DOC = "FSMTConfig"
|
| 62 |
+
|
| 63 |
+
# See all FSMT models at https://huggingface.co/models?filter=fsmt
|
| 64 |
+
|
| 65 |
+
# Porting notes:
|
| 66 |
+
# this one is modeled after BartModel*
|
| 67 |
+
#
|
| 68 |
+
# Currently only translation (fairseq also has weights for LM)
|
| 69 |
+
#
|
| 70 |
+
# fairseq provides weights for ru-en, en-ru and de-en, en-de pairs. All have been ported.
|
| 71 |
+
# - ru-en, en-ru use asymmetric vocab
|
| 72 |
+
# - de-en, en-de use a merged single vocab (but the code works as if they are separate)
|
| 73 |
+
#
|
| 74 |
+
# Differences with Bart:
|
| 75 |
+
# - not using bos token
|
| 76 |
+
# - 2 separate vocabs (src and target)
|
| 77 |
+
# - embed weights aren't tied
|
| 78 |
+
# - uses a model Ensemble (but that part isn't ported/implemented yet) - so we
|
| 79 |
+
# aren't getting as good of a BLEU score
|
| 80 |
+
# - uses a projection layer at the end of the decoder
|
| 81 |
+
# - doesn't use final_logits_bias
|
| 82 |
+
# - beam search: stops as soon as num_beams == len(hypos) (whereas transformers
|
| 83 |
+
# is not satisfied there and will continue searching until the next cycles
|
| 84 |
+
# aren't promising something better), comparing BLEU scores - the transformers
|
| 85 |
+
# algorithm is slightly superior, therefore using the latter. But if you want
|
| 86 |
+
# to match fairseq outputs, you need to pass ``early_stopping=True`` to ``generate()``.
|
| 87 |
+
#
|
| 88 |
+
# SinusoidalPositionalEmbedding is slightly different from Bart's - generates
|
| 89 |
+
# different embeddings. This implementation is copied verbatim from fairseq with
|
| 90 |
+
# some small changes to make it work here.
|
| 91 |
+
#
|
| 92 |
+
# Other changes:
|
| 93 |
+
# - doesn't support use_cache as Bart's version does
|
| 94 |
+
#
|
| 95 |
+
#
|
| 96 |
+
# FSMTConfig changes with BartConfig
|
| 97 |
+
#
|
| 98 |
+
# Differences with BART:
|
| 99 |
+
# - src/tgt vocabs aren't shared
|
| 100 |
+
# - token embeddings aren't shared
|
| 101 |
+
# - needs a language pair
|
| 102 |
+
# - scale_embedding are True
|
| 103 |
+
#
|
| 104 |
+
# some unused args were removed too
|
| 105 |
+
#
|
| 106 |
+
#
|
| 107 |
+
# TODO:
|
| 108 |
+
# - port model ensemble (fs uses 4 model checkpoints)
|
| 109 |
+
# - solve beam search discrepancies
|
| 110 |
+
# docstyle-ignore
|
| 111 |
+
|
| 112 |
+
"""
|
| 113 |
+
|
| 114 |
+
Here is how to compare BLEU scores against fairseq implementation:
|
| 115 |
+
|
| 116 |
+
# en-ru
|
| 117 |
+
|
| 118 |
+
export PAIR=en-ru
|
| 119 |
+
export DATA_DIR=data/$PAIR
|
| 120 |
+
export SAVE_DIR=data/$PAIR
|
| 121 |
+
export BS=8
|
| 122 |
+
export NUM_BEAMS=50
|
| 123 |
+
mkdir -p $DATA_DIR
|
| 124 |
+
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
|
| 125 |
+
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
|
| 126 |
+
echo $PAIR
|
| 127 |
+
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
|
| 128 |
+
|
| 129 |
+
# (fairseq BLEU: 36.4 http://matrix.statmt.org/matrix/output/1914?score_id=37605)
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
# ru-en
|
| 133 |
+
|
| 134 |
+
export PAIR=ru-en
|
| 135 |
+
export DATA_DIR=data/$PAIR
|
| 136 |
+
export SAVE_DIR=data/$PAIR
|
| 137 |
+
export BS=8
|
| 138 |
+
export NUM_BEAMS=50
|
| 139 |
+
mkdir -p $DATA_DIR
|
| 140 |
+
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
|
| 141 |
+
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
|
| 142 |
+
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
# (fairseq BLEU: 41.3 http://matrix.statmt.org/matrix/output/1907?run_id=6937)
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
# de-en
|
| 149 |
+
|
| 150 |
+
export PAIR=de-en
|
| 151 |
+
export DATA_DIR=data/$PAIR
|
| 152 |
+
export SAVE_DIR=data/$PAIR
|
| 153 |
+
export BS=8
|
| 154 |
+
export NUM_BEAMS=50
|
| 155 |
+
mkdir -p $DATA_DIR
|
| 156 |
+
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
|
| 157 |
+
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
|
| 158 |
+
echo $PAIR
|
| 159 |
+
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
|
| 160 |
+
|
| 161 |
+
# (fairseq BLEU: 42.3 http://matrix.statmt.org/matrix/output/1902?run_id=6750)
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
# en-de
|
| 166 |
+
|
| 167 |
+
export PAIR=en-de
|
| 168 |
+
export DATA_DIR=data/$PAIR
|
| 169 |
+
export SAVE_DIR=data/$PAIR
|
| 170 |
+
export BS=8
|
| 171 |
+
mkdir -p $DATA_DIR
|
| 172 |
+
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
|
| 173 |
+
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
|
| 174 |
+
echo $PAIR
|
| 175 |
+
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
|
| 176 |
+
|
| 177 |
+
# (fairseq BLEU: 43.1 http://matrix.statmt.org/matrix/output/1909?run_id=6862)
|
| 178 |
+
|
| 179 |
+
"""
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
FSMT_START_DOCSTRING = r"""
|
| 183 |
+
|
| 184 |
+
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
| 185 |
+
library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
|
| 186 |
+
etc.)
|
| 187 |
+
|
| 188 |
+
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
|
| 189 |
+
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
|
| 190 |
+
and behavior.
|
| 191 |
+
|
| 192 |
+
Parameters:
|
| 193 |
+
config ([`FSMTConfig`]): Model configuration class with all the parameters of the model.
|
| 194 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
| 195 |
+
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
| 196 |
+
|
| 197 |
+
"""
|
| 198 |
+
FSMT_GENERATION_EXAMPLE = r"""
|
| 199 |
+
Translation example::
|
| 200 |
+
|
| 201 |
+
```python
|
| 202 |
+
>>> from transformers import AutoTokenizer, FSMTForConditionalGeneration
|
| 203 |
+
|
| 204 |
+
>>> mname = "facebook/wmt19-ru-en"
|
| 205 |
+
>>> model = FSMTForConditionalGeneration.from_pretrained(mname)
|
| 206 |
+
>>> tokenizer = AutoTokenizer.from_pretrained(mname)
|
| 207 |
+
|
| 208 |
+
>>> src_text = "Машинное обучение - это здорово, не так ли?"
|
| 209 |
+
>>> input_ids = tokenizer(src_text, return_tensors="pt").input_ids
|
| 210 |
+
>>> outputs = model.generate(input_ids, num_beams=5, num_return_sequences=3)
|
| 211 |
+
>>> tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 212 |
+
"Machine learning is great, isn't it?"
|
| 213 |
+
```
|
| 214 |
+
|
| 215 |
+
"""
|
| 216 |
+
|
| 217 |
+
FSMT_INPUTS_DOCSTRING = r"""
|
| 218 |
+
Args:
|
| 219 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
| 220 |
+
Indices of input sequence tokens in the vocabulary.
|
| 221 |
+
|
| 222 |
+
        Indices can be obtained using [`FSMTTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| 223 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
| 224 |
+
|
| 225 |
+
[What are input IDs?](../glossary#input-ids)
|
| 226 |
+
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 227 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
| 228 |
+
|
| 229 |
+
- 1 for tokens that are **not masked**,
|
| 230 |
+
- 0 for tokens that are **masked**.
|
| 231 |
+
|
| 232 |
+
[What are attention masks?](../glossary#attention-mask)
|
| 233 |
+
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
|
| 234 |
+
Indices of decoder input sequence tokens in the vocabulary.
|
| 235 |
+
|
| 236 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| 237 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
| 238 |
+
|
| 239 |
+
[What are decoder input IDs?](../glossary#decoder-input-ids)
|
| 240 |
+
|
| 241 |
+
FSMT uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
|
| 242 |
+
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
|
| 243 |
+
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
|
| 244 |
+
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
|
| 245 |
+
be used by default.
|
| 246 |
+
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
|
| 247 |
+
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
|
| 248 |
+
|
| 249 |
+
- 1 indicates the head is **not masked**,
|
| 250 |
+
- 0 indicates the head is **masked**.
|
| 251 |
+
|
| 252 |
+
decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
|
| 253 |
+
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
|
| 254 |
+
|
| 255 |
+
- 1 indicates the head is **not masked**,
|
| 256 |
+
- 0 indicates the head is **masked**.
|
| 257 |
+
|
| 258 |
+
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
|
| 259 |
+
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
|
| 260 |
+
1]`:
|
| 261 |
+
|
| 262 |
+
- 1 indicates the head is **not masked**,
|
| 263 |
+
- 0 indicates the head is **masked**.
|
| 264 |
+
|
| 265 |
+
encoder_outputs (`Tuple(torch.FloatTensor)`, *optional*):
|
| 266 |
+
Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
|
| 267 |
+
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden-states at
|
| 268 |
+
the output of the last layer of the encoder. Used in the cross-attention of the decoder.
|
| 269 |
+
past_key_values (`Tuple(torch.FloatTensor)` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
|
| 270 |
+
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
|
| 271 |
+
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
|
| 272 |
+
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
|
| 273 |
+
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
|
| 274 |
+
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
| 275 |
+
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
|
| 276 |
+
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
|
| 277 |
+
model's internal embedding lookup matrix.
|
| 278 |
+
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
|
| 279 |
+
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
|
| 280 |
+
representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
|
| 281 |
+
input (see `past_key_values`). This is useful if you want more control over how to convert
|
| 282 |
+
`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
|
| 283 |
+
|
| 284 |
+
If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
|
| 285 |
+
of `inputs_embeds`.
|
| 286 |
+
use_cache (`bool`, *optional*, defaults to `True`):
|
| 287 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
|
| 288 |
+
`past_key_values`).
|
| 289 |
+
output_attentions (`bool`, *optional*):
|
| 290 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 291 |
+
tensors for more detail.
|
| 292 |
+
output_hidden_states (`bool`, *optional*):
|
| 293 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 294 |
+
more detail.
|
| 295 |
+
return_dict (`bool`, *optional*):
|
| 296 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 297 |
+
"""
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
def invert_mask(attention_mask):
    """Flip a 2D binary attention mask: 1 -> 0, 0 -> 1, True -> False, True -> False become swapped."""
    assert attention_mask.dim() == 2
    # Equality with zero yields a bool tensor that is True exactly where the
    # original mask was 0/False.
    return attention_mask == 0
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
def triu_onnx(x, diagonal=0):
    """ONNX-exportable equivalent of ``torch.triu`` for a square 2D tensor.

    Zeroes out every element strictly below the ``diagonal``-th diagonal of *x*
    using only ops the ONNX exporter supports.
    """
    size = x.shape[0]
    col_idx = torch.arange(size, device=x.device)
    # Broadcast column indices against (optionally shifted) row indices:
    # an element is kept when its column index >= row index + diagonal.
    row_idx = col_idx.unsqueeze(-1)
    if diagonal:
        row_idx = row_idx + diagonal
    keep = col_idx.expand(size, size) >= row_idx
    return x.masked_fill(keep == 0, 0)
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
def _prepare_fsmt_decoder_inputs(
    config,
    input_ids,
    decoder_input_ids=None,
    decoder_padding_mask=None,
    causal_mask_dtype=torch.float32,
):
    """
    Prepare masks that ignore padding tokens in the decoder and a causal mask for the decoder if none are provided.
    This mimics the default behavior in fairseq. To override it pass in masks. Note: this is not called during
    generation
    """
    pad = config.pad_token_id
    if decoder_input_ids is None:
        # Default decoder inputs: the target sequence shifted right by one token.
        decoder_input_ids = shift_tokens_right(input_ids, pad)
    tgt_len = decoder_input_ids.shape[1]
    if decoder_padding_mask is None:
        decoder_padding_mask = make_padding_mask(decoder_input_ids, pad)
    else:
        # A caller-supplied mask uses 1 = keep; internally 1 marks padding.
        decoder_padding_mask = invert_mask(decoder_padding_mask)
    # Upper-triangular -inf mask (above the main diagonal) blocks attention to
    # future positions; built on CPU then moved to the decoder inputs' device.
    neg_inf_square = fill_with_neg_inf(torch.zeros(tgt_len, tgt_len, dtype=causal_mask_dtype))
    causal_mask = triu_onnx(neg_inf_square, 1).to(device=decoder_input_ids.device)
    return decoder_input_ids, decoder_padding_mask, causal_mask
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
class PretrainedFSMTModel(PreTrainedModel):
    """Base class hooking FSMT modules into the `PreTrainedModel` loading/saving machinery."""

    config_class = FSMTConfig
    base_model_prefix = "model"

    def _init_weights(self, module):
        """Initialize weights from N(0, config.init_std); zero biases and pad embeddings."""
        std = self.config.init_std
        # NOTE: the SinusoidalPositionalEmbedding branch must stay before the
        # generic nn.Embedding branch — sinusoidal tables are deterministic and
        # must never be re-initialized.
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, SinusoidalPositionalEmbedding):
            pass
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

    @property
    def dummy_inputs(self):
        """A tiny fake batch (ids + attention mask) used e.g. for tracing."""
        pad_token = self.config.pad_token_id
        input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
        return {
            "attention_mask": input_ids.ne(pad_token),
            "input_ids": input_ids,
        }
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
def _make_linear_from_emb(emb):
|
| 372 |
+
vocab_size, emb_size = emb.weight.shape
|
| 373 |
+
lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
|
| 374 |
+
lin_layer.weight.data = emb.weight.data
|
| 375 |
+
return lin_layer
|
| 376 |
+
|
| 377 |
+
|
| 378 |
+
# Helper Functions, mostly for making masks
|
| 379 |
+
def _check_shapes(shape_1, shape2):
|
| 380 |
+
if shape_1 != shape2:
|
| 381 |
+
raise AssertionError(f"shape mismatch: {shape_1} != {shape2}")
|
| 382 |
+
|
| 383 |
+
|
| 384 |
+
def shift_tokens_right(input_ids, pad_token_id):
    """Shift input ids one token to the right, and wrap the last non pad token (usually <eos>).

    NOTE: mutates *input_ids* in place when it contains -100 label markers.
    """
    # Labels mark ignored positions with -100; fold those back to the pad id first.
    input_ids.masked_fill_(input_ids == -100, pad_token_id)

    shifted = input_ids.clone()
    # Per-row index of the last non-pad token (the <eos> position).
    eos_idx = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    # Decoder starts from <eos>, then the sequence shifted right by one.
    shifted[:, 0] = input_ids.gather(1, eos_idx).squeeze()
    shifted[:, 1:] = input_ids[:, :-1]
    return shifted
|
| 395 |
+
|
| 396 |
+
|
| 397 |
+
def make_padding_mask(input_ids, padding_idx=1):
    """Return a bool mask that is True at pad positions, or None when nothing is padded."""
    mask = input_ids == padding_idx
    # Callers treat None as "no padding mask needed" and skip masking entirely.
    return mask if mask.any() else None
|
| 403 |
+
|
| 404 |
+
|
| 405 |
+
# Helper Modules
|
| 406 |
+
|
| 407 |
+
|
| 408 |
+
class EncoderLayer(nn.Module):
    """One FSMT encoder block: self-attention then a position-wise feed-forward
    network, each followed by dropout, a residual connection and post-LayerNorm."""

    def __init__(self, config: FSMTConfig):
        super().__init__()
        # Submodules are created in the same order as upstream so parameter
        # initialization (which consumes the global RNG) is reproducible.
        self.embed_dim = config.d_model
        self.self_attn = Attention(self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout)
        self.self_attn_layer_norm = LayerNorm(self.embed_dim)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
        self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = LayerNorm(self.embed_dim)

    def forward(self, x, encoder_padding_mask, layer_head_mask, output_attentions=False):
        """
        Args:
            x (`torch.Tensor`): input to the layer of shape *(seq_len, batch, embed_dim)*
            encoder_padding_mask (`torch.ByteTensor`): binary ByteTensor of shape
                *(batch, src_len)* where padding elements are indicated by `1`.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                *(config.encoder_attention_heads,)*.

        Returns:
            encoded output of shape *(seq_len, batch, embed_dim)* and the self-attention weights
        """
        # --- self-attention sub-block ---
        shortcut = x
        x, attn_weights = self.self_attn(
            query=x,
            key=x,
            key_padding_mask=encoder_padding_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        x = nn.functional.dropout(x, p=self.dropout, training=self.training)
        x = self.self_attn_layer_norm(shortcut + x)

        # --- position-wise feed-forward sub-block ---
        shortcut = x
        x = nn.functional.dropout(self.activation_fn(self.fc1(x)), p=self.activation_dropout, training=self.training)
        x = nn.functional.dropout(self.fc2(x), p=self.dropout, training=self.training)
        x = self.final_layer_norm(shortcut + x)
        return x, attn_weights
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
class FSMTEncoder(nn.Module):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`EncoderLayer`].

    Args:
        config: FSMTConfig
    """

    def __init__(self, config: FSMTConfig, embed_tokens):
        super().__init__()
        self.dropout = config.dropout
        self.layerdrop = config.encoder_layerdrop
        self.padding_idx = embed_tokens.padding_idx
        self.embed_tokens = embed_tokens
        embed_dim = embed_tokens.embedding_dim
        # fairseq-style sqrt(d) embedding scaling, only when scale_embedding is set
        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
        # table is extended by padding_idx + 1 slots, matching the fairseq
        # convention of reserving the low positions for padding
        self.embed_positions = SinusoidalPositionalEmbedding(
            config.max_position_embeddings + self.padding_idx + 1, embed_dim, self.padding_idx
        )
        self.layers = nn.ModuleList([EncoderLayer(config) for _ in range(config.encoder_layers)])  # type: List[EncoderLayer]

    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: torch.Tensor = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        """
        Args:
            input_ids (`torch.LongTensor`): tokens in the source language of shape
                *(batch, src_len)*
            attention_mask (`torch.LongTensor`): indicating which indices are padding tokens
            inputs_embeds (`torch.FloatTensor`):
                embedding vectors of shape *(batch, src_len, embed_dim)*
            head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

        Returns:
            BaseModelOutput or Tuple comprised of:

            - **x** (`torch.Tensor`): the last encoder layer's output of shape *(src_len, batch, embed_dim)*
            - **encoder_states** (`Tuple(torch.FloatTensor)`): all intermediate hidden states of shape *(src_len,
              batch, embed_dim)*. Only populated if *output_hidden_states:* is True.
            - **all_attentions** (`Tuple(torch.FloatTensor)`): Attention weights for each layer.
              During training might not be of length n_layers because of layer dropout.
        """
        # check attention mask and invert
        # caller convention is 1 = keep; internally 1 marks padding
        if attention_mask is not None:
            attention_mask = invert_mask(attention_mask)

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
            embed_pos = self.embed_positions(input_ids)
        elif inputs_embeds is not None:
            inputs_embeds = inputs_embeds * self.embed_scale

            # We assume zeros hidden states correspond to padding tokens
            # and create `position_ids` where inputs_embeds[:, :, 0] == 0
            # NOTE(review): position_ids is a float tensor here; presumably
            # SinusoidalPositionalEmbedding only uses its pad/non-pad pattern —
            # confirm against that module's implementation.
            position_ids = inputs_embeds[:, :, 0].masked_fill(
                inputs_embeds[:, :, 0].eq(0), self.embed_positions.padding_idx
            )

            embed_pos = self.embed_positions(position_ids)
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        x = inputs_embeds + embed_pos
        x = nn.functional.dropout(x, p=self.dropout, training=self.training)

        # B x T x C -> T x B x C
        x = x.transpose(0, 1)

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            assert head_mask.size()[0] == (
                len(self.layers)
            ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                # hidden states are exposed to callers in batch-first layout
                x = x.transpose(0, 1)  # T x B x C -> B x T x C
                encoder_states += (x,)
                x = x.transpose(0, 1)  # B x T x C -> T x B x C
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = torch.rand([])
            if self.training and (dropout_probability < self.layerdrop):  # skip the layer
                attn = None
            else:
                x, attn = encoder_layer(
                    x,
                    attention_mask,
                    layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                    output_attentions=output_attentions,
                )

            if output_attentions:
                all_attentions = all_attentions + (attn,)

        # T x B x C -> B x T x C
        x = x.transpose(0, 1)

        if output_hidden_states:
            encoder_states += (x,)

        if not return_dict:
            # tuple form drops the None entries so positions vary with the flags
            return tuple(v for v in [x, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(last_hidden_state=x, hidden_states=encoder_states, attentions=all_attentions)
|
| 574 |
+
|
| 575 |
+
|
| 576 |
+
class DecoderLayer(nn.Module):
    """One FSMT decoder block: masked self-attention, cross-attention over the
    encoder output, then a position-wise feed-forward network.

    Each sub-block follows the post-norm pattern: dropout, residual add, LayerNorm.
    """

    def __init__(self, config: FSMTConfig):
        super().__init__()
        self.embed_dim = config.d_model

        self.self_attn = Attention(
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
        )
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout

        self.self_attn_layer_norm = LayerNorm(self.embed_dim)
        self.encoder_attn = Attention(
            self.embed_dim,
            config.decoder_attention_heads,
            dropout=config.attention_dropout,
            encoder_decoder_attention=True,
        )
        self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = LayerNorm(self.embed_dim)

    def forward(
        self,
        x,
        encoder_hidden_states,
        encoder_attn_mask=None,
        layer_state=None,
        causal_mask=None,
        layer_head_mask=None,
        cross_attn_layer_head_mask=None,
        decoder_padding_mask=None,
        output_attentions=False,
    ):
        """Run one decoder block.

        `x` and `encoder_hidden_states` are time-major (`seq_len, batch, dim`).
        `layer_state` is this layer's incremental-decoding cache; it is created
        here when absent and mutated by both attention calls.
        """
        residual = x

        if layer_state is None:
            layer_state = {}

        # Self Attention
        x, self_attn_weights = self.self_attn(
            query=x,
            key=x,
            layer_state=layer_state,  # adds keys to layer state
            key_padding_mask=decoder_padding_mask,
            attn_mask=causal_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        x = nn.functional.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.self_attn_layer_norm(x)

        # Cross attention
        residual = x
        # the two attentions must write to distinct cache slots
        assert self.encoder_attn.cache_key != self.self_attn.cache_key
        x, cross_attn_weights = self.encoder_attn(
            query=x,
            key=encoder_hidden_states,
            key_padding_mask=encoder_attn_mask,
            layer_state=layer_state,  # mutates layer state
            layer_head_mask=cross_attn_layer_head_mask,
            output_attentions=output_attentions,
        )
        x = nn.functional.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.encoder_attn_layer_norm(x)

        # Fully Connected
        residual = x
        x = self.activation_fn(self.fc1(x))
        x = nn.functional.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = nn.functional.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.final_layer_norm(x)
        return (
            x,
            self_attn_weights,
            layer_state,
            cross_attn_weights,
        )  # layer_state = cache for decoding
|
| 662 |
+
|
| 663 |
+
|
| 664 |
+
class FSMTDecoder(nn.Module):
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DecoderLayer`]

    Args:
        config: FSMTConfig
        embed_tokens (nn.Embedding): output embedding
    """

    def __init__(self, config: FSMTConfig, embed_tokens: nn.Embedding):
        super().__init__()
        self.dropout = config.dropout
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = embed_tokens.padding_idx
        self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
        self.embed_tokens = embed_tokens
        embed_dim = embed_tokens.embedding_dim
        self.embed_positions = SinusoidalPositionalEmbedding(
            config.max_position_embeddings + self.padding_idx + 1, embed_dim, self.padding_idx
        )
        self.layers = nn.ModuleList([DecoderLayer(config) for _ in range(config.decoder_layers)])  # type: List[DecoderLayer]

        # Under deepspeed ZeRO-3 the embedding weight is partitioned across ranks,
        # so it must be gathered before its shape can be read.
        if is_deepspeed_zero3_enabled():
            import deepspeed

            with deepspeed.zero.GatheredParameters(self.embed_tokens.weight, modifier_rank=None):
                embed_tokens_weight_shape = self.embed_tokens.weight.shape
        else:
            embed_tokens_weight_shape = self.embed_tokens.weight.shape
        # output projection shares its weight with the input embedding (weight tying)
        self.output_projection = nn.Linear(embed_tokens_weight_shape[1], embed_tokens_weight_shape[0], bias=False)
        self.output_projection.weight = self.embed_tokens.weight

    def _tie_weights(self):
        self.embed_tokens.weight = self.output_projection.weight

    def forward(
        self,
        input_ids: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        encoder_padding_mask: torch.Tensor,
        decoder_padding_mask: torch.Tensor,
        decoder_causal_mask: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        use_cache: bool = False,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        """
        Includes several features from "Jointly Learning to Align and Translate with Transformer Models" (Garg et al.,
        EMNLP 2019).

        Args:
            input_ids (`torch.LongTensor` of shape `(batch, tgt_len)`):
                previous decoder outputs for teacher forcing
            encoder_hidden_states: output from the encoder, used for
                encoder-side attention
            encoder_padding_mask: for ignoring pad tokens
            past_key_values (dict or None): dictionary used for storing state during generation
            head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            cross_attn_head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

        Returns:
            BaseModelOutputWithPast or tuple:

            - the decoder's features of shape *(batch, tgt_len, embed_dim)*
            - the cache
            - hidden states
            - attentions
        """
        # check attention mask and invert
        if encoder_padding_mask is not None:
            encoder_padding_mask = invert_mask(encoder_padding_mask)

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            # embed positions
            positions = self.embed_positions(input_ids)
            if use_cache:
                # incremental decoding: only the newest token is processed
                input_ids = input_ids[:, -1:]
                positions = positions[:, -1:]  # happens after we embed them
            x = self.embed_tokens(input_ids) * self.embed_scale
        elif inputs_embeds is not None:
            # We assume zeros hidden states correspond to padding tokens
            # and create `position_ids` where inputs_embeds[:, :, 0] == 0
            position_ids = inputs_embeds[:, :, 0].masked_fill(
                inputs_embeds[:, :, 0].eq(0), self.embed_positions.padding_idx
            )
            positions = self.embed_positions(position_ids)
            x = inputs_embeds * self.embed_scale
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        x += positions
        x = nn.functional.dropout(x, p=self.dropout, training=self.training)

        # Convert to FSMT output format: (BS, seq_len, model_dim) -> (seq_len, BS, model_dim)
        x = x.transpose(0, 1)
        encoder_hidden_states = encoder_hidden_states.transpose(0, 1)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attns = () if output_attentions else None
        next_decoder_cache = []

        # check if head_mask has a correct number of layers specified if desired
        for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
            if attn_mask is not None:
                assert attn_mask.size()[0] == (len(self.layers)), (
                    f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
                    f" {head_mask.size()[0]}."
                )
        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if output_hidden_states:
                # hidden states are collected batch-major, hence the transpose round-trip
                x = x.transpose(0, 1)
                all_hidden_states += (x,)
                x = x.transpose(0, 1)
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:
                    continue

            layer_state = past_key_values[idx] if past_key_values is not None else None

            x, layer_self_attn, layer_past, layer_cross_attn = decoder_layer(
                x,
                encoder_hidden_states,
                encoder_attn_mask=encoder_padding_mask,
                decoder_padding_mask=decoder_padding_mask,
                layer_state=layer_state,
                causal_mask=decoder_causal_mask,
                layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                cross_attn_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None),
                output_attentions=output_attentions,
            )

            if use_cache:
                next_decoder_cache.append(layer_past.copy())

            if output_attentions:
                all_self_attns += (layer_self_attn,)
                all_cross_attns += (layer_cross_attn,)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            x = x.transpose(0, 1)
            all_hidden_states += (x,)
            x = x.transpose(0, 1)

        # Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)
        x = x.transpose(0, 1)
        encoder_hidden_states = encoder_hidden_states.transpose(0, 1)

        x = self.output_projection(x)

        next_cache = next_decoder_cache if use_cache else None

        if not return_dict:
            return tuple(
                v for v in [x, next_cache, all_hidden_states, all_self_attns, all_cross_attns] if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=x,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attns,
        )
|
| 847 |
+
|
| 848 |
+
|
| 849 |
+
def _reorder_buffer(attn_cache, new_order):
|
| 850 |
+
for k, input_buffer_k in attn_cache.items():
|
| 851 |
+
if input_buffer_k is not None:
|
| 852 |
+
attn_cache[k] = input_buffer_k.index_select(0, new_order)
|
| 853 |
+
return attn_cache
|
| 854 |
+
|
| 855 |
+
|
| 856 |
+
class Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim,
        num_heads,
        dropout=0.0,
        bias=True,
        encoder_decoder_attention=False,  # otherwise self_attention
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        self.scaling = self.head_dim**-0.5

        self.encoder_decoder_attention = encoder_decoder_attention
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        # distinguishes this module's slot in the shared per-layer cache dict
        self.cache_key = "encoder_decoder" if self.encoder_decoder_attention else "self"

    def _shape(self, tensor, seq_len, bsz):
        # (seq_len, bsz, embed_dim) -> (bsz * num_heads, seq_len, head_dim)
        return tensor.contiguous().view(seq_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)

    def forward(
        self,
        query,
        key: Optional[Tensor],
        key_padding_mask: Optional[Tensor] = None,
        layer_state: Optional[Dict[str, Optional[Tensor]]] = None,
        attn_mask: Optional[Tensor] = None,
        layer_head_mask: Optional[Tensor] = None,
        output_attentions=False,
    ) -> Tuple[Tensor, Optional[Tensor]]:
        """Input shape: Time(SeqLen) x Batch x Channel"""
        # cross-attention keys/values never change during decoding
        static_kv: bool = self.encoder_decoder_attention
        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]
        # get here for encoder decoder cause of static_kv
        if layer_state is not None:  # reuse k,v and encoder_padding_mask
            saved_state = layer_state.get(self.cache_key, {})
            if "prev_key" in saved_state and static_kv:
                # previous time steps are cached - no need to recompute key and value if they are static
                key = None
        else:
            saved_state = None
            layer_state = {}

        q = self.q_proj(query) * self.scaling
        if static_kv:
            if key is None:
                k = v = None
            else:
                k = self.k_proj(key)
                v = self.v_proj(key)
        else:
            k = self.k_proj(query)
            v = self.v_proj(query)

        q = self._shape(q, tgt_len, bsz)
        if k is not None:
            k = self._shape(k, -1, bsz)
        if v is not None:
            v = self._shape(v, -1, bsz)

        if saved_state is not None:
            k, v, key_padding_mask = self._use_saved_state(k, v, saved_state, key_padding_mask, static_kv, bsz)

        # Update cache
        layer_state[self.cache_key] = {
            "prev_key": k.view(bsz, self.num_heads, -1, self.head_dim),
            "prev_value": v.view(bsz, self.num_heads, -1, self.head_dim),
            "prev_key_padding_mask": key_padding_mask if not static_kv else None,
        }

        assert k is not None
        src_len = k.size(1)
        attn_weights = torch.bmm(q, k.transpose(1, 2))
        assert attn_weights.size() == (bsz * self.num_heads, tgt_len, src_len)

        if attn_mask is not None:
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        # This is part of a workaround to get around fork/join parallelism not supporting Optional types.
        if key_padding_mask is not None and key_padding_mask.dim() == 0:
            key_padding_mask = None
        assert key_padding_mask is None or key_padding_mask.size()[:2] == (
            bsz,
            src_len,
        )

        if key_padding_mask is not None:  # don't attend to padding symbols
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            reshaped = key_padding_mask.unsqueeze(1).unsqueeze(2)
            attn_weights = attn_weights.masked_fill(reshaped, torch.finfo(attn_weights.dtype).min)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if layer_head_mask is not None:
            assert layer_head_mask.size() == (
                self.num_heads,
            ), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if output_attentions:
            # make sure that attn_weights are included in graph
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(
            attn_weights,
            p=self.dropout,
            training=self.training,
        )

        assert v is not None
        attn_output = torch.bmm(attn_probs, v)
        assert attn_output.size() == (bsz * self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped

    def _use_saved_state(self, k, v, saved_state, key_padding_mask, static_kv, bsz):
        # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
        if "prev_key" in saved_state:
            _prev_key = saved_state["prev_key"]
            assert _prev_key is not None
            prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
            if static_kv:
                k = prev_key
            else:
                assert k is not None
                k = torch.cat([prev_key, k], dim=1)
        if "prev_value" in saved_state:
            _prev_value = saved_state["prev_value"]
            assert _prev_value is not None
            prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
            if static_kv:
                v = prev_value
            else:
                assert v is not None
                v = torch.cat([prev_value, v], dim=1)
        assert k is not None and v is not None
        prev_key_padding_mask: Optional[Tensor] = saved_state.get("prev_key_padding_mask", None)
        if prev_key_padding_mask is not None:
            if static_kv:
                new_key_padding_mask = prev_key_padding_mask
            else:
                new_key_padding_mask = torch.cat([prev_key_padding_mask, key_padding_mask], dim=1)
        else:
            new_key_padding_mask = key_padding_mask
        return k, v, new_key_padding_mask
|
| 1020 |
+
|
| 1021 |
+
|
| 1022 |
+
def fill_with_neg_inf(t):
    """FP16-compatible function that fills a input_ids with -inf."""
    # use the dtype's own lowest representable value so fp16 doesn't overflow to NaN
    lowest = torch.finfo(t.dtype).min
    return t.float().fill_(lowest).type_as(t)
|
| 1025 |
+
|
| 1026 |
+
|
| 1027 |
+
# Public API
|
| 1028 |
+
def _get_shape(t):
|
| 1029 |
+
return getattr(t, "shape", None)
|
| 1030 |
+
|
| 1031 |
+
|
| 1032 |
+
@add_start_docstrings(
    "The bare FSMT Model outputting raw hidden-states without any specific head on top.",
    FSMT_START_DOCSTRING,
)
class FSMTModel(PretrainedFSMTModel):
    """Bare FSMT encoder-decoder without a language-modeling head."""

    _tied_weights_keys = ["decoder.embed_tokens.weight", "decoder.output_projection.weight"]

    def __init__(self, config: FSMTConfig):
        super().__init__(config)

        # FSMT keeps separate source/target vocabularies, hence two embedding tables
        padding_idx = config.pad_token_id
        encoder_embed_tokens = nn.Embedding(config.src_vocab_size, config.d_model, padding_idx)
        decoder_embed_tokens = nn.Embedding(config.tgt_vocab_size, config.d_model, padding_idx)

        self.encoder = FSMTEncoder(config, encoder_embed_tokens)
        self.decoder = FSMTDecoder(config, decoder_embed_tokens)

        # Initialize weights and apply final processing
        self.post_init()

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder

    def _tie_weights(self):
        if self.config.tie_word_embeddings:
            self._tie_or_clone_weights(self.decoder.embed_tokens, self.get_input_embeddings())
            self._tie_or_clone_weights(self.decoder.output_projection, self.get_input_embeddings())

    @add_start_docstrings_to_model_forward(FSMT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=Seq2SeqModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: torch.LongTensor,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
        past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], Seq2SeqModelOutput]:
        # without decoder inputs there is nothing to cache
        if decoder_input_ids is None:
            use_cache = False

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # make masks if user doesn't supply
        if not use_cache and input_ids is not None:
            decoder_input_ids, decoder_padding_mask, causal_mask = _prepare_fsmt_decoder_inputs(
                self.config,
                input_ids,
                decoder_input_ids=decoder_input_ids,
                decoder_padding_mask=decoder_attention_mask,
                causal_mask_dtype=self.decoder.embed_tokens.weight.dtype,
            )
        else:
            decoder_padding_mask, causal_mask = None, None

        if decoder_input_ids is None and decoder_inputs_embeds is None:
            raise ValueError("Make sure that `decoder_input_ids` or `decoder_inputs_embeds` are passed.")

        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                inputs_embeds=inputs_embeds,
                head_mask=head_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=False
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            decoder_input_ids,
            encoder_outputs[0],
            attention_mask,
            decoder_padding_mask,
            decoder_causal_mask=causal_mask,
            inputs_embeds=decoder_inputs_embeds,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if not return_dict:
            return decoder_outputs + encoder_outputs

        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )

    def get_input_embeddings(self):
        return self.encoder.embed_tokens

    def set_input_embeddings(self, value):
        self.encoder.embed_tokens = value

    def get_output_embeddings(self):
        return self.decoder.embed_tokens

    def set_output_embeddings(self, value):
        self.decoder.embed_tokens = value
|
| 1172 |
+
|
| 1173 |
+
|
| 1174 |
+
@add_start_docstrings(
    "The FSMT Model with a language modeling head. Can be used for summarization.", FSMT_START_DOCSTRING
)
class FSMTForConditionalGeneration(PretrainedFSMTModel, GenerationMixin):
    """FSMT seq2seq model with an LM head, usable with `.generate()`."""

    base_model_prefix = "model"
    _tied_weights_keys = ["decoder.embed_tokens.weight", "decoder.output_projection.weight"]

    def __init__(self, config: FSMTConfig):
        super().__init__(config)
        base_model = FSMTModel(config)
        self.model = base_model

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(FSMT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    @add_end_docstrings(FSMT_GENERATION_EXAMPLE)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
        past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        decoder_inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], Seq2SeqLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # caching makes no sense during teacher-forced training
        if labels is not None:
            use_cache = False

        outputs = self.model(
            input_ids,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            decoder_inputs_embeds=decoder_inputs_embeds,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        lm_logits = outputs[0]

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # TODO(SS): do we need to ignore pad tokens in labels?
            masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.tgt_vocab_size), labels.view(-1))

        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return Seq2SeqLMOutput(
            loss=masked_lm_loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )

    def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
        return shift_tokens_right(labels, self.config.pad_token_id)

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = []
        for layer_past in past_key_values:
            # get the correct batch idx from decoder layer's batch dim for cross and self-attn
            layer_past_new = {
                attn_key: _reorder_buffer(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items()
            }
            reordered_past.append(layer_past_new)
        return reordered_past

    def get_encoder(self):
        return self.model.encoder

    def get_decoder(self):
        return self.model.decoder

    def get_output_embeddings(self):
        return self.model.decoder.embed_tokens

    def set_output_embeddings(self, value):
        self.model.decoder.embed_tokens = value
|
| 1291 |
+
|
| 1292 |
+
|
| 1293 |
+
class SinusoidalPositionalEmbedding(nn.Embedding):
    """
    This module produces sinusoidal positional embeddings of any length.

    We don't want to save the weight of this embedding since it's not trained (deterministic) and it can be huge.

    Padding symbols are ignored.

    These embeddings get automatically extended in forward if more positions is needed.
    """

    def __init__(self, num_positions, embedding_dim, padding_idx):
        # NOTE: deliberately does not call super().__init__() directly; make_weight()
        # performs the nn.Embedding initialization on first call (detected via the
        # missing `weight` attribute) so the same method can also rebuild the table later.
        self.make_weight(num_positions, embedding_dim, padding_idx)

    def make_weight(self, num_positions, embedding_dim, padding_idx):
        # (Re)build the deterministic sinusoidal table for `num_positions` positions.
        weight = self.get_embedding(num_positions, embedding_dim, padding_idx)
        if not hasattr(self, "weight"):
            # in __init__: let nn.Embedding set everything up with the precomputed table
            super().__init__(num_positions, embedding_dim, padding_idx, _weight=weight)
        else:
            # in forward put the weights on the correct dtype and device of the param
            weight = weight.to(dtype=self.weight.dtype, device=self.weight.device)
            self.weight = nn.Parameter(weight)
        # the table is deterministic, so it must never receive gradients
        self.weight.detach_()
        self.weight.requires_grad = False

    @staticmethod
    def get_embedding(num_embeddings, embedding_dim, padding_idx):
        """
        Build sinusoidal embeddings.

        This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
        "Attention Is All You Need".
        """
        half_dim = embedding_dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
        emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            # zero pad the last column when the dimension is odd
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            # the padding position gets an all-zero embedding
            emb[padding_idx, :] = 0
        return emb

    @staticmethod
    def make_positions(tensor, padding_idx: int):
        """
        Replace non-padding symbols with their position numbers.

        Position numbers begin at padding_idx+1. Padding symbols are ignored.
        """
        # The series of casts and type-conversions here are carefully
        # balanced to both work with ONNX export and XLA. In particular XLA
        # prefers ints, cumsum defaults to output longs, and ONNX doesn't know
        # how to handle the dtype kwarg in cumsum.
        mask = tensor.ne(padding_idx).int()
        return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx

    def forward(
        self,
        input,
        incremental_state: Optional[Any] = None,
        timestep: Optional[Tensor] = None,
    ):
        """Input is expected to be of size [bsz x seqlen]."""
        bsz, seq_len = input.shape[:2]
        max_pos = self.padding_idx + 1 + seq_len
        if max_pos > self.weight.size(0):
            # expand embeddings if needed (the table grows, it is never shrunk)
            self.make_weight(max_pos, self.embedding_dim, self.padding_idx)
        positions = self.make_positions(input, self.padding_idx)
        return super().forward(positions)
|
| 1367 |
+
|
| 1368 |
+
|
| 1369 |
+
__all__ = ["FSMTForConditionalGeneration", "FSMTModel", "PretrainedFSMTModel"]
|
phi4/lib/python3.10/site-packages/transformers/models/fsmt/tokenization_fsmt.py
ADDED
|
@@ -0,0 +1,521 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2019 The Open AI Team Authors and The HuggingFace Inc. team.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""Tokenization classes for FSMT."""
|
| 16 |
+
|
| 17 |
+
import json
|
| 18 |
+
import os
|
| 19 |
+
import re
|
| 20 |
+
import unicodedata
|
| 21 |
+
from typing import Dict, List, Optional, Tuple
|
| 22 |
+
|
| 23 |
+
from ...tokenization_utils import PreTrainedTokenizer
|
| 24 |
+
from ...utils import logging
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
logger = logging.get_logger(__name__)
|
| 28 |
+
|
| 29 |
+
# Canonical on-disk filenames for the three vocabulary artifacts FSMT needs:
# separate source/target vocabularies plus a shared BPE merges file.
VOCAB_FILES_NAMES = {
    "src_vocab_file": "vocab-src.json",
    "tgt_vocab_file": "vocab-tgt.json",
    "merges_file": "merges.txt",
}
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def get_pairs(word):
    """
    Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length
    strings)
    """
    # zip the word with itself shifted by one to enumerate adjacent symbol pairs
    return set(zip(word, word[1:]))
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def replace_unicode_punct(text):
    """
    Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl

    Maps full-width / CJK punctuation and full-width digits to their ASCII equivalents,
    leaving all other characters untouched.
    """
    text = text.replace("，", ",")
    text = re.sub(r"。\s*", ". ", text)
    text = text.replace("、", ",")
    text = text.replace("”", '"')
    text = text.replace("“", '"')
    text = text.replace("∶", ":")
    text = text.replace("：", ":")
    text = text.replace("？", "?")
    text = text.replace("《", '"')
    text = text.replace("》", '"')
    text = text.replace("）", ")")
    text = text.replace("！", "!")
    text = text.replace("（", "(")
    text = text.replace("；", ";")
    text = text.replace("１", "1")
    text = text.replace("」", '"')
    text = text.replace("「", '"')
    text = text.replace("０", "0")
    text = text.replace("３", "3")
    text = text.replace("２", "2")
    text = text.replace("５", "5")
    text = text.replace("６", "6")
    text = text.replace("９", "9")
    text = text.replace("７", "7")
    text = text.replace("８", "8")
    text = text.replace("４", "4")
    # Fix: this pattern must match U+2024 (ONE DOT LEADER), not a bare ".".
    # An unescaped ASCII dot is a regex wildcard and would rewrite every
    # character of the input into ". ".
    text = re.sub(r"․\s*", ". ", text)
    text = text.replace("～", "~")
    text = text.replace("’", "'")
    text = text.replace("…", "...")
    text = text.replace("━", "-")
    text = text.replace("〈", "<")
    text = text.replace("〉", ">")
    text = text.replace("【", "[")
    text = text.replace("】", "]")
    text = text.replace("％", "%")
    return text
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def remove_non_printing_char(text):
    """
    Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl
    """
    # Unicode categories starting with "C" (Cc, Cf, Cs, Co, Cn) are control /
    # format / unassigned characters — drop them, keep everything else.
    return "".join(ch for ch in text if not unicodedata.category(ch).startswith("C"))
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
# Porting notes:
|
| 106 |
+
# this one is modeled after XLMTokenizer
|
| 107 |
+
#
|
| 108 |
+
# added:
|
| 109 |
+
# - src_vocab_file,
|
| 110 |
+
# - tgt_vocab_file,
|
| 111 |
+
# - langs,
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
class FSMTTokenizer(PreTrainedTokenizer):
    """
    Construct an FAIRSEQ Transformer tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following:

    - Moses preprocessing and tokenization.
    - Normalizing all inputs text.
    - The arguments `special_tokens` and the function `set_special_tokens`, can be used to add additional symbols (like
      "__classify__") to a vocabulary.
    - The argument `langs` defines a pair of languages.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        langs (`List[str]`, *optional*):
            A list of two languages to translate from and to, for instance `["en", "ru"]`.
        src_vocab_file (`str`, *optional*):
            File containing the vocabulary for the source language.
        tgt_vocab_file (`str`, *optional*):
            File containing the vocabulary for the target language.
        merges_file (`str`, *optional*):
            File containing the merges.
        do_lower_case (`bool`, *optional*, defaults to `False`):
            Whether or not to lowercase the input when tokenizing.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.

    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        langs=None,
        src_vocab_file=None,
        tgt_vocab_file=None,
        merges_file=None,
        do_lower_case=False,
        unk_token="<unk>",
        bos_token="<s>",
        sep_token="</s>",
        pad_token="<pad>",
        **kwargs,
    ):
        # sacremoses is a soft dependency — fail loudly and early if it is missing
        try:
            import sacremoses
        except ImportError:
            raise ImportError(
                "You need to install sacremoses to use XLMTokenizer. "
                "See https://pypi.org/project/sacremoses/ for installation."
            )

        self.sm = sacremoses

        self.src_vocab_file = src_vocab_file
        self.tgt_vocab_file = tgt_vocab_file
        self.merges_file = merges_file
        self.do_lower_case = do_lower_case

        # cache of sm.MosesPunctNormalizer instance
        self.cache_moses_punct_normalizer = {}
        # cache of sm.MosesTokenizer instance
        self.cache_moses_tokenizer = {}
        # cache of sm.MosesDetokenizer instance, keyed by language code
        self.cache_moses_detokenizer = {}

        if langs and len(langs) == 2:
            self.src_lang, self.tgt_lang = langs
        else:
            raise ValueError(
                f"arg `langs` needs to be a list of 2 langs, e.g. ['en', 'ru'], but got {langs}. "
                "Usually that means that tokenizer can't find a mapping for the given model path "
                "in and other maps of this tokenizer."
            )

        # encoder: source-language token -> id; decoder: id -> target-language token
        with open(src_vocab_file, encoding="utf-8") as src_vocab_handle:
            self.encoder = json.load(src_vocab_handle)
        with open(tgt_vocab_file, encoding="utf-8") as tgt_vocab_handle:
            tgt_vocab = json.load(tgt_vocab_handle)
            self.decoder = {v: k for k, v in tgt_vocab.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:2]) for merge in merges]
        # earlier merges have lower rank, i.e. higher priority in bpe()
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
        # NOTE: vocab must already be loaded before calling the parent __init__,
        # which may query vocab_size / convert tokens during special-token setup
        super().__init__(
            langs=langs,
            src_vocab_file=src_vocab_file,
            tgt_vocab_file=tgt_vocab_file,
            merges_file=merges_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            bos_token=bos_token,
            sep_token=sep_token,
            pad_token=pad_token,
            **kwargs,
        )

    # hack override
    def get_vocab(self) -> Dict[str, int]:
        # the base-class contract exposes a single vocab; report the source-side one
        return self.get_src_vocab()

    # hack override
    @property
    def vocab_size(self) -> int:
        # the base-class contract exposes a single vocab size; report the source-side one
        return self.src_vocab_size

    def moses_punct_norm(self, text, lang):
        # lazily build and cache one MosesPunctNormalizer per language
        if lang not in self.cache_moses_punct_normalizer:
            punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang)
            self.cache_moses_punct_normalizer[lang] = punct_normalizer
        return self.cache_moses_punct_normalizer[lang].normalize(text)

    def moses_tokenize(self, text, lang):
        # lazily build and cache one MosesTokenizer per language
        if lang not in self.cache_moses_tokenizer:
            moses_tokenizer = self.sm.MosesTokenizer(lang=lang)
            self.cache_moses_tokenizer[lang] = moses_tokenizer
        return self.cache_moses_tokenizer[lang].tokenize(
            text, aggressive_dash_splits=True, return_str=False, escape=True
        )

    def moses_detokenize(self, tokens, lang):
        # lazily build and cache one MosesDetokenizer per language
        if lang not in self.cache_moses_detokenizer:
            moses_detokenizer = self.sm.MosesDetokenizer(lang=lang)
            self.cache_moses_detokenizer[lang] = moses_detokenizer
        return self.cache_moses_detokenizer[lang].detokenize(tokens)

    def moses_pipeline(self, text, lang):
        # full Moses preprocessing: punctuation mapping, normalization, control-char removal
        text = replace_unicode_punct(text)
        text = self.moses_punct_norm(text, lang)
        text = remove_non_printing_char(text)
        return text

    @property
    def src_vocab_size(self):
        # number of source-language tokens
        return len(self.encoder)

    @property
    def tgt_vocab_size(self):
        # number of target-language tokens
        return len(self.decoder)

    def get_src_vocab(self):
        # source vocab merged with any user-added tokens
        return dict(self.encoder, **self.added_tokens_encoder)

    def get_tgt_vocab(self):
        # target vocab merged with any user-added tokens
        return dict(self.decoder, **self.added_tokens_decoder)

    def bpe(self, token):
        """Apply byte-pair encoding to a single token, returning space-joined BPE symbols."""
        # the word ends with an explicit end-of-word marker on its last symbol
        word = tuple(token[:-1]) + (token[-1] + "</w>",)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token + "</w>"

        while True:
            # greedily merge the highest-priority (lowest-rank) adjacent pair
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n </w>":
            # special-case a lone newline so it round-trips as one symbol
            word = "\n</w>"
        self.cache[token] = word
        return word

    def _tokenize(self, text, lang="en", bypass_tokenizer=False):
        """
        Tokenize a string given language code using Moses.

        Details of tokenization:

            - [sacremoses](https://github.com/alvations/sacremoses): port of Moses
            - Install with `pip install sacremoses`

        Args:
            - lang: ISO language code (default = 'en') (string). Languages should belong of the model supported
              languages. However, we don't enforce it.
            - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
              (bool). If True, we only apply BPE.

        Returns:
            List of tokens.
        """
        # ignore `lang` which is currently isn't explicitly passed in tokenization_utils.py and always results in lang=en
        # if lang != self.src_lang:
        #     raise ValueError(f"Expected lang={self.src_lang}, but got {lang}")
        lang = self.src_lang

        if self.do_lower_case:
            text = text.lower()

        if bypass_tokenizer:
            text = text.split()
        else:
            text = self.moses_pipeline(text, lang=lang)
            text = self.moses_tokenize(text, lang=lang)

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""

        # remove BPE
        tokens = [t.replace(" ", "").replace("</w>", " ") for t in tokens]
        tokens = "".join(tokens).split()
        # detokenize
        text = self.moses_detokenize(tokens, self.tgt_lang)
        return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A FAIRSEQ Transformer sequence has the following format:

        - single sequence: `<s> X </s>`
        - pair of sequences: `<s> A </s> B </s>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        sep = [self.sep_token_id]

        # no bos used in fairseq
        if token_ids_1 is None:
            return token_ids_0 + sep
        return token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        # no bos used in fairseq
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A FAIRSEQ
        Transformer sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).

        Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An
        FAIRSEQ_TRANSFORMER sequence pair mask has the following format:
        """
        sep = [self.sep_token_id]

        # no bos used in fairseq
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        # Write the source vocab, target vocab and BPE merges into `save_directory`,
        # returning the three file paths (or None on an invalid directory).
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        src_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["src_vocab_file"]
        )
        tgt_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["tgt_vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(src_vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        with open(tgt_vocab_file, "w", encoding="utf-8") as f:
            # self.decoder maps id -> token; invert it back to token -> id for saving
            tgt_vocab = {v: k for k, v in self.decoder.items()}
            f.write(json.dumps(tgt_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merges_file, "w", encoding="utf-8") as writer:
            # merges must be written in rank order so they reload with the same priorities
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return src_vocab_file, tgt_vocab_file, merges_file

    def __getstate__(self):
        # the sacremoses module object is not picklable — drop it and re-import on load
        state = self.__dict__.copy()
        state["sm"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        try:
            import sacremoses
        except ImportError:
            raise ImportError(
                "You need to install sacremoses to use XLMTokenizer. "
                "See https://pypi.org/project/sacremoses/ for installation."
            )

        self.sm = sacremoses
|
| 519 |
+
|
| 520 |
+
|
| 521 |
+
__all__ = ["FSMTTokenizer"]
|
phi4/lib/python3.10/site-packages/transformers/models/gptj/__init__.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
from typing import TYPE_CHECKING
|
| 15 |
+
|
| 16 |
+
from ...utils import _LazyModule
|
| 17 |
+
from ...utils.import_utils import define_import_structure
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
if TYPE_CHECKING:
    # For static type checkers and IDEs: expose the real symbols eagerly.
    from .configuration_gptj import *
    from .modeling_flax_gptj import *
    from .modeling_gptj import *
    from .modeling_tf_gptj import *
else:
    import sys

    # At runtime, replace this module with a lazy proxy so that heavy framework
    # imports (torch / tf / flax) only happen when a symbol is actually accessed.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
phi4/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (588 Bytes). View file
|
|
|
phi4/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/configuration_gptj.cpython-310.pyc
ADDED
|
Binary file (7.57 kB). View file
|
|
|
phi4/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_flax_gptj.cpython-310.pyc
ADDED
|
Binary file (21 kB). View file
|
|
|
phi4/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_gptj.cpython-310.pyc
ADDED
|
Binary file (40.3 kB). View file
|
|
|
phi4/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_tf_gptj.cpython-310.pyc
ADDED
|
Binary file (33.6 kB). View file
|
|
|
phi4/lib/python3.10/site-packages/transformers/models/gptj/configuration_gptj.py
ADDED
|
@@ -0,0 +1,219 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""GPT-J model configuration"""
|
| 16 |
+
|
| 17 |
+
from collections import OrderedDict
|
| 18 |
+
from typing import Any, List, Mapping, Optional
|
| 19 |
+
|
| 20 |
+
from ... import PreTrainedTokenizer, TensorType, is_torch_available
|
| 21 |
+
from ...configuration_utils import PretrainedConfig
|
| 22 |
+
from ...onnx import OnnxConfigWithPast, PatchingSpec
|
| 23 |
+
from ...utils import logging
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
logger = logging.get_logger(__name__)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class GPTJConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`GPTJModel`]. It is used to instantiate a GPT-J
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the GPT-J
    [EleutherAI/gpt-j-6B](https://huggingface.co/EleutherAI/gpt-j-6B) architecture. Configuration objects inherit from
    [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`]
    for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50400):
            Vocabulary size of the GPT-J model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`GPTJModel`].
        n_positions (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        n_embd (`int`, *optional*, defaults to 4096):
            Dimensionality of the embeddings and hidden states.
        n_layer (`int`, *optional*, defaults to 28):
            Number of hidden layers in the Transformer encoder.
        n_head (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        rotary_dim (`int`, *optional*, defaults to 64):
            Number of dimensions in the embedding that Rotary Position Embedding is applied to.
        n_inner (`int`, *optional*, defaults to None):
            Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd
        activation_function (`str`, *optional*, defaults to `"gelu_new"`):
            Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
        resid_pdrop (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        embd_pdrop (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the embeddings.
        attn_pdrop (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
            The epsilon to use in the layer normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        bos_token_id (`int`, *optional*, defaults to 50256):
            Beginning-of-sequence token id.
        eos_token_id (`int`, *optional*, defaults to 50256):
            End-of-sequence token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the input and output word embeddings should be tied.

    Example:

    ```python
    >>> from transformers import GPTJModel, GPTJConfig

    >>> # Initializing a GPT-J 6B configuration
    >>> configuration = GPTJConfig()

    >>> # Initializing a model from the configuration
    >>> model = GPTJModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "gptj"
    # Canonical attribute names used across the library, mapped onto the
    # GPT-2-style short names this config stores.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        # Special-token ids are also forwarded to the base class so that
        # generation utilities can pick them up from the config.
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
# Copied from transformers.models.gpt2.configuration_gpt2.GPT2OnnxConfig
class GPTJOnnxConfig(OnnxConfigWithPast):
    """ONNX export configuration for GPT-J, with optional past-key-values support."""

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        # ONNX export needs a concrete pad token id; fall back to 0 when the
        # model config does not define one (also covers pad_token_id == 0).
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Maps each input name to its dynamic axes (axis index -> axis name).
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            # With a cache, the mask covers both cached and new positions.
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        # Expose the GPT-2-style attribute under the name OnnxConfigWithPast expects.
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy inputs for export, extending the base inputs with past key/values when enabled."""
        # Skip OnnxConfigWithPast's own override and use the plain base implementation.
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                # (batch, num_heads, past_len, head_dim) — one (key, value) pair per layer.
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the mask so it also covers the dummy cached positions.
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
__all__ = ["GPTJConfig", "GPTJOnnxConfig"]
|
phi4/lib/python3.10/site-packages/transformers/models/gptj/modeling_flax_gptj.py
ADDED
|
@@ -0,0 +1,721 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2021 The EleutherAI and The HuggingFace Inc. team.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
|
| 16 |
+
from functools import partial
|
| 17 |
+
from typing import Optional, Tuple
|
| 18 |
+
|
| 19 |
+
import flax.linen as nn
|
| 20 |
+
import jax
|
| 21 |
+
import jax.numpy as jnp
|
| 22 |
+
import numpy as np
|
| 23 |
+
from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
|
| 24 |
+
from flax.linen import combine_masks, make_causal_mask
|
| 25 |
+
from flax.linen.attention import dot_product_attention_weights
|
| 26 |
+
from flax.traverse_util import flatten_dict, unflatten_dict
|
| 27 |
+
from jax import lax
|
| 28 |
+
|
| 29 |
+
from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput
|
| 30 |
+
from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring
|
| 31 |
+
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
|
| 32 |
+
from .configuration_gptj import GPTJConfig
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
logger = logging.get_logger(__name__)
|
| 36 |
+
|
| 37 |
+
_CHECKPOINT_FOR_DOC = "gptj"
|
| 38 |
+
_CONFIG_FOR_DOC = "GPTJConfig"
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
GPTJ_START_DOCSTRING = r"""
|
| 42 |
+
|
| 43 |
+
This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
|
| 44 |
+
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
|
| 45 |
+
etc.)
|
| 46 |
+
|
| 47 |
+
This model is also a Flax Linen
|
| 48 |
+
[flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
|
| 49 |
+
regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.
|
| 50 |
+
|
| 51 |
+
Finally, this model supports inherent JAX features such as:
|
| 52 |
+
|
| 53 |
+
- [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
|
| 54 |
+
- [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
|
| 55 |
+
- [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
|
| 56 |
+
- [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
|
| 57 |
+
|
| 58 |
+
Parameters:
|
| 59 |
+
config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.
|
| 60 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
| 61 |
+
configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
|
| 62 |
+
dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
|
| 63 |
+
The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
|
| 64 |
+
`jax.numpy.bfloat16` (on TPUs).
|
| 65 |
+
|
| 66 |
+
This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
|
| 67 |
+
specified all the computation will be performed with the given `dtype`.
|
| 68 |
+
|
| 69 |
+
**Note that this only specifies the dtype of the computation and does not influence the dtype of model
|
| 70 |
+
parameters.**
|
| 71 |
+
|
| 72 |
+
If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
|
| 73 |
+
[`~FlaxPreTrainedModel.to_bf16`].
|
| 74 |
+
"""
|
| 75 |
+
|
| 76 |
+
GPTJ_INPUTS_DOCSTRING = r"""
|
| 77 |
+
Args:
|
| 78 |
+
input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`):
|
| 79 |
+
`input_ids_length` = `sequence_length`. Indices of input sequence tokens in the vocabulary.
|
| 80 |
+
|
| 81 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| 82 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
| 83 |
+
|
| 84 |
+
[What are input IDs?](../glossary#input-ids)
|
| 85 |
+
attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
|
| 86 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
| 87 |
+
|
| 88 |
+
- 1 for tokens that are **not masked**,
|
| 89 |
+
- 0 for tokens that are **masked**.
|
| 90 |
+
|
| 91 |
+
[What are attention masks?](../glossary#attention-mask)
|
| 92 |
+
position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
|
| 93 |
+
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
|
| 94 |
+
config.max_position_embeddings - 1]`.
|
| 95 |
+
past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
|
| 96 |
+
Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
|
| 97 |
+
auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
|
| 98 |
+
output_attentions (`bool`, *optional*):
|
| 99 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 100 |
+
tensors for more detail.
|
| 101 |
+
output_hidden_states (`bool`, *optional*):
|
| 102 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 103 |
+
more detail.
|
| 104 |
+
return_dict (`bool`, *optional*):
|
| 105 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 106 |
+
"""
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def create_sinusoidal_positions(num_pos, dim):
    """Return a `(num_pos, dim)` table of sinusoidal position embeddings.

    The first half of the last dimension holds sines, the second half cosines,
    each computed at one frequency per channel pair (1 / 10000^(2k/dim)).
    """
    freqs = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
    # Outer product positions x frequencies -> phase angle per (pos, freq).
    angles = (np.arange(num_pos)[:, None] * freqs[None, :]).astype("float32")

    # Split point between the sine block and the cosine block.
    split = dim // 2 + dim % 2
    table = np.zeros((num_pos, dim))
    table[:, :split] = np.sin(angles)
    table[:, split:] = np.cos(angles)

    return jnp.array(table)
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def rotate_every_two(tensor):
    """Rotate consecutive channel pairs: (x1, x2, x3, x4, ...) -> (-x2, x1, -x4, x3, ...)."""
    odd = tensor[:, :, :, 1::2]
    even = tensor[:, :, :, ::2]
    # Stack (-odd, even) pairwise, then flatten the pair axis back into the
    # last dimension so the pairs end up interleaved.
    paired = jnp.stack((-odd, even), axis=-1)
    return paired.reshape(paired.shape[:-2] + (-1,))
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def apply_rotary_pos_emb(tensor, sincos):
    """Apply rotary position embedding to `tensor` using the `(sin, cos)` tables in `sincos`."""

    def _expand(table):
        # Insert a head axis, then duplicate each angle so it covers both
        # channels of its rotated pair along the last dimension.
        return table[:, :, None, :].repeat(2, 3)

    sin_pos, cos_pos = sincos
    return tensor * _expand(cos_pos) + rotate_every_two(tensor) * _expand(sin_pos)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
class FlaxGPTJAttention(nn.Module):
    """Multi-head self-attention with rotary position embeddings and an optional autoregressive cache."""

    config: GPTJConfig
    dtype: jnp.dtype = jnp.float32
    causal: bool = True
    is_cross_attention: bool = False

    def setup(self):
        config = self.config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads

        self.rotary_dim = config.rotary_dim

        # All projections share the same shape/init; GPT-J uses bias-free projections.
        dense = partial(
            nn.Dense,
            self.embed_dim,
            use_bias=False,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
        )

        self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
        self.out_proj = dense()

        self.resid_dropout = nn.Dropout(rate=config.resid_pdrop)

        # Static lower-triangular mask covering the maximum sequence length.
        self.causal_mask = make_causal_mask(jnp.ones((1, config.max_position_embeddings), dtype="bool"), dtype="bool")

        # Rotary tables cover `rotary_dim` channels, or the full embedding when
        # rotary_dim is falsy (None/0).
        pos_embd_dim = self.rotary_dim or self.embed_dim
        self.embed_positions = create_sinusoidal_positions(config.max_position_embeddings, pos_embd_dim)

    def _split_heads(self, hidden_states):
        # (batch, seq, embed) -> (batch, seq, heads, head_dim)
        return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))

    def _merge_heads(self, hidden_states):
        # (batch, seq, heads, head_dim) -> (batch, seq, embed)
        return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))

    @nn.compact
    def _concatenate_to_cache(self, key, value, query, attention_mask):
        """
        This function takes projected key, value states from a single input token and concatenates the states to cached
        states from previous steps. This function is slighly adapted from the official Flax repository:
        https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
        """
        # detect if we're initializing by absence of existing cache data.
        is_initialized = self.has_variable("cache", "cached_key")
        cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
        cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
        cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))

        if is_initialized:
            *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
            # update key, value caches with our new 1d spatial slices
            cur_index = cache_index.value
            indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
            key = lax.dynamic_update_slice(cached_key.value, key, indices)
            value = lax.dynamic_update_slice(cached_value.value, value, indices)
            cached_key.value = key
            cached_value.value = value
            num_updated_cache_vectors = query.shape[1]
            cache_index.value = cache_index.value + num_updated_cache_vectors
            # causal mask for cached decoder self-attention: our single query position should only attend to those key
            # positions that have already been generated and cached, not the remaining zero elements.
            pad_mask = jnp.broadcast_to(
                jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
                tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
            )
            attention_mask = combine_masks(pad_mask, attention_mask)
        return key, value, attention_mask

    def __call__(
        self,
        hidden_states,
        attention_mask,
        position_ids,
        deterministic: bool = True,
        init_cache: bool = False,
        output_attentions: bool = False,
    ):
        # Project and reshape into per-head tensors.
        query = self.q_proj(hidden_states)
        key = self.k_proj(hidden_states)
        value = self.v_proj(hidden_states)

        query = self._split_heads(query)
        key = self._split_heads(key)
        value = self._split_heads(value)

        # Gather (sin, cos) rows for each position, split into the two tables.
        sincos = jnp.take(self.embed_positions, position_ids, axis=0)
        sincos = jnp.split(sincos, 2, axis=-1)
        if self.rotary_dim is not None:
            # Rotate only the first `rotary_dim` channels; pass the rest through.
            k_rot = key[:, :, :, : self.rotary_dim]
            k_pass = key[:, :, :, self.rotary_dim :]

            q_rot = query[:, :, :, : self.rotary_dim]
            q_pass = query[:, :, :, self.rotary_dim :]

            k_rot = apply_rotary_pos_emb(k_rot, sincos)
            q_rot = apply_rotary_pos_emb(q_rot, sincos)

            key = jnp.concatenate([k_rot, k_pass], axis=-1)
            query = jnp.concatenate([q_rot, q_pass], axis=-1)
        else:
            key = apply_rotary_pos_emb(key, sincos)
            query = apply_rotary_pos_emb(query, sincos)

        query_length, key_length = query.shape[1], key.shape[1]

        if self.has_variable("cache", "cached_key"):
            # During incremental decoding, slice out the mask rows for the
            # current query positions against the full cached length.
            mask_shift = self.variables["cache"]["cache_index"]
            max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
            causal_mask = lax.dynamic_slice(
                self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
            )
        else:
            causal_mask = self.causal_mask[:, :, :query_length, :key_length]

        batch_size = hidden_states.shape[0]
        causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])

        # Merge the padding mask with the causal mask.
        attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
        attention_mask = combine_masks(attention_mask, causal_mask)

        dropout_rng = None
        if not deterministic and self.config.attn_pdrop > 0.0:
            dropout_rng = self.make_rng("dropout")

        # During fast autoregressive decoding, we feed one position at a time,
        # and cache the keys and values step by step.
        if self.has_variable("cache", "cached_key") or init_cache:
            key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask)

        # transform boolean mask into float mask
        attention_bias = lax.select(
            attention_mask > 0,
            jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
            jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
        )

        # usual dot product attention
        attn_weights = dot_product_attention_weights(
            query,
            key,
            bias=attention_bias,
            dropout_rng=dropout_rng,
            dropout_rate=self.config.attn_pdrop,
            deterministic=deterministic,
            dtype=self.dtype,
            precision=None,
        )

        # Weighted sum over values, then project back to the embedding dim.
        attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
        attn_output = self._merge_heads(attn_output)
        attn_output = self.out_proj(attn_output)
        attn_output = self.resid_dropout(attn_output, deterministic=deterministic)

        outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
        return outputs
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
class FlaxGPTJMLP(nn.Module):
    """Position-wise feed-forward block: Dense -> activation -> Dense -> dropout."""

    config: GPTJConfig
    intermediate_size: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_dim = self.config.hidden_size
        init = jax.nn.initializers.normal(self.config.initializer_range)

        # Expand to the intermediate size, then project back to the hidden size.
        self.fc_in = nn.Dense(self.intermediate_size, dtype=self.dtype, kernel_init=init)
        self.fc_out = nn.Dense(out_dim, dtype=self.dtype, kernel_init=init)
        self.act = ACT2FN[self.config.activation_function]
        self.dropout = nn.Dropout(rate=self.config.resid_pdrop)

    def __call__(self, hidden_states, deterministic: bool = True):
        projected = self.fc_out(self.act(self.fc_in(hidden_states)))
        return self.dropout(projected, deterministic=deterministic)
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
class FlaxGPTJBlock(nn.Module):
    """A single GPT-J transformer layer: one LayerNorm feeding attention and MLP in parallel."""

    config: GPTJConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        hidden_size = self.config.hidden_size
        # MLP inner dimension defaults to 4x the hidden size when n_inner is unset.
        inner_dim = self.config.n_inner if self.config.n_inner is not None else 4 * hidden_size

        # GPT-J uses a single pre-norm shared by both the attention and MLP branches.
        self.ln_1 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)
        self.attn = FlaxGPTJAttention(self.config, dtype=self.dtype)

        self.mlp = FlaxGPTJMLP(self.config, inner_dim, dtype=self.dtype)

    def __call__(
        self,
        hidden_states,
        attention_mask=None,
        position_ids=None,
        deterministic: bool = True,
        init_cache: bool = False,
        output_attentions: bool = False,
    ):
        residual = hidden_states
        hidden_states = self.ln_1(hidden_states)
        attn_outputs = self.attn(
            hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            deterministic=deterministic,
            init_cache=init_cache,
            output_attentions=output_attentions,
        )
        attn_output = attn_outputs[0]

        # Both branches consume the same normalized input (parallel formulation),
        # and their outputs are summed with the residual in one step.
        feed_forward_hidden_states = self.mlp(hidden_states, deterministic=deterministic)
        # residual connection
        hidden_states = attn_output + feed_forward_hidden_states + residual

        return (hidden_states,) + attn_outputs[1:]
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
class FlaxGPTJPreTrainedModel(FlaxPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = GPTJConfig
    base_model_prefix = "transformer"
    # Set by concrete subclasses (e.g. FlaxGPTJModule for FlaxGPTJModel).
    module_class: nn.Module = None

    def __init__(
        self,
        config: GPTJConfig,
        input_shape: Tuple = (1, 1),
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        # Instantiate the concrete flax module and hand everything to the
        # generic FlaxPreTrainedModel machinery (param init, loading, ...).
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        """Initialize (or complete) the model parameters with dummy inputs."""
        # init input tensors
        input_ids = jnp.zeros(input_shape, dtype="i4")
        attention_mask = jnp.ones_like(input_ids)
        position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        if self.config.add_cross_attention:
            # Cross-attention variants also need dummy encoder tensors so
            # every submodule's parameters are created during init.
            encoder_hidden_states = jnp.zeros(input_shape + (self.config.n_embd,))
            encoder_attention_mask = attention_mask
            module_init_outputs = self.module.init(
                rngs,
                input_ids,
                attention_mask,
                position_ids,
                encoder_hidden_states,
                encoder_attention_mask,
                return_dict=False,
            )
        else:
            module_init_outputs = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)

        random_params = module_init_outputs["params"]

        if params is not None:
            # Fill any keys missing from the user-supplied params with the
            # freshly initialized values, then re-freeze the tree.
            random_params = flatten_dict(unfreeze(random_params))
            params = flatten_dict(unfreeze(params))
            for missing_key in self._missing_keys:
                params[missing_key] = random_params[missing_key]
            self._missing_keys = set()
            return freeze(unflatten_dict(params))
        else:
            return random_params

    def init_cache(self, batch_size, max_length):
        r"""
        Args:
            batch_size (`int`):
                batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
            max_length (`int`):
                maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
                cache.
        """
        # init input variables to retrieve cache
        input_ids = jnp.ones((batch_size, max_length))
        attention_mask = jnp.ones_like(input_ids)
        position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)

        # A throwaway init with init_cache=True creates (and returns) the
        # "cache" variable collection; the params produced here are discarded.
        init_variables = self.module.init(
            jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
        )
        return init_variables["cache"]

    @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING)
    def __call__(
        self,
        input_ids,
        attention_mask=None,
        position_ids=None,
        params: dict = None,
        past_key_values: dict = None,
        dropout_rng: jax.random.PRNGKey = None,
        train: bool = False,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        # Fall back to the config defaults for any unset output flags.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        batch_size, sequence_length = input_ids.shape

        if position_ids is None:
            if past_key_values is not None:
                raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.")

            position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))

        if attention_mask is None:
            attention_mask = jnp.ones((batch_size, sequence_length))

        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng

        inputs = {"params": params or self.params}

        # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that it can be changed by FlaxGPTJAttention module
        if past_key_values:
            inputs["cache"] = past_key_values
            mutable = ["cache"]
        else:
            mutable = False

        # Positional args after position_ids map to the module signature:
        # deterministic=(not train), init_cache=False, then the output flags.
        outputs = self.module.apply(
            inputs,
            jnp.array(input_ids, dtype="i4"),
            jnp.array(attention_mask, dtype="i4"),
            jnp.array(position_ids, dtype="i4"),
            not train,
            False,
            output_attentions,
            output_hidden_states,
            return_dict,
            rngs=rngs,
            mutable=mutable,
        )

        # add updated cache to model output
        if past_key_values is not None and return_dict:
            outputs, past_key_values = outputs
            outputs["past_key_values"] = unfreeze(past_key_values["cache"])
            return outputs
        elif past_key_values is not None and not return_dict:
            outputs, past_key_values = outputs
            outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]

        return outputs
|
| 504 |
+
|
| 505 |
+
|
| 506 |
+
class FlaxGPTJBlockCollection(nn.Module):
    """Sequential stack of ``FlaxGPTJBlock`` layers."""

    config: GPTJConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # Layers are named "0", "1", ... so checkpoint weights map one-to-one.
        self.blocks = [
            FlaxGPTJBlock(self.config, name=str(layer_idx), dtype=self.dtype)
            for layer_idx in range(self.config.num_hidden_layers)
        ]

    def __call__(
        self,
        hidden_states,
        attention_mask=None,
        position_ids=None,
        deterministic: bool = True,
        init_cache: bool = False,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        """Apply every block in order, optionally collecting per-layer outputs.

        Returns ``(hidden_states, all_hidden_states, all_attentions)`` where
        the last two entries are ``None`` unless the matching flag is set.
        """
        collected_attentions = () if output_attentions else None
        collected_hidden_states = () if output_hidden_states else None

        for block in self.blocks:
            if output_hidden_states:
                # Record each layer's *input*; the final post-LN state is
                # appended by FlaxGPTJModule.
                collected_hidden_states = collected_hidden_states + (hidden_states,)

            layer_outputs = block(
                hidden_states,
                attention_mask,
                position_ids=position_ids,
                deterministic=deterministic,
                init_cache=init_cache,
                output_attentions=output_attentions,
            )
            hidden_states = layer_outputs[0]

            if output_attentions:
                collected_attentions = collected_attentions + (layer_outputs[1],)

        # this contains possible `None` values - `FlaxGPTJModule` will filter them out
        return (hidden_states, collected_hidden_states, collected_attentions)
|
| 550 |
+
|
| 551 |
+
|
| 552 |
+
class FlaxGPTJModule(nn.Module):
    """Bare GPT-J transformer: token embedding -> block stack -> final LayerNorm."""

    config: GPTJConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.embed_dim = self.config.hidden_size

        self.wte = nn.Embed(
            self.config.vocab_size,
            self.config.hidden_size,
            embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )
        # GPT-J has no learned position embedding here: positions enter via
        # rotary embeddings inside the attention layers.
        self.dropout = nn.Dropout(rate=self.config.embd_pdrop)
        self.h = FlaxGPTJBlockCollection(self.config, dtype=self.dtype)
        self.ln_f = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        position_ids,
        deterministic=True,
        init_cache: bool = False,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        """Embed, run the block stack, apply the final LayerNorm, and package outputs."""
        input_embeds = self.wte(input_ids.astype("i4"))

        hidden_states = self.dropout(input_embeds, deterministic=deterministic)

        # h returns (hidden_states, all_hidden_states_or_None, all_attentions_or_None).
        outputs = self.h(
            hidden_states,
            attention_mask,
            position_ids=position_ids,
            deterministic=deterministic,
            init_cache=init_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        hidden_states = self.ln_f(hidden_states)

        if output_hidden_states:
            # The collection recorded per-layer *inputs*; add the final
            # post-LN state so the tuple covers every layer boundary.
            all_hidden_states = outputs[1] + (hidden_states,)
            outputs = (hidden_states, all_hidden_states) + outputs[2:]
        else:
            outputs = (hidden_states,) + outputs[1:]

        if not return_dict:
            # Tuple form drops the None placeholders.
            return tuple(v for v in outputs if v is not None)

        return FlaxBaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=outputs[1],
            attentions=outputs[-1],
        )
|
| 611 |
+
|
| 612 |
+
|
| 613 |
+
@add_start_docstrings(
    "The bare GPTJ Model transformer outputting raw hidden-states without any specific head on top.",
    GPTJ_START_DOCSTRING,
)
class FlaxGPTJModel(FlaxGPTJPreTrainedModel):
    # Thin concrete wrapper: all behavior lives in FlaxGPTJPreTrainedModel and
    # FlaxGPTJModule; this class only selects which module to instantiate.
    module_class = FlaxGPTJModule


# Attach a runnable usage example to FlaxGPTJModel.__call__'s docstring.
append_call_sample_docstring(
    FlaxGPTJModel,
    _CHECKPOINT_FOR_DOC,
    FlaxCausalLMOutput,
    _CONFIG_FOR_DOC,
)
|
| 627 |
+
|
| 628 |
+
|
| 629 |
+
class FlaxGPTJForCausalLMModule(nn.Module):
    """GPT-J transformer with a language-modeling head (vocab-sized Dense) on top."""

    config: GPTJConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.transformer = FlaxGPTJModule(self.config, dtype=self.dtype)
        self.lm_head = nn.Dense(
            self.config.vocab_size,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )

    def __call__(
        self,
        input_ids,
        attention_mask,
        position_ids,
        deterministic: bool = True,
        init_cache: bool = False,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        """Run the transformer and project the last hidden state to vocab logits."""
        outputs = self.transformer(
            input_ids,
            attention_mask,
            position_ids,
            deterministic=deterministic,
            init_cache=init_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]

        if self.config.tie_word_embeddings:
            # Weight tying: reuse the (transposed) token-embedding matrix as
            # the lm_head kernel instead of the head's own parameters.
            shared_kernel = self.transformer.variables["params"]["wte"]["embedding"].T
            lm_logits = self.lm_head.apply({"params": {"kernel": shared_kernel}}, hidden_states)
        else:
            lm_logits = self.lm_head(hidden_states)

        if not return_dict:
            return (lm_logits,) + outputs[1:]

        return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
| 675 |
+
|
| 676 |
+
|
| 677 |
+
@add_start_docstrings(
    """
    The GPTJ Model transformer with a language modeling head on top.
    """,
    GPTJ_START_DOCSTRING,
)
class FlaxGPTJForCausalLM(FlaxGPTJPreTrainedModel):
    module_class = FlaxGPTJForCausalLMModule

    def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
        """Build the kwargs (cache, mask, positions) for the first generation step."""
        # initializing the cache
        batch_size, seq_length = input_ids.shape

        past_key_values = self.init_cache(batch_size, max_length)
        # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
        # But since GPTJ uses a causal mask, those positions are masked anyways.
        # Thus we can create a single static attention_mask here, which is more efficient for compilation
        extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
        if attention_mask is not None:
            # cumsum-1 turns a left-padded mask into 0-based positions of the
            # real tokens (padded slots get clamped-together values but are masked).
            position_ids = attention_mask.cumsum(axis=-1) - 1
            extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
        else:
            position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))

        return {
            "past_key_values": past_key_values,
            "attention_mask": extended_attention_mask,
            "position_ids": position_ids,
        }

    def update_inputs_for_generation(self, model_outputs, model_kwargs):
        """Advance cache and position_ids by one step between decoding iterations."""
        model_kwargs["past_key_values"] = model_outputs.past_key_values
        # Next step decodes exactly one token: keep only the last position, +1.
        model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
        return model_kwargs
|
| 711 |
+
|
| 712 |
+
|
| 713 |
+
# Attach a runnable usage example to FlaxGPTJForCausalLM.__call__'s docstring.
append_call_sample_docstring(
    FlaxGPTJForCausalLM,
    _CHECKPOINT_FOR_DOC,
    FlaxCausalLMOutput,
    _CONFIG_FOR_DOC,
)


# Public API of this module.
__all__ = ["FlaxGPTJForCausalLM", "FlaxGPTJModel", "FlaxGPTJPreTrainedModel"]
|
phi4/lib/python3.10/site-packages/transformers/models/gptj/modeling_gptj.py
ADDED
|
@@ -0,0 +1,1407 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""PyTorch GPT-J model."""
|
| 16 |
+
|
| 17 |
+
import warnings
|
| 18 |
+
from typing import Optional, Tuple, Union
|
| 19 |
+
|
| 20 |
+
import torch
|
| 21 |
+
import torch.fx
|
| 22 |
+
import torch.utils.checkpoint
|
| 23 |
+
from torch import nn
|
| 24 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
| 25 |
+
|
| 26 |
+
from ...activations import ACT2FN
|
| 27 |
+
from ...cache_utils import Cache, DynamicCache, StaticCache
|
| 28 |
+
from ...generation import GenerationMixin
|
| 29 |
+
from ...modeling_attn_mask_utils import AttentionMaskConverter
|
| 30 |
+
from ...modeling_outputs import (
|
| 31 |
+
BaseModelOutputWithPast,
|
| 32 |
+
CausalLMOutputWithPast,
|
| 33 |
+
QuestionAnsweringModelOutput,
|
| 34 |
+
SequenceClassifierOutputWithPast,
|
| 35 |
+
)
|
| 36 |
+
from ...modeling_utils import PreTrainedModel
|
| 37 |
+
from ...utils import (
|
| 38 |
+
add_code_sample_docstrings,
|
| 39 |
+
add_start_docstrings,
|
| 40 |
+
add_start_docstrings_to_model_forward,
|
| 41 |
+
is_flash_attn_2_available,
|
| 42 |
+
is_flash_attn_greater_or_equal_2_10,
|
| 43 |
+
is_torch_fx_proxy,
|
| 44 |
+
logging,
|
| 45 |
+
)
|
| 46 |
+
from ...utils.model_parallel_utils import assert_device_map, get_device_map
|
| 47 |
+
from .configuration_gptj import GPTJConfig
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
if is_flash_attn_2_available():
|
| 51 |
+
from ...modeling_flash_attention_utils import _flash_attention_forward
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
logger = logging.get_logger(__name__)
|
| 55 |
+
|
| 56 |
+
_CHECKPOINT_FOR_DOC = "hf-internal-testing/tiny-random-gptj"
|
| 57 |
+
_REAL_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-j-6B"
|
| 58 |
+
_CONFIG_FOR_DOC = "GPTJConfig"
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor:
    """Build the fixed sin/cos table used by GPT-J's rotary embeddings.

    Returns a ``(num_pos, dim)`` tensor whose first ``dim // 2`` columns hold
    the sines and whose remaining columns hold the cosines of
    ``position * inv_freq``.
    """
    # One inverse frequency per even channel, as in the RoPE formulation.
    inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64) / dim))
    positions = torch.arange(num_pos, dtype=torch.int64).float()
    # Outer product: angles[i, j] = position_i * inv_freq_j.
    angles = torch.outer(positions, inv_freq).float()
    return torch.cat((angles.sin(), angles.cos()), dim=-1)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
@torch.fx.wrap
def get_embed_positions(embed_positions, position_ids):
    # Wrapped for torch.fx: the conditional device move in the eager path
    # cannot be traced, so tracing treats this whole call as opaque.
    # Moves the sinusoidal table to the ids' device and tiles it per batch row.
    return embed_positions.to(position_ids.device).repeat(position_ids.shape[0], 1, 1)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def rotate_every_two(x: torch.Tensor) -> torch.Tensor:
    """Rotate adjacent channel pairs of a 4-D tensor: ``(x0, x1) -> (-x1, x0)``.

    This is the 90-degree rotation used by rotary position embeddings.
    """
    first_of_pair = x[:, :, :, ::2]
    second_of_pair = x[:, :, :, 1::2]
    # Interleave (-x1, x0) back into the original channel order.
    paired = torch.stack((-second_of_pair, first_of_pair), dim=-1)
    return paired.flatten(-2)  # in einsum notation: rearrange(x, '... d j -> ... (d j)')
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def apply_rotary_pos_emb(tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor:
    """Apply rotary position embeddings: ``x * cos + rotate_every_two(x) * sin``."""
    # Duplicate each sin/cos entry (and add a head axis) so the tables line up
    # with the interleaved (x0, x1) channel pairs of `tensor`.
    sin = torch.repeat_interleave(sin[:, :, None, :], 2, 3)
    cos = torch.repeat_interleave(cos[:, :, None, :], 2, 3)
    rotated = rotate_every_two(tensor)
    return (tensor * cos) + (rotated * sin)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
class GPTJAttention(nn.Module):
|
| 86 |
+
def __init__(self, config, layer_idx=None):
    """Multi-head causal self-attention with rotary position embeddings.

    Args:
        config: GPTJConfig carrying hidden size, head count, dropout rates,
            rotary_dim and max_position_embeddings.
        layer_idx: index of this layer in the block stack; needed so a
            `Cache` object can be addressed per layer during generation.
    """
    super().__init__()
    self.config = config
    max_positions = config.max_position_embeddings

    self.attn_dropout = nn.Dropout(config.attn_pdrop)
    self.resid_dropout = nn.Dropout(config.resid_pdrop)

    self.is_causal = True
    self.layer_idx = layer_idx
    if layer_idx is None:
        logger.warning_once(
            f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
            "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
            "when creating this class."
        )

    self.embed_dim = config.hidden_size
    self.num_attention_heads = config.num_attention_heads
    self.head_dim = self.embed_dim // self.num_attention_heads
    if self.head_dim * self.num_attention_heads != self.embed_dim:
        raise ValueError(
            f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
            f" `num_attention_heads`: {self.num_attention_heads})."
        )
    # sqrt(head_dim) scaling factor, computed in fp32 then cast to the default dtype.
    self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype())

    # GPT-J uses bias-free projections for q/k/v and the output.
    self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
    self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
    self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
    self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
    self.rotary_dim = config.rotary_dim
    # When rotary_dim is unset, the whole embedding dimension is rotated.
    pos_embd_dim = self.rotary_dim or self.embed_dim
    self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim)
|
| 120 |
+
|
| 121 |
+
def _split_heads(self, tensor, num_attention_heads, attn_head_size, rotary):
    """
    Splits hidden dim into attn_head_size and num_attention_heads
    """
    target_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size)
    tensor = tensor.view(target_shape)
    if rotary:
        # Rotary embeddings are applied before the head axis is moved, so
        # keep the (..., seq_length, head, head_features) layout as-is.
        return tensor
    rank = len(tensor.shape)
    if rank == 5:
        return tensor.permute(0, 1, 3, 2, 4)  # (batch, blocks, head, block_length, head_features)
    if rank == 4:
        return tensor.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)
    raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {rank}")
|
| 135 |
+
|
| 136 |
+
def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
    """
    Merges attn_head_size dim and num_attn_heads dim into hidden dim
    """
    rank = len(tensor.shape)
    if rank == 5:
        # (batch, blocks, head, block_length, features) -> head back after block_length
        tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
    elif rank == 4:
        # (batch, head, seq_length, features) -> (batch, seq_length, head, features)
        tensor = tensor.permute(0, 2, 1, 3).contiguous()
    else:
        raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {rank}")
    merged_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
    return tensor.view(merged_shape)
|
| 148 |
+
|
| 149 |
+
def _attn(
    self,
    query,
    key,
    value,
    attention_mask=None,
    head_mask=None,
):
    """Eager scaled-dot-product attention.

    Args:
        query, key, value: per-head projections, heads-first layout.
        attention_mask: additive mask (large negative where masked);
            sliced to the key length before being added to the scores.
        head_mask: multiplicative per-head mask applied to the probabilities.

    Returns:
        ``(attn_output, attn_weights)``.
    """
    # Keep the attention weights computation in fp32 to avoid overflow issues
    query = query.to(torch.float32)
    key = key.to(torch.float32)

    attn_weights = torch.matmul(query, key.transpose(-1, -2))
    attn_weights = attn_weights / self.scale_attn

    if attention_mask is not None:  # no matter the length, we just slice it
        causal_mask = attention_mask[:, :, :, : key.shape[-2]]
        attn_weights = attn_weights + causal_mask

    # Softmax in fp32, then cast back to the value dtype before the matmul.
    attn_weights = nn.functional.softmax(attn_weights, dim=-1)
    attn_weights = attn_weights.to(value.dtype)
    attn_weights = self.attn_dropout(attn_weights)

    # Mask heads if we want to
    if head_mask is not None:
        attn_weights = attn_weights * head_mask

    attn_output = torch.matmul(attn_weights, value)

    return attn_output, attn_weights
|
| 179 |
+
|
| 180 |
+
def _get_embed_positions(self, position_ids):
    """Return the sinusoidal table tiled over the batch dimension.

    Lazily moves (and re-caches) `self.embed_positions` onto the device of
    `position_ids` so later calls skip the host/device transfer.
    """
    embed_positions = self.embed_positions
    if embed_positions.device != position_ids.device:
        embed_positions = embed_positions.to(position_ids.device)
        # Cache the moved tensor for subsequent forward passes.
        self.embed_positions = embed_positions
    return embed_positions.repeat(position_ids.shape[0], 1, 1)
|
| 186 |
+
|
| 187 |
+
def forward(
    self,
    hidden_states: torch.FloatTensor,
    layer_past: Optional[Cache] = None,
    attention_mask: Optional[torch.FloatTensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    head_mask: Optional[torch.FloatTensor] = None,
    use_cache: Optional[bool] = False,
    output_attentions: Optional[bool] = False,
    cache_position: Optional[torch.LongTensor] = None,
) -> Union[
    Tuple[torch.Tensor, Tuple[torch.Tensor]],
    Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
]:
    """Eager self-attention forward pass.

    Projects hidden states to Q/K/V, applies rotary position embeddings
    (optionally only to the first ``rotary_dim`` channels), updates the KV
    cache when ``layer_past`` is given, and runs ``self._attn``.

    Returns ``(attn_output, layer_past)`` plus ``attn_weights`` when
    ``output_attentions`` is set.
    """
    query = self.q_proj(hidden_states)
    key = self.k_proj(hidden_states)
    value = self.v_proj(hidden_states)

    # rotary=True keeps (batch, seq, heads, head_dim) so rotary embeddings can
    # index the sequence dim; value is split directly to (batch, heads, seq, head_dim).
    query = self._split_heads(query, self.num_attention_heads, self.head_dim, True)
    key = self._split_heads(key, self.num_attention_heads, self.head_dim, True)
    value = self._split_heads(value, self.num_attention_heads, self.head_dim, False)

    if is_torch_fx_proxy(position_ids) or torch.jit.is_tracing():
        # The logic to conditionally copy to GPU could not be traced, so we do this
        # every time in the torch.fx case
        embed_positions = get_embed_positions(self.embed_positions, position_ids)
    else:
        embed_positions = self._get_embed_positions(position_ids)

    # Gather the sin/cos rows for each position, then split the last dim in half.
    repeated_position_ids = position_ids.unsqueeze(-1).repeat(1, 1, embed_positions.shape[-1])
    sincos = torch.gather(embed_positions, 1, repeated_position_ids)
    sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)

    if self.rotary_dim is not None:
        # Partial rotary: only the first `rotary_dim` channels are rotated,
        # the remainder passes through unchanged.
        k_rot = key[:, :, :, : self.rotary_dim]
        k_pass = key[:, :, :, self.rotary_dim :]

        q_rot = query[:, :, :, : self.rotary_dim]
        q_pass = query[:, :, :, self.rotary_dim :]

        k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
        q_rot = apply_rotary_pos_emb(q_rot, sin, cos)

        key = torch.cat([k_rot, k_pass], dim=-1)
        query = torch.cat([q_rot, q_pass], dim=-1)
    else:
        key = apply_rotary_pos_emb(key, sin, cos)
        query = apply_rotary_pos_emb(query, sin, cos)

    # (batch, seq, heads, head_dim) -> (batch, heads, seq, head_dim) for attention.
    key = key.permute(0, 2, 1, 3)
    query = query.permute(0, 2, 1, 3)

    if layer_past is not None:
        cache_kwargs = {
            "sin": sin,
            "cos": cos,
            "partial_rotation_size": self.rotary_dim,
            "cache_position": cache_position,
        }
        # Append this step's K/V to the cache and get back the full sequence.
        key, value = layer_past.update(key, value, self.layer_idx, cache_kwargs)

    # compute self-attention: V x Softmax(QK^T)
    attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)

    attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)
    attn_output = self.out_proj(attn_output)
    attn_output = self.resid_dropout(attn_output)

    outputs = (attn_output, layer_past)
    if output_attentions:
        outputs += (attn_weights,)

    return outputs  # a, present, (attentions)
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
class GPTJFlashAttention2(GPTJAttention):
    """
    GPTJ flash attention module. This module inherits from `GPTJAttention` as the weights of the module stays
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        layer_past: Optional[Cache] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[
        Tuple[torch.Tensor, Tuple[torch.Tensor]],
        Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
    ]:
        """Flash-attention forward pass; mirrors the eager path up to the cache
        update, then hands Q/K/V (batch, seq, heads, head_dim) to
        `_flash_attention_forward`."""
        query = self.q_proj(hidden_states)
        key = self.k_proj(hidden_states)
        value = self.v_proj(hidden_states)

        query = self._split_heads(query, self.num_attention_heads, self.head_dim, True)
        key = self._split_heads(key, self.num_attention_heads, self.head_dim, True)
        value = self._split_heads(value, self.num_attention_heads, self.head_dim, False)

        if is_torch_fx_proxy(position_ids) or torch.jit.is_tracing():
            # The logic to conditionally copy to GPU could not be traced, so we do this
            # every time in the torch.fx case
            embed_positions = get_embed_positions(self.embed_positions, position_ids)
        else:
            embed_positions = self._get_embed_positions(position_ids)

        # Gather per-position sin/cos rows and split the last dim in half.
        repeated_position_ids = position_ids.unsqueeze(-1).repeat(1, 1, embed_positions.shape[-1])
        sincos = torch.gather(embed_positions, 1, repeated_position_ids)
        sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)

        if self.rotary_dim is not None:
            # Partial rotary: rotate only the first `rotary_dim` channels.
            k_rot = key[:, :, :, : self.rotary_dim]
            k_pass = key[:, :, :, self.rotary_dim :]

            q_rot = query[:, :, :, : self.rotary_dim]
            q_pass = query[:, :, :, self.rotary_dim :]

            k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
            q_rot = apply_rotary_pos_emb(q_rot, sin, cos)

            key = torch.cat([k_rot, k_pass], dim=-1)
            query = torch.cat([q_rot, q_pass], dim=-1)
        else:
            key = apply_rotary_pos_emb(key, sin, cos)
            query = apply_rotary_pos_emb(query, sin, cos)

        # transpose to have the desired shape
        # before transpose: batch_size x seq_length x num_attention_heads x head_dim
        # after transpose: batch_size x num_attention_heads x seq_length x head_dim
        key = key.permute(0, 2, 1, 3)
        query = query.permute(0, 2, 1, 3)
        # value: batch_size x num_attention_heads x seq_length x head_dim

        if layer_past is not None:
            cache_kwargs = {
                "sin": sin,
                "cos": cos,
                "partial_rotation_size": self.rotary_dim,
                "cache_position": cache_position,
            }
            # Cache update expects (batch, heads, seq, head_dim), hence the
            # transpose above followed by the transpose back below.
            key, value = layer_past.update(key, value, self.layer_idx, cache_kwargs)

        # The Flash attention requires the input to have the shape
        # batch_size x seq_length x head_dim x hidden_dim
        # therefore we need to keep the original shape for query and key, and reshape value
        # to have the correct shape.
        key = key.permute(0, 2, 1, 3).contiguous()
        query = query.permute(0, 2, 1, 3).contiguous()
        value = value.permute(0, 2, 1, 3).contiguous()

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
        # therefore the input hidden states gets silently casted in float32. Hence, we need
        # cast them back in the correct dtype just to be sure everything works as expected.
        # This might slowdown training & inference so it is recommended to not cast the LayerNorms
        # in fp32. (LlamaRMSNorm handles it correctly)

        input_dtype = query.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seems to be silently casted in float32, this might be related to"
                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
            )

            query = query.to(target_dtype)
            key = key.to(target_dtype)
            value = value.to(target_dtype)

        # Dropout is applied only during training.
        attention_dropout = self.config.attn_pdrop if self.training else 0.0  # attn_pdrop in gptj

        query_length = query.shape[1]

        # Compute attention
        attn_weights = _flash_attention_forward(
            query,
            key,
            value,
            attention_mask,
            query_length,
            dropout=attention_dropout,
            is_causal=self.is_causal,
            use_top_left_mask=self._flash_attn_uses_top_left_mask,
        )

        # Reshape outputs: merge heads back into the hidden dimension.
        attn_output = attn_weights.reshape(
            attn_weights.shape[0], attn_weights.shape[1], attn_weights.shape[2] * attn_weights.shape[3]
        )
        attn_output = self.out_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)

        outputs = (attn_output, layer_past)
        if output_attentions:
            # NOTE(review): flash attention returns the attention *output*, not
            # per-head probabilities, so this slot is not true attention weights.
            outputs += (attn_weights,)

        return outputs
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
# Maps `config._attn_implementation` to the attention class instantiated by
# each `GPTJBlock`.
GPTJ_ATTENTION_CLASSES = {
    "eager": GPTJAttention,
    "flash_attention_2": GPTJFlashAttention2,
}
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
class GPTJMLP(nn.Module):
    """Position-wise feed-forward block: Linear -> activation -> Linear -> dropout."""

    def __init__(self, intermediate_size, config):
        # Callers typically pass intermediate_size = 4 * config.n_embd.
        super().__init__()
        hidden_size = config.n_embd

        self.fc_in = nn.Linear(hidden_size, intermediate_size)
        self.fc_out = nn.Linear(intermediate_size, hidden_size)

        self.act = ACT2FN[config.activation_function]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:
        """Project up, apply the activation, project back down, then dropout."""
        projected = self.fc_out(self.act(self.fc_in(hidden_states)))
        return self.dropout(projected)
|
| 429 |
+
|
| 430 |
+
|
| 431 |
+
class GPTJBlock(nn.Module):
    """One GPT-J transformer layer.

    GPT-J uses a *parallel* residual: attention and the MLP both consume the
    same LayerNorm'd input, and their outputs are summed together with the
    residual (rather than the usual sequential attn-then-MLP layout).
    """

    def __init__(self, config, layer_idx=None):
        super().__init__()
        # MLP inner width defaults to 4 * hidden size when n_inner is unset.
        inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
        self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        # Attention implementation is selected from the config ("eager" or
        # "flash_attention_2"); see GPTJ_ATTENTION_CLASSES.
        self.attn = GPTJ_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
        self.mlp = GPTJMLP(inner_dim, config)

    def forward(
        self,
        hidden_states: Optional[torch.FloatTensor],
        layer_past: Optional[Cache] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
        """Run attention and MLP in parallel on the normed input and add both
        to the residual. Returns (hidden_states, present, (attentions))."""
        residual = hidden_states
        hidden_states = self.ln_1(hidden_states)
        attn_outputs = self.attn(
            hidden_states=hidden_states,
            layer_past=layer_past,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            cache_position=cache_position,
        )
        attn_output = attn_outputs[0]  # output_attn: a, present, (attentions)
        outputs = attn_outputs[1:]

        # Parallel residual: the MLP reads the *normed* input, not attn_output.
        feed_forward_hidden_states = self.mlp(hidden_states)
        hidden_states = attn_output + feed_forward_hidden_states + residual

        if use_cache:
            outputs = (hidden_states,) + outputs
        else:
            # Drop the cache slot (outputs[0]) when caching is disabled.
            outputs = (hidden_states,) + outputs[1:]

        return outputs  # hidden_states, present, (attentions)
|
| 474 |
+
|
| 475 |
+
|
| 476 |
+
class GPTJPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = GPTJConfig
    base_model_prefix = "transformer"
    is_parallelizable = True
    supports_gradient_checkpointing = True
    _no_split_modules = ["GPTJBlock"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn_2 = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_param_buffer_assignment = False

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """Initialize one submodule's parameters in place."""
        if isinstance(module, nn.LayerNorm):
            # LayerNorm: identity transform (zero shift, unit scale).
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from Mesh Transformer JAX, which uses
            # truncated_normal for initialization;
            # cf. https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
            if isinstance(module, nn.Embedding) and module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
|
| 512 |
+
|
| 513 |
+
|
| 514 |
+
# Docstring fragments consumed by the `add_start_docstrings*` decorators; they
# are interpolated into the public documentation of the model classes below.
GPTJ_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

# Argument documentation for `forward`; `{0}` is replaced with the input shape
# via `.format(...)` at decoration time.
GPTJ_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.

            Two formats are allowed:
            - a [`~cache_utils.Cache`] instance, see our
            [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);
            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
            shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
            cache format.

            The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
            legacy cache format will be returned.

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
            of shape `(batch_size, sequence_length)`.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
            Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
            this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
            the complete sequence length.
"""

# Docstring for the deprecated naive model-parallel helper `parallelize`.
PARALLELIZE_DOCSTRING = r"""
    This is an experimental feature and is a subject to change at a moment's notice. Uses a device map to distribute
    attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks
    across all devices.

    Args:
        device_map (`Dict[int, list]`, *optional*):
            A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
            automatically mapped to the first device (for esoteric reasons). That means that the first device should
            have fewer attention modules mapped to it than other devices. For reference, the GPT-J models have the
            following number of attention modules:

                - gpt-j-6B: 28

    Example:

    ```python
    # Here is an example of a device map on a machine with 4 GPUs using gpt-j-6B, which has a total of 28 attention modules:
    model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
    device_map = {
        0: [0, 1, 2, 3, 4, 5, 6],
        1: [7, 8, 9, 10, 11, 12, 13],
        2: [14, 15, 16, 17, 18, 19, 20],
        3: [21, 22, 23, 24, 25, 26, 27],
    }
    model.parallelize(device_map)
    ```
"""

# Docstring for the matching `deparallelize` helper.
DEPARALLELIZE_DOCSTRING = r"""
    Moves the model to CPU from a model parallel state.

    Example:

    ```python
    # On a 4 GPU machine with gpt-j-6B:
    model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
    device_map = {
        0: [0, 1, 2, 3, 4, 5, 6],
        1: [7, 8, 9, 10, 11, 12, 13],
        2: [14, 15, 16, 17, 18, 19, 20],
        3: [21, 22, 23, 24, 25, 26, 27],
    }
    model.parallelize(device_map)  # Splits the model across several devices
    model.deparallelize()  # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()
    ```
"""
|
| 643 |
+
|
| 644 |
+
|
| 645 |
+
@add_start_docstrings(
|
| 646 |
+
"The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top.",
|
| 647 |
+
GPTJ_START_DOCSTRING,
|
| 648 |
+
)
|
| 649 |
+
class GPTJModel(GPTJPreTrainedModel):
|
| 650 |
+
def __init__(self, config):
    """Build the GPT-J decoder stack: token embeddings, dropout, blocks, final LayerNorm."""
    super().__init__(config)

    hidden_size = config.n_embd
    self.embed_dim = hidden_size
    self.vocab_size = config.vocab_size
    self.wte = nn.Embedding(config.vocab_size, hidden_size)
    self.drop = nn.Dropout(config.embd_pdrop)
    blocks = [GPTJBlock(config, layer_idx=layer) for layer in range(config.n_layer)]
    self.h = nn.ModuleList(blocks)
    self.ln_f = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)

    # Model-parallel bookkeeping (populated by `parallelize`).
    self.model_parallel = False
    self.device_map = None
    self.gradient_checkpointing = False

    # Initialize weights and apply final processing
    self.post_init()

    self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
|
| 669 |
+
|
| 670 |
+
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
    """Deprecated: spread the transformer blocks across CUDA devices per ``device_map``.

    Embeddings go to the first device and the final LayerNorm to the last;
    prefer ``device_map`` in ``from_pretrained`` instead.
    """
    warnings.warn(
        "`GPTJModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your"
        " model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
        " `device_map` but it needs to be a dictionary module_name to device, so for instance {'h.0': 0, 'h.1': 1,"
        " ...}",
        FutureWarning,
    )
    # Check validity of device_map
    self.device_map = (
        get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map
    )
    assert_device_map(self.device_map, len(self.h))
    self.model_parallel = True
    # First/last devices bracket the pipeline; embeddings live on the first.
    self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
    self.last_device = "cuda:" + str(max(self.device_map.keys()))
    self.wte = self.wte.to(self.first_device)
    # Load onto devices
    for k, v in self.device_map.items():
        for block in v:
            cuda_device = "cuda:" + str(k)
            self.h[block] = self.h[block].to(cuda_device)
    # ln_f to last
    self.ln_f = self.ln_f.to(self.last_device)
|
| 695 |
+
|
| 696 |
+
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
    """Deprecated: move all submodules back to CPU and release cached CUDA memory."""
    warnings.warn(
        "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
        FutureWarning,
    )
    self.model_parallel = False
    self.device_map = None
    self.first_device = "cpu"
    self.last_device = "cpu"
    self.wte = self.wte.to("cpu")
    for index in range(len(self.h)):
        self.h[index] = self.h[index].to("cpu")
    self.ln_f = self.ln_f.to("cpu")
    # Free the CUDA caching allocator's now-unused blocks.
    torch.cuda.empty_cache()
|
| 711 |
+
|
| 712 |
+
def get_input_embeddings(self):
    """Return the module mapping input token ids to embedding vectors."""
    embeddings = self.wte
    return embeddings
|
| 714 |
+
|
| 715 |
+
def set_input_embeddings(self, new_embeddings):
    """Replace the input token embedding module."""
    self.wte = new_embeddings
|
| 717 |
+
|
| 718 |
+
@add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
    checkpoint=_CHECKPOINT_FOR_DOC,
    output_type=BaseModelOutputWithPast,
    config_class=_CONFIG_FOR_DOC,
    real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
)
def forward(
    self,
    input_ids: Optional[torch.LongTensor] = None,
    past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor]]]] = None,
    attention_mask: Optional[torch.FloatTensor] = None,
    token_type_ids: Optional[torch.LongTensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    head_mask: Optional[torch.FloatTensor] = None,
    inputs_embeds: Optional[torch.FloatTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    cache_position: Optional[torch.LongTensor] = None,
) -> Union[Tuple, BaseModelOutputWithPast]:
    """Run the full GPT-J decoder: embed, apply all blocks, final LayerNorm.

    Handles config-default resolution, legacy tuple-of-tuples cache
    conversion, cache/position id derivation, causal mask construction,
    gradient checkpointing, and (deprecated) naive model parallelism.
    """
    # Resolve unset flags from the model config.
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )
    use_cache = use_cache if use_cache is not None else self.config.use_cache
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    # Exactly one of input_ids / inputs_embeds must be supplied.
    if (input_ids is None) ^ (inputs_embeds is not None):
        raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

    if self.gradient_checkpointing and self.training:
        if use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
            )
            use_cache = False

    if inputs_embeds is None:
        inputs_embeds = self.wte(input_ids)

    # kept for BC (non `Cache` `past_key_values` inputs)
    return_legacy_cache = False
    if use_cache and not isinstance(past_key_values, Cache):
        return_legacy_cache = True
        if past_key_values is None:
            past_key_values = DynamicCache()
        else:
            past_key_values = DynamicCache.from_legacy_cache(past_key_values)
            logger.warning_once(
                "We detected that you are passing `past_key_values` as a tuple of tuples. This is deprecated and "
                "will be removed in v4.47. Please convert your cache or use an appropriate `Cache` class "
                "(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)"
            )

    seq_length = inputs_embeds.shape[1]
    if cache_position is None:
        # New tokens occupy positions just after the cached prefix.
        past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
        cache_position = torch.arange(
            past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device
        )

    if position_ids is None:
        position_ids = cache_position.unsqueeze(0)

    causal_mask = self._update_causal_mask(
        attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
    )

    # Prepare head mask if needed
    # 1.0 in head_mask indicate we keep the head
    # attention_probs has shape bsz x num_attention_heads x N x N
    # head_mask has shape n_layer x batch x num_attention_heads x N x N
    head_mask = self.get_head_mask(head_mask, self.config.n_layer)
    hidden_states = inputs_embeds

    if token_type_ids is not None:
        # Token-type embeddings reuse the word embedding table.
        token_type_ids = token_type_ids.view(-1, seq_length)
        token_type_embeds = self.wte(token_type_ids)
        hidden_states = hidden_states + token_type_embeds

    hidden_states = self.drop(hidden_states)
    output_shape = (-1, seq_length, hidden_states.size(-1))

    next_decoder_cache = None
    all_self_attentions = () if output_attentions else None
    all_hidden_states = () if output_hidden_states else None
    for i, block in enumerate(self.h):
        # Model parallel
        if self.model_parallel:
            torch.cuda.set_device(hidden_states.device)

            # Ensure layer_past is on same device as hidden_states (might not be correct)
            # NOTE(review): `DynamicCache.key_cache` is typically a *list* of
            # tensors, so calling `.to()` on it directly looks suspect — confirm
            # against the installed Cache implementation.
            if past_key_values is not None:
                past_key_values.key_cache = past_key_values.key_cache.to(hidden_states.device)
                past_key_values.value_cache = past_key_values.value_cache.to(hidden_states.device)

            # Ensure that attention_mask is always on the same device as hidden_states
            if causal_mask is not None:
                causal_mask = causal_mask.to(hidden_states.device)
            if isinstance(head_mask, torch.Tensor):
                head_mask = head_mask.to(hidden_states.device)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if self.gradient_checkpointing and self.training:
            # Positional call: `None` stands in for layer_past (caching is
            # disabled under checkpointing).
            outputs = self._gradient_checkpointing_func(
                block.__call__,
                hidden_states,
                None,
                causal_mask,
                position_ids,
                head_mask[i],
                use_cache,
                output_attentions,
                cache_position,
            )
        else:
            outputs = block(
                hidden_states=hidden_states,
                layer_past=past_key_values,
                attention_mask=causal_mask,
                position_ids=position_ids,
                head_mask=head_mask[i],
                use_cache=use_cache,
                output_attentions=output_attentions,
                cache_position=cache_position,
            )

        hidden_states = outputs[0]
        if use_cache is True:
            next_decoder_cache = outputs[1]

        if output_attentions:
            # Attention weights sit after the cache slot when use_cache is on.
            all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)

        # Model Parallel: If it's the last layer for that device, put things on the next device
        if self.model_parallel:
            for k, v in self.device_map.items():
                if i == v[-1] and "cuda:" + str(k) != self.last_device:
                    hidden_states = hidden_states.to("cuda:" + str(k + 1))

    hidden_states = self.ln_f(hidden_states)

    hidden_states = hidden_states.view(output_shape)
    # Add last hidden state
    if output_hidden_states:
        all_hidden_states = all_hidden_states + (hidden_states,)

    next_cache = next_decoder_cache if use_cache else None
    if return_legacy_cache:
        # Mirror the caller's legacy tuple-of-tuples format on the way out.
        next_cache = next_cache.to_legacy_cache()

    if not return_dict:
        return tuple(
            v for v in [hidden_states, next_cache, all_hidden_states, all_self_attentions] if v is not None
        )

    return BaseModelOutputWithPast(
        last_hidden_state=hidden_states,
        past_key_values=next_cache,
        hidden_states=all_hidden_states,
        attentions=all_self_attentions,
    )
|
| 883 |
+
|
| 884 |
+
    # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask
    def _update_causal_mask(
        self,
        attention_mask: torch.Tensor,
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool,
    ):
        """Select or build the attention mask appropriate for the active attention backend.

        Returns `None` when the backend can enforce causality on its own (flash-attention
        with no padding, or SDPA's `is_causal` fast path), the caller's mask unchanged for
        flash-attention with padding, and otherwise a 4D additive float mask.
        """
        if self.config._attn_implementation == "flash_attention_2":
            # Flash attention handles causality internally; only forward the mask when it
            # actually contains padding (any zero entry), otherwise drop it entirely.
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_static_cache = isinstance(past_key_values, StaticCache)

        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
        if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                # SDPA can rely on `is_causal` alone; skip materializing a mask.
                return None

        dtype, device = input_tensor.dtype, input_tensor.device
        sequence_length = input_tensor.shape[1]
        if using_static_cache:
            # Static caches are pre-allocated, so the mask must span the whole cache.
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            device=device,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type == "cuda"
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask
|
| 949 |
+
|
| 950 |
+
    @staticmethod
    # Copied from transformers.models.llama.modeling_llama.LlamaModel._prepare_4d_causal_attention_mask_with_cache_position
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        device: torch.device,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            device (`torch.device`):
                The device to place the 4D attention mask on.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            # Start fully masked (most-negative value), then open allowed positions below.
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            # Keep the mask only for key positions strictly after each query's cache position.
            causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                # A position masked by BOTH the causal mask (min_dtype ≈ -inf) and the 2D
                # padding mask (0) would cancel to a non-masked value; re-mask where the sum is 0.
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask
|
| 1006 |
+
|
| 1007 |
+
|
| 1008 |
+
@add_start_docstrings(
    """
    The GPT-J Model transformer with a language modeling head on top.
    """,
    GPTJ_START_DOCSTRING,
)
class GPTJForCausalLM(GPTJPreTrainedModel, GenerationMixin):
    # Keys whose weights may be tied to the input embeddings.
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        """Build the GPT-J backbone plus a vocabulary-sized linear LM head."""
        super().__init__(config)
        self.transformer = GPTJModel(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings(PARALLELIZE_DOCSTRING)
    def parallelize(self, device_map=None):
        """Deprecated: spread the transformer blocks over multiple GPUs."""
        warnings.warn(
            "`GPTJForCausalLM.parallelize` is deprecated and will be removed in v5 of Transformers, you should load"
            " your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
            " `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0':"
            " 0, 'transformer.h.1': 1, ...}",
            FutureWarning,
        )
        # Spread layers evenly over all visible GPUs unless the caller supplied a mapping.
        self.device_map = (
            get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
            if device_map is None
            else device_map
        )
        assert_device_map(self.device_map, len(self.transformer.h))
        self.transformer.parallelize(self.device_map)
        # Keep the LM head on the same device as the transformer's first block.
        self.lm_head = self.lm_head.to(self.transformer.first_device)
        self.model_parallel = True

    @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
    def deparallelize(self):
        """Deprecated: undo `parallelize` and move everything back to CPU."""
        warnings.warn(
            "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
            FutureWarning,
        )
        self.transformer.deparallelize()
        self.transformer = self.transformer.to("cpu")
        self.lm_head = self.lm_head.to("cpu")
        self.model_parallel = False
        # Release the GPU memory left behind by the moved modules.
        torch.cuda.empty_cache()

    def get_output_embeddings(self):
        # The LM head doubles as the output-embedding matrix for weight tying.
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=CausalLMOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
        real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor]]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )
        hidden_states = transformer_outputs[0]

        # Set device for model parallelism
        if self.model_parallel:
            torch.cuda.set_device(self.transformer.first_device)
            hidden_states = hidden_states.to(self.lm_head.weight.device)

        # make sure sampling in fp16 works correctly and
        # compute loss in fp32 to match with mesh-tf version
        # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179
        lm_logits = self.lm_head(hidden_states).to(torch.float32)

        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(lm_logits.device)
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

            # Cast the fp32 loss back to the model's working dtype.
            loss = loss.to(hidden_states.dtype)

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    @staticmethod
    def _reorder_cache(
        past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
    ) -> Tuple[Tuple[torch.Tensor]]:
        """
        This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or
        [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.
        """
        return tuple(
            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
            for layer_past in past_key_values
        )
|
| 1161 |
+
|
| 1162 |
+
|
| 1163 |
+
@add_start_docstrings(
    """
    The GPT-J Model transformer with a sequence classification head on top (linear layer).

    [`GPTJForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT, GPT-2, GPT-Neo) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """,
    GPTJ_START_DOCSTRING,
)
class GPTJForSequenceClassification(GPTJPreTrainedModel):
    def __init__(self, config):
        """Build the GPT-J backbone plus a bias-free linear classification head."""
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = GPTJModel(config)
        self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint="ydshieh/tiny-random-gptj-for-sequence-classification",
        output_type=SequenceClassifierOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
        real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            # No pad token configured: fall back to the last position of each row.
            sequence_lengths = -1
        else:
            if input_ids is not None:
                # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
                sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
                sequence_lengths = sequence_lengths % input_ids.shape[-1]
                sequence_lengths = sequence_lengths.to(logits.device)
            else:
                sequence_lengths = -1
                logger.warning_once(
                    f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                    "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
                )

        # Pool: take the logits at the selected (last non-padding) position of each row.
        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]

        loss = None
        if labels is not None:
            labels = labels.to(pooled_logits.device)
            # Infer the problem type once from label dtype / num_labels and cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
|
| 1296 |
+
|
| 1297 |
+
|
| 1298 |
+
@add_start_docstrings(
    """
    The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like
    SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    GPTJ_START_DOCSTRING,
)
class GPTJForQuestionAnswering(GPTJPreTrainedModel):
    def __init__(self, config):
        """Build the GPT-J backbone plus a linear span-classification head."""
        super().__init__(config)
        self.num_labels = config.num_labels
        # Projects each hidden state to per-token logits, split below into start/end.
        self.transformer = GPTJModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
        real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1).to(start_logits.device)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1).to(end_logits.device)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            # Out-of-range positions were clamped to `ignored_index`, which the loss skips.
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
|
| 1399 |
+
|
| 1400 |
+
|
| 1401 |
+
# Names this module exposes as its public API.
__all__ = [
    "GPTJForCausalLM",
    "GPTJForQuestionAnswering",
    "GPTJForSequenceClassification",
    "GPTJModel",
    "GPTJPreTrainedModel",
]
|
phi4/lib/python3.10/site-packages/transformers/models/gptj/modeling_tf_gptj.py
ADDED
|
@@ -0,0 +1,1107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2022 The EleutherAI and HuggingFace Teams. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""TF 2.0 GPT-J model."""
|
| 16 |
+
|
| 17 |
+
from __future__ import annotations
|
| 18 |
+
|
| 19 |
+
from typing import Optional, Tuple, Union
|
| 20 |
+
|
| 21 |
+
import numpy as np
|
| 22 |
+
import tensorflow as tf
|
| 23 |
+
|
| 24 |
+
from ...activations_tf import get_tf_activation
|
| 25 |
+
from ...file_utils import (
|
| 26 |
+
add_code_sample_docstrings,
|
| 27 |
+
add_start_docstrings,
|
| 28 |
+
add_start_docstrings_to_model_forward,
|
| 29 |
+
)
|
| 30 |
+
from ...modeling_tf_outputs import (
|
| 31 |
+
TFBaseModelOutputWithPast,
|
| 32 |
+
TFCausalLMOutputWithPast,
|
| 33 |
+
TFQuestionAnsweringModelOutput,
|
| 34 |
+
TFSequenceClassifierOutputWithPast,
|
| 35 |
+
)
|
| 36 |
+
from ...modeling_tf_utils import (
|
| 37 |
+
TFCausalLanguageModelingLoss,
|
| 38 |
+
TFModelInputType,
|
| 39 |
+
TFPreTrainedModel,
|
| 40 |
+
TFQuestionAnsweringLoss,
|
| 41 |
+
TFSequenceClassificationLoss,
|
| 42 |
+
TFSharedEmbeddings,
|
| 43 |
+
get_initializer,
|
| 44 |
+
keras,
|
| 45 |
+
keras_serializable,
|
| 46 |
+
unpack_inputs,
|
| 47 |
+
)
|
| 48 |
+
from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
|
| 49 |
+
from ...utils import logging
|
| 50 |
+
from .configuration_gptj import GPTJConfig
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
logger = logging.get_logger(__name__)
|
| 54 |
+
|
| 55 |
+
_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-j-6B"
|
| 56 |
+
_CONFIG_FOR_DOC = "GPTJConfig"
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def create_sinusoidal_positions(num_pos: int, dim: int) -> tf.Tensor:
|
| 60 |
+
inv_freq = tf.cast(1.0 / (10000 ** (tf.range(0, dim, 2) / dim)), tf.float32)
|
| 61 |
+
sinusoid_inp = tf.cast(tf.einsum("i , j -> i j", tf.range(num_pos, dtype=tf.float32), inv_freq), tf.float32)
|
| 62 |
+
sin, cos = tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)
|
| 63 |
+
out = tf.concat((sin, cos), axis=1)
|
| 64 |
+
return out
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def rotate_every_two(x: tf.Tensor) -> tf.Tensor:
|
| 68 |
+
rotate_half_tensor = tf.stack((-x[:, :, :, 1::2], x[:, :, :, ::2]), axis=-1)
|
| 69 |
+
new_shape = shape_list(rotate_half_tensor)[:-2] + [tf.math.reduce_prod(shape_list(rotate_half_tensor)[-2:])]
|
| 70 |
+
rotate_half_tensor = tf.reshape(rotate_half_tensor, new_shape)
|
| 71 |
+
return rotate_half_tensor
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def apply_rotary_pos_emb(tensor: tf.Tensor, sincos: tf.Tensor) -> tf.Tensor:
|
| 75 |
+
sin_pos, cos_pos = sincos
|
| 76 |
+
sin_pos = tf.repeat(sin_pos[:, :, None, :], 2, 3)
|
| 77 |
+
cos_pos = tf.repeat(cos_pos[:, :, None, :], 2, 3)
|
| 78 |
+
return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
class TFGPTJAttention(keras.layers.Layer):
|
| 82 |
+
def __init__(self, config: GPTJConfig, **kwargs):
|
| 83 |
+
super().__init__(**kwargs)
|
| 84 |
+
|
| 85 |
+
self.embed_dim = config.hidden_size
|
| 86 |
+
self.num_attention_heads = config.num_attention_heads
|
| 87 |
+
self.head_dim = self.embed_dim // self.num_attention_heads
|
| 88 |
+
if self.head_dim * self.num_attention_heads != self.embed_dim:
|
| 89 |
+
raise ValueError(
|
| 90 |
+
f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
|
| 91 |
+
f" `num_attention_heads`: {self.num_attention_heads})."
|
| 92 |
+
)
|
| 93 |
+
self.scale_attn = self.head_dim**0.5
|
| 94 |
+
self.rotary_dim = config.rotary_dim
|
| 95 |
+
|
| 96 |
+
self.attn_dropout = keras.layers.Dropout(config.attn_pdrop)
|
| 97 |
+
self.resid_dropout = keras.layers.Dropout(config.resid_pdrop)
|
| 98 |
+
|
| 99 |
+
self.q_proj = keras.layers.Dense(
|
| 100 |
+
self.embed_dim,
|
| 101 |
+
use_bias=False,
|
| 102 |
+
kernel_initializer=get_initializer(config.initializer_range),
|
| 103 |
+
name="q_proj",
|
| 104 |
+
)
|
| 105 |
+
self.k_proj = keras.layers.Dense(
|
| 106 |
+
self.embed_dim,
|
| 107 |
+
use_bias=False,
|
| 108 |
+
kernel_initializer=get_initializer(config.initializer_range),
|
| 109 |
+
name="k_proj",
|
| 110 |
+
)
|
| 111 |
+
self.v_proj = keras.layers.Dense(
|
| 112 |
+
self.embed_dim,
|
| 113 |
+
use_bias=False,
|
| 114 |
+
kernel_initializer=get_initializer(config.initializer_range),
|
| 115 |
+
name="v_proj",
|
| 116 |
+
)
|
| 117 |
+
self.out_proj = keras.layers.Dense(
|
| 118 |
+
self.embed_dim,
|
| 119 |
+
use_bias=False,
|
| 120 |
+
kernel_initializer=get_initializer(config.initializer_range),
|
| 121 |
+
name="out_proj",
|
| 122 |
+
)
|
| 123 |
+
|
| 124 |
+
self.max_positions = config.max_position_embeddings
|
| 125 |
+
self.lower_triangle_mask = tf.reshape(
|
| 126 |
+
tf.cast(tf.experimental.numpy.tril(tf.ones((self.max_positions, self.max_positions))), tf.int8),
|
| 127 |
+
(1, 1, self.max_positions, self.max_positions),
|
| 128 |
+
)
|
| 129 |
+
pos_embd_dim = self.rotary_dim or self.embed_dim
|
| 130 |
+
self.embed_positions = create_sinusoidal_positions(self.max_positions, pos_embd_dim)
|
| 131 |
+
|
| 132 |
+
def get_causal_mask(self, key_length, query_length) -> tf.Tensor:
|
| 133 |
+
return tf.cast(self.lower_triangle_mask[:, :, key_length - query_length : key_length, :key_length], tf.bool)
|
| 134 |
+
|
| 135 |
+
@staticmethod
|
| 136 |
+
def get_masked_bias(dtype: tf.DType) -> tf.Tensor:
|
| 137 |
+
return tf.cast(tf.constant(-1e9), dtype)
|
| 138 |
+
|
| 139 |
+
def _split_heads(self, hidden_states: tf.Tensor, rotary: bool) -> tf.Tensor:
|
| 140 |
+
"""
|
| 141 |
+
Splits hidden dim into attn_head_size and num_attention_heads
|
| 142 |
+
"""
|
| 143 |
+
new_shape = shape_list(hidden_states)[:-1] + [self.num_attention_heads, self.head_dim]
|
| 144 |
+
hidden_states = tf.reshape(hidden_states, new_shape)
|
| 145 |
+
if rotary:
|
| 146 |
+
return hidden_states
|
| 147 |
+
if len(shape_list(hidden_states)) == 4:
|
| 148 |
+
return tf.transpose(hidden_states, (0, 2, 1, 3)) # (batch, head, seq_length, head_features)
|
| 149 |
+
if len(shape_list(hidden_states)) == 5:
|
| 150 |
+
return tf.transpose(hidden_states, (0, 1, 3, 2, 4)) # (batch, blocks, head, block_length, head_features)
|
| 151 |
+
raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(shape_list(hidden_states))}")
|
| 152 |
+
|
| 153 |
+
def _merge_heads(self, hidden_states: tf.Tensor) -> tf.Tensor:
|
| 154 |
+
"""
|
| 155 |
+
Merges attn_head_size dim and num_attn_heads dim into hidden dim
|
| 156 |
+
"""
|
| 157 |
+
if len(shape_list(hidden_states)) == 4:
|
| 158 |
+
hidden_states = tf.transpose(hidden_states, (0, 2, 1, 3))
|
| 159 |
+
elif len(shape_list(hidden_states)) == 5:
|
| 160 |
+
hidden_states = tf.transpose(hidden_states, (0, 1, 3, 2, 4))
|
| 161 |
+
else:
|
| 162 |
+
raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(shape_list(hidden_states))}")
|
| 163 |
+
new_shape = shape_list(hidden_states)[:-2] + [self.num_attention_heads * self.head_dim]
|
| 164 |
+
return tf.reshape(hidden_states, new_shape)
|
| 165 |
+
|
| 166 |
+
def _attn(
|
| 167 |
+
self,
|
| 168 |
+
query: tf.Tensor,
|
| 169 |
+
key: tf.Tensor,
|
| 170 |
+
value: tf.Tensor,
|
| 171 |
+
attention_mask: tf.Tensor | None = None,
|
| 172 |
+
head_mask: tf.Tensor | None = None,
|
| 173 |
+
) -> Tuple[tf.Tensor, tf.Tensor]:
|
| 174 |
+
# compute causal mask from causal mask buffer
|
| 175 |
+
query_length, key_length = shape_list(query)[-2], shape_list(key)[-2]
|
| 176 |
+
causal_mask = self.get_causal_mask(key_length, query_length)
|
| 177 |
+
|
| 178 |
+
# Keep the attention weights computation in fp32 to avoid overflow issues
|
| 179 |
+
query = tf.cast(query, tf.float32)
|
| 180 |
+
key = tf.cast(key, tf.float32)
|
| 181 |
+
|
| 182 |
+
attn_weights = tf.matmul(query, key, transpose_b=True)
|
| 183 |
+
attn_weights = tf.where(causal_mask, attn_weights, self.get_masked_bias(attn_weights.dtype))
|
| 184 |
+
|
| 185 |
+
attn_weights = attn_weights / self.scale_attn
|
| 186 |
+
|
| 187 |
+
if attention_mask is not None:
|
| 188 |
+
# Apply the attention mask
|
| 189 |
+
attn_weights = attn_weights + attention_mask
|
| 190 |
+
|
| 191 |
+
attn_weights = stable_softmax(attn_weights, axis=-1)
|
| 192 |
+
attn_weights = tf.cast(attn_weights, value.dtype)
|
| 193 |
+
attn_weights = self.attn_dropout(attn_weights)
|
| 194 |
+
|
| 195 |
+
# Mask heads if we want to
|
| 196 |
+
if head_mask is not None:
|
| 197 |
+
attn_weights = attn_weights * head_mask
|
| 198 |
+
|
| 199 |
+
attn_output = tf.matmul(attn_weights, value)
|
| 200 |
+
|
| 201 |
+
return attn_output, attn_weights
|
| 202 |
+
|
| 203 |
+
def call(
|
| 204 |
+
self,
|
| 205 |
+
hidden_states: tf.Tensor,
|
| 206 |
+
layer_past: Optional[Tuple[tf.Tensor, tf.Tensor]] = None,
|
| 207 |
+
attention_mask: tf.Tensor | None = None,
|
| 208 |
+
position_ids: tf.Tensor | None = None,
|
| 209 |
+
head_mask: tf.Tensor | None = None,
|
| 210 |
+
use_cache: bool = False,
|
| 211 |
+
output_attentions: bool = False,
|
| 212 |
+
):
|
| 213 |
+
query = self.q_proj(hidden_states)
|
| 214 |
+
key = self.k_proj(hidden_states)
|
| 215 |
+
value = self.v_proj(hidden_states)
|
| 216 |
+
|
| 217 |
+
query = self._split_heads(query, True)
|
| 218 |
+
key = self._split_heads(key, True)
|
| 219 |
+
value = self._split_heads(value, False)
|
| 220 |
+
|
| 221 |
+
sincos = tf.cast(tf.gather(self.embed_positions, position_ids, axis=0), hidden_states.dtype)
|
| 222 |
+
sincos = tf.split(sincos, 2, axis=-1)
|
| 223 |
+
if self.rotary_dim is not None:
|
| 224 |
+
k_rot = key[:, :, :, : self.rotary_dim]
|
| 225 |
+
k_pass = key[:, :, :, self.rotary_dim :]
|
| 226 |
+
|
| 227 |
+
q_rot = query[:, :, :, : self.rotary_dim]
|
| 228 |
+
q_pass = query[:, :, :, self.rotary_dim :]
|
| 229 |
+
|
| 230 |
+
k_rot = apply_rotary_pos_emb(k_rot, sincos)
|
| 231 |
+
q_rot = apply_rotary_pos_emb(q_rot, sincos)
|
| 232 |
+
|
| 233 |
+
key = tf.concat((k_rot, k_pass), axis=-1)
|
| 234 |
+
query = tf.concat((q_rot, q_pass), axis=-1)
|
| 235 |
+
else:
|
| 236 |
+
key = apply_rotary_pos_emb(key, sincos)
|
| 237 |
+
query = apply_rotary_pos_emb(query, sincos)
|
| 238 |
+
|
| 239 |
+
key = tf.transpose(key, (0, 2, 1, 3))
|
| 240 |
+
query = tf.transpose(query, (0, 2, 1, 3))
|
| 241 |
+
|
| 242 |
+
if layer_past is not None:
|
| 243 |
+
past_key = layer_past[0]
|
| 244 |
+
past_value = layer_past[1]
|
| 245 |
+
key = tf.concat((past_key, key), axis=-2)
|
| 246 |
+
value = tf.concat((past_value, value), axis=-2)
|
| 247 |
+
|
| 248 |
+
if use_cache is True:
|
| 249 |
+
present = (key, value)
|
| 250 |
+
else:
|
| 251 |
+
present = None
|
| 252 |
+
|
| 253 |
+
# compute self-attention: V x Softmax(QK^T)
|
| 254 |
+
attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
|
| 255 |
+
|
| 256 |
+
attn_output = self._merge_heads(attn_output)
|
| 257 |
+
attn_output = self.out_proj(attn_output)
|
| 258 |
+
attn_output = self.resid_dropout(attn_output)
|
| 259 |
+
|
| 260 |
+
outputs = (attn_output, present)
|
| 261 |
+
if output_attentions:
|
| 262 |
+
outputs += (attn_weights,)
|
| 263 |
+
|
| 264 |
+
return outputs # a, present, (attentions)
|
| 265 |
+
|
| 266 |
+
def build(self, input_shape=None):
|
| 267 |
+
if self.built:
|
| 268 |
+
return
|
| 269 |
+
self.built = True
|
| 270 |
+
if getattr(self, "q_proj", None) is not None:
|
| 271 |
+
with tf.name_scope(self.q_proj.name):
|
| 272 |
+
self.q_proj.build([None, None, self.embed_dim])
|
| 273 |
+
if getattr(self, "k_proj", None) is not None:
|
| 274 |
+
with tf.name_scope(self.k_proj.name):
|
| 275 |
+
self.k_proj.build([None, None, self.embed_dim])
|
| 276 |
+
if getattr(self, "v_proj", None) is not None:
|
| 277 |
+
with tf.name_scope(self.v_proj.name):
|
| 278 |
+
self.v_proj.build([None, None, self.embed_dim])
|
| 279 |
+
if getattr(self, "out_proj", None) is not None:
|
| 280 |
+
with tf.name_scope(self.out_proj.name):
|
| 281 |
+
self.out_proj.build([None, None, self.embed_dim])
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
class TFGPTJMLP(keras.layers.Layer):
|
| 285 |
+
def __init__(self, intermediate_size: int, config: GPTJConfig, **kwargs):
|
| 286 |
+
super().__init__(**kwargs)
|
| 287 |
+
embed_dim = config.n_embd
|
| 288 |
+
|
| 289 |
+
self.fc_in = keras.layers.Dense(
|
| 290 |
+
intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="fc_in"
|
| 291 |
+
)
|
| 292 |
+
self.fc_out = keras.layers.Dense(
|
| 293 |
+
embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="fc_out"
|
| 294 |
+
)
|
| 295 |
+
|
| 296 |
+
self.act = get_tf_activation(config.activation_function)
|
| 297 |
+
self.dropout = keras.layers.Dropout(config.embd_pdrop)
|
| 298 |
+
self.embed_dim = config.n_embd
|
| 299 |
+
self.intermediate_size = intermediate_size
|
| 300 |
+
|
| 301 |
+
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
|
| 302 |
+
hidden_states = self.fc_in(hidden_states)
|
| 303 |
+
hidden_states = self.act(hidden_states)
|
| 304 |
+
hidden_states = self.fc_out(hidden_states)
|
| 305 |
+
hidden_states = self.dropout(hidden_states)
|
| 306 |
+
return hidden_states
|
| 307 |
+
|
| 308 |
+
def build(self, input_shape=None):
|
| 309 |
+
if self.built:
|
| 310 |
+
return
|
| 311 |
+
self.built = True
|
| 312 |
+
if getattr(self, "fc_in", None) is not None:
|
| 313 |
+
with tf.name_scope(self.fc_in.name):
|
| 314 |
+
self.fc_in.build([None, None, self.embed_dim])
|
| 315 |
+
if getattr(self, "fc_out", None) is not None:
|
| 316 |
+
with tf.name_scope(self.fc_out.name):
|
| 317 |
+
self.fc_out.build([None, None, self.intermediate_size])
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
class TFGPTJBlock(keras.layers.Layer):
|
| 321 |
+
def __init__(self, config: GPTJConfig, **kwargs):
|
| 322 |
+
super().__init__(**kwargs)
|
| 323 |
+
inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
|
| 324 |
+
self.ln_1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_1")
|
| 325 |
+
self.attn = TFGPTJAttention(config, name="attn")
|
| 326 |
+
self.mlp = TFGPTJMLP(inner_dim, config, name="mlp")
|
| 327 |
+
self.config = config
|
| 328 |
+
|
| 329 |
+
def call(
|
| 330 |
+
self,
|
| 331 |
+
hidden_states: tf.Tensor,
|
| 332 |
+
layer_past: tf.Tensor | None = None,
|
| 333 |
+
attention_mask: tf.Tensor | None = None,
|
| 334 |
+
position_ids: tf.Tensor | None = None,
|
| 335 |
+
head_mask: tf.Tensor | None = None,
|
| 336 |
+
use_cache: bool = False,
|
| 337 |
+
output_attentions: bool = False,
|
| 338 |
+
):
|
| 339 |
+
residual = hidden_states
|
| 340 |
+
hidden_states = self.ln_1(hidden_states)
|
| 341 |
+
attn_outputs = self.attn(
|
| 342 |
+
hidden_states=hidden_states,
|
| 343 |
+
layer_past=layer_past,
|
| 344 |
+
attention_mask=attention_mask,
|
| 345 |
+
position_ids=position_ids,
|
| 346 |
+
head_mask=head_mask,
|
| 347 |
+
use_cache=use_cache,
|
| 348 |
+
output_attentions=output_attentions,
|
| 349 |
+
) # attn_outputs: attn_output, present, (attentions)
|
| 350 |
+
attn_output = attn_outputs[0]
|
| 351 |
+
outputs = attn_outputs[1:]
|
| 352 |
+
|
| 353 |
+
feed_forward_hidden_states = self.mlp(hidden_states)
|
| 354 |
+
hidden_states = attn_output + feed_forward_hidden_states + residual
|
| 355 |
+
|
| 356 |
+
if use_cache:
|
| 357 |
+
outputs = (hidden_states,) + outputs
|
| 358 |
+
else:
|
| 359 |
+
outputs = (hidden_states,) + outputs[1:]
|
| 360 |
+
return outputs # hidden_states, present, (attentions)
|
| 361 |
+
|
| 362 |
+
def build(self, input_shape=None):
|
| 363 |
+
if self.built:
|
| 364 |
+
return
|
| 365 |
+
self.built = True
|
| 366 |
+
if getattr(self, "ln_1", None) is not None:
|
| 367 |
+
with tf.name_scope(self.ln_1.name):
|
| 368 |
+
self.ln_1.build([None, None, self.config.n_embd])
|
| 369 |
+
if getattr(self, "attn", None) is not None:
|
| 370 |
+
with tf.name_scope(self.attn.name):
|
| 371 |
+
self.attn.build(None)
|
| 372 |
+
if getattr(self, "mlp", None) is not None:
|
| 373 |
+
with tf.name_scope(self.mlp.name):
|
| 374 |
+
self.mlp.build(None)
|
| 375 |
+
|
| 376 |
+
|
| 377 |
+
@keras_serializable
|
| 378 |
+
class TFGPTJMainLayer(keras.layers.Layer):
|
| 379 |
+
config_class = GPTJConfig
|
| 380 |
+
|
| 381 |
+
def __init__(self, config: GPTJConfig, *inputs, **kwargs):
|
| 382 |
+
super().__init__(*inputs, **kwargs)
|
| 383 |
+
|
| 384 |
+
self.config = config
|
| 385 |
+
self.output_attentions = config.output_attentions
|
| 386 |
+
self.output_hidden_states = config.output_hidden_states
|
| 387 |
+
self.use_cache = config.use_cache
|
| 388 |
+
self.return_dict = config.use_return_dict
|
| 389 |
+
|
| 390 |
+
self.num_hidden_layers = config.n_layer
|
| 391 |
+
self.n_embd = config.n_embd
|
| 392 |
+
self.n_positions = config.n_positions
|
| 393 |
+
self.initializer_range = config.initializer_range
|
| 394 |
+
|
| 395 |
+
self.wte = TFSharedEmbeddings(
|
| 396 |
+
config.vocab_size, config.hidden_size, initializer_range=config.initializer_range, name="wte"
|
| 397 |
+
)
|
| 398 |
+
self.drop = keras.layers.Dropout(config.embd_pdrop)
|
| 399 |
+
self.h = [TFGPTJBlock(config, name=f"h_._{i}") for i in range(config.n_layer)]
|
| 400 |
+
self.ln_f = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_f")
|
| 401 |
+
self.embed_dim = config.n_embd
|
| 402 |
+
|
| 403 |
+
def get_input_embeddings(self):
|
| 404 |
+
return self.wte
|
| 405 |
+
|
| 406 |
+
def set_input_embeddings(self, value: tf.Tensor):
|
| 407 |
+
self.wte.weight = value
|
| 408 |
+
self.wte.vocab_size = shape_list(value)[0]
|
| 409 |
+
|
| 410 |
+
def _prune_heads(self, heads_to_prune):
|
| 411 |
+
"""
|
| 412 |
+
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
|
| 413 |
+
"""
|
| 414 |
+
raise NotImplementedError
|
| 415 |
+
|
| 416 |
+
@unpack_inputs
|
| 417 |
+
def call(
|
| 418 |
+
self,
|
| 419 |
+
input_ids=None,
|
| 420 |
+
past_key_values=None,
|
| 421 |
+
attention_mask=None,
|
| 422 |
+
token_type_ids=None,
|
| 423 |
+
position_ids=None,
|
| 424 |
+
head_mask=None,
|
| 425 |
+
inputs_embeds=None,
|
| 426 |
+
use_cache=None,
|
| 427 |
+
output_attentions=None,
|
| 428 |
+
output_hidden_states=None,
|
| 429 |
+
return_dict=None,
|
| 430 |
+
training=False,
|
| 431 |
+
) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:
|
| 432 |
+
if input_ids is not None and inputs_embeds is not None:
|
| 433 |
+
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
|
| 434 |
+
elif input_ids is not None:
|
| 435 |
+
input_shape = shape_list(input_ids)
|
| 436 |
+
input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
|
| 437 |
+
elif inputs_embeds is not None:
|
| 438 |
+
input_shape = shape_list(inputs_embeds)[:-1]
|
| 439 |
+
else:
|
| 440 |
+
raise ValueError("You have to specify either input_ids or inputs_embeds")
|
| 441 |
+
|
| 442 |
+
if past_key_values is None:
|
| 443 |
+
past_length = 0
|
| 444 |
+
past_key_values = [None] * len(self.h)
|
| 445 |
+
else:
|
| 446 |
+
past_length = shape_list(past_key_values[0][0])[-2]
|
| 447 |
+
|
| 448 |
+
if position_ids is None:
|
| 449 |
+
position_ids = tf.expand_dims(tf.range(past_length, input_shape[-1] + past_length), axis=0)
|
| 450 |
+
|
| 451 |
+
if attention_mask is not None:
|
| 452 |
+
# We create a 3D attention mask from a 2D tensor mask.
|
| 453 |
+
# Sizes are [batch_size, 1, 1, to_seq_length]
|
| 454 |
+
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
|
| 455 |
+
# this attention mask is more simple than the triangular masking of causal attention
|
| 456 |
+
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
|
| 457 |
+
attention_mask_shape = shape_list(attention_mask)
|
| 458 |
+
attention_mask = tf.reshape(attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1]))
|
| 459 |
+
|
| 460 |
+
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
|
| 461 |
+
# masked positions, this operation will create a tensor which is 0.0 for
|
| 462 |
+
# positions we want to attend and -10000.0 for masked positions.
|
| 463 |
+
# Since we are adding it to the raw scores before the softmax, this is
|
| 464 |
+
# effectively the same as removing these entirely.
|
| 465 |
+
one_cst = tf.constant(1.0)
|
| 466 |
+
attention_mask = tf.cast(attention_mask, dtype=one_cst.dtype)
|
| 467 |
+
attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), tf.constant(-10000.0))
|
| 468 |
+
|
| 469 |
+
# Prepare head mask if needed
|
| 470 |
+
# 1.0 in head_mask indicate we keep the head
|
| 471 |
+
# attention_probs has shape bsz x n_heads x N x N
|
| 472 |
+
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
|
| 473 |
+
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
|
| 474 |
+
if head_mask is not None:
|
| 475 |
+
raise NotImplementedError
|
| 476 |
+
else:
|
| 477 |
+
head_mask = [None] * self.num_hidden_layers
|
| 478 |
+
# head_mask = tf.constant([0] * self.num_hidden_layers)
|
| 479 |
+
|
| 480 |
+
position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
|
| 481 |
+
|
| 482 |
+
if inputs_embeds is None:
|
| 483 |
+
check_embeddings_within_bounds(input_ids, self.wte.vocab_size)
|
| 484 |
+
inputs_embeds = self.wte(input_ids, mode="embedding")
|
| 485 |
+
|
| 486 |
+
if token_type_ids is not None:
|
| 487 |
+
token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
|
| 488 |
+
token_type_embeds = self.wte(token_type_ids, mode="embedding")
|
| 489 |
+
else:
|
| 490 |
+
token_type_embeds = tf.constant(0.0)
|
| 491 |
+
|
| 492 |
+
token_type_embeds = tf.cast(token_type_embeds, dtype=inputs_embeds.dtype)
|
| 493 |
+
hidden_states = inputs_embeds + token_type_embeds
|
| 494 |
+
hidden_states = self.drop(hidden_states, training=training)
|
| 495 |
+
|
| 496 |
+
output_shape = input_shape + [shape_list(hidden_states)[-1]]
|
| 497 |
+
|
| 498 |
+
presents = () if use_cache else None
|
| 499 |
+
all_attentions = () if output_attentions else None
|
| 500 |
+
all_hidden_states = () if output_hidden_states else None
|
| 501 |
+
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
|
| 502 |
+
if output_hidden_states:
|
| 503 |
+
all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
|
| 504 |
+
|
| 505 |
+
outputs = block(
|
| 506 |
+
hidden_states=hidden_states,
|
| 507 |
+
layer_past=layer_past,
|
| 508 |
+
attention_mask=attention_mask,
|
| 509 |
+
position_ids=position_ids,
|
| 510 |
+
head_mask=head_mask[i],
|
| 511 |
+
use_cache=use_cache,
|
| 512 |
+
output_attentions=output_attentions,
|
| 513 |
+
training=training,
|
| 514 |
+
)
|
| 515 |
+
|
| 516 |
+
hidden_states = outputs[0]
|
| 517 |
+
if use_cache:
|
| 518 |
+
presents = presents + (outputs[1],)
|
| 519 |
+
|
| 520 |
+
if output_attentions:
|
| 521 |
+
all_attentions = all_attentions + (outputs[2 if use_cache else 1],)
|
| 522 |
+
|
| 523 |
+
hidden_states = self.ln_f(hidden_states)
|
| 524 |
+
|
| 525 |
+
hidden_states = tf.reshape(hidden_states, output_shape)
|
| 526 |
+
# Add last hidden state
|
| 527 |
+
if output_hidden_states:
|
| 528 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
| 529 |
+
|
| 530 |
+
if output_attentions:
|
| 531 |
+
# let the number of heads free (-1) so we can extract attention even after head pruning
|
| 532 |
+
attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
|
| 533 |
+
all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
|
| 534 |
+
|
| 535 |
+
if not return_dict:
|
| 536 |
+
return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)
|
| 537 |
+
|
| 538 |
+
return TFBaseModelOutputWithPast(
|
| 539 |
+
last_hidden_state=hidden_states,
|
| 540 |
+
past_key_values=presents,
|
| 541 |
+
hidden_states=all_hidden_states,
|
| 542 |
+
attentions=all_attentions,
|
| 543 |
+
)
|
| 544 |
+
|
| 545 |
+
def build(self, input_shape=None):
|
| 546 |
+
if self.built:
|
| 547 |
+
return
|
| 548 |
+
self.built = True
|
| 549 |
+
if getattr(self, "wte", None) is not None:
|
| 550 |
+
with tf.name_scope(self.wte.name):
|
| 551 |
+
self.wte.build(None)
|
| 552 |
+
if getattr(self, "ln_f", None) is not None:
|
| 553 |
+
with tf.name_scope(self.ln_f.name):
|
| 554 |
+
self.ln_f.build([None, None, self.embed_dim])
|
| 555 |
+
if getattr(self, "h", None) is not None:
|
| 556 |
+
for layer in self.h:
|
| 557 |
+
with tf.name_scope(layer.name):
|
| 558 |
+
layer.build(None)
|
| 559 |
+
|
| 560 |
+
|
| 561 |
+
class TFGPTJPreTrainedModel(TFPreTrainedModel):
|
| 562 |
+
"""
|
| 563 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
| 564 |
+
models.
|
| 565 |
+
"""
|
| 566 |
+
|
| 567 |
+
config_class = GPTJConfig
|
| 568 |
+
base_model_prefix = "transformer"
|
| 569 |
+
# names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
|
| 570 |
+
_keys_to_ignore_on_load_unexpected = [r"h.\d+.attn.bias"]
|
| 571 |
+
|
| 572 |
+
|
| 573 |
+
GPTJ_START_DOCSTRING = r"""
|
| 574 |
+
|
| 575 |
+
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
|
| 576 |
+
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
|
| 577 |
+
etc.)
|
| 578 |
+
|
| 579 |
+
This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
|
| 580 |
+
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
|
| 581 |
+
behavior.
|
| 582 |
+
|
| 583 |
+
<Tip>
|
| 584 |
+
|
| 585 |
+
TensorFlow models and layers in `transformers` accept two formats as input:
|
| 586 |
+
|
| 587 |
+
- having all inputs as keyword arguments (like PyTorch models), or
|
| 588 |
+
- having all inputs as a list, tuple or dict in the first positional argument.
|
| 589 |
+
|
| 590 |
+
The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
|
| 591 |
+
and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
|
| 592 |
+
pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
|
| 593 |
+
format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
|
| 594 |
+
the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
|
| 595 |
+
positional argument:
|
| 596 |
+
|
| 597 |
+
- a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
|
| 598 |
+
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
|
| 599 |
+
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
|
| 600 |
+
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
|
| 601 |
+
`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
|
| 602 |
+
|
| 603 |
+
Note that when creating models and layers with
|
| 604 |
+
[subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
|
| 605 |
+
about any of this, as you can just pass inputs like you would to any other Python function!
|
| 606 |
+
|
| 607 |
+
</Tip>
|
| 608 |
+
|
| 609 |
+
Parameters:
|
| 610 |
+
config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.
|
| 611 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
| 612 |
+
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
|
| 613 |
+
"""
|
| 614 |
+
|
| 615 |
+
GPTJ_INPUTS_DOCSTRING = r"""
|
| 616 |
+
Args:
|
| 617 |
+
input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`):
|
| 618 |
+
`input_ids_length` = `sequence_length` if `past` is `None` else `past[0].shape[-2]` (`sequence_length` of
|
| 619 |
+
input past key value states). Indices of input sequence tokens in the vocabulary.
|
| 620 |
+
|
| 621 |
+
If `past` is used, only input IDs that do not have their past calculated should be passed as `input_ids`.
|
| 622 |
+
|
| 623 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
|
| 624 |
+
[`PreTrainedTokenizer.encode`] for details.
|
| 625 |
+
|
| 626 |
+
[What are input IDs?](../glossary#input-ids)
|
| 627 |
+
past_key_values (`List[tf.Tensor]` of length `config.n_layers`):
|
| 628 |
+
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
|
| 629 |
+
`past` output below). Can be used to speed up sequential decoding. The token ids which have their past
|
| 630 |
+
given to this model should not be passed as input ids as they have already been computed.
|
| 631 |
+
attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
|
| 632 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
| 633 |
+
|
| 634 |
+
- 1 for tokens that are **not masked**,
|
| 635 |
+
- 0 for tokens that are **masked**.
|
| 636 |
+
|
| 637 |
+
[What are attention masks?](../glossary#attention-mask)
|
| 638 |
+
token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
|
| 639 |
+
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
|
| 640 |
+
1]`:
|
| 641 |
+
|
| 642 |
+
- 0 corresponds to a *sentence A* token,
|
| 643 |
+
- 1 corresponds to a *sentence B* token.
|
| 644 |
+
|
| 645 |
+
[What are token type IDs?](../glossary#token-type-ids)
|
| 646 |
+
position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
|
| 647 |
+
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
|
| 648 |
+
config.max_position_embeddings - 1]`.
|
| 649 |
+
|
| 650 |
+
[What are position IDs?](../glossary#position-ids)
|
| 651 |
+
head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
|
| 652 |
+
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
|
| 653 |
+
|
| 654 |
+
- 1 indicates the head is **not masked**,
|
| 655 |
+
- 0 indicates the head is **masked**.
|
| 656 |
+
|
| 657 |
+
inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
| 658 |
+
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
|
| 659 |
+
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
|
| 660 |
+
model's internal embedding lookup matrix.
|
| 661 |
+
output_attentions (`bool`, *optional*):
|
| 662 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 663 |
+
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
|
| 664 |
+
config will be used instead.
|
| 665 |
+
output_hidden_states (`bool`, *optional*):
|
| 666 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 667 |
+
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
|
| 668 |
+
used instead.
|
| 669 |
+
return_dict (`bool`, *optional*):
|
| 670 |
+
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
|
| 671 |
+
in eager mode, in graph mode the value will always be set to True.
|
| 672 |
+
training (`bool`, *optional*, defaults to `False`):
|
| 673 |
+
Whether or not to use the model in training mode (some modules like dropout modules have different
|
| 674 |
+
behaviors between training and evaluation).
|
| 675 |
+
"""
|
| 676 |
+
|
| 677 |
+
|
| 678 |
+
@add_start_docstrings(
|
| 679 |
+
"The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top.",
|
| 680 |
+
GPTJ_START_DOCSTRING,
|
| 681 |
+
)
|
| 682 |
+
class TFGPTJModel(TFGPTJPreTrainedModel):
|
| 683 |
+
def __init__(self, config, *inputs, **kwargs):
|
| 684 |
+
super().__init__(config, *inputs, **kwargs)
|
| 685 |
+
self.transformer = TFGPTJMainLayer(config, name="transformer")
|
| 686 |
+
|
| 687 |
+
@unpack_inputs
|
| 688 |
+
@add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING)
|
| 689 |
+
@add_code_sample_docstrings(
|
| 690 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 691 |
+
output_type=TFBaseModelOutputWithPast,
|
| 692 |
+
config_class=_CONFIG_FOR_DOC,
|
| 693 |
+
)
|
| 694 |
+
def call(
|
| 695 |
+
self,
|
| 696 |
+
input_ids: TFModelInputType | None = None,
|
| 697 |
+
past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
|
| 698 |
+
attention_mask: np.ndarray | tf.Tensor | None = None,
|
| 699 |
+
token_type_ids: np.ndarray | tf.Tensor | None = None,
|
| 700 |
+
position_ids: np.ndarray | tf.Tensor | None = None,
|
| 701 |
+
head_mask: np.ndarray | tf.Tensor | None = None,
|
| 702 |
+
inputs_embeds: np.ndarray | tf.Tensor | None = None,
|
| 703 |
+
use_cache: Optional[bool] = None,
|
| 704 |
+
output_attentions: Optional[bool] = None,
|
| 705 |
+
output_hidden_states: Optional[bool] = None,
|
| 706 |
+
return_dict: Optional[bool] = None,
|
| 707 |
+
training: Optional[bool] = False,
|
| 708 |
+
) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:
|
| 709 |
+
r"""
|
| 710 |
+
use_cache (`bool`, *optional*, defaults to `True`):
|
| 711 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
|
| 712 |
+
`past`). Set to `False` during training, `True` during generation
|
| 713 |
+
"""
|
| 714 |
+
|
| 715 |
+
outputs = self.transformer(
|
| 716 |
+
input_ids=input_ids,
|
| 717 |
+
past_key_values=past_key_values,
|
| 718 |
+
attention_mask=attention_mask,
|
| 719 |
+
token_type_ids=token_type_ids,
|
| 720 |
+
position_ids=position_ids,
|
| 721 |
+
head_mask=head_mask,
|
| 722 |
+
inputs_embeds=inputs_embeds,
|
| 723 |
+
use_cache=use_cache,
|
| 724 |
+
output_attentions=output_attentions,
|
| 725 |
+
output_hidden_states=output_hidden_states,
|
| 726 |
+
return_dict=return_dict,
|
| 727 |
+
training=training,
|
| 728 |
+
)
|
| 729 |
+
|
| 730 |
+
return outputs
|
| 731 |
+
|
| 732 |
+
def build(self, input_shape=None):
|
| 733 |
+
if self.built:
|
| 734 |
+
return
|
| 735 |
+
self.built = True
|
| 736 |
+
if getattr(self, "transformer", None) is not None:
|
| 737 |
+
with tf.name_scope(self.transformer.name):
|
| 738 |
+
self.transformer.build(None)
|
| 739 |
+
|
| 740 |
+
|
| 741 |
+
@add_start_docstrings(
|
| 742 |
+
"""
|
| 743 |
+
The GPT-J Model transformer with a language modeling head on top.
|
| 744 |
+
""",
|
| 745 |
+
GPTJ_START_DOCSTRING,
|
| 746 |
+
)
|
| 747 |
+
class TFGPTJForCausalLM(TFGPTJPreTrainedModel, TFCausalLanguageModelingLoss):
|
| 748 |
+
def __init__(self, config, *inputs, **kwargs):
|
| 749 |
+
super().__init__(config, *inputs, **kwargs)
|
| 750 |
+
self.transformer = TFGPTJMainLayer(config, name="transformer")
|
| 751 |
+
self.lm_head = keras.layers.Dense(
|
| 752 |
+
config.vocab_size, kernel_initializer=get_initializer(config.initializer_range), name="lm_head"
|
| 753 |
+
)
|
| 754 |
+
self.config = config
|
| 755 |
+
|
| 756 |
+
def get_output_embeddings(self):
|
| 757 |
+
return self.lm_head
|
| 758 |
+
|
| 759 |
+
def set_output_embeddings(self, new_embeddings):
|
| 760 |
+
self.lm_head = new_embeddings
|
| 761 |
+
|
| 762 |
+
def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs):
|
| 763 |
+
token_type_ids = kwargs.get("token_type_ids", None)
|
| 764 |
+
# only last token for inputs_ids if past is defined in kwargs
|
| 765 |
+
if past_key_values:
|
| 766 |
+
inputs = tf.expand_dims(inputs[:, -1], -1)
|
| 767 |
+
if token_type_ids is not None:
|
| 768 |
+
token_type_ids = tf.expand_dims(token_type_ids[:, -1], -1)
|
| 769 |
+
|
| 770 |
+
position_ids = kwargs.get("position_ids", None)
|
| 771 |
+
attention_mask = kwargs.get("attention_mask", None)
|
| 772 |
+
|
| 773 |
+
if attention_mask is not None and position_ids is None:
|
| 774 |
+
position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True)
|
| 775 |
+
if past_key_values:
|
| 776 |
+
position_ids = tf.expand_dims(position_ids[:, -1], -1)
|
| 777 |
+
|
| 778 |
+
return {
|
| 779 |
+
"input_ids": inputs,
|
| 780 |
+
"attention_mask": attention_mask,
|
| 781 |
+
"position_ids": position_ids,
|
| 782 |
+
"past_key_values": past_key_values,
|
| 783 |
+
"use_cache": use_cache,
|
| 784 |
+
"token_type_ids": token_type_ids,
|
| 785 |
+
}
|
| 786 |
+
|
| 787 |
+
@unpack_inputs
|
| 788 |
+
@add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
|
| 789 |
+
@add_code_sample_docstrings(
|
| 790 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 791 |
+
output_type=TFCausalLMOutputWithPast,
|
| 792 |
+
config_class=_CONFIG_FOR_DOC,
|
| 793 |
+
)
|
| 794 |
+
def call(
|
| 795 |
+
self,
|
| 796 |
+
input_ids: TFModelInputType | None = None,
|
| 797 |
+
past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
|
| 798 |
+
attention_mask: np.ndarray | tf.Tensor | None = None,
|
| 799 |
+
token_type_ids: np.ndarray | tf.Tensor | None = None,
|
| 800 |
+
position_ids: np.ndarray | tf.Tensor | None = None,
|
| 801 |
+
head_mask: np.ndarray | tf.Tensor | None = None,
|
| 802 |
+
inputs_embeds: np.ndarray | tf.Tensor | None = None,
|
| 803 |
+
labels: np.ndarray | tf.Tensor | None = None,
|
| 804 |
+
use_cache: Optional[bool] = None,
|
| 805 |
+
output_attentions: Optional[bool] = None,
|
| 806 |
+
output_hidden_states: Optional[bool] = None,
|
| 807 |
+
return_dict: Optional[bool] = None,
|
| 808 |
+
training: Optional[bool] = False,
|
| 809 |
+
) -> Union[TFCausalLMOutputWithPast, Tuple[tf.Tensor]]:
|
| 810 |
+
r"""
|
| 811 |
+
labels (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 812 |
+
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
|
| 813 |
+
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
|
| 814 |
+
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
|
| 815 |
+
"""
|
| 816 |
+
|
| 817 |
+
transformer_outputs = self.transformer(
|
| 818 |
+
input_ids=input_ids,
|
| 819 |
+
past_key_values=past_key_values,
|
| 820 |
+
attention_mask=attention_mask,
|
| 821 |
+
token_type_ids=token_type_ids,
|
| 822 |
+
position_ids=position_ids,
|
| 823 |
+
head_mask=head_mask,
|
| 824 |
+
inputs_embeds=inputs_embeds,
|
| 825 |
+
use_cache=use_cache,
|
| 826 |
+
output_attentions=output_attentions,
|
| 827 |
+
output_hidden_states=output_hidden_states,
|
| 828 |
+
return_dict=return_dict,
|
| 829 |
+
training=training,
|
| 830 |
+
)
|
| 831 |
+
hidden_states = transformer_outputs[0]
|
| 832 |
+
lm_logits = self.lm_head(hidden_states)
|
| 833 |
+
|
| 834 |
+
loss = None
|
| 835 |
+
if labels is not None:
|
| 836 |
+
# shift labels to the left and cut last logit token
|
| 837 |
+
shifted_logits = lm_logits[:, :-1]
|
| 838 |
+
labels = labels[:, 1:]
|
| 839 |
+
loss = self.hf_compute_loss(labels, shifted_logits)
|
| 840 |
+
|
| 841 |
+
if not return_dict:
|
| 842 |
+
output = (lm_logits,) + transformer_outputs[1:]
|
| 843 |
+
return ((loss,) + output) if loss is not None else output
|
| 844 |
+
|
| 845 |
+
return TFCausalLMOutputWithPast(
|
| 846 |
+
loss=loss,
|
| 847 |
+
logits=lm_logits,
|
| 848 |
+
past_key_values=transformer_outputs.past_key_values,
|
| 849 |
+
hidden_states=transformer_outputs.hidden_states,
|
| 850 |
+
attentions=transformer_outputs.attentions,
|
| 851 |
+
)
|
| 852 |
+
|
| 853 |
+
def build(self, input_shape=None):
|
| 854 |
+
if self.built:
|
| 855 |
+
return
|
| 856 |
+
self.built = True
|
| 857 |
+
if getattr(self, "transformer", None) is not None:
|
| 858 |
+
with tf.name_scope(self.transformer.name):
|
| 859 |
+
self.transformer.build(None)
|
| 860 |
+
if getattr(self, "lm_head", None) is not None:
|
| 861 |
+
with tf.name_scope(self.lm_head.name):
|
| 862 |
+
self.lm_head.build([None, None, self.config.n_embd])
|
| 863 |
+
|
| 864 |
+
|
| 865 |
+
@add_start_docstrings(
|
| 866 |
+
"""
|
| 867 |
+
The GPT-J Model transformer with a sequence classification head on top (linear layer).
|
| 868 |
+
|
| 869 |
+
[`GPTJForSequenceClassification`] uses the last token in order to do the classification, as other causal models
|
| 870 |
+
(e.g. GPT, GPT-2, GPT-Neo) do.
|
| 871 |
+
|
| 872 |
+
Since it does classification on the last token, it requires to know the position of the last token. If a
|
| 873 |
+
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
|
| 874 |
+
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
|
| 875 |
+
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
|
| 876 |
+
each row of the batch).
|
| 877 |
+
""",
|
| 878 |
+
GPTJ_START_DOCSTRING,
|
| 879 |
+
)
|
| 880 |
+
class TFGPTJForSequenceClassification(TFGPTJPreTrainedModel, TFSequenceClassificationLoss):
|
| 881 |
+
_keys_to_ignore_on_load_missing = [r"h.\d+.attn.masked_bias", r"h.\d+.attn.bias", r"lm_head.weight"]
|
| 882 |
+
|
| 883 |
+
def __init__(self, config, *inputs, **kwargs):
|
| 884 |
+
super().__init__(config, *inputs, **kwargs)
|
| 885 |
+
self.num_labels = config.num_labels
|
| 886 |
+
self.transformer = TFGPTJMainLayer(config, name="transformer")
|
| 887 |
+
self.score = keras.layers.Dense(
|
| 888 |
+
self.num_labels,
|
| 889 |
+
use_bias=False,
|
| 890 |
+
kernel_initializer=get_initializer(config.initializer_range),
|
| 891 |
+
name="score",
|
| 892 |
+
)
|
| 893 |
+
self.config = config
|
| 894 |
+
|
| 895 |
+
@unpack_inputs
|
| 896 |
+
@add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
|
| 897 |
+
@add_code_sample_docstrings(
|
| 898 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 899 |
+
output_type=TFSequenceClassifierOutputWithPast,
|
| 900 |
+
config_class=_CONFIG_FOR_DOC,
|
| 901 |
+
)
|
| 902 |
+
def call(
|
| 903 |
+
self,
|
| 904 |
+
input_ids: TFModelInputType | None = None,
|
| 905 |
+
past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
|
| 906 |
+
attention_mask: np.ndarray | tf.Tensor | None = None,
|
| 907 |
+
token_type_ids: np.ndarray | tf.Tensor | None = None,
|
| 908 |
+
position_ids: np.ndarray | tf.Tensor | None = None,
|
| 909 |
+
head_mask: np.ndarray | tf.Tensor | None = None,
|
| 910 |
+
inputs_embeds: np.ndarray | tf.Tensor | None = None,
|
| 911 |
+
labels: np.ndarray | tf.Tensor | None = None,
|
| 912 |
+
use_cache: Optional[bool] = None,
|
| 913 |
+
output_attentions: Optional[bool] = None,
|
| 914 |
+
output_hidden_states: Optional[bool] = None,
|
| 915 |
+
return_dict: Optional[bool] = None,
|
| 916 |
+
training: Optional[bool] = False,
|
| 917 |
+
) -> Union[TFSequenceClassifierOutputWithPast, Tuple[tf.Tensor]]:
|
| 918 |
+
r"""
|
| 919 |
+
labels (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):
|
| 920 |
+
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
|
| 921 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
| 922 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
| 923 |
+
"""
|
| 924 |
+
if labels is not None and self.config.pad_token_id is None and input_ids.shape[0] != 1:
|
| 925 |
+
raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
|
| 926 |
+
|
| 927 |
+
transformer_outputs = self.transformer(
|
| 928 |
+
input_ids=input_ids,
|
| 929 |
+
past_key_values=past_key_values,
|
| 930 |
+
attention_mask=attention_mask,
|
| 931 |
+
token_type_ids=token_type_ids,
|
| 932 |
+
position_ids=position_ids,
|
| 933 |
+
head_mask=head_mask,
|
| 934 |
+
inputs_embeds=inputs_embeds,
|
| 935 |
+
use_cache=use_cache,
|
| 936 |
+
output_attentions=output_attentions,
|
| 937 |
+
output_hidden_states=output_hidden_states,
|
| 938 |
+
return_dict=return_dict,
|
| 939 |
+
training=training,
|
| 940 |
+
)
|
| 941 |
+
hidden_states = transformer_outputs[0]
|
| 942 |
+
logits = self.score(hidden_states)
|
| 943 |
+
logits_shape = shape_list(logits)
|
| 944 |
+
in_logits = None
|
| 945 |
+
if self.config.pad_token_id is None:
|
| 946 |
+
sequence_lengths = -1
|
| 947 |
+
else:
|
| 948 |
+
if input_ids is not None:
|
| 949 |
+
sequence_lengths = (
|
| 950 |
+
tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1)
|
| 951 |
+
- 1
|
| 952 |
+
)
|
| 953 |
+
sequence_lengths = tf.where(
|
| 954 |
+
sequence_lengths >= 0,
|
| 955 |
+
sequence_lengths,
|
| 956 |
+
tf.cast(shape_list(input_ids[-1]), sequence_lengths.dtype) - 1,
|
| 957 |
+
)
|
| 958 |
+
in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)
|
| 959 |
+
else:
|
| 960 |
+
sequence_lengths = -1
|
| 961 |
+
logger.warning_once(
|
| 962 |
+
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
|
| 963 |
+
"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
|
| 964 |
+
)
|
| 965 |
+
loss = None
|
| 966 |
+
|
| 967 |
+
if labels is not None:
|
| 968 |
+
if not tf.is_tensor(sequence_lengths):
|
| 969 |
+
in_logits = logits[0 : logits_shape[0], sequence_lengths]
|
| 970 |
+
|
| 971 |
+
loss = self.hf_compute_loss(tf.reshape(labels, [-1]), tf.reshape(in_logits, [-1, self.num_labels]))
|
| 972 |
+
pooled_logits = in_logits if in_logits is not None else logits
|
| 973 |
+
|
| 974 |
+
if not return_dict:
|
| 975 |
+
output = (pooled_logits,) + transformer_outputs[1:]
|
| 976 |
+
return ((loss,) + output) if loss is not None else output
|
| 977 |
+
|
| 978 |
+
return TFSequenceClassifierOutputWithPast(
|
| 979 |
+
loss=loss,
|
| 980 |
+
logits=pooled_logits,
|
| 981 |
+
past_key_values=transformer_outputs.past_key_values,
|
| 982 |
+
hidden_states=transformer_outputs.hidden_states,
|
| 983 |
+
attentions=transformer_outputs.attentions,
|
| 984 |
+
)
|
| 985 |
+
|
| 986 |
+
def build(self, input_shape=None):
|
| 987 |
+
if self.built:
|
| 988 |
+
return
|
| 989 |
+
self.built = True
|
| 990 |
+
if getattr(self, "transformer", None) is not None:
|
| 991 |
+
with tf.name_scope(self.transformer.name):
|
| 992 |
+
self.transformer.build(None)
|
| 993 |
+
if getattr(self, "score", None) is not None:
|
| 994 |
+
with tf.name_scope(self.score.name):
|
| 995 |
+
self.score.build([None, None, self.config.n_embd])
|
| 996 |
+
|
| 997 |
+
|
| 998 |
+
@add_start_docstrings(
|
| 999 |
+
"""
|
| 1000 |
+
The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like
|
| 1001 |
+
SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
|
| 1002 |
+
""",
|
| 1003 |
+
GPTJ_START_DOCSTRING,
|
| 1004 |
+
)
|
| 1005 |
+
class TFGPTJForQuestionAnswering(TFGPTJPreTrainedModel, TFQuestionAnsweringLoss):
|
| 1006 |
+
_keys_to_ignore_on_load_missing = [r"h.\d+.attn.masked_bias", r"h.\d+.attn.bias", r"lm_head.weight"]
|
| 1007 |
+
|
| 1008 |
+
def __init__(self, config, *inputs, **kwargs):
|
| 1009 |
+
super().__init__(config, *inputs, **kwargs)
|
| 1010 |
+
self.num_labels = config.num_labels
|
| 1011 |
+
self.transformer = TFGPTJMainLayer(config, name="transformer")
|
| 1012 |
+
self.qa_outputs = keras.layers.Dense(
|
| 1013 |
+
self.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
|
| 1014 |
+
)
|
| 1015 |
+
self.config = config
|
| 1016 |
+
|
| 1017 |
+
@unpack_inputs
|
| 1018 |
+
@add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
|
| 1019 |
+
@add_code_sample_docstrings(
|
| 1020 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 1021 |
+
output_type=TFQuestionAnsweringModelOutput,
|
| 1022 |
+
config_class=_CONFIG_FOR_DOC,
|
| 1023 |
+
)
|
| 1024 |
+
def call(
|
| 1025 |
+
self,
|
| 1026 |
+
input_ids: TFModelInputType | None = None,
|
| 1027 |
+
past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
|
| 1028 |
+
attention_mask: np.ndarray | tf.Tensor | None = None,
|
| 1029 |
+
token_type_ids: np.ndarray | tf.Tensor | None = None,
|
| 1030 |
+
position_ids: np.ndarray | tf.Tensor | None = None,
|
| 1031 |
+
head_mask: np.ndarray | tf.Tensor | None = None,
|
| 1032 |
+
inputs_embeds: np.ndarray | tf.Tensor | None = None,
|
| 1033 |
+
start_positions: np.ndarray | tf.Tensor | None = None,
|
| 1034 |
+
end_positions: np.ndarray | tf.Tensor | None = None,
|
| 1035 |
+
output_attentions: Optional[bool] = None,
|
| 1036 |
+
output_hidden_states: Optional[bool] = None,
|
| 1037 |
+
return_dict: Optional[bool] = None,
|
| 1038 |
+
training: Optional[bool] = False,
|
| 1039 |
+
) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
|
| 1040 |
+
r"""
|
| 1041 |
+
start_positions (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):
|
| 1042 |
+
Labels for position (index) of the start of the labelled span for computing the token classification loss.
|
| 1043 |
+
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
|
| 1044 |
+
are not taken into account for computing the loss.
|
| 1045 |
+
end_positions (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):
|
| 1046 |
+
Labels for position (index) of the end of the labelled span for computing the token classification loss.
|
| 1047 |
+
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
|
| 1048 |
+
are not taken into account for computing the loss.
|
| 1049 |
+
"""
|
| 1050 |
+
|
| 1051 |
+
transformer_outputs = self.transformer(
|
| 1052 |
+
input_ids=input_ids,
|
| 1053 |
+
past_key_values=past_key_values,
|
| 1054 |
+
attention_mask=attention_mask,
|
| 1055 |
+
token_type_ids=token_type_ids,
|
| 1056 |
+
position_ids=position_ids,
|
| 1057 |
+
head_mask=head_mask,
|
| 1058 |
+
inputs_embeds=inputs_embeds,
|
| 1059 |
+
output_attentions=output_attentions,
|
| 1060 |
+
output_hidden_states=output_hidden_states,
|
| 1061 |
+
return_dict=return_dict,
|
| 1062 |
+
training=training,
|
| 1063 |
+
)
|
| 1064 |
+
sequence_output = transformer_outputs[0]
|
| 1065 |
+
|
| 1066 |
+
logits = self.qa_outputs(sequence_output)
|
| 1067 |
+
start_logits, end_logits = tf.split(logits, 2, axis=-1)
|
| 1068 |
+
start_logits = tf.squeeze(start_logits, axis=-1)
|
| 1069 |
+
end_logits = tf.squeeze(end_logits, axis=-1)
|
| 1070 |
+
|
| 1071 |
+
loss = None
|
| 1072 |
+
if start_positions is not None and end_positions is not None:
|
| 1073 |
+
labels = {"start_position": start_positions}
|
| 1074 |
+
labels["end_position"] = end_positions
|
| 1075 |
+
loss = self.hf_compute_loss(labels, (start_logits, end_logits))
|
| 1076 |
+
|
| 1077 |
+
if not return_dict:
|
| 1078 |
+
output = (start_logits, end_logits) + transformer_outputs[2:]
|
| 1079 |
+
return ((loss,) + output) if loss is not None else output
|
| 1080 |
+
|
| 1081 |
+
return TFQuestionAnsweringModelOutput(
|
| 1082 |
+
loss=loss,
|
| 1083 |
+
start_logits=start_logits,
|
| 1084 |
+
end_logits=end_logits,
|
| 1085 |
+
hidden_states=transformer_outputs.hidden_states,
|
| 1086 |
+
attentions=transformer_outputs.attentions,
|
| 1087 |
+
)
|
| 1088 |
+
|
| 1089 |
+
def build(self, input_shape=None):
|
| 1090 |
+
if self.built:
|
| 1091 |
+
return
|
| 1092 |
+
self.built = True
|
| 1093 |
+
if getattr(self, "transformer", None) is not None:
|
| 1094 |
+
with tf.name_scope(self.transformer.name):
|
| 1095 |
+
self.transformer.build(None)
|
| 1096 |
+
if getattr(self, "qa_outputs", None) is not None:
|
| 1097 |
+
with tf.name_scope(self.qa_outputs.name):
|
| 1098 |
+
self.qa_outputs.build([None, None, self.config.hidden_size])
|
| 1099 |
+
|
| 1100 |
+
|
| 1101 |
+
__all__ = [
|
| 1102 |
+
"TFGPTJForCausalLM",
|
| 1103 |
+
"TFGPTJForQuestionAnswering",
|
| 1104 |
+
"TFGPTJForSequenceClassification",
|
| 1105 |
+
"TFGPTJModel",
|
| 1106 |
+
"TFGPTJPreTrainedModel",
|
| 1107 |
+
]
|
phi4/lib/python3.10/site-packages/transformers/models/phimoe/__init__.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 Microsoft and The HuggingFace Inc. team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
from typing import TYPE_CHECKING
|
| 15 |
+
|
| 16 |
+
from ...utils import _LazyModule
|
| 17 |
+
from ...utils.import_utils import define_import_structure
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
if TYPE_CHECKING:
|
| 21 |
+
from .configuration_phimoe import *
|
| 22 |
+
from .modeling_phimoe import *
|
| 23 |
+
|
| 24 |
+
else:
|
| 25 |
+
import sys
|
| 26 |
+
|
| 27 |
+
_file = globals()["__file__"]
|
| 28 |
+
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
phi4/lib/python3.10/site-packages/transformers/models/phimoe/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (536 Bytes). View file
|
|
|
phi4/lib/python3.10/site-packages/transformers/models/phimoe/__pycache__/configuration_phimoe.cpython-310.pyc
ADDED
|
Binary file (8.44 kB). View file
|
|
|
phi4/lib/python3.10/site-packages/transformers/models/phimoe/__pycache__/modeling_phimoe.cpython-310.pyc
ADDED
|
Binary file (47 kB). View file
|
|
|
phi4/lib/python3.10/site-packages/transformers/models/phimoe/configuration_phimoe.py
ADDED
|
@@ -0,0 +1,203 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
|
| 16 |
+
"""PyTorch Phi-MoE model."""
|
| 17 |
+
|
| 18 |
+
from ...configuration_utils import PretrainedConfig
|
| 19 |
+
from ...modeling_rope_utils import rope_config_validation
|
| 20 |
+
from ...utils import logging
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
logger = logging.get_logger(__name__)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class PhimoeConfig(PretrainedConfig):
|
| 27 |
+
r"""
|
| 28 |
+
This is the configuration class to store the configuration of a [`PhimoeModel`]. It is used to instantiate a Phi-moe
|
| 29 |
+
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
|
| 30 |
+
defaults will yield a similar configuration to that of the
|
| 31 |
+
[microsoft/Phi-3.5-MoE-instruct](https://huggingface.co/microsoft/Phi-3.5-MoE-instruct).
|
| 32 |
+
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
| 33 |
+
documentation from [`PretrainedConfig`] for more information.
|
| 34 |
+
Args:
|
| 35 |
+
vocab_size (`int`, *optional*, defaults to 32064):
|
| 36 |
+
Vocabulary size of the Phimoe model. Defines the number of different tokens that can be represented by the
|
| 37 |
+
`inputs_ids` passed when calling [`PhimoeModel`]
|
| 38 |
+
hidden_size (`int`, *optional*, defaults to 4096):
|
| 39 |
+
Dimension of the hidden representations.
|
| 40 |
+
intermediate_size (`int`, *optional*, defaults to 6400):
|
| 41 |
+
Dimension of the MLP representations.
|
| 42 |
+
num_hidden_layers (`int`, *optional*, defaults to 32):
|
| 43 |
+
Number of hidden layers in the Transformer encoder.
|
| 44 |
+
num_attention_heads (`int`, *optional*, defaults to 32):
|
| 45 |
+
Number of attention heads for each attention layer in the Transformer encoder.
|
| 46 |
+
num_key_value_heads (`int`, *optional*, defaults to 8):
|
| 47 |
+
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
|
| 48 |
+
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
|
| 49 |
+
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
|
| 50 |
+
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
|
| 51 |
+
by meanpooling all the original heads within that group. For more details checkout [this
|
| 52 |
+
paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`.
|
| 53 |
+
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
|
| 54 |
+
The non-linear activation function (function or string) in the decoder.
|
| 55 |
+
max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
|
| 56 |
+
The maximum sequence length that this model might ever be used with. Mixtral's sliding window attention
|
| 57 |
+
allows sequence of up to 4096*32 tokens.
|
| 58 |
+
initializer_range (`float`, *optional*, defaults to 0.02):
|
| 59 |
+
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
|
| 60 |
+
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
|
| 61 |
+
The epsilon used by the rms normalization layers.
|
| 62 |
+
use_cache (`bool`, *optional*, defaults to `True`):
|
| 63 |
+
Whether or not the model should return the last key/values attentions (not used by all models). Only
|
| 64 |
+
relevant if `config.is_decoder=True`.
|
| 65 |
+
pad_token_id (`int`, *optional*):
|
| 66 |
+
The id of the padding token.
|
| 67 |
+
bos_token_id (`int`, *optional*, defaults to 1):
|
| 68 |
+
The id of the "beginning-of-sequence" token.
|
| 69 |
+
eos_token_id (`int`, *optional*, defaults to 2):
|
| 70 |
+
The id of the "end-of-sequence" token.
|
| 71 |
+
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
|
| 72 |
+
Whether the model's input and output word embeddings should be tied.
|
| 73 |
+
rope_theta (`float`, *optional*, defaults to 1000000.0):
|
| 74 |
+
The base period of the RoPE embeddings.
|
| 75 |
+
rope_scaling (`dict`, *optional*):
|
| 76 |
+
The scaling strategy for the RoPE embeddings. If `None`, no scaling is applied. If a dictionary, it must
|
| 77 |
+
contain the following keys: `type`, `short_factor`, `long_factor`, `short_mscale`, `long_mscale` and
|
| 78 |
+
`original_max_position_embeddings`. The `type` must be `longrope`, the `short_mscale` and `long_scale` must
|
| 79 |
+
be numbers, the `short_factor` and `long_factor` must be lists of numbers with the same length as half of
|
| 80 |
+
the attention head size and the `original_max_position_embeddings` must be an integer.
|
| 81 |
+
sliding_window (`int`, *optional*):
|
| 82 |
+
Sliding window attention window size. If not specified, will default to `262144`.
|
| 83 |
+
attention_dropout (`float`, *optional*, defaults to 0.0):
|
| 84 |
+
The dropout ratio for the attention probabilities.
|
| 85 |
+
num_experts_per_tok (`int`, *optional*, defaults to 2):
|
| 86 |
+
The number of experts to root per-token, can be also interpreted as the `top-p` routing
|
| 87 |
+
parameter
|
| 88 |
+
num_local_experts (`int`, *optional*, defaults to 16):
|
| 89 |
+
Number of experts per Sparse MLP layer.
|
| 90 |
+
output_router_logits (`bool`, *optional*, defaults to `False`):
|
| 91 |
+
Whether or not the router logits should be returned by the model. Enabeling this will also
|
| 92 |
+
allow the model to output the auxiliary loss. See [here]() for more details
|
| 93 |
+
router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
|
| 94 |
+
The aux loss factor for the total loss.
|
| 95 |
+
router_jitter_noise (`float`, *optional*, defaults to 0.01):
|
| 96 |
+
Amount of noise to add to the router.
|
| 97 |
+
input_jitter_noise (`float`, *optional*, defaults to 0.0): Input jitter noise
|
| 98 |
+
attention_bias (`bool`, *optional*, defaults to `False`): Attention bias
|
| 99 |
+
lm_head_bias (`bool`, *optional*, defaults to `False`): LM head bias
|
| 100 |
+
|
| 101 |
+
Example:
|
| 102 |
+
|
| 103 |
+
```python
|
| 104 |
+
>>> from transformers import PhimoeModel, PhimoeConfig
|
| 105 |
+
>>> # Initializing a Phi-3 style configuration
|
| 106 |
+
>>> configuration = PhimoeConfig.from_pretrained("microsoft/Phi-3.5-MoE-instruct")
|
| 107 |
+
>>> # Initializing a model from the configuration
|
| 108 |
+
>>> model = PhimoeModel(configuration)
|
| 109 |
+
>>> # Accessing the model configuration
|
| 110 |
+
>>> configuration = model.config
|
| 111 |
+
```"""
|
| 112 |
+
|
| 113 |
+
model_type = "phimoe"
|
| 114 |
+
keys_to_ignore_at_inference = ["past_key_values"]
|
| 115 |
+
|
| 116 |
+
def __init__(
|
| 117 |
+
self,
|
| 118 |
+
vocab_size=32064,
|
| 119 |
+
hidden_size=4096,
|
| 120 |
+
intermediate_size=6400,
|
| 121 |
+
num_hidden_layers=32,
|
| 122 |
+
num_attention_heads=32,
|
| 123 |
+
num_key_value_heads=8,
|
| 124 |
+
hidden_act="silu",
|
| 125 |
+
max_position_embeddings=4096 * 32,
|
| 126 |
+
initializer_range=0.02,
|
| 127 |
+
rms_norm_eps=1e-5,
|
| 128 |
+
use_cache=True,
|
| 129 |
+
pad_token_id=None,
|
| 130 |
+
bos_token_id=1,
|
| 131 |
+
eos_token_id=2,
|
| 132 |
+
tie_word_embeddings=False,
|
| 133 |
+
rope_theta=1e6,
|
| 134 |
+
rope_scaling=None,
|
| 135 |
+
sliding_window=None,
|
| 136 |
+
attention_dropout=0.0,
|
| 137 |
+
num_experts_per_tok=2,
|
| 138 |
+
num_local_experts=16,
|
| 139 |
+
output_router_logits=False,
|
| 140 |
+
router_aux_loss_coef=0.001,
|
| 141 |
+
router_jitter_noise=0.01,
|
| 142 |
+
input_jitter_noise=0.0,
|
| 143 |
+
attention_bias=False,
|
| 144 |
+
lm_head_bias=False,
|
| 145 |
+
**kwargs,
|
| 146 |
+
):
|
| 147 |
+
self.vocab_size = vocab_size
|
| 148 |
+
self.max_position_embeddings = max_position_embeddings
|
| 149 |
+
self.hidden_size = hidden_size
|
| 150 |
+
self.intermediate_size = intermediate_size
|
| 151 |
+
self.num_hidden_layers = num_hidden_layers
|
| 152 |
+
self.num_attention_heads = num_attention_heads
|
| 153 |
+
self.sliding_window = sliding_window
|
| 154 |
+
self.attention_bias = attention_bias
|
| 155 |
+
self.lm_head_bias = lm_head_bias
|
| 156 |
+
# for backward compatibility
|
| 157 |
+
if num_key_value_heads is None:
|
| 158 |
+
num_key_value_heads = num_attention_heads
|
| 159 |
+
|
| 160 |
+
self.num_key_value_heads = num_key_value_heads
|
| 161 |
+
self.hidden_act = hidden_act
|
| 162 |
+
self.initializer_range = initializer_range
|
| 163 |
+
self.rms_norm_eps = rms_norm_eps
|
| 164 |
+
self.use_cache = use_cache
|
| 165 |
+
self.rope_theta = rope_theta
|
| 166 |
+
self.attention_dropout = attention_dropout
|
| 167 |
+
|
| 168 |
+
self.num_experts_per_tok = num_experts_per_tok
|
| 169 |
+
self.num_local_experts = num_local_experts
|
| 170 |
+
self.output_router_logits = output_router_logits
|
| 171 |
+
self.router_aux_loss_coef = router_aux_loss_coef
|
| 172 |
+
self.router_jitter_noise = router_jitter_noise
|
| 173 |
+
self.input_jitter_noise = input_jitter_noise
|
| 174 |
+
|
| 175 |
+
self.rope_scaling = rope_scaling
|
| 176 |
+
if isinstance(self.rope_scaling, dict):
|
| 177 |
+
if "rope_type" not in self.rope_scaling:
|
| 178 |
+
self.rope_scaling["rope_type"] = self.rope_scaling.get("type", None)
|
| 179 |
+
if "original_max_position_embeddings" in self.rope_scaling:
|
| 180 |
+
self.original_max_position_embeddings = self.rope_scaling["original_max_position_embeddings"]
|
| 181 |
+
rope_scaling_short_mscale = self.rope_scaling.get("short_mscale", None)
|
| 182 |
+
rope_scaling_long_mscale = self.rope_scaling.get("long_mscale", None)
|
| 183 |
+
if not isinstance(rope_scaling_short_mscale, (int, float)):
|
| 184 |
+
raise ValueError(
|
| 185 |
+
f"`rope_scaling`'s short_mscale field must be a number, got {rope_scaling_short_mscale}"
|
| 186 |
+
)
|
| 187 |
+
if not isinstance(rope_scaling_long_mscale, (int, float)):
|
| 188 |
+
raise ValueError(
|
| 189 |
+
f"`rope_scaling`'s long_mscale field must be a number, got {rope_scaling_long_mscale}"
|
| 190 |
+
)
|
| 191 |
+
|
| 192 |
+
rope_config_validation(self)
|
| 193 |
+
|
| 194 |
+
super().__init__(
|
| 195 |
+
pad_token_id=pad_token_id,
|
| 196 |
+
bos_token_id=bos_token_id,
|
| 197 |
+
eos_token_id=eos_token_id,
|
| 198 |
+
tie_word_embeddings=tie_word_embeddings,
|
| 199 |
+
**kwargs,
|
| 200 |
+
)
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
__all__ = ["PhimoeConfig"]
|
phi4/lib/python3.10/site-packages/transformers/models/phimoe/modeling_phimoe.py
ADDED
|
@@ -0,0 +1,1631 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
|
| 16 |
+
"""PyTorch Phimoe model."""
|
| 17 |
+
|
| 18 |
+
import math
|
| 19 |
+
from typing import List, Optional, Tuple, Union
|
| 20 |
+
|
| 21 |
+
import torch
|
| 22 |
+
import torch.utils.checkpoint
|
| 23 |
+
from torch import nn
|
| 24 |
+
|
| 25 |
+
from ...activations import ACT2FN
|
| 26 |
+
from ...cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache
|
| 27 |
+
from ...generation import GenerationMixin
|
| 28 |
+
from ...modeling_attn_mask_utils import AttentionMaskConverter, _prepare_4d_causal_attention_mask
|
| 29 |
+
from ...modeling_outputs import (
|
| 30 |
+
MoeCausalLMOutputWithPast,
|
| 31 |
+
MoeModelOutputWithPast,
|
| 32 |
+
SequenceClassifierOutputWithPast,
|
| 33 |
+
)
|
| 34 |
+
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS
|
| 35 |
+
from ...modeling_utils import PreTrainedModel
|
| 36 |
+
from ...utils import (
|
| 37 |
+
add_start_docstrings,
|
| 38 |
+
add_start_docstrings_to_model_forward,
|
| 39 |
+
is_flash_attn_2_available,
|
| 40 |
+
logging,
|
| 41 |
+
replace_return_docstrings,
|
| 42 |
+
)
|
| 43 |
+
from ...utils.import_utils import is_torch_fx_available
|
| 44 |
+
from .configuration_phimoe import PhimoeConfig
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
if is_flash_attn_2_available():
|
| 48 |
+
from ...modeling_flash_attention_utils import _flash_attention_forward
|
| 49 |
+
|
| 50 |
+
# This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.
|
| 51 |
+
# It means that the function will not be traced through and simply appear as a node in the graph.
|
| 52 |
+
if is_torch_fx_available():
|
| 53 |
+
_prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
logger = logging.get_logger(__name__)
|
| 57 |
+
|
| 58 |
+
_CONFIG_FOR_DOC = "PhimoeConfig"
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
# Copied from transformers.models.mixtral.modeling_mixtral.load_balancing_loss_func
|
| 62 |
+
def load_balancing_loss_func(
    gate_logits: Union[torch.Tensor, Tuple[torch.Tensor], None],
    num_experts: Optional[int] = None,
    top_k=2,
    attention_mask: Optional[torch.Tensor] = None,
) -> Union[torch.Tensor, int]:
    r"""
    Compute the Switch-Transformer auxiliary load balancing loss (see
    https://arxiv.org/abs/2101.03961, equations (4)-(6)), which penalizes routing
    distributions that send too many tokens to the same experts.

    Args:
        gate_logits:
            Tuple of ``num_hidden_layers`` router logit tensors, each of shape
            ``[batch_size * sequence_length, num_experts]``. Anything that is not a
            tuple (including ``None``) short-circuits to a zero loss.
        num_experts:
            Number of experts.
        top_k:
            Number of experts routed per token (the `top-k` routing parameter).
        attention_mask (`torch.Tensor`, *optional*):
            Mask of shape ``[batch_size, sequence_length]``; when given, padding
            tokens are excluded from the statistics.

    Returns:
        The auxiliary loss (scalar tensor), or ``0`` when no logits were provided.
    """
    if gate_logits is None or not isinstance(gate_logits, tuple):
        return 0

    # Stack every layer's router logits on one device, then turn them into probabilities.
    device = gate_logits[0].device
    all_logits = torch.cat([layer_logits.to(device) for layer_logits in gate_logits], dim=0)
    routing_probs = torch.nn.functional.softmax(all_logits, dim=-1)

    # One-hot mask of the top-k experts chosen for each token.
    _, chosen_experts = torch.topk(routing_probs, top_k, dim=-1)
    expert_mask = torch.nn.functional.one_hot(chosen_experts, num_experts)

    if attention_mask is None:
        # Fraction of tokens routed to each expert.
        tokens_per_expert = expert_mask.float().mean(dim=0)
        # Average routing probability assigned to each expert.
        router_prob_per_expert = routing_probs.mean(dim=0)
    else:
        batch_size, sequence_length = attention_mask.shape
        num_hidden_layers = all_logits.shape[0] // (batch_size * sequence_length)

        # Broadcast the padding mask to the shape of expert_mask so padded tokens drop out.
        expert_attention_mask = (
            attention_mask[None, :, :, None, None]
            .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
            .reshape(-1, top_k, num_experts)
            .to(device)
        )
        tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
            expert_attention_mask, dim=0
        )

        # Same masking, but matching the shape of the routing probabilities.
        router_per_expert_attention_mask = (
            attention_mask[None, :, :, None]
            .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
            .reshape(-1, num_experts)
            .to(device)
        )
        router_prob_per_expert = torch.sum(routing_probs * router_per_expert_attention_mask, dim=0) / torch.sum(
            router_per_expert_attention_mask, dim=0
        )

    overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
    return overall_loss * num_experts
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
class PhimoeRotaryEmbedding(nn.Module):
    """Rotary position embedding for Phimoe.

    Supports the longrope-style ``rope_scaling`` config, which provides separate
    magnitude scales (``short_mscale`` / ``long_mscale``) depending on whether the
    sequence exceeds ``original_max_position_embeddings``.
    """

    def __init__(
        self,
        config: Optional[PhimoeConfig] = None,
    ):
        super().__init__()

        self.config = config
        if config.rope_scaling is not None:
            # Prefer the modern "rope_type" key, falling back to the legacy "type".
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
            self.short_mscale = config.rope_scaling.get("short_mscale")
            self.long_mscale = config.rope_scaling.get("long_mscale")
        else:
            self.rope_type = "default"
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

    def forward(self, x, seq_len=None):
        # Pick the long/short mscale when rope scaling is configured; otherwise the
        # init function's attention_scaling is used below.
        scale = None
        if self.config.rope_scaling and seq_len:
            exceeds_original = seq_len > self.config.rope_scaling["original_max_position_embeddings"]
            scale = self.long_mscale if exceeds_original else self.short_mscale
        inv_freq, attention_scaling = self.rope_init_fn(self.config, x.device, seq_len)
        if scale is None:
            scale = attention_scaling
        positions = torch.arange(seq_len, device=x.device, dtype=torch.float32)
        freqs = torch.outer(positions, inv_freq)

        # Duplicate the frequencies so cos/sin cover the full head dimension.
        emb = torch.cat((freqs, freqs), dim=-1)
        return (emb.cos() * scale).to(x.dtype), (emb.sin() * scale).to(x.dtype)
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
# Copied from transformers.models.llama.modeling_llama.rotate_half
|
| 178 |
+
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    half = x.shape[-1] // 2
    first_half = x[..., :half]
    second_half = x[..., half:]
    return torch.cat((-second_half, first_half), dim=-1)
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`):
            Position indices of the tokens for the query and key tensors; e.g. offset
            position ids when working with a KV-cache.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            Dimension along which cos[position_ids] and sin[position_ids] are unsqueezed
            so they broadcast against q and k: use 1 when q/k are
            [batch_size, heads, seq_len, head_dim] and 2 when they are
            [batch_size, seq_len, heads, head_dim].
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    # Gather the per-position cos/sin rows and insert a broadcast axis for the heads.
    cos_pos = cos[position_ids].unsqueeze(unsqueeze_dim)
    sin_pos = sin[position_ids].unsqueeze(unsqueeze_dim)
    rotated_q = q * cos_pos + rotate_half(q) * sin_pos
    rotated_k = k * cos_pos + rotate_half(k) * sin_pos
    return rotated_q, rotated_k
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
# Copied from transformers.models.llama.modeling_llama.repeat_kv
|
| 214 |
+
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_kv_heads, seq_len, head_dim = hidden_states.shape
    if n_rep == 1:
        # Nothing to duplicate: key/value heads already match attention heads.
        return hidden_states
    # Insert a repeat axis, broadcast-expand it (no copy), then fold it into the head axis.
    expanded = hidden_states[:, :, None, :, :].expand(batch, num_kv_heads, n_rep, seq_len, head_dim)
    return expanded.reshape(batch, num_kv_heads * n_rep, seq_len, head_dim)
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
class PhimoeAttention(nn.Module):
    """
    Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
    and "Generating Long Sequences with Sparse Transformers".
    """

    def __init__(self, config: PhimoeConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            # Caching requires a layer index to address the right cache slot.
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta
        self.is_causal = True
        self.attention_dropout = config.attention_dropout

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )

        use_bias = self.config.attention_bias
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=use_bias)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=use_bias)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=use_bias)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=use_bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # (batch, seq, hidden) -> (batch, heads, seq, head_dim)
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Eager (matmul + softmax) attention pass over ``hidden_states``."""
        batch_size, seq_len, _ = hidden_states.size()

        # Project to Q/K/V and split into heads: (batch, heads, seq, head_dim).
        queries = self.q_proj(hidden_states).view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
        keys = (
            self.k_proj(hidden_states)
            .view(batch_size, seq_len, self.num_key_value_heads, self.head_dim)
            .transpose(1, 2)
        )
        values = (
            self.v_proj(hidden_states)
            .view(batch_size, seq_len, self.num_key_value_heads, self.head_dim)
            .transpose(1, 2)
        )

        cos, sin = position_embeddings
        queries, keys = apply_rotary_pos_emb(queries, keys, cos, sin, position_ids)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}  # Specific to RoPE models
            keys, values = past_key_value.update(keys, values, self.layer_idx, cache_kwargs)

        # repeat k/v heads if n_kv_heads < n_heads
        keys = repeat_kv(keys, self.num_key_value_groups)
        values = repeat_kv(values, self.num_key_value_groups)

        attn_weights = torch.matmul(queries, keys.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attention_mask is not None:  # no matter the length, we just slice it
            attn_weights = attn_weights + attention_mask[:, :, :, : keys.shape[-2]]

        # upcast attention to fp32 for a numerically stable softmax
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(queries.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
        attn_output = torch.matmul(attn_weights, values)

        if attn_output.size() != (batch_size, self.num_heads, seq_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(batch_size, self.num_heads, seq_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        # Merge heads back into the hidden dimension and project out.
        attn_output = attn_output.transpose(1, 2).contiguous().reshape(batch_size, seq_len, self.hidden_size)
        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
class PhimoeFlashAttention2(PhimoeAttention):
    """
    Phimoe flash attention module. This module inherits from `PhimoeAttention` as the weights of the module stays
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ):
        """
        Run flash attention over `hidden_states`.

        Returns `(attn_output, attn_weights, past_key_value)`. `attn_weights` is always `None`
        because flash attention never materializes the full attention matrix.
        """
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # (batch, seq, hidden) -> (batch, heads, seq, head_dim)
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)
        dropout_rate = 0.0 if not self.training else self.attention_dropout

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
        # therefore the input hidden states gets silently casted in float32. Hence, we need
        # cast them back in float16 just to be sure everything works as expected.
        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seems to be silently casted in float32, this might be related to"
                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        # Reshape to the (batch, seq, heads, head_dim) layout expected by flash attention
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        attn_output = _flash_attention_forward(
            query_states,
            key_states,
            value_states,
            attention_mask,
            q_len,
            position_ids=position_ids,
            dropout=dropout_rate,
            sliding_window=getattr(self.config, "sliding_window", None),
            is_causal=self.is_causal,
        )

        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
        attn_output = self.o_proj(attn_output)

        # Bug fix: `attn_weights` was previously assigned only inside `if not output_attentions:`,
        # so calling with `output_attentions=True` raised a NameError at the return statement.
        # Flash attention cannot return per-position weights, so the honest value is always None.
        attn_weights = None

        return attn_output, attn_weights, past_key_value
|
| 422 |
+
|
| 423 |
+
|
| 424 |
+
class PhimoeSdpaAttention(PhimoeAttention):
    """
    Phimoe attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `PhimoeAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
    SDPA API.
    """

    # Adapted from PhimoeAttention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if output_attentions:
            # SDPA cannot expose the attention matrix, so defer to the eager implementation.
            logger.warning_once(
                "PhimoeModel is using PhimoeSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
                position_embeddings=position_embeddings,
            )

        batch_size, seq_len, _ = hidden_states.size()

        # Project and reshape to per-head tensors of shape (batch, heads, seq, head_dim).
        query_states = self.q_proj(hidden_states).view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(batch_size, seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(batch_size, seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Expand k/v heads so every query head has a matching key/value head.
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        causal_mask = attention_mask
        if attention_mask is not None:  # no matter the length, we just slice it
            causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]

        # SDPA with the memory-efficient backend is currently (torch==2.1.2) bugged with
        # non-contiguous inputs plus a custom attn_mask.
        # Reference: https://github.com/pytorch/pytorch/issues/112577.
        if query_states.device.type == "cuda" and attention_mask is not None:
            query_states = query_states.contiguous()
            key_states = key_states.contiguous()
            value_states = value_states.contiguous()

        # Select SDPA's causal fast path only when there is no explicit mask. The `seq_len > 1`
        # check matches AttentionMaskConverter.to_causal_4d, which builds no causal mask for
        # single-token decode steps. Written as a statement-level conditional (not inline) so
        # torch.compile's dynamic shapes and full-graph options still work.
        is_causal = True if causal_mask is None and seq_len > 1 else False

        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=causal_mask,
            dropout_p=self.attention_dropout if self.training else 0.0,
            is_causal=is_causal,
        )

        # Back to (batch, seq, hidden) and through the output projection.
        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(batch_size, seq_len, self.hidden_size)

        return self.o_proj(attn_output), None, past_key_value
|
| 510 |
+
|
| 511 |
+
|
| 512 |
+
# Dispatch table: maps `config._attn_implementation` to the attention class instantiated
# by `PhimoeDecoderLayer`.
PHIMOE_ATTENTION_CLASSES = {
    "eager": PhimoeAttention,
    "flash_attention_2": PhimoeFlashAttention2,
    "sdpa": PhimoeSdpaAttention,
}
|
| 517 |
+
|
| 518 |
+
|
| 519 |
+
# Copied from transformers.models.mixtral.modeling_mixtral.MixtralBlockSparseTop2MLP with Mixtral->Phimoe
class PhimoeBlockSparseTop2MLP(nn.Module):
    """A single expert: gated feed-forward MLP used inside the sparse MoE block."""

    def __init__(self, config: PhimoeConfig):
        super().__init__()
        self.ffn_dim = config.intermediate_size
        self.hidden_dim = config.hidden_size

        # w1 is the gate projection, w3 the up projection, w2 the down projection.
        self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
        self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False)
        self.w3 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)

        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states):
        # Activated gate (w1) elementwise-scales the up projection (w3), then project back down (w2).
        gated = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states)
        return self.w2(gated)
|
| 536 |
+
|
| 537 |
+
|
| 538 |
+
class MultiplierProcessor(torch.autograd.Function):
    """
    Custom autograd function for the sparsemixer routing multiplier.

    Forward returns `multiplier * mask_for_one`. Backward sends the gradient straight to the raw
    router `scores`, using the exact softmax Jacobian of the gathered gate probability
    (`mask_for_one` is excluded from the gradient path).
    """

    @staticmethod
    def forward(
        ctx,
        scores: torch.Tensor,
        multiplier: torch.Tensor,
        selected_experts: torch.Tensor,
        masked_gates: torch.Tensor,
        mask_for_one: torch.Tensor,
    ):
        """
        Scale the gathered gate probability by the midpoint mask.

        Args:
            ctx: Context object to save information for backward computation.
            scores (torch.Tensor): Raw router scores; the only input that receives a gradient.
            multiplier (torch.Tensor): Gate probability gathered at the selected expert.
            selected_experts (torch.Tensor): Indices of the selected experts.
            masked_gates (torch.Tensor): Softmaxed (masked) gate probabilities.
            mask_for_one (torch.Tensor): Midpoint mask applied to the multiplier.

        Returns:
            torch.Tensor: `multiplier * mask_for_one`.
        """
        ctx.save_for_backward(multiplier, selected_experts, masked_gates)
        return multiplier * mask_for_one

    @staticmethod
    def backward(
        ctx,
        grad_at_output: torch.Tensor,
    ):
        """
        Propagate the output gradient back to `scores`.

        Args:
            ctx: Context object with saved tensors from the forward pass.
            grad_at_output (torch.Tensor): Gradient at the output.

        Returns:
            Tuple[torch.Tensor, None, None, None, None]: Gradient for `scores`; the remaining
            inputs receive no gradient.
        """
        multiplier, selected_experts, masked_gates = ctx.saved_tensors

        # Scale the incoming gradient by the gathered gate probability (sparsemixer estimator;
        # note `mask_for_one` deliberately does not appear here).
        grad_at_output = grad_at_output * multiplier

        # Softmax Jacobian applied to the gathered probability p_j:
        # -p * g everywhere, with g added back at the selected expert's slot (e_j - p form).
        grad_at_scores_expanded = masked_gates * grad_at_output.mul(-1)
        grad_at_scores_expanded.scatter_add_(
            dim=-1,
            index=selected_experts,
            src=grad_at_output,
        )

        return grad_at_scores_expanded, None, None, None, None
|
| 598 |
+
|
| 599 |
+
|
| 600 |
+
def sparsemixer(scores, jitter_eps, training, top_k=2):
    """
    Sparse mixer function to select top-k experts and compute multipliers.
    Based on the paper: https://arxiv.org/pdf/2409.12136
    We first replace the TopK(·) function as random sampling of discrete variables
    in model training. Then, following Liu et al. (2023a) and Liu et al. (2023b), we apply Heun's
    third order method to approximate the expert routing gradient and construct a modified
    back-propagation to give a mathematically sound gradient estimation for expert routing.

    Args:
        scores (torch.Tensor): Router logits, one row per token, one column per expert.
        jitter_eps (float): Jitter epsilon controlling which near-maximal experts stay eligible.
        training (bool): If True, experts are Gumbel-sampled; otherwise the argmax is taken.
        top_k (int): Number of experts to select; only 2 is supported.

    Returns:
        Tuple[torch.Tensor, torch.Tensor]: Routing multipliers and selected expert indices,
        each with a trailing dimension of 2.
    """
    if top_k != 2:
        raise ValueError("top_k must be equal to 2")

    # ---------- first expert ----------
    with torch.no_grad():
        # Mask out experts whose logit falls too far (relative gap > 2*jitter_eps) below the max.
        mask_logits_threshold, max_ind = scores.max(dim=-1, keepdim=True)
        factor = scores.abs().clamp(min=mask_logits_threshold)
        mask_logits_threshold = ((mask_logits_threshold - scores) / factor) > (2 * jitter_eps)

    masked_gates = scores.masked_fill(mask_logits_threshold, float("-inf"))
    if training:
        # Gumbel-max sampling over the surviving experts — more robust than the multinomial method.
        gumbel_noise = (
            torch.empty_like(masked_gates, memory_format=torch.legacy_contiguous_format).exponential_().log()
        )
        selected_experts = (masked_gates - gumbel_noise).max(dim=-1)[1].unsqueeze(-1)
    else:
        selected_experts = max_ind

    # Softmax over the masked logits yields the gate probability used for gradients.
    masked_gates = torch.softmax(masked_gates, dim=-1)
    multiplier_o = masked_gates.gather(dim=-1, index=selected_experts)

    if training:
        # Midpoint mask for Heun's third-order gradient approximation: full weight when the sample
        # agrees with the argmax; otherwise down-weight to 1/3 (kept at 1.0 with probability 0.25).
        max_scores, max_ind = masked_gates.max(dim=-1, keepdim=True)
        mask_for_one = torch.logical_or(
            selected_experts == max_ind,
            torch.rand_like(max_scores) > 0.75,  # Heun's third-order method
        )
        # Map {False, True} -> {1/3, 1.0}: x -> 0.3333 + 0.6667 * x
        mask_for_one = torch.add(0.3333, mask_for_one, alpha=0.6667).type_as(masked_gates)

        multiplier = MultiplierProcessor.apply(
            scores,
            multiplier_o,
            selected_experts,
            masked_gates,
            mask_for_one,
        )
    else:
        multiplier = multiplier_o

    # ---------- second expert: same procedure with the first expert masked out ----------
    masked_scores = torch.scatter(
        scores,
        -1,
        selected_experts,
        float("-inf"),
    )
    with torch.no_grad():
        # NOTE: the relative-gap test below is computed against the original `scores`, matching
        # the first pass, while the fill is applied to `masked_scores`.
        mask_logits_threshold, max_ind = masked_scores.max(dim=-1, keepdim=True)
        factor = scores.abs().clamp(min=mask_logits_threshold)
        mask_logits_threshold = ((mask_logits_threshold - scores) / factor) > (2 * jitter_eps)

    masked_gates_top2 = masked_scores.masked_fill(mask_logits_threshold, float("-inf"))
    if training:
        gumbel_noise_top2 = (
            torch.empty_like(masked_gates_top2, memory_format=torch.legacy_contiguous_format).exponential_().log()
        )
        selected_experts_top2 = (masked_gates_top2 - gumbel_noise_top2).max(dim=-1)[1].unsqueeze(-1)
    else:
        selected_experts_top2 = max_ind

    masked_gates_top2 = torch.softmax(masked_gates_top2, dim=-1)
    multiplier_top2_o = masked_gates_top2.gather(dim=-1, index=selected_experts_top2)

    if training:
        max_scores, max_ind = masked_gates_top2.max(dim=-1, keepdim=True)
        mask_for_one_top2 = torch.logical_or(
            selected_experts_top2 == max_ind,
            torch.rand_like(max_scores).uniform_() > 0.75,  # Heun's third-order method
        )
        mask_for_one_top2 = torch.add(0.3333, mask_for_one_top2, alpha=0.6667).type_as(masked_gates_top2)

        multiplier_top2 = MultiplierProcessor.apply(
            scores,
            multiplier_top2_o,
            selected_experts_top2,
            masked_gates_top2,
            mask_for_one_top2,
        )
    else:
        multiplier_top2 = multiplier_top2_o

    # Concatenate the two experts' multipliers and indices along the last dimension.
    multiplier = torch.concat((multiplier, multiplier_top2), dim=-1)
    selected_experts = torch.concat((selected_experts, selected_experts_top2), dim=-1)

    return (
        multiplier,
        selected_experts,
    )
|
| 726 |
+
|
| 727 |
+
|
| 728 |
+
class PhimoeSparseMoeBlock(nn.Module):
    """
    This implementation is
    strictly equivalent to standard MoE with full capacity (no
    dropped tokens). It's faster since it formulates MoE operations
    in terms of block-sparse operations to accommodate imbalanced
    assignments of tokens to experts, whereas standard MoE either
    (1) drop tokens at the cost of reduced performance or (2) set
    capacity factor to number of experts and thus waste computation
    and memory on padding.
    """

    def __init__(self, config):
        super().__init__()
        self.hidden_dim = config.hidden_size
        self.ffn_dim = config.intermediate_size
        self.num_experts = config.num_local_experts
        self.top_k = config.num_experts_per_tok

        # Router producing one logit per expert for every token.
        self.gate = nn.Linear(self.hidden_dim, self.num_experts, bias=False)

        self.experts = nn.ModuleList([PhimoeBlockSparseTop2MLP(config) for _ in range(self.num_experts)])

        # Jitter parameters (training-time noise on inputs and router logits).
        self.router_jitter_noise = config.router_jitter_noise
        self.input_jitter_noise = config.input_jitter_noise

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Route each token through its top-2 experts and combine the weighted outputs.

        Returns `(final_hidden_states, router_logits)`.
        """
        batch_size, sequence_length, hidden_dim = hidden_states.shape
        if self.training and self.input_jitter_noise > 0:
            # NOTE(review): the in-place `*=` also perturbs the caller's tensor — presumably
            # intentional jitter, but confirm no caller reads this tensor afterwards.
            hidden_states *= torch.empty_like(hidden_states).uniform_(
                1.0 - self.input_jitter_noise, 1.0 + self.input_jitter_noise
            )
        hidden_states = hidden_states.view(-1, hidden_dim)
        router_logits = self.gate(hidden_states)

        routing_weights, selected_experts = sparsemixer(
            router_logits,
            jitter_eps=self.router_jitter_noise,
            training=self.training,
        )

        final_hidden_states = torch.zeros(
            (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device
        )

        # One-hot encode the selections and permute to (num_experts, top_k, num_tokens) so each
        # expert can cheaply look up which tokens were routed to it.
        expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)

        # Visit each expert once and process only its assigned tokens.
        for expert_idx in range(self.num_experts):
            expert_layer = self.experts[expert_idx]
            idx, top_x = torch.where(expert_mask[expert_idx])

            if top_x.shape[0] == 0:
                # No token chose this expert in the current batch.
                continue

            # Gather the routed hidden states and weight the expert output by the routing weight
            # of the matching top-1/top-2 slot.
            current_state = hidden_states[None, top_x].reshape(-1, hidden_dim)
            current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None]

            # `index_add_` scatters the weighted expert outputs back onto their token rows
            # (tensor indices are required here, which `top_x` already is).
            final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))

        final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
        return final_hidden_states, router_logits
|
| 798 |
+
|
| 799 |
+
|
| 800 |
+
class PhimoeDecoderLayer(nn.Module):
    """One Phimoe transformer layer: pre-norm self-attention and a pre-norm sparse-MoE
    feed-forward, each wrapped in a residual connection."""

    def __init__(self, config: PhimoeConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        # Attention implementation (eager / flash_attention_2 / sdpa) is selected via the config.
        self.self_attn = PHIMOE_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)

        self.block_sparse_moe = PhimoeSparseMoeBlock(config)
        # NOTE: Phimoe uses full LayerNorm (elementwise affine) while reusing the `rms_norm_eps`
        # config value as the epsilon.
        self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.rms_norm_eps, elementwise_affine=True)
        self.post_attention_layernorm = nn.LayerNorm(
            config.hidden_size, eps=config.rms_norm_eps, elementwise_affine=True
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        output_router_logits: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, sequence_length)` where padding elements are indicated by 0.
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_router_logits (`bool`, *optional*):
                Whether or not to return the logits of all the routers. They are useful for computing the router
                loss, and should not be returned during inference.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up
                decoding (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence.
            kwargs (`dict`, *optional*):
                Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
                into the model
        """
        # --- self-attention sub-layer (pre-norm + residual) ---
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
        )
        hidden_states = residual + hidden_states

        # --- sparse-MoE feed-forward sub-layer (pre-norm + residual) ---
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states, router_logits = self.block_sparse_moe(hidden_states)
        hidden_states = residual + hidden_states

        # Assemble the output tuple; optional items are appended only when requested.
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)
        if use_cache:
            outputs += (present_key_value,)
        if output_router_logits:
            outputs += (router_logits,)
        return outputs
|
| 883 |
+
|
| 884 |
+
|
| 885 |
+
PHIMOE_START_DOCSTRING = r"""
|
| 886 |
+
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
| 887 |
+
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
|
| 888 |
+
etc.)
|
| 889 |
+
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
|
| 890 |
+
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
|
| 891 |
+
and behavior.
|
| 892 |
+
Parameters:
|
| 893 |
+
config ([`PhimoeConfig`]):
|
| 894 |
+
Model configuration class with all the parameters of the model. Initializing with a config file does not
|
| 895 |
+
load the weights associated with the model, only the configuration. Check out the
|
| 896 |
+
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
| 897 |
+
"""
|
| 898 |
+
|
| 899 |
+
|
| 900 |
+
@add_start_docstrings(
    "The bare Phimoe Model outputting raw hidden-states without any specific head on top.",
    PHIMOE_START_DOCSTRING,
)
# Copied from transformers.models.mixtral.modeling_mixtral.MixtralPreTrainedModel with Mixtral->Phimoe
class PhimoePreTrainedModel(PreTrainedModel):
    # Wires Phimoe modules into the PreTrainedModel machinery: config class, weight init,
    # and capability flags read by the loading/inference code.
    config_class = PhimoeConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    # Modules that must not be split across devices by model parallelism.
    _no_split_modules = ["PhimoeDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    # Supported attention backends / cache types.
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True

    def _init_weights(self, module):
        # Draw Linear/Embedding weights from N(0, initializer_range); zero biases and the
        # padding embedding row.
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
|
| 928 |
+
|
| 929 |
+
|
| 930 |
+
PHIMOE_INPUTS_DOCSTRING = r"""
|
| 931 |
+
Args:
|
| 932 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
| 933 |
+
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
|
| 934 |
+
it.
|
| 935 |
+
|
| 936 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| 937 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
| 938 |
+
|
| 939 |
+
[What are input IDs?](../glossary#input-ids)
|
| 940 |
+
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 941 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
| 942 |
+
|
| 943 |
+
- 1 for tokens that are **not masked**,
|
| 944 |
+
- 0 for tokens that are **masked**.
|
| 945 |
+
|
| 946 |
+
[What are attention masks?](../glossary#attention-mask)
|
| 947 |
+
|
| 948 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| 949 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
| 950 |
+
|
| 951 |
+
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
|
| 952 |
+
`past_key_values`).
|
| 953 |
+
|
| 954 |
+
If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
|
| 955 |
+
and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
|
| 956 |
+
information on the default strategy.
|
| 957 |
+
|
| 958 |
+
- 1 indicates the head is **not masked**,
|
| 959 |
+
- 0 indicates the head is **masked**.
|
| 960 |
+
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 961 |
+
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
|
| 962 |
+
config.n_positions - 1]`.
|
| 963 |
+
|
| 964 |
+
[What are position IDs?](../glossary#position-ids)
|
| 965 |
+
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
|
| 966 |
+
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
|
| 967 |
+
`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
|
| 968 |
+
`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
|
| 969 |
+
|
| 970 |
+
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
|
| 971 |
+
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
|
| 972 |
+
|
| 973 |
+
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
|
| 974 |
+
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
|
| 975 |
+
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
|
| 976 |
+
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
| 977 |
+
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
|
| 978 |
+
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
|
| 979 |
+
model's internal embedding lookup matrix.
|
| 980 |
+
use_cache (`bool`, *optional*):
|
| 981 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
|
| 982 |
+
`past_key_values`).
|
| 983 |
+
output_attentions (`bool`, *optional*):
|
| 984 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 985 |
+
tensors for more detail.
|
| 986 |
+
output_hidden_states (`bool`, *optional*):
|
| 987 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 988 |
+
more detail.
|
| 989 |
+
output_router_logits (`bool`, *optional*):
|
| 990 |
+
Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
|
| 991 |
+
should not be returned during inference.
|
| 992 |
+
return_dict (`bool`, *optional*):
|
| 993 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 994 |
+
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
|
| 995 |
+
Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
|
| 996 |
+
this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
|
| 997 |
+
the complete sequence length.
|
| 998 |
+
"""
|
| 999 |
+
|
| 1000 |
+
|
| 1001 |
+
@add_start_docstrings(
|
| 1002 |
+
"The bare Phimoe Model outputting raw hidden-states without any specific head on top.",
|
| 1003 |
+
PHIMOE_START_DOCSTRING,
|
| 1004 |
+
)
|
| 1005 |
+
class PhimoeModel(PhimoePreTrainedModel):
|
| 1006 |
+
"""
|
| 1007 |
+
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`PhimoeDecoderLayer`]
|
| 1008 |
+
Args:
|
| 1009 |
+
config: PhimoeConfig
|
| 1010 |
+
"""
|
| 1011 |
+
|
| 1012 |
+
def __init__(self, config: PhimoeConfig):
    """Build the Phimoe decoder stack.

    Creates token embeddings, ``config.num_hidden_layers`` decoder layers,
    a final LayerNorm, and rotary position embeddings, then runs the
    standard post-init weight initialization.
    """
    super().__init__(config)
    self.padding_idx = config.pad_token_id
    self.vocab_size = config.vocab_size

    self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
    decoder_layers = (PhimoeDecoderLayer(config, idx) for idx in range(config.num_hidden_layers))
    self.layers = nn.ModuleList(decoder_layers)
    self._attn_implementation = config._attn_implementation
    # NOTE: Phimoe uses an affine LayerNorm here, configured with the config's
    # ``rms_norm_eps`` value.
    self.norm = nn.LayerNorm(config.hidden_size, eps=config.rms_norm_eps, elementwise_affine=True)
    self.rotary_emb = PhimoeRotaryEmbedding(config=config)

    self.gradient_checkpointing = False
    # Initialize weights and apply final processing.
    self.post_init()
|
| 1028 |
+
|
| 1029 |
+
def get_input_embeddings(self):
    """Return the input token-embedding module."""
    return self.embed_tokens
|
| 1031 |
+
|
| 1032 |
+
def set_input_embeddings(self, value):
    """Replace the input token-embedding module with ``value``."""
    self.embed_tokens = value
|
| 1034 |
+
|
| 1035 |
+
@add_start_docstrings_to_model_forward(PHIMOE_INPUTS_DOCSTRING)
def forward(
    self,
    input_ids: torch.LongTensor = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_values: Optional[List[torch.FloatTensor]] = None,
    inputs_embeds: Optional[torch.FloatTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    output_router_logits: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    cache_position: Optional[torch.LongTensor] = None,
) -> Union[Tuple, MoeModelOutputWithPast]:
    """Embed the inputs, run every decoder layer, and apply the final norm.

    Per-call flags fall back to the corresponding config defaults when left
    as ``None``. Returns a `MoeModelOutputWithPast` (or the equivalent tuple
    when ``return_dict=False``).
    """
    # Resolve the optional flags against the model configuration.
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_router_logits = (
        output_router_logits if output_router_logits is not None else self.config.output_router_logits
    )
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )
    use_cache = use_cache if use_cache is not None else self.config.use_cache
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    # Exactly one of input_ids / inputs_embeds must be provided.
    if (input_ids is None) ^ (inputs_embeds is not None):
        raise ValueError(
            "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
        )

    if self.gradient_checkpointing and self.training and use_cache:
        logger.warning_once(
            "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
        )
        use_cache = False

    # kept for BC (non `Cache` `past_key_values` inputs)
    legacy_cache_input = use_cache and not isinstance(past_key_values, Cache)
    if legacy_cache_input:
        if past_key_values is None:
            past_key_values = DynamicCache()
        else:
            past_key_values = DynamicCache.from_legacy_cache(past_key_values)
            logger.warning_once(
                "We detected that you are passing `past_key_values` as a tuple of tuples. This is deprecated and "
                "will be removed in v4.47. Please convert your cache or use an appropriate `Cache` class "
                "(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)"
            )

    if inputs_embeds is None:
        inputs_embeds = self.embed_tokens(input_ids)

    if cache_position is None:
        seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        cache_position = torch.arange(
            seen_tokens, seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
        )
    if position_ids is None:
        position_ids = cache_position.unsqueeze(0)

    causal_mask = self._update_causal_mask(
        attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
    )

    hidden_states = inputs_embeds
    # Rotary embeddings computed once up to the largest position of this step.
    position_embeddings = self.rotary_emb(hidden_states, seq_len=cache_position[-1] + 1)

    # Accumulators for the optional per-layer outputs.
    all_hidden_states = () if output_hidden_states else None
    all_self_attns = () if output_attentions else None
    all_router_logits = () if output_router_logits else None
    next_decoder_cache = None

    for decoder_layer in self.layers:
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if self.gradient_checkpointing and self.training:
            layer_outputs = self._gradient_checkpointing_func(
                decoder_layer.__call__,
                hidden_states,
                causal_mask,
                position_ids,
                past_key_values,
                output_attentions,
                output_router_logits,
                use_cache,
                cache_position,
                position_embeddings,
            )
        else:
            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                output_router_logits=output_router_logits,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
            )

        hidden_states = layer_outputs[0]

        # The cache slot shifts by one when attentions are also returned.
        if use_cache:
            next_decoder_cache = layer_outputs[2 if output_attentions else 1]
        if output_attentions:
            all_self_attns += (layer_outputs[1],)
        if output_router_logits:
            all_router_logits += (layer_outputs[-1],)

    hidden_states = self.norm(hidden_states)

    # Add hidden states from the last decoder layer.
    if output_hidden_states:
        all_hidden_states += (hidden_states,)

    next_cache = next_decoder_cache if use_cache else None
    if legacy_cache_input:
        next_cache = next_cache.to_legacy_cache()

    if not return_dict:
        candidates = (hidden_states, next_cache, all_hidden_states, all_self_attns, all_router_logits)
        return tuple(v for v in candidates if v is not None)
    return MoeModelOutputWithPast(
        last_hidden_state=hidden_states,
        past_key_values=next_cache,
        hidden_states=all_hidden_states,
        attentions=all_self_attns,
        router_logits=all_router_logits,
    )
|
| 1176 |
+
|
| 1177 |
+
# Copied from transformers.models.phi3.modeling_phi3.Phi3Model._update_causal_mask with Phi3->Phimoe
def _update_causal_mask(
    self,
    attention_mask: torch.Tensor,
    input_tensor: torch.Tensor,
    cache_position: torch.Tensor,
    past_key_values: Cache,
    output_attentions: bool,
):
    """Build (or skip) the 4D causal mask for the configured attention backend,
    cache type, and sliding-window setting. Returns ``None`` when the backend
    can derive causality itself."""
    if self.config._attn_implementation == "flash_attention_2":
        # Flash Attention 2 consumes the 2D mask directly; it only supports
        # left padding during batched generation.
        if attention_mask is not None and past_key_values is not None:
            is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0]
            if is_padding_right:
                raise ValueError(
                    "You are attempting to perform batched generation with padding_side='right'"
                    " this may lead to unexpected behaviour for Flash Attention version of Phimoe. Make sure to "
                    " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
                )
        if attention_mask is not None and 0.0 in attention_mask:
            return attention_mask
        return None

    # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
    # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
    # to infer the attention mask.
    seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
    is_static_cache = isinstance(past_key_values, StaticCache)
    is_sliding_cache = isinstance(past_key_values, SlidingWindowCache)

    # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
    if (
        self.config._attn_implementation == "sdpa"
        and not (is_static_cache or is_sliding_cache)
        and not output_attentions
    ):
        if AttentionMaskConverter._ignore_causal_mask_sdpa(
            attention_mask,
            inputs_embeds=input_tensor,
            past_key_values_length=seen_tokens,
            sliding_window=self.config.sliding_window,
            is_training=self.training,
        ):
            return None

    dtype, device = input_tensor.dtype, input_tensor.device
    min_dtype = torch.finfo(dtype).min
    sequence_length = input_tensor.shape[1]
    if is_sliding_cache or is_static_cache:
        # Fixed-size caches: the mask spans the full cache length.
        target_length = past_key_values.get_max_cache_shape()
    else:
        # DynamicCache or no cache.
        target_length = (
            attention_mask.shape[-1]
            if isinstance(attention_mask, torch.Tensor)
            else seen_tokens + sequence_length + 1
        )

    # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
    causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask,
        sequence_length=sequence_length,
        target_length=target_length,
        dtype=dtype,
        device=device,
        cache_position=cache_position,
        batch_size=input_tensor.shape[0],
        config=self.config,
        past_key_values=past_key_values,
    )

    if (
        self.config._attn_implementation == "sdpa"
        and attention_mask is not None
        and attention_mask.device.type == "cuda"
        and not output_attentions
    ):
        # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
        # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
        # Details: https://github.com/pytorch/pytorch/issues/110213
        causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

    return causal_mask
|
| 1260 |
+
|
| 1261 |
+
@staticmethod
# Copied from transformers.models.mistral.modeling_mistral.MistralModel._prepare_4d_causal_attention_mask_with_cache_position with Mistral->Phimoe
def _prepare_4d_causal_attention_mask_with_cache_position(
    attention_mask: torch.Tensor,
    sequence_length: int,
    target_length: int,
    dtype: torch.dtype,
    device: torch.device,
    cache_position: torch.Tensor,
    batch_size: int,
    config: PhimoeConfig,
    past_key_values: Cache,
):
    """
    Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
    `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

    Args:
        attention_mask (`torch.Tensor`):
            A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
        sequence_length (`int`):
            The sequence length being processed.
        target_length (`int`):
            The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
        dtype (`torch.dtype`):
            The dtype to use for the 4D attention mask.
        device (`torch.device`):
            The device to place the 4D attention mask on.
        cache_position (`torch.Tensor`):
            Indices depicting the position of the input sequence tokens in the sequence.
        batch_size (`int`):
            Batch size.
        config (`PhimoeConfig`):
            The model's configuration class.
        past_key_values (`Cache`):
            The cache class that is being used currently to generate.
    """
    if attention_mask is not None and attention_mask.dim() == 4:
        # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
        causal_mask = attention_mask
    else:
        min_dtype = torch.finfo(dtype).min
        causal_mask = torch.full(
            (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
        )
        diagonal_attend_mask = torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
        if config.sliding_window is not None:
            # if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also
            # the check is needed to verify is current checkpoint was trained with sliding window or not
            if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length:
                sliding_attend_mask = torch.arange(target_length, device=device) <= (
                    cache_position.reshape(-1, 1) - config.sliding_window
                )
                diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
        causal_mask *= diagonal_attend_mask
        causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
        if attention_mask is not None:
            causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
            if attention_mask.shape[-1] > target_length:
                attention_mask = attention_mask[:, :target_length]
            mask_length = attention_mask.shape[-1]
            padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
            padding_mask = padding_mask == 0
            causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                padding_mask, min_dtype
            )
    return causal_mask
|
| 1328 |
+
|
| 1329 |
+
|
| 1330 |
+
class PhimoeForCausalLM(PhimoePreTrainedModel, GenerationMixin):
|
| 1331 |
+
_tied_weights_keys = ["lm_head.weight"]
|
| 1332 |
+
|
| 1333 |
+
def __init__(self, config):
    """Wrap `PhimoeModel` with a language-modeling head and record the MoE
    auxiliary-loss (router load-balancing) settings."""
    super().__init__(config)
    self.model = PhimoeModel(config)
    self.vocab_size = config.vocab_size
    self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=self.config.lm_head_bias)
    # Router (load-balancing) auxiliary-loss configuration.
    self.router_aux_loss_coef = config.router_aux_loss_coef
    self.num_experts = config.num_local_experts
    self.num_experts_per_tok = config.num_experts_per_tok
    # Initialize weights and apply final processing.
    self.post_init()
|
| 1343 |
+
|
| 1344 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_input_embeddings
def get_input_embeddings(self):
    """Return the token-embedding module of the wrapped decoder."""
    return self.model.embed_tokens
|
| 1347 |
+
|
| 1348 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_input_embeddings
def set_input_embeddings(self, value):
    """Replace the decoder's token-embedding module with ``value``."""
    self.model.embed_tokens = value
|
| 1351 |
+
|
| 1352 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_output_embeddings
def get_output_embeddings(self):
    """Return the LM head (output projection) module."""
    return self.lm_head
|
| 1355 |
+
|
| 1356 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_output_embeddings
def set_output_embeddings(self, new_embeddings):
    """Replace the LM head with ``new_embeddings``."""
    self.lm_head = new_embeddings
|
| 1359 |
+
|
| 1360 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_decoder
def set_decoder(self, decoder):
    """Swap in a different decoder backbone."""
    self.model = decoder
|
| 1363 |
+
|
| 1364 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_decoder
def get_decoder(self):
    """Return the decoder backbone."""
    return self.model
|
| 1367 |
+
|
| 1368 |
+
@add_start_docstrings_to_model_forward(PHIMOE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=MoeCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
# Ignore copy
def forward(
    self,
    input_ids: torch.LongTensor = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_values: Optional[List[torch.FloatTensor]] = None,
    inputs_embeds: Optional[torch.FloatTensor] = None,
    labels: Optional[torch.LongTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    output_router_logits: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    cache_position: Optional[torch.LongTensor] = None,
    num_logits_to_keep: int = 0,
    **loss_kwargs,
) -> Union[Tuple, MoeCausalLMOutputWithPast]:
    r"""
    Args:
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        num_logits_to_keep (`int`, *optional*):
            Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
            `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
            token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
    Returns:

    Example:

    ```python
    >>> from transformers import AutoTokenizer, PhimoeForCausalLM

    >>> model = PhimoeForCausalLM.from_pretrained("microsoft/Phi-3.5-MoE-instruct")
    >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-MoE-instruct")

    >>> prompt = "Hey, are you conscious? Can you talk to me?"
    >>> inputs = tokenizer(prompt, return_tensors="pt")

    >>> # Generate
    >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
    >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
    "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
    ```"""
    # Long-context rope switching invalidates the KV cache exactly at the
    # original_max_position_embeddings boundary; warn callers bypassing generate().
    if (
        use_cache
        and self.config.rope_scaling
        and cache_position is not None
        and cache_position[0] == self.config.original_max_position_embeddings
    ):
        logger.warning(
            f"If you are not using the generate method, you may encounter nonsensical outputs after the {self.config.original_max_position_embeddings}th token, as the KV cache needs to be recomputed."
        )

    # Resolve optional flags against the model configuration.
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_router_logits = (
        output_router_logits if output_router_logits is not None else self.config.output_router_logits
    )
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
    outputs = self.model(
        input_ids=input_ids,
        attention_mask=attention_mask,
        position_ids=position_ids,
        past_key_values=past_key_values,
        inputs_embeds=inputs_embeds,
        use_cache=use_cache,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        output_router_logits=output_router_logits,
        return_dict=return_dict,
        cache_position=cache_position,
    )

    hidden_states = outputs[0]
    # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
    logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])

    loss = None
    if labels is not None:
        loss = self.loss_function(logits, labels, self.vocab_size, **loss_kwargs)

    aux_loss = None
    if output_router_logits:
        aux_loss = load_balancing_loss_func(
            outputs.router_logits if return_dict else outputs[-1],
            self.num_experts,
            self.num_experts_per_tok,
            attention_mask,
        )
        if labels is not None:
            loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure to reside in the same device

    if not return_dict:
        output = (logits,) + outputs[1:]
        if output_router_logits:
            output = (aux_loss,) + output
        return (loss,) + output if loss is not None else output

    return MoeCausalLMOutputWithPast(
        loss=loss,
        aux_loss=aux_loss,
        logits=logits,
        past_key_values=outputs.past_key_values,
        hidden_states=outputs.hidden_states,
        attentions=outputs.attentions,
        router_logits=outputs.router_logits,
    )
|
| 1480 |
+
|
| 1481 |
+
# Copied from transformers.models.phi3.modeling_phi3.Phi3ForCausalLM.prepare_inputs_for_generation
def prepare_inputs_for_generation(
    self,
    input_ids,
    past_key_values=None,
    attention_mask=None,
    inputs_embeds=None,
    cache_position=None,
    position_ids=None,
    use_cache=True,
    num_logits_to_keep=None,
    **kwargs,
):
    """Prepare one generation step's inputs, dropping the KV cache at the
    long/short rope-factor switching point so it is recomputed."""
    # Overwritten -- this model may need to switch between short and long rope, invalidating the cache in the
    # process

    # When the first time input length reached long and short factor switching point, enforce re-compute cache
    # It will cause downside of slower at this single token position, however, better than current failure.
    crossed_switch_point = (
        past_key_values
        and self.config.rope_scaling
        and input_ids.shape[1] >= self.config.original_max_position_embeddings + 1
    )
    if crossed_switch_point:
        past_length = cache_position[0]
        if past_length <= self.config.original_max_position_embeddings:
            past_key_values = None

    model_inputs = super().prepare_inputs_for_generation(
        input_ids=input_ids,
        past_key_values=past_key_values,
        attention_mask=attention_mask,
        inputs_embeds=inputs_embeds,
        cache_position=cache_position,
        position_ids=position_ids,
        use_cache=use_cache,
        num_logits_to_keep=num_logits_to_keep,
        **kwargs,
    )
    return model_inputs
|
| 1520 |
+
|
| 1521 |
+
|
| 1522 |
+
@add_start_docstrings(
|
| 1523 |
+
"""
|
| 1524 |
+
The Phimoe Model transformer with a sequence classification head on top (linear layer).
|
| 1525 |
+
[`PhimoeForSequenceClassification`] uses the last token in order to do the classification, as other causal models
|
| 1526 |
+
(e.g. GPT-2) do.
|
| 1527 |
+
Since it does classification on the last token, it requires to know the position of the last token. If a
|
| 1528 |
+
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
|
| 1529 |
+
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
|
| 1530 |
+
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
|
| 1531 |
+
each row of the batch).
|
| 1532 |
+
""",
|
| 1533 |
+
PHIMOE_START_DOCSTRING,
|
| 1534 |
+
)
|
| 1535 |
+
|
| 1536 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Phimoe, LLAMA->PHIMOE
|
| 1537 |
+
class PhimoeForSequenceClassification(PhimoePreTrainedModel):
|
| 1538 |
+
def __init__(self, config):
    """Phimoe decoder with a linear classification head scored on the last
    non-padding token of each sequence."""
    super().__init__(config)
    self.num_labels = config.num_labels
    self.model = PhimoeModel(config)
    self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

    # Initialize weights and apply final processing.
    self.post_init()
|
| 1546 |
+
|
| 1547 |
+
def get_input_embeddings(self):
    """Return the token-embedding module of the wrapped decoder."""
    return self.model.embed_tokens
|
| 1549 |
+
|
| 1550 |
+
def set_input_embeddings(self, value):
    """Replace the decoder's token-embedding module with ``value``."""
    self.model.embed_tokens = value
|
| 1552 |
+
|
| 1553 |
+
@add_start_docstrings_to_model_forward(PHIMOE_INPUTS_DOCSTRING)
def forward(
    self,
    input_ids: Optional[torch.LongTensor] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
    inputs_embeds: Optional[torch.FloatTensor] = None,
    labels: Optional[torch.LongTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
) -> Union[Tuple, SequenceClassifierOutputWithPast]:
    r"""
    labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
        Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
        config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
        `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
    """
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    transformer_outputs = self.model(
        input_ids,
        attention_mask=attention_mask,
        position_ids=position_ids,
        past_key_values=past_key_values,
        inputs_embeds=inputs_embeds,
        use_cache=use_cache,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )
    hidden_states = transformer_outputs[0]
    logits = self.score(hidden_states)

    batch_size = input_ids.shape[0] if input_ids is not None else inputs_embeds.shape[0]

    if self.config.pad_token_id is None and batch_size != 1:
        raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
    if self.config.pad_token_id is None or input_ids is None:
        # Without a pad token (or without token ids) fall back to the last position.
        sequence_lengths = -1
    else:
        # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
        sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
        sequence_lengths = sequence_lengths % input_ids.shape[-1]
        sequence_lengths = sequence_lengths.to(logits.device)

    # Pool the logit of the last relevant token of each row.
    pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]

    loss = None
    if labels is not None:
        loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)

    if not return_dict:
        output = (pooled_logits,) + transformer_outputs[1:]
        return ((loss,) + output) if loss is not None else output

    return SequenceClassifierOutputWithPast(
        loss=loss,
        logits=pooled_logits,
        past_key_values=transformer_outputs.past_key_values,
        hidden_states=transformer_outputs.hidden_states,
        attentions=transformer_outputs.attentions,
    )
|
| 1624 |
+
|
| 1625 |
+
|
| 1626 |
+
# Public re-export surface of this module.
__all__ = [
    "PhimoePreTrainedModel",
    "PhimoeModel",
    "PhimoeForCausalLM",
    "PhimoeForSequenceClassification",
]
|
phi4/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (719 Bytes). View file
|
|
|
phi4/lib/python3.10/site-packages/transformers/utils/__init__.py
ADDED
|
@@ -0,0 +1,320 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
# coding=utf-8
|
| 3 |
+
|
| 4 |
+
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
|
| 5 |
+
#
|
| 6 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 7 |
+
# you may not use this file except in compliance with the License.
|
| 8 |
+
# You may obtain a copy of the License at
|
| 9 |
+
#
|
| 10 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 11 |
+
#
|
| 12 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 13 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 14 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 15 |
+
# See the License for the specific language governing permissions and
|
| 16 |
+
# limitations under the License.
|
| 17 |
+
|
| 18 |
+
from functools import lru_cache
|
| 19 |
+
from typing import FrozenSet
|
| 20 |
+
|
| 21 |
+
from huggingface_hub import get_full_repo_name # for backward compatibility
|
| 22 |
+
from huggingface_hub.constants import HF_HUB_DISABLE_TELEMETRY as DISABLE_TELEMETRY # for backward compatibility
|
| 23 |
+
from packaging import version
|
| 24 |
+
|
| 25 |
+
from .. import __version__
|
| 26 |
+
from .backbone_utils import BackboneConfigMixin, BackboneMixin
|
| 27 |
+
from .chat_template_utils import DocstringParsingException, TypeHintParsingException, get_json_schema
|
| 28 |
+
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
|
| 29 |
+
from .doc import (
|
| 30 |
+
add_code_sample_docstrings,
|
| 31 |
+
add_end_docstrings,
|
| 32 |
+
add_start_docstrings,
|
| 33 |
+
add_start_docstrings_to_model_forward,
|
| 34 |
+
copy_func,
|
| 35 |
+
replace_return_docstrings,
|
| 36 |
+
)
|
| 37 |
+
from .generic import (
|
| 38 |
+
ContextManagers,
|
| 39 |
+
ExplicitEnum,
|
| 40 |
+
LossKwargs,
|
| 41 |
+
ModelOutput,
|
| 42 |
+
PaddingStrategy,
|
| 43 |
+
TensorType,
|
| 44 |
+
add_model_info_to_auto_map,
|
| 45 |
+
add_model_info_to_custom_pipelines,
|
| 46 |
+
cached_property,
|
| 47 |
+
can_return_loss,
|
| 48 |
+
expand_dims,
|
| 49 |
+
filter_out_non_signature_kwargs,
|
| 50 |
+
find_labels,
|
| 51 |
+
flatten_dict,
|
| 52 |
+
infer_framework,
|
| 53 |
+
is_jax_tensor,
|
| 54 |
+
is_numpy_array,
|
| 55 |
+
is_tensor,
|
| 56 |
+
is_tf_symbolic_tensor,
|
| 57 |
+
is_tf_tensor,
|
| 58 |
+
is_timm_config_dict,
|
| 59 |
+
is_timm_local_checkpoint,
|
| 60 |
+
is_torch_device,
|
| 61 |
+
is_torch_dtype,
|
| 62 |
+
is_torch_tensor,
|
| 63 |
+
reshape,
|
| 64 |
+
squeeze,
|
| 65 |
+
strtobool,
|
| 66 |
+
tensor_size,
|
| 67 |
+
to_numpy,
|
| 68 |
+
to_py_obj,
|
| 69 |
+
torch_float,
|
| 70 |
+
torch_int,
|
| 71 |
+
transpose,
|
| 72 |
+
working_or_temp_dir,
|
| 73 |
+
)
|
| 74 |
+
from .hub import (
|
| 75 |
+
CLOUDFRONT_DISTRIB_PREFIX,
|
| 76 |
+
HF_MODULES_CACHE,
|
| 77 |
+
HUGGINGFACE_CO_PREFIX,
|
| 78 |
+
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
|
| 79 |
+
PYTORCH_PRETRAINED_BERT_CACHE,
|
| 80 |
+
PYTORCH_TRANSFORMERS_CACHE,
|
| 81 |
+
S3_BUCKET_PREFIX,
|
| 82 |
+
TRANSFORMERS_CACHE,
|
| 83 |
+
TRANSFORMERS_DYNAMIC_MODULE_NAME,
|
| 84 |
+
EntryNotFoundError,
|
| 85 |
+
PushInProgress,
|
| 86 |
+
PushToHubMixin,
|
| 87 |
+
RepositoryNotFoundError,
|
| 88 |
+
RevisionNotFoundError,
|
| 89 |
+
cached_file,
|
| 90 |
+
default_cache_path,
|
| 91 |
+
define_sagemaker_information,
|
| 92 |
+
download_url,
|
| 93 |
+
extract_commit_hash,
|
| 94 |
+
get_cached_models,
|
| 95 |
+
get_file_from_repo,
|
| 96 |
+
has_file,
|
| 97 |
+
http_user_agent,
|
| 98 |
+
is_offline_mode,
|
| 99 |
+
is_remote_url,
|
| 100 |
+
move_cache,
|
| 101 |
+
send_example_telemetry,
|
| 102 |
+
try_to_load_from_cache,
|
| 103 |
+
)
|
| 104 |
+
from .import_utils import (
|
| 105 |
+
ACCELERATE_MIN_VERSION,
|
| 106 |
+
ENV_VARS_TRUE_AND_AUTO_VALUES,
|
| 107 |
+
ENV_VARS_TRUE_VALUES,
|
| 108 |
+
GGUF_MIN_VERSION,
|
| 109 |
+
TORCH_FX_REQUIRED_VERSION,
|
| 110 |
+
USE_JAX,
|
| 111 |
+
USE_TF,
|
| 112 |
+
USE_TORCH,
|
| 113 |
+
XLA_FSDPV2_MIN_VERSION,
|
| 114 |
+
DummyObject,
|
| 115 |
+
OptionalDependencyNotAvailable,
|
| 116 |
+
_LazyModule,
|
| 117 |
+
ccl_version,
|
| 118 |
+
direct_transformers_import,
|
| 119 |
+
get_torch_version,
|
| 120 |
+
is_accelerate_available,
|
| 121 |
+
is_apex_available,
|
| 122 |
+
is_aqlm_available,
|
| 123 |
+
is_auto_awq_available,
|
| 124 |
+
is_auto_gptq_available,
|
| 125 |
+
is_av_available,
|
| 126 |
+
is_bitsandbytes_available,
|
| 127 |
+
is_bitsandbytes_multi_backend_available,
|
| 128 |
+
is_bs4_available,
|
| 129 |
+
is_coloredlogs_available,
|
| 130 |
+
is_compressed_tensors_available,
|
| 131 |
+
is_cv2_available,
|
| 132 |
+
is_cython_available,
|
| 133 |
+
is_datasets_available,
|
| 134 |
+
is_detectron2_available,
|
| 135 |
+
is_eetq_available,
|
| 136 |
+
is_essentia_available,
|
| 137 |
+
is_faiss_available,
|
| 138 |
+
is_fbgemm_gpu_available,
|
| 139 |
+
is_flash_attn_2_available,
|
| 140 |
+
is_flash_attn_greater_or_equal,
|
| 141 |
+
is_flash_attn_greater_or_equal_2_10,
|
| 142 |
+
is_flax_available,
|
| 143 |
+
is_flute_available,
|
| 144 |
+
is_fsdp_available,
|
| 145 |
+
is_ftfy_available,
|
| 146 |
+
is_g2p_en_available,
|
| 147 |
+
is_galore_torch_available,
|
| 148 |
+
is_gguf_available,
|
| 149 |
+
is_grokadamw_available,
|
| 150 |
+
is_hadamard_available,
|
| 151 |
+
is_hqq_available,
|
| 152 |
+
is_in_notebook,
|
| 153 |
+
is_ipex_available,
|
| 154 |
+
is_jieba_available,
|
| 155 |
+
is_jinja_available,
|
| 156 |
+
is_jumanpp_available,
|
| 157 |
+
is_kenlm_available,
|
| 158 |
+
is_keras_nlp_available,
|
| 159 |
+
is_levenshtein_available,
|
| 160 |
+
is_librosa_available,
|
| 161 |
+
is_liger_kernel_available,
|
| 162 |
+
is_lomo_available,
|
| 163 |
+
is_mlx_available,
|
| 164 |
+
is_natten_available,
|
| 165 |
+
is_ninja_available,
|
| 166 |
+
is_nltk_available,
|
| 167 |
+
is_onnx_available,
|
| 168 |
+
is_openai_available,
|
| 169 |
+
is_optimum_available,
|
| 170 |
+
is_optimum_quanto_available,
|
| 171 |
+
is_pandas_available,
|
| 172 |
+
is_peft_available,
|
| 173 |
+
is_phonemizer_available,
|
| 174 |
+
is_pretty_midi_available,
|
| 175 |
+
is_protobuf_available,
|
| 176 |
+
is_psutil_available,
|
| 177 |
+
is_py3nvml_available,
|
| 178 |
+
is_pyctcdecode_available,
|
| 179 |
+
is_pytesseract_available,
|
| 180 |
+
is_pytest_available,
|
| 181 |
+
is_pytorch_quantization_available,
|
| 182 |
+
is_rjieba_available,
|
| 183 |
+
is_sacremoses_available,
|
| 184 |
+
is_safetensors_available,
|
| 185 |
+
is_sagemaker_dp_enabled,
|
| 186 |
+
is_sagemaker_mp_enabled,
|
| 187 |
+
is_schedulefree_available,
|
| 188 |
+
is_scipy_available,
|
| 189 |
+
is_sentencepiece_available,
|
| 190 |
+
is_seqio_available,
|
| 191 |
+
is_sklearn_available,
|
| 192 |
+
is_soundfile_available,
|
| 193 |
+
is_spacy_available,
|
| 194 |
+
is_speech_available,
|
| 195 |
+
is_sudachi_available,
|
| 196 |
+
is_sudachi_projection_available,
|
| 197 |
+
is_tensorflow_probability_available,
|
| 198 |
+
is_tensorflow_text_available,
|
| 199 |
+
is_tf2onnx_available,
|
| 200 |
+
is_tf_available,
|
| 201 |
+
is_tiktoken_available,
|
| 202 |
+
is_timm_available,
|
| 203 |
+
is_tokenizers_available,
|
| 204 |
+
is_torch_available,
|
| 205 |
+
is_torch_bf16_available,
|
| 206 |
+
is_torch_bf16_available_on_device,
|
| 207 |
+
is_torch_bf16_cpu_available,
|
| 208 |
+
is_torch_bf16_gpu_available,
|
| 209 |
+
is_torch_compile_available,
|
| 210 |
+
is_torch_cuda_available,
|
| 211 |
+
is_torch_deterministic,
|
| 212 |
+
is_torch_flex_attn_available,
|
| 213 |
+
is_torch_fp16_available_on_device,
|
| 214 |
+
is_torch_fx_available,
|
| 215 |
+
is_torch_fx_proxy,
|
| 216 |
+
is_torch_greater_or_equal,
|
| 217 |
+
is_torch_mlu_available,
|
| 218 |
+
is_torch_mps_available,
|
| 219 |
+
is_torch_musa_available,
|
| 220 |
+
is_torch_neuroncore_available,
|
| 221 |
+
is_torch_npu_available,
|
| 222 |
+
is_torch_sdpa_available,
|
| 223 |
+
is_torch_tensorrt_fx_available,
|
| 224 |
+
is_torch_tf32_available,
|
| 225 |
+
is_torch_tpu_available,
|
| 226 |
+
is_torch_xla_available,
|
| 227 |
+
is_torch_xpu_available,
|
| 228 |
+
is_torchao_available,
|
| 229 |
+
is_torchaudio_available,
|
| 230 |
+
is_torchdistx_available,
|
| 231 |
+
is_torchdynamo_available,
|
| 232 |
+
is_torchdynamo_compiling,
|
| 233 |
+
is_torchvision_available,
|
| 234 |
+
is_torchvision_v2_available,
|
| 235 |
+
is_training_run_on_sagemaker,
|
| 236 |
+
is_uroman_available,
|
| 237 |
+
is_vision_available,
|
| 238 |
+
is_vptq_available,
|
| 239 |
+
requires_backends,
|
| 240 |
+
torch_only_method,
|
| 241 |
+
)
|
| 242 |
+
from .peft_utils import (
|
| 243 |
+
ADAPTER_CONFIG_NAME,
|
| 244 |
+
ADAPTER_SAFE_WEIGHTS_NAME,
|
| 245 |
+
ADAPTER_WEIGHTS_NAME,
|
| 246 |
+
check_peft_version,
|
| 247 |
+
find_adapter_config_file,
|
| 248 |
+
)
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
# Canonical filenames for model weights, one per serialization format.
# The *_INDEX_NAME variants are the JSON index files used for sharded checkpoints.
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
# Canonical filenames for the JSON sidecar files that accompany a checkpoint.
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
# Image processors are saved under the same filename as feature extractors.
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
PROCESSOR_NAME = "processor_config.json"
CHAT_TEMPLATE_NAME = "chat_template.json"
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

# Marker character used by SentencePiece-style tokenizers for word-initial pieces.
SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

# Small dummy inputs (nested lists of token-id-like ints) used e.g. to build/trace models
# without real data.
MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
def check_min_version(min_version):
    """
    Raise an `ImportError` when the installed transformers version is older than `min_version`.

    Used by the example scripts to fail fast with an actionable message; a `min_version`
    containing "dev" points the user at a source install instead of a release.
    """
    # Fast path: installed version satisfies the requirement.
    if version.parse(__version__) >= version.parse(min_version):
        return

    if "dev" in min_version:
        # A dev version can only come from a source install.
        error_message = (
            "This example requires a source install from HuggingFace Transformers (see "
            "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
        )
    else:
        error_message = f"This example requires a minimum version of {min_version},"
    error_message += f" but the version found is {__version__}.\n"
    raise ImportError(
        error_message
        + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
        "versions of HuggingFace Transformers."
    )
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
@lru_cache()
def get_available_devices() -> FrozenSet[str]:
    """
    Returns a frozenset of devices available for the current PyTorch installation.
    """
    # `cpu` is always supported; every accelerator is probed via its availability helper.
    accelerator_probes = (
        ("cuda", is_torch_cuda_available),
        ("mps", is_torch_mps_available),
        ("xpu", is_torch_xpu_available),
        ("npu", is_torch_npu_available),
        ("mlu", is_torch_mlu_available),
        ("musa", is_torch_musa_available),
    )
    return frozenset({"cpu"} | {name for name, probe in accelerator_probes if probe()})
|
phi4/lib/python3.10/site-packages/transformers/utils/backbone_utils.py
ADDED
|
@@ -0,0 +1,377 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2023 The HuggingFace Inc. team.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
|
| 16 |
+
"""Collection of utils to be used by backbones and their components."""
|
| 17 |
+
|
| 18 |
+
import enum
|
| 19 |
+
import inspect
|
| 20 |
+
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
if TYPE_CHECKING:
|
| 24 |
+
from ..configuration_utils import PretrainedConfig
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class BackboneType(enum.Enum):
    """Which framework provides the backbone implementation."""

    TIMM = "timm"  # backbone comes from the `timm` library
    TRANSFORMERS = "transformers"  # backbone is a native transformers model
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def verify_out_features_out_indices(
    out_features: Optional[Iterable[str]], out_indices: Optional[Iterable[int]], stage_names: Optional[Iterable[str]]
):
    """
    Verify that out_indices and out_features are valid for the given stage_names.

    Args:
        out_features (`Iterable[str]`, *optional*):
            Names of the stages to output. Must be a list, a subset of `stage_names`, free of duplicates and
            ordered consistently with `stage_names`.
        out_indices (`Iterable[int]`, *optional*):
            Indices of the stages to output. Must be a list of valid (possibly negative) indices into
            `stage_names`, free of duplicates and in ascending stage order.
        stage_names (`Iterable[str]`):
            The names of the stages of the backbone.

    Raises:
        ValueError: If `stage_names` is None, or any of the constraints above is violated, or `out_features`
            and `out_indices` are both set but do not refer to the same stages.
    """
    if stage_names is None:
        raise ValueError("Stage_names must be set for transformers backbones")

    if out_features is not None:
        if not isinstance(out_features, list):
            raise ValueError(f"out_features must be a list got {type(out_features)}")
        if any(feat not in stage_names for feat in out_features):
            raise ValueError(f"out_features must be a subset of stage_names: {stage_names} got {out_features}")
        if len(out_features) != len(set(out_features)):
            raise ValueError(f"out_features must not contain any duplicates, got {out_features}")
        if out_features != (sorted_feats := [feat for feat in stage_names if feat in out_features]):
            raise ValueError(
                f"out_features must be in the same order as stage_names, expected {sorted_feats} got {out_features}"
            )

    if out_indices is not None:
        if not isinstance(out_indices, list):
            raise ValueError(f"out_indices must be a list, got {type(out_indices)}")
        # Convert negative indices to their positive equivalent: [-1,] -> [len(stage_names) - 1,]
        positive_indices = tuple(idx % len(stage_names) if idx < 0 else idx for idx in out_indices)
        # Bugfix: the previous `any(idx for idx in positive_indices if idx not in range(...))` tested the
        # truthiness of the offending index itself, so an out-of-range index of 0 slipped through silently.
        if any(idx not in range(len(stage_names)) for idx in positive_indices):
            raise ValueError(f"out_indices must be valid indices for stage_names {stage_names}, got {out_indices}")
        if len(positive_indices) != len(set(positive_indices)):
            msg = f"out_indices must not contain any duplicates, got {out_indices}"
            # Bugfix: compare against `tuple(out_indices)` — a tuple never equals a list, so the
            # "(equivalent to ...)" suffix used to be appended even when no index was converted.
            msg += f"(equivalent to {positive_indices}))" if positive_indices != tuple(out_indices) else ""
            raise ValueError(msg)
        if positive_indices != tuple(sorted(positive_indices)):
            sorted_negative = [idx for _, idx in sorted(zip(positive_indices, out_indices), key=lambda x: x[0])]
            raise ValueError(
                f"out_indices must be in the same order as stage_names, expected {sorted_negative} got {out_indices}"
            )

    if out_features is not None and out_indices is not None:
        if len(out_features) != len(out_indices):
            raise ValueError("out_features and out_indices should have the same length if both are set")
        if out_features != [stage_names[idx] for idx in out_indices]:
            raise ValueError("out_features and out_indices should correspond to the same stages if both are set")
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def _align_output_features_output_indices(
|
| 78 |
+
out_features: Optional[List[str]],
|
| 79 |
+
out_indices: Optional[Union[List[int], Tuple[int]]],
|
| 80 |
+
stage_names: List[str],
|
| 81 |
+
):
|
| 82 |
+
"""
|
| 83 |
+
Finds the corresponding `out_features` and `out_indices` for the given `stage_names`.
|
| 84 |
+
|
| 85 |
+
The logic is as follows:
|
| 86 |
+
- `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the
|
| 87 |
+
`out_indices`.
|
| 88 |
+
- `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the
|
| 89 |
+
`out_features`.
|
| 90 |
+
- `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage.
|
| 91 |
+
- `out_indices` and `out_features` set: input `out_indices` and `out_features` are returned.
|
| 92 |
+
|
| 93 |
+
Args:
|
| 94 |
+
out_features (`List[str]`): The names of the features for the backbone to output.
|
| 95 |
+
out_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output.
|
| 96 |
+
stage_names (`List[str]`): The names of the stages of the backbone.
|
| 97 |
+
"""
|
| 98 |
+
if out_indices is None and out_features is None:
|
| 99 |
+
out_indices = [len(stage_names) - 1]
|
| 100 |
+
out_features = [stage_names[-1]]
|
| 101 |
+
elif out_indices is None and out_features is not None:
|
| 102 |
+
out_indices = [stage_names.index(layer) for layer in out_features]
|
| 103 |
+
elif out_features is None and out_indices is not None:
|
| 104 |
+
out_features = [stage_names[idx] for idx in out_indices]
|
| 105 |
+
return out_features, out_indices
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def get_aligned_output_features_output_indices(
    out_features: Optional[List[str]],
    out_indices: Optional[Union[List[int], Tuple[int]]],
    stage_names: List[str],
) -> Tuple[List[str], List[int]]:
    """
    Get the `out_features` and `out_indices` so that they are aligned.

    The logic is as follows:
        - both unset: both default to the last stage;
        - only `out_features` set: `out_indices` is derived from it;
        - only `out_indices` set: `out_features` is derived from it;
        - both set: they are verified to be aligned.

    Args:
        out_features (`List[str]`): The names of the features for the backbone to output.
        out_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output.
        stage_names (`List[str]`): The names of the stages of the backbone.
    """
    # Normalize indices to a list so verification and alignment see a consistent type.
    indices_as_list = list(out_indices) if out_indices is not None else None
    # Validate the raw inputs before attempting to align them.
    verify_out_features_out_indices(out_features=out_features, out_indices=indices_as_list, stage_names=stage_names)
    aligned_features, aligned_indices = _align_output_features_output_indices(
        out_features=out_features, out_indices=indices_as_list, stage_names=stage_names
    )
    # Re-validate so any derived values are guaranteed consistent as well.
    verify_out_features_out_indices(
        out_features=aligned_features, out_indices=aligned_indices, stage_names=stage_names
    )
    return aligned_features, aligned_indices
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
class BackboneMixin:
    """
    Mixin for backbone models: initializes the stage bookkeeping (`stage_names`, `num_features`,
    `out_features`/`out_indices`) from either a timm or a transformers backbone and exposes the
    channel counts of the selected stages.
    """

    # Set by `_init_backbone`: BackboneType.TIMM or BackboneType.TRANSFORMERS.
    backbone_type: Optional[BackboneType] = None

    def _init_timm_backbone(self, config) -> None:
        """
        Initialize the backbone model from timm. The backbone must already be loaded to `self._backbone`.

        Raises:
            ValueError: If `self._backbone` has not been set yet.
        """
        if getattr(self, "_backbone", None) is None:
            raise ValueError("self._backbone must be set before calling _init_timm_backbone")

        # These will disagree with the defaults for the transformers models e.g. for resnet50
        # the transformer model has out_features = ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
        # the timm model has out_features = ['act', 'layer1', 'layer2', 'layer3', 'layer4']
        self.stage_names = [stage["module"] for stage in self._backbone.feature_info.info]
        self.num_features = [stage["num_chs"] for stage in self._backbone.feature_info.info]

        # In some timm versions, out_indices reflects the input type of out_indices on the `create_model` call,
        # in later versions >= 1, it is always a tuple
        out_indices = list(self._backbone.feature_info.out_indices)
        out_features = self._backbone.feature_info.module_name()

        # We verify the out indices and out features are valid
        verify_out_features_out_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
        self._out_features, self._out_indices = out_features, out_indices

    def _init_transformers_backbone(self, config) -> None:
        """Initialize the stage bookkeeping from a transformers backbone config."""
        stage_names = getattr(config, "stage_names")
        out_features = getattr(config, "out_features", None)
        out_indices = getattr(config, "out_indices", None)

        self.stage_names = stage_names
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=stage_names
        )
        # Number of channels for each stage. This is set in the transformer backbone model init
        self.num_features = None

    def _init_backbone(self, config) -> None:
        """
        Method to initialize the backbone. This method is called by the constructor of the base class after the
        pretrained model weights have been loaded.
        """
        self.config = config

        self.use_timm_backbone = getattr(config, "use_timm_backbone", False)
        self.backbone_type = BackboneType.TIMM if self.use_timm_backbone else BackboneType.TRANSFORMERS

        # Dispatch on the backbone flavour; the else branch is unreachable today but guards
        # against future BackboneType members being added without an init path.
        if self.backbone_type == BackboneType.TIMM:
            self._init_timm_backbone(config)
        elif self.backbone_type == BackboneType.TRANSFORMERS:
            self._init_transformers_backbone(config)
        else:
            raise ValueError(f"backbone_type {self.backbone_type} not supported.")

    @property
    def out_features(self):
        """Names of the stages whose feature maps are returned."""
        return self._out_features

    @out_features.setter
    def out_features(self, out_features: List[str]):
        """
        Set the out_features attribute. This will also update the out_indices attribute to match the new out_features.
        """
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=None, stage_names=self.stage_names
        )

    @property
    def out_indices(self):
        """Indices (into `stage_names`) of the stages whose feature maps are returned."""
        return self._out_indices

    @out_indices.setter
    def out_indices(self, out_indices: Union[Tuple[int], List[int]]):
        """
        Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices.
        """
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=None, out_indices=out_indices, stage_names=self.stage_names
        )

    @property
    def out_feature_channels(self):
        """Mapping of every stage name to its channel count."""
        # the current backbones will output the number of channels for each stage
        # even if that stage is not in the out_features list.
        return {stage: self.num_features[i] for i, stage in enumerate(self.stage_names)}

    @property
    def channels(self):
        """Channel counts of the selected stages, in `out_features` order."""
        return [self.out_feature_channels[name] for name in self.out_features]

    def forward_with_filtered_kwargs(self, *args, **kwargs):
        """Call `forward`, silently dropping any kwargs it does not accept."""
        signature = dict(inspect.signature(self.forward).parameters)
        filtered_kwargs = {k: v for k, v in kwargs.items() if k in signature}
        return self(*args, **filtered_kwargs)

    def forward(
        self,
        pixel_values,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        # Abstract: concrete backbones must override this.
        raise NotImplementedError("This method should be implemented by the derived class.")

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PretrainedConfig` to
        include the `out_features` and `out_indices` attributes.
        """
        output = super().to_dict()
        output["out_features"] = output.pop("_out_features")
        output["out_indices"] = output.pop("_out_indices")
        return output
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
class BackboneConfigMixin:
    """
    A Mixin to support handling the `out_features` and `out_indices` attributes for the backbone configurations.
    """

    @property
    def out_features(self):
        """Names of the stages whose feature maps the backbone should return."""
        return self._out_features

    @out_features.setter
    def out_features(self, out_features: List[str]):
        """
        Set the out_features attribute. This will also update the out_indices attribute to match the new out_features.
        """
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=None, stage_names=self.stage_names
        )

    @property
    def out_indices(self):
        """Indices (into `stage_names`) of the stages whose feature maps the backbone should return."""
        return self._out_indices

    @out_indices.setter
    def out_indices(self, out_indices: Union[Tuple[int], List[int]]):
        """
        Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices.
        """
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=None, out_indices=out_indices, stage_names=self.stage_names
        )

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PretrainedConfig` to
        include the `out_features` and `out_indices` attributes.
        """
        output = super().to_dict()
        output["out_features"] = output.pop("_out_features")
        output["out_indices"] = output.pop("_out_indices")
        return output
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
def load_backbone(config):
    """
    Loads the backbone model from a config object.

    If the config is from the backbone model itself, then we return a backbone model with randomly initialized
    weights.

    If the config is from the parent model of the backbone model itself, then we load the pretrained backbone weights
    if specified.

    Args:
        config: Either a backbone config, or the config of a parent model that carries backbone-related
            attributes (`backbone_config`, `use_timm_backbone`, `use_pretrained_backbone`, `backbone`,
            `backbone_kwargs`).

    Returns:
        The instantiated backbone model.

    Raises:
        ValueError: If the backbone-related attributes on `config` are mutually inconsistent.
    """
    # Imported locally to avoid a circular import with the top-level transformers package.
    from transformers import AutoBackbone, AutoConfig

    backbone_config = getattr(config, "backbone_config", None)
    use_timm_backbone = getattr(config, "use_timm_backbone", None)
    use_pretrained_backbone = getattr(config, "use_pretrained_backbone", None)
    backbone_checkpoint = getattr(config, "backbone", None)
    backbone_kwargs = getattr(config, "backbone_kwargs", None)
    backbone_kwargs = {} if backbone_kwargs is None else backbone_kwargs

    if backbone_kwargs and backbone_config is not None:
        raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")

    # If there is a backbone_config and a backbone checkpoint, and use_pretrained_backbone=False then the desired
    # behaviour is ill-defined: do you want to load from the checkpoint's config or the backbone_config?
    if backbone_config is not None and backbone_checkpoint is not None and use_pretrained_backbone is not None:
        raise ValueError("Cannot specify both config.backbone_config and config.backbone")

    # If any of the following are set, then the config passed in is from a model which contains a backbone.
    # FIX: the original condition listed `backbone_checkpoint is None` twice; the duplicate clause has been
    # removed (`a and a` == `a`, so behavior is unchanged). NOTE(review): the duplicate may have been intended
    # as `use_pretrained_backbone is None` — confirm before tightening this check.
    if backbone_config is None and use_timm_backbone is None and backbone_checkpoint is None:
        return AutoBackbone.from_config(config=config, **backbone_kwargs)

    # config from the parent model that has a backbone
    if use_timm_backbone:
        if backbone_checkpoint is None:
            raise ValueError("config.backbone must be set if use_timm_backbone is True")
        # Because of how timm backbones were originally added to models, we need to pass in use_pretrained_backbone
        # to determine whether to load the pretrained weights.
        backbone = AutoBackbone.from_pretrained(
            backbone_checkpoint,
            use_timm_backbone=use_timm_backbone,
            use_pretrained_backbone=use_pretrained_backbone,
            **backbone_kwargs,
        )
    elif use_pretrained_backbone:
        if backbone_checkpoint is None:
            raise ValueError("config.backbone must be set if use_pretrained_backbone is True")
        backbone = AutoBackbone.from_pretrained(backbone_checkpoint, **backbone_kwargs)
    else:
        if backbone_config is None and backbone_checkpoint is None:
            raise ValueError("Either config.backbone_config or config.backbone must be set")
        if backbone_config is None:
            backbone_config = AutoConfig.from_pretrained(backbone_checkpoint, **backbone_kwargs)
        backbone = AutoBackbone.from_config(config=backbone_config)
    return backbone
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
def verify_backbone_config_arguments(
    use_timm_backbone: bool,
    use_pretrained_backbone: bool,
    backbone: Optional[str],
    backbone_config: Optional[Union[dict, "PretrainedConfig"]],
    backbone_kwargs: Optional[dict],
):
    """
    Verify that the config arguments to be passed to load_backbone are valid
    """
    # `backbone_config` is mutually exclusive with each of: `backbone`, `use_timm_backbone`,
    # and any non-empty `backbone_kwargs`.
    has_backbone_config = backbone_config is not None

    if has_backbone_config and backbone is not None:
        raise ValueError("You can't specify both `backbone` and `backbone_config`.")

    if has_backbone_config and use_timm_backbone:
        raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

    if has_backbone_config and backbone_kwargs is not None and backbone_kwargs:
        raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
|
phi4/lib/python3.10/site-packages/transformers/utils/bitsandbytes.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# This module is a deprecation shim: the bitsandbytes helpers now live in
# `transformers.integrations`, and this file only re-exports them so that old
# import paths keep working while emitting a FutureWarning.
import warnings


warnings.warn(
    "transformers.utils.bitsandbytes module is deprecated and will be removed in a future version. Please import bitsandbytes modules directly from transformers.integrations",
    FutureWarning,
)

# Re-export the helpers from their new home. The `# noqa` suppresses unused-import
# lint warnings, since these names exist solely to be re-exported.
from ..integrations import (  # noqa
    get_keys_to_not_convert,
    replace_8bit_linear,
    replace_with_bnb_linear,
    set_module_8bit_tensor_to_device,
    set_module_quantized_tensor_to_device,
)
|
phi4/lib/python3.10/site-packages/transformers/utils/chat_template_utils.py
ADDED
|
@@ -0,0 +1,430 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import inspect
|
| 16 |
+
import json
|
| 17 |
+
import re
|
| 18 |
+
import types
|
| 19 |
+
from contextlib import contextmanager
|
| 20 |
+
from datetime import datetime
|
| 21 |
+
from functools import lru_cache
|
| 22 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, get_args, get_origin, get_type_hints
|
| 23 |
+
|
| 24 |
+
from packaging import version
|
| 25 |
+
|
| 26 |
+
from .import_utils import is_jinja_available, is_torch_available, is_vision_available
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
if is_jinja_available():
|
| 30 |
+
import jinja2
|
| 31 |
+
from jinja2.ext import Extension
|
| 32 |
+
from jinja2.sandbox import ImmutableSandboxedEnvironment
|
| 33 |
+
else:
|
| 34 |
+
jinja2 = None
|
| 35 |
+
|
| 36 |
+
if is_vision_available():
|
| 37 |
+
from PIL.Image import Image
|
| 38 |
+
|
| 39 |
+
if is_torch_available():
|
| 40 |
+
from torch import Tensor
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
# Type-hint atoms that map directly onto a JSON-schema primitive (see _get_json_schema_type).
BASIC_TYPES = (int, float, str, bool, Any, type(None), ...)
# Extracts the initial segment of the docstring, containing the function description
description_re = re.compile(r"^(.*?)[\n\s]*(Args:|Returns:|Raises:|\Z)", re.DOTALL)
# Extracts the Args: block from the docstring
args_re = re.compile(r"\n\s*Args:\n\s*(.*?)[\n\s]*(Returns:|Raises:|\Z)", re.DOTALL)
# Splits the Args: block into individual arguments
args_split_re = re.compile(
    r"""
    (?:^|\n)  # Match the start of the args block, or a newline
    \s*(\w+):\s*  # Capture the argument name and strip spacing
    (.*?)\s*  # Capture the argument description, which can span multiple lines, and strip trailing spacing
    (?=\n\s*\w+:|\Z)  # Stop when you hit the next argument or the end of the block
    """,
    re.DOTALL | re.VERBOSE,
)
# Extracts the Returns: block from the docstring, if present. Note that most chat templates ignore the return type/doc!
returns_re = re.compile(r"\n\s*Returns:\n\s*(.*?)[\n\s]*(Raises:|\Z)", re.DOTALL)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
class TypeHintParsingException(Exception):
    """Exception raised for errors in parsing type hints to generate JSON schemas"""
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class DocstringParsingException(Exception):
    """Exception raised for errors in parsing docstrings to generate JSON schemas"""
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def _get_json_schema_type(param_type: str) -> Dict[str, str]:
    """Map a Python type object to its JSON-schema type descriptor.

    NOTE(review): despite the `str` annotation, callers (`_parse_type_hint`) pass type
    *objects* such as `int` or `Image`, not strings — confirm and fix the annotation.
    Unknown types fall back to `{"type": "object"}` via `.get`, so this function never
    raises KeyError; the `except KeyError` in `_parse_type_hint` is therefore unreachable.
    """
    type_mapping = {
        int: {"type": "integer"},
        float: {"type": "number"},
        str: {"type": "string"},
        bool: {"type": "boolean"},
        type(None): {"type": "null"},
        Any: {},
    }
    # PIL images and torch tensors only map to schema types when the optional
    # dependencies are installed.
    if is_vision_available():
        type_mapping[Image] = {"type": "image"}
    if is_torch_available():
        type_mapping[Tensor] = {"type": "audio"}
    return type_mapping.get(param_type, {"type": "object"})
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def _parse_type_hint(hint: str) -> Dict:
    """Recursively convert a Python type hint into a JSON-schema fragment.

    Handles bare types, Union / Optional (including PEP 604 ``X | Y``), List, Tuple and
    Dict hints. NOTE(review): despite the annotation, `hint` is a typing object, not a
    string — confirm and fix the annotation.

    Raises:
        TypeHintParsingException: For single-element or variadic (``...``) tuples, and
            for unrecognized hints with a typing origin.
    """
    origin = get_origin(hint)
    args = get_args(hint)

    if origin is None:
        # A bare type (int, str, a custom class, ...) with no generic parameters.
        try:
            return _get_json_schema_type(hint)
        except KeyError:
            # NOTE(review): _get_json_schema_type uses dict.get with a default, so this
            # except clause appears unreachable — unknown types silently become "object".
            raise TypeHintParsingException(
                "Couldn't parse this type hint, likely due to a custom class or object: ", hint
            )

    elif origin is Union or (hasattr(types, "UnionType") and origin is types.UnionType):
        # Recurse into each of the subtypes in the Union, except None, which is handled separately at the end
        subtypes = [_parse_type_hint(t) for t in args if t is not type(None)]
        if len(subtypes) == 1:
            # A single non-null type can be expressed directly
            return_dict = subtypes[0]
        elif all(isinstance(subtype["type"], str) for subtype in subtypes):
            # A union of basic types can be expressed as a list in the schema
            return_dict = {"type": sorted([subtype["type"] for subtype in subtypes])}
        else:
            # A union of more complex types requires "anyOf"
            return_dict = {"anyOf": subtypes}
        if type(None) in args:
            # Optional[...] is expressed with a "nullable" flag rather than a null subtype.
            return_dict["nullable"] = True
        return return_dict

    elif origin is list:
        if not args:
            return {"type": "array"}
        else:
            # Lists can only have a single type argument, so recurse into it
            return {"type": "array", "items": _parse_type_hint(args[0])}

    elif origin is tuple:
        if not args:
            return {"type": "array"}
        if len(args) == 1:
            raise TypeHintParsingException(
                f"The type hint {str(hint).replace('typing.', '')} is a Tuple with a single element, which "
                "we do not automatically convert to JSON schema as it is rarely necessary. If this input can contain "
                "more than one element, we recommend "
                "using a List[] type instead, or if it really is a single element, remove the Tuple[] wrapper and just "
                "pass the element directly."
            )
        if ... in args:
            raise TypeHintParsingException(
                "Conversion of '...' is not supported in Tuple type hints. "
                "Use List[] types for variable-length"
                " inputs instead."
            )
        # Fixed-length tuples map to JSON-schema "prefixItems", one schema per position.
        return {"type": "array", "prefixItems": [_parse_type_hint(t) for t in args]}

    elif origin is dict:
        # The JSON equivalent to a dict is 'object', which mandates that all keys are strings
        # However, we can specify the type of the dict values with "additionalProperties"
        out = {"type": "object"}
        if len(args) == 2:
            out["additionalProperties"] = _parse_type_hint(args[1])
        return out

    raise TypeHintParsingException("Couldn't parse this type hint, likely due to a custom class or object: ", hint)
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def _convert_type_hints_to_json_schema(func: Callable) -> Dict:
    """Build a JSON schema for *func*'s parameters from its signature and type hints.

    Parameters without a default value are listed under "required". Raises
    TypeHintParsingException if any parameter lacks a type annotation.
    """
    hints = get_type_hints(func)
    sig = inspect.signature(func)

    required = []
    for name, parameter in sig.parameters.items():
        if parameter.annotation == inspect.Parameter.empty:
            raise TypeHintParsingException(f"Argument {parameter.name} is missing a type hint in function {func.__name__}")
        if parameter.default == inspect.Parameter.empty:
            required.append(name)

    # Note: get_type_hints also yields a "return" entry when the function annotates its
    # return type; callers strip it out downstream.
    properties = {name: _parse_type_hint(hint) for name, hint in hints.items()}

    schema = {"type": "object", "properties": properties}
    if required:
        schema["required"] = required
    return schema
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def parse_google_format_docstring(docstring: str) -> Tuple[Optional[str], Optional[Dict], Optional[str]]:
    """
    Parses a Google-style docstring to extract the function description,
    argument descriptions, and return description.

    Args:
        docstring (str): The docstring to parse.

    Returns:
        The function description, arguments, and return description.
    """
    # Locate each docstring section with the module-level compiled patterns.
    description_match = description_re.search(docstring)
    args_match = args_re.search(docstring)
    returns_match = returns_re.search(docstring)

    description = None if description_match is None else description_match.group(1).strip()
    returns = None if returns_match is None else returns_match.group(1).strip()

    # Parse the Args: block, if any, into {name: cleaned description}.
    args_dict = {}
    if args_match is not None:
        args_block = args_match.group(1).strip()
        # Remove blank lines before splitting into individual argument entries.
        args_block = "\n".join(line for line in args_block.split("\n") if line.strip())
        for name, text in args_split_re.findall(args_block):
            # Collapse internal line breaks in each description to single spaces.
            args_dict[name] = re.sub(r"\s*\n+\s*", " ", text.strip())

    return description, args_dict, returns
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def get_json_schema(func: Callable) -> Dict:
    """
    Generate a JSON schema describing ``func``, mainly for passing lists of tools to chat templates.

    The schema contains the function's name and description plus the name, type and description of
    every argument. Requirements: the function must have a Google-format docstring (a description,
    then an ``Args:`` block with one entry per argument), and every argument must carry a valid type
    hint. A ``Returns:`` block is optional and, when present, is included in the schema — most chat
    templates ignore it.

    An argument description may end with a ``(choices: [...])`` block, e.g.
    ``(choices: ["tea", "coffee"])``, which is parsed into an ``enum`` field. It is only recognized
    at the very end of the description line.

    Args:
        func: The function to generate a JSON schema for.

    Returns:
        A dictionary of the form ``{"type": "function", "function": {...}}`` containing the schema.

    Raises:
        DocstringParsingException: If the docstring is missing or lacks a description for an argument.
        TypeHintParsingException: If an argument is missing a type hint or the hint cannot be converted.

    Example:
    ```python
    >>> def multiply(x: float, y: float):
    >>>     '''
    >>>     A function that multiplies two numbers
    >>>
    >>>     Args:
    >>>         x: The first number to multiply
    >>>         y: The second number to multiply
    >>>     '''
    >>>     return x * y
    >>>
    >>> print(get_json_schema(multiply))
    {
        "name": "multiply",
        "description": "A function that multiplies two numbers",
        "parameters": {
            "type": "object",
            "properties": {
                "x": {"type": "number", "description": "The first number to multiply"},
                "y": {"type": "number", "description": "The second number to multiply"}
            },
            "required": ["x", "y"]
        }
    }
    ```
    """
    doc = inspect.getdoc(func)
    if not doc:
        raise DocstringParsingException(
            f"Cannot generate JSON schema for {func.__name__} because it has no docstring!"
        )
    doc = doc.strip()
    main_doc, param_descriptions, return_doc = parse_google_format_docstring(doc)

    json_schema = _convert_type_hints_to_json_schema(func)

    # Pull the return annotation (if any) out of the parameter properties.
    return_dict = json_schema["properties"].pop("return", None)
    if return_dict is not None and return_doc is not None:
        # We allow a missing return docstring since most templates ignore it.
        return_dict["description"] = return_doc

    # Attach the docstring description (and optional enum choices) to each argument.
    for arg_name, arg_schema in json_schema["properties"].items():
        if arg_name not in param_descriptions:
            raise DocstringParsingException(
                f"Cannot generate JSON schema for {func.__name__} because the docstring has no description for the argument '{arg_name}'"
            )
        description = param_descriptions[arg_name]
        choices_match = re.search(r"\(choices:\s*(.*?)\)\s*$", description, flags=re.IGNORECASE)
        if choices_match:
            arg_schema["enum"] = [c.strip() for c in json.loads(choices_match.group(1))]
            description = description[: choices_match.start()].strip()
        arg_schema["description"] = description

    output = {"name": func.__name__, "description": main_doc, "parameters": json_schema}
    if return_dict is not None:
        output["return"] = return_dict
    return {"type": "function", "function": output}
|
| 344 |
+
|
| 345 |
+
|
| 346 |
+
def _render_with_assistant_indices(
    compiled_template, messages, tools, documents, add_generation_prompt, **template_kwargs
):
    """Render a compiled chat template while recording assistant-generation spans.

    Returns a tuple of the fully rendered chat string and a list of (start, end)
    character indices — one pair per ``{% generation %}`` block — collected by the
    AssistantTracker extension attached to the template's environment.
    """
    rendered_blocks = []
    generation_indices = []
    # The tracker extension holds references to these two lists; blocks must be
    # appended one at a time as they are generated, because the tracker computes
    # each span's start offset from the blocks rendered so far.
    with compiled_template.environment.activate_tracker(rendered_blocks, generation_indices):
        for block in compiled_template.generate(
            messages=messages,
            tools=tools,
            documents=documents,
            add_generation_prompt=add_generation_prompt,
            **template_kwargs,
        ):
            rendered_blocks.append(block)
        rendered_chat = "".join(rendered_blocks)
    return rendered_chat, generation_indices
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
@lru_cache
def _compile_jinja_template(chat_template):
    """Compile a chat-template string into a jinja2 Template in a sandboxed environment.

    The environment registers a custom ``{% generation %}`` tag (AssistantTracker) that
    records the character spans of assistant-generated text, plus ``raise_exception``,
    a non-HTML-escaping ``tojson`` filter and a ``strftime_now`` global.

    NOTE(review): ``@lru_cache`` with no maxsize is unbounded and keyed on the template
    string — fine for a bounded set of templates, a leak for arbitrary ones.
    """

    class AssistantTracker(Extension):
        # This extension is used to track the indices of assistant-generated tokens in the rendered chat
        tags = {"generation"}

        def __init__(self, environment: ImmutableSandboxedEnvironment):
            # The class is only initiated by jinja.
            super().__init__(environment)
            environment.extend(activate_tracker=self.activate_tracker)
            self._rendered_blocks = None
            self._generation_indices = None

        def parse(self, parser: jinja2.parser.Parser) -> jinja2.nodes.CallBlock:
            # Consume the {% generation %} ... {% endgeneration %} block and wrap its body
            # in a call to _generation_support so spans can be recorded at render time.
            lineno = next(parser.stream).lineno
            body = parser.parse_statements(["name:endgeneration"], drop_needle=True)
            return jinja2.nodes.CallBlock(self.call_method("_generation_support"), [], [], body).set_lineno(lineno)

        @jinja2.pass_eval_context
        def _generation_support(self, context: jinja2.nodes.EvalContext, caller: jinja2.runtime.Macro) -> str:
            rv = caller()
            if self.is_active():
                # Only track generation indices if the tracker is active
                start_index = len("".join(self._rendered_blocks))
                end_index = start_index + len(rv)
                self._generation_indices.append((start_index, end_index))
            return rv

        def is_active(self) -> bool:
            # NOTE(review): returns a list (or None), not a strict bool, despite the
            # annotation — callers only use it in boolean context.
            return self._rendered_blocks or self._generation_indices

        @contextmanager
        def activate_tracker(self, rendered_blocks: List[int], generation_indices: List[int]):
            # Install the caller's lists for the duration of one render; finally-reset
            # ensures the tracker is reusable even if rendering raises.
            try:
                if self.is_active():
                    raise ValueError("AssistantTracker should not be reused before closed")
                self._rendered_blocks = rendered_blocks
                self._generation_indices = generation_indices

                yield
            finally:
                self._rendered_blocks = None
                self._generation_indices = None

    # jinja2 < 3.1 lacks features the templates rely on; fail fast with a clear message.
    if version.parse(jinja2.__version__) < version.parse("3.1.0"):
        raise ImportError(
            "apply_chat_template requires jinja2>=3.1.0 to be installed. Your version is " f"{jinja2.__version__}."
        )

    def raise_exception(message):
        # Exposed to templates so they can signal errors (e.g. unsupported roles).
        raise jinja2.exceptions.TemplateError(message)

    def tojson(x, ensure_ascii=False, indent=None, separators=None, sort_keys=False):
        # We override the built-in tojson filter because Jinja's default filter escapes HTML characters
        # We also expose some options like custom indents and separators
        return json.dumps(x, ensure_ascii=ensure_ascii, indent=indent, separators=separators, sort_keys=sort_keys)

    def strftime_now(format):
        # Lets templates stamp the current date/time, e.g. for system prompts.
        return datetime.now().strftime(format)

    jinja_env = ImmutableSandboxedEnvironment(
        trim_blocks=True, lstrip_blocks=True, extensions=[AssistantTracker, jinja2.ext.loopcontrols]
    )
    jinja_env.filters["tojson"] = tojson
    jinja_env.globals["raise_exception"] = raise_exception
    jinja_env.globals["strftime_now"] = strftime_now
    return jinja_env.from_string(chat_template)