ZTWHHH commited on
Commit
fa754a7
·
verified ·
1 Parent(s): 311dcdd

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_forward_native.h +21 -0
  3. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_c2r_cuda_dispatch.h +28 -0
  4. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_div.h +101 -0
  5. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_bsc_tensor_unsafe_ops.h +28 -0
  6. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_sum.h +39 -0
  7. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_test_serialization_subcmul_native.h +21 -0
  8. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_differentiable_lstm_cell_backward_native.h +21 -0
  9. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_csc_cpu_dispatch.h +23 -0
  10. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/addr_cuda_dispatch.h +25 -0
  11. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_gather_stats_with_counts_ops.h +39 -0
  12. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/chunk.h +30 -0
  13. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/erf_meta_dispatch.h +26 -0
  14. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ihfft_ops.h +39 -0
  15. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/is_signed_ops.h +28 -0
  16. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_meta.h +27 -0
  17. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_meta_dispatch.h +25 -0
  18. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/logit_ops.h +50 -0
  19. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/narrow_copy_ops.h +39 -0
  20. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/pairwise_distance_compositeimplicitautograd_dispatch.h +23 -0
  21. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/q_zero_point_native.h +21 -0
  22. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_entr_cuda_dispatch.h +25 -0
  23. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_erf.h +39 -0
  24. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_multigammaln_native.h +22 -0
  25. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_v_meta.h +27 -0
  26. vllm/lib/python3.10/site-packages/transformers/data/__pycache__/__init__.cpython-310.pyc +0 -0
  27. vllm/lib/python3.10/site-packages/transformers/data/__pycache__/data_collator.cpython-310.pyc +0 -0
  28. vllm/lib/python3.10/site-packages/transformers/data/data_collator.py +1913 -0
  29. vllm/lib/python3.10/site-packages/transformers/data/datasets/__init__.py +23 -0
  30. vllm/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/__init__.cpython-310.pyc +0 -0
  31. vllm/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/glue.cpython-310.pyc +0 -0
  32. vllm/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/language_modeling.cpython-310.pyc +0 -0
  33. vllm/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/squad.cpython-310.pyc +0 -0
  34. vllm/lib/python3.10/site-packages/transformers/data/datasets/glue.py +161 -0
  35. vllm/lib/python3.10/site-packages/transformers/data/datasets/language_modeling.py +530 -0
  36. vllm/lib/python3.10/site-packages/transformers/data/datasets/squad.py +229 -0
  37. vllm/lib/python3.10/site-packages/transformers/data/metrics/__init__.py +98 -0
  38. vllm/lib/python3.10/site-packages/transformers/data/metrics/__pycache__/__init__.cpython-310.pyc +0 -0
  39. vllm/lib/python3.10/site-packages/transformers/data/metrics/__pycache__/squad_metrics.cpython-310.pyc +0 -0
  40. vllm/lib/python3.10/site-packages/transformers/data/metrics/squad_metrics.py +779 -0
  41. vllm/lib/python3.10/site-packages/transformers/data/processors/__init__.py +18 -0
  42. vllm/lib/python3.10/site-packages/transformers/data/processors/__pycache__/__init__.cpython-310.pyc +0 -0
  43. vllm/lib/python3.10/site-packages/transformers/data/processors/__pycache__/glue.cpython-310.pyc +0 -0
  44. vllm/lib/python3.10/site-packages/transformers/data/processors/__pycache__/squad.cpython-310.pyc +0 -0
  45. vllm/lib/python3.10/site-packages/transformers/data/processors/__pycache__/utils.cpython-310.pyc +0 -0
  46. vllm/lib/python3.10/site-packages/transformers/data/processors/__pycache__/xnli.cpython-310.pyc +0 -0
  47. vllm/lib/python3.10/site-packages/transformers/data/processors/glue.py +643 -0
  48. vllm/lib/python3.10/site-packages/transformers/data/processors/squad.py +845 -0
  49. vllm/lib/python3.10/site-packages/transformers/data/processors/utils.py +349 -0
  50. vllm/lib/python3.10/site-packages/transformers/data/processors/xnli.py +96 -0
.gitattributes CHANGED
@@ -1647,3 +1647,5 @@ parrot/lib/python3.10/site-packages/scipy/spatial/_distance_pybind.cpython-310-x
1647
  parrot/lib/python3.10/site-packages/scipy/linalg/_matfuncs_sqrtm_triu.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1648
  vllm/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1649
  vllm/lib/python3.10/site-packages/cupyx/cutensor.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
 
1647
  parrot/lib/python3.10/site-packages/scipy/linalg/_matfuncs_sqrtm_triu.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1648
  vllm/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1649
  vllm/lib/python3.10/site-packages/cupyx/cutensor.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1650
+ vllm/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/modeling_seamless_m4t_v2.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1651
+ vllm/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/modeling_speecht5.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_forward_native.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt> _efficient_attention_forward(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & cu_seqlens_q, const c10::optional<at::Tensor> & cu_seqlens_k, c10::optional<int64_t> max_seqlen_q, double dropout_p, int64_t custom_mask_type, bool compute_log_sumexp=false, c10::optional<double> scale=c10::nullopt, const c10::optional<at::Tensor> & causal_diagonal={}, const c10::optional<at::Tensor> & seqlen_k={});
20
+ } // namespace native
21
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_c2r_cuda_dispatch.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor _fft_c2r(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size);
21
+ TORCH_API at::Tensor _fft_c2r_symint(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size);
22
+ TORCH_API at::Tensor & _fft_c2r_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size);
23
+ TORCH_API at::Tensor & _fft_c2r_outf(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size, at::Tensor & out);
24
+ TORCH_API at::Tensor & _fft_c2r_symint_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size);
25
+ TORCH_API at::Tensor & _fft_c2r_symint_outf(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size, at::Tensor & out);
26
+
27
+ } // namespace cuda
28
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_div.h ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_foreach_div_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
26
+ inline ::std::vector<at::Tensor> _foreach_div(at::TensorList self, const at::Scalar & scalar) {
27
+ return at::_ops::_foreach_div_Scalar::call(self, scalar);
28
+ }
29
+
30
+ // aten::_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
31
+ inline void _foreach_div_(at::TensorList self, const at::Scalar & scalar) {
32
+ return at::_ops::_foreach_div__Scalar::call(self, scalar);
33
+ }
34
+
35
+ // aten::_foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[]
36
+ inline ::std::vector<at::Tensor> _foreach_div(at::TensorList self, at::TensorList other) {
37
+ return at::_ops::_foreach_div_List::call(self, other);
38
+ }
39
+
40
+ // aten::_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> ()
41
+ inline void _foreach_div_(at::TensorList self, at::TensorList other) {
42
+ return at::_ops::_foreach_div__List::call(self, other);
43
+ }
44
+
45
+ // aten::_foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
46
+ inline ::std::vector<at::Tensor> _foreach_div(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
47
+ return at::_ops::_foreach_div_ScalarList::call(self, scalars);
48
+ }
49
+
50
+ // aten::_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
51
+ inline void _foreach_div_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
52
+ return at::_ops::_foreach_div__ScalarList::call(self, scalars);
53
+ }
54
+
55
+ // aten::_foreach_div.Tensor(Tensor[] self, Tensor other) -> Tensor[]
56
+ inline ::std::vector<at::Tensor> _foreach_div(at::TensorList self, const at::Tensor & other) {
57
+ return at::_ops::_foreach_div_Tensor::call(self, other);
58
+ }
59
+
60
+ // aten::_foreach_div_.Tensor(Tensor(a!)[] self, Tensor other) -> ()
61
+ inline void _foreach_div_(at::TensorList self, const at::Tensor & other) {
62
+ return at::_ops::_foreach_div__Tensor::call(self, other);
63
+ }
64
+
65
+ // aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
66
+ inline void _foreach_div_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
67
+ return at::_ops::_foreach_div_Scalar_out::call(self, scalar, out);
68
+ }
69
+ // aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
70
+ inline void _foreach_div_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
71
+ return at::_ops::_foreach_div_Scalar_out::call(self, scalar, out);
72
+ }
73
+
74
+ // aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
75
+ inline void _foreach_div_out(at::TensorList out, at::TensorList self, at::TensorList other) {
76
+ return at::_ops::_foreach_div_List_out::call(self, other, out);
77
+ }
78
+ // aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
79
+ inline void _foreach_div_outf(at::TensorList self, at::TensorList other, at::TensorList out) {
80
+ return at::_ops::_foreach_div_List_out::call(self, other, out);
81
+ }
82
+
83
+ // aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
84
+ inline void _foreach_div_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
85
+ return at::_ops::_foreach_div_ScalarList_out::call(self, scalars, out);
86
+ }
87
+ // aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
88
+ inline void _foreach_div_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
89
+ return at::_ops::_foreach_div_ScalarList_out::call(self, scalars, out);
90
+ }
91
+
92
+ // aten::_foreach_div.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> ()
93
+ inline void _foreach_div_out(at::TensorList out, at::TensorList self, const at::Tensor & other) {
94
+ return at::_ops::_foreach_div_Tensor_out::call(self, other, out);
95
+ }
96
+ // aten::_foreach_div.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> ()
97
+ inline void _foreach_div_outf(at::TensorList self, const at::Tensor & other, at::TensorList out) {
98
+ return at::_ops::_foreach_div_Tensor_out::call(self, other, out);
99
+ }
100
+
101
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_bsc_tensor_unsafe_ops.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _sparse_bsc_tensor_unsafe {
18
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_bsc_tensor_unsafe")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
26
+ };
27
+
28
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_sum.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_sparse_csr_sum_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
26
+ inline at::Tensor _sparse_csr_sum(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
27
+ return at::_ops::_sparse_csr_sum_dim_dtype::call(self, dim, keepdim, dtype);
28
+ }
29
+
30
+ // aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
31
+ inline at::Tensor & _sparse_csr_sum_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
32
+ return at::_ops::_sparse_csr_sum_dim_dtype_out::call(self, dim, keepdim, dtype, out);
33
+ }
34
+ // aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
35
+ inline at::Tensor & _sparse_csr_sum_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
36
+ return at::_ops::_sparse_csr_sum_dim_dtype_out::call(self, dim, keepdim, dtype, out);
37
+ }
38
+
39
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_test_serialization_subcmul_native.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor _test_serialization_subcmul(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
20
+ } // namespace native
21
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_differentiable_lstm_cell_backward_native.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_lstm_cell_backward(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, const at::Tensor & cx, const at::Tensor & cy);
20
+ } // namespace native
21
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_csc_cpu_dispatch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API at::Tensor _to_sparse_csc(const at::Tensor & self, c10::optional<int64_t> dense_dim=c10::nullopt);
21
+
22
+ } // namespace cpu
23
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/addr_cuda_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor addr(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
21
+ TORCH_API at::Tensor & addr_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
22
+ TORCH_API at::Tensor & addr_outf(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
23
+
24
+ } // namespace cuda
25
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_gather_stats_with_counts_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API batch_norm_gather_stats_with_counts {
18
+ using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, double, double, const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::batch_norm_gather_stats_with_counts")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor)")
24
+ static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts);
25
+ static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts);
26
+ };
27
+
28
+ struct TORCH_API batch_norm_gather_stats_with_counts_out {
29
+ using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, double, double, const at::Tensor &, at::Tensor &, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::batch_norm_gather_stats_with_counts")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")
35
+ static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts, at::Tensor & out0, at::Tensor & out1);
36
+ static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts, at::Tensor & out0, at::Tensor & out1);
37
+ };
38
+
39
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/chunk.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/chunk_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]
26
+ inline ::std::vector<at::Tensor> chunk(const at::Tensor & self, int64_t chunks, int64_t dim=0) {
27
+ return at::_ops::chunk::call(self, chunks, dim);
28
+ }
29
+
30
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/erf_meta_dispatch.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace meta {
19
+
20
+ TORCH_API at::Tensor erf(const at::Tensor & self);
21
+ TORCH_API at::Tensor & erf_out(at::Tensor & out, const at::Tensor & self);
22
+ TORCH_API at::Tensor & erf_outf(const at::Tensor & self, at::Tensor & out);
23
+ TORCH_API at::Tensor & erf_(at::Tensor & self);
24
+
25
+ } // namespace meta
26
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ihfft_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API fft_ihfft {
18
+ using schema = at::Tensor (const at::Tensor &, c10::optional<c10::SymInt>, int64_t, c10::optional<c10::string_view>);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_ihfft")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_ihfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm);
26
+ };
27
+
28
+ struct TORCH_API fft_ihfft_out {
29
+ using schema = at::Tensor & (const at::Tensor &, c10::optional<c10::SymInt>, int64_t, c10::optional<c10::string_view>, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_ihfft")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/is_signed_ops.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API is_signed {
18
+ using schema = bool (const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::is_signed")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "is_signed(Tensor self) -> bool")
24
+ static bool call(const at::Tensor & self);
25
+ static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
26
+ };
27
+
28
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_meta.h ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeMetaFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/TensorIterator.h>
13
+ #include <ATen/TensorMeta.h>
14
+ #include <tuple>
15
+ #include <vector>
16
+
17
+ namespace at {
18
+ namespace meta {
19
+
20
+ struct TORCH_API structured_linalg_cholesky_ex : public at::impl::MetaBase {
21
+
22
+
23
+ void meta(const at::Tensor & self, bool upper, bool check_errors);
24
+ };
25
+
26
+ } // namespace native
27
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_meta_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace meta {
19
+
20
+ TORCH_API at::Tensor linalg_ldl_solve(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false);
21
+ TORCH_API at::Tensor & linalg_ldl_solve_out(at::Tensor & out, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false);
22
+ TORCH_API at::Tensor & linalg_ldl_solve_outf(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out);
23
+
24
+ } // namespace meta
25
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/logit_ops.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API logit {
18
+ using schema = at::Tensor (const at::Tensor &, c10::optional<double>);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logit")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logit(Tensor self, float? eps=None) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, c10::optional<double> eps);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> eps);
26
+ };
27
+
28
+ struct TORCH_API logit_ {
29
+ using schema = at::Tensor & (at::Tensor &, c10::optional<double>);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logit_")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)")
35
+ static at::Tensor & call(at::Tensor & self, c10::optional<double> eps);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::optional<double> eps);
37
+ };
38
+
39
+ struct TORCH_API logit_out {
40
+ using schema = at::Tensor & (const at::Tensor &, c10::optional<double>, at::Tensor &);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logit")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)")
46
+ static at::Tensor & call(const at::Tensor & self, c10::optional<double> eps, at::Tensor & out);
47
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> eps, at::Tensor & out);
48
+ };
49
+
50
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/narrow_copy_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API narrow_copy {
18
+ using schema = at::Tensor (const at::Tensor &, int64_t, c10::SymInt, c10::SymInt);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::narrow_copy")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length);
26
+ };
27
+
28
+ struct TORCH_API narrow_copy_out {
29
+ using schema = at::Tensor & (const at::Tensor &, int64_t, c10::SymInt, c10::SymInt, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::narrow_copy")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/pairwise_distance_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor pairwise_distance(const at::Tensor & x1, const at::Tensor & x2, double p=2, double eps=1e-06, bool keepdim=false);
21
+
22
+ } // namespace compositeimplicitautograd
23
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/q_zero_point_native.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API int64_t q_zero_point_quant(const at::Tensor & self);
20
+ } // namespace native
21
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_entr_cuda_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor special_entr(const at::Tensor & self);
21
+ TORCH_API at::Tensor & special_entr_out(at::Tensor & out, const at::Tensor & self);
22
+ TORCH_API at::Tensor & special_entr_outf(const at::Tensor & self, at::Tensor & out);
23
+
24
+ } // namespace cuda
25
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_erf.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/special_erf_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::special_erf(Tensor self) -> Tensor
26
+ inline at::Tensor special_erf(const at::Tensor & self) {
27
+ return at::_ops::special_erf::call(self);
28
+ }
29
+
30
+ // aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
31
+ inline at::Tensor & special_erf_out(at::Tensor & out, const at::Tensor & self) {
32
+ return at::_ops::special_erf_out::call(self, out);
33
+ }
34
+ // aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
35
+ inline at::Tensor & special_erf_outf(const at::Tensor & self, at::Tensor & out) {
36
+ return at::_ops::special_erf_out::call(self, out);
37
+ }
38
+
39
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_multigammaln_native.h ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor special_multigammaln(const at::Tensor & self, int64_t p);
20
+ TORCH_API at::Tensor & special_multigammaln_out(const at::Tensor & self, int64_t p, at::Tensor & out);
21
+ } // namespace native
22
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_v_meta.h ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeMetaFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/TensorIterator.h>
13
+ #include <ATen/TensorMeta.h>
14
+ #include <tuple>
15
+ #include <vector>
16
+
17
+ namespace at {
18
+ namespace meta {
19
+
20
+ struct TORCH_API structured_special_shifted_chebyshev_polynomial_v : public TensorIteratorBase {
21
+
22
+
23
+ void meta(const at::Tensor & x, const at::Tensor & n);
24
+ };
25
+
26
+ } // namespace native
27
+ } // namespace at
vllm/lib/python3.10/site-packages/transformers/data/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.2 kB). View file
 
vllm/lib/python3.10/site-packages/transformers/data/__pycache__/data_collator.cpython-310.pyc ADDED
Binary file (58.3 kB). View file
 
vllm/lib/python3.10/site-packages/transformers/data/data_collator.py ADDED
@@ -0,0 +1,1913 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import multiprocessing as mp
16
+ import random
17
+ import warnings
18
+ from collections.abc import Mapping
19
+ from dataclasses import dataclass
20
+ from random import randint
21
+ from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union
22
+
23
+ import numpy as np
24
+
25
+ from ..models.bert import BertTokenizer, BertTokenizerFast
26
+ from ..tokenization_utils_base import PreTrainedTokenizerBase
27
+ from ..utils import PaddingStrategy
28
+
29
+
30
+ InputDataClass = NewType("InputDataClass", Any)
31
+
32
+ """
33
+ A DataCollator is a function that takes a list of samples from a Dataset and collate them into a batch, as a dictionary
34
+ of PyTorch/TensorFlow tensors or NumPy arrays.
35
+ """
36
+ DataCollator = NewType("DataCollator", Callable[[List[InputDataClass]], Dict[str, Any]])
37
+
38
+
39
+ class DataCollatorMixin:
40
+ def __call__(self, features, return_tensors=None):
41
+ if return_tensors is None:
42
+ return_tensors = self.return_tensors
43
+ if return_tensors == "tf":
44
+ return self.tf_call(features)
45
+ elif return_tensors == "pt":
46
+ return self.torch_call(features)
47
+ elif return_tensors == "np":
48
+ return self.numpy_call(features)
49
+ else:
50
+ raise ValueError(f"Framework '{return_tensors}' not recognized!")
51
+
52
+
53
+ def pad_without_fast_tokenizer_warning(tokenizer, *pad_args, **pad_kwargs):
54
+ """
55
+ Pads without triggering the warning about how using the pad function is sub-optimal when using a fast tokenizer.
56
+ """
57
+
58
+ # To avoid errors when using Feature extractors
59
+ if not hasattr(tokenizer, "deprecation_warnings"):
60
+ return tokenizer.pad(*pad_args, **pad_kwargs)
61
+
62
+ # Save the state of the warning, then disable it
63
+ warning_state = tokenizer.deprecation_warnings.get("Asking-to-pad-a-fast-tokenizer", False)
64
+ tokenizer.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = True
65
+
66
+ try:
67
+ padded = tokenizer.pad(*pad_args, **pad_kwargs)
68
+ finally:
69
+ # Restore the state of the warning.
70
+ tokenizer.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = warning_state
71
+
72
+ return padded
73
+
74
+
75
+ def default_data_collator(features: List[InputDataClass], return_tensors="pt") -> Dict[str, Any]:
76
+ """
77
+ Very simple data collator that simply collates batches of dict-like objects and performs special handling for
78
+ potential keys named:
79
+
80
+ - `label`: handles a single value (int or float) per object
81
+ - `label_ids`: handles a list of values per object
82
+
83
+ Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
84
+ to the model. See glue and ner for example of how it's useful.
85
+ """
86
+
87
+ # In this function we'll make the assumption that all `features` in the batch
88
+ # have the same attributes.
89
+ # So we will look at the first element as a proxy for what attributes exist
90
+ # on the whole batch.
91
+
92
+ if return_tensors == "pt":
93
+ return torch_default_data_collator(features)
94
+ elif return_tensors == "tf":
95
+ return tf_default_data_collator(features)
96
+ elif return_tensors == "np":
97
+ return numpy_default_data_collator(features)
98
+
99
+
100
+ @dataclass
101
+ class DefaultDataCollator(DataCollatorMixin):
102
+ """
103
+ Very simple data collator that simply collates batches of dict-like objects and performs special handling for
104
+ potential keys named:
105
+
106
+ - `label`: handles a single value (int or float) per object
107
+ - `label_ids`: handles a list of values per object
108
+
109
+ Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
110
+ to the model. See glue and ner for example of how it's useful.
111
+
112
+ This is an object (like other data collators) rather than a pure function like default_data_collator. This can be
113
+ helpful if you need to set a return_tensors value at initialization.
114
+
115
+ Args:
116
+ return_tensors (`str`, *optional*, defaults to `"pt"`):
117
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
118
+ """
119
+
120
+ return_tensors: str = "pt"
121
+
122
+ def __call__(self, features: List[Dict[str, Any]], return_tensors=None) -> Dict[str, Any]:
123
+ if return_tensors is None:
124
+ return_tensors = self.return_tensors
125
+ return default_data_collator(features, return_tensors)
126
+
127
+
128
+ def torch_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
129
+ import torch
130
+
131
+ if not isinstance(features[0], Mapping):
132
+ features = [vars(f) for f in features]
133
+ first = features[0]
134
+ batch = {}
135
+
136
+ # Special handling for labels.
137
+ # Ensure that tensor is created with the correct type
138
+ # (it should be automatically the case, but let's make sure of it.)
139
+ if "label" in first and first["label"] is not None:
140
+ label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"]
141
+ dtype = torch.long if isinstance(label, int) else torch.float
142
+ batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype)
143
+ elif "label_ids" in first and first["label_ids"] is not None:
144
+ if isinstance(first["label_ids"], torch.Tensor):
145
+ batch["labels"] = torch.stack([f["label_ids"] for f in features])
146
+ else:
147
+ dtype = torch.long if isinstance(first["label_ids"][0], int) else torch.float
148
+ batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype)
149
+
150
+ # Handling of all other possible keys.
151
+ # Again, we will use the first element to figure out which key/values are not None for this model.
152
+ for k, v in first.items():
153
+ if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
154
+ if isinstance(v, torch.Tensor):
155
+ batch[k] = torch.stack([f[k] for f in features])
156
+ elif isinstance(v, np.ndarray):
157
+ batch[k] = torch.from_numpy(np.stack([f[k] for f in features]))
158
+ else:
159
+ batch[k] = torch.tensor([f[k] for f in features])
160
+
161
+ return batch
162
+
163
+
164
+ def tf_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
165
+ import tensorflow as tf
166
+
167
+ if not isinstance(features[0], Mapping):
168
+ features = [vars(f) for f in features]
169
+ first = features[0]
170
+ batch = {}
171
+
172
+ # Special handling for labels.
173
+ # Ensure that tensor is created with the correct type
174
+ # (it should be automatically the case, but let's make sure of it.)
175
+ if "label" in first and first["label"] is not None:
176
+ label_col_name = "label"
177
+ elif "label_ids" in first and first["label_ids"] is not None:
178
+ label_col_name = "label_ids"
179
+ elif "labels" in first and first["labels"] is not None:
180
+ label_col_name = "labels"
181
+ else:
182
+ label_col_name = None
183
+ if label_col_name is not None:
184
+ if isinstance(first[label_col_name], tf.Tensor):
185
+ dtype = tf.int64 if first[label_col_name].dtype.is_integer else tf.float32
186
+ elif isinstance(first[label_col_name], np.ndarray) or isinstance(first[label_col_name], np.generic):
187
+ dtype = tf.int64 if np.issubdtype(first[label_col_name].dtype, np.integer) else tf.float32
188
+ elif isinstance(first[label_col_name], (tuple, list)):
189
+ dtype = tf.int64 if isinstance(first[label_col_name][0], int) else tf.float32
190
+ else:
191
+ dtype = tf.int64 if isinstance(first[label_col_name], int) else tf.float32
192
+ batch["labels"] = tf.convert_to_tensor([f[label_col_name] for f in features], dtype=dtype)
193
+ # Handling of all other possible keys.
194
+ # Again, we will use the first element to figure out which key/values are not None for this model.
195
+ for k, v in first.items():
196
+ if k not in ("label", "label_ids", "labels") and v is not None and not isinstance(v, str):
197
+ if isinstance(v, (tf.Tensor, np.ndarray)):
198
+ batch[k] = tf.stack([f[k] for f in features])
199
+ else:
200
+ batch[k] = tf.convert_to_tensor([f[k] for f in features])
201
+
202
+ return batch
203
+
204
+
205
+ def numpy_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
206
+ if not isinstance(features[0], Mapping):
207
+ features = [vars(f) for f in features]
208
+ first = features[0]
209
+ batch = {}
210
+
211
+ # Special handling for labels.
212
+ # Ensure that tensor is created with the correct type
213
+ # (it should be automatically the case, but let's make sure of it.)
214
+ if "label" in first and first["label"] is not None:
215
+ label = first["label"].item() if isinstance(first["label"], np.ndarray) else first["label"]
216
+ dtype = np.int64 if isinstance(label, int) else np.float32
217
+ batch["labels"] = np.array([f["label"] for f in features], dtype=dtype)
218
+ elif "label_ids" in first and first["label_ids"] is not None:
219
+ if isinstance(first["label_ids"], np.ndarray):
220
+ batch["labels"] = np.stack([f["label_ids"] for f in features])
221
+ else:
222
+ dtype = np.int64 if isinstance(first["label_ids"][0], int) else np.float32
223
+ batch["labels"] = np.array([f["label_ids"] for f in features], dtype=dtype)
224
+
225
+ # Handling of all other possible keys.
226
+ # Again, we will use the first element to figure out which key/values are not None for this model.
227
+ for k, v in first.items():
228
+ if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
229
+ if isinstance(v, np.ndarray):
230
+ batch[k] = np.stack([f[k] for f in features])
231
+ else:
232
+ batch[k] = np.array([f[k] for f in features])
233
+
234
+ return batch
235
+
236
+
237
+ @dataclass
238
+ class DataCollatorWithPadding:
239
+ """
240
+ Data collator that will dynamically pad the inputs received.
241
+
242
+ Args:
243
+ tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
244
+ The tokenizer used for encoding the data.
245
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
246
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
247
+ among:
248
+
249
+ - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
250
+ sequence is provided).
251
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
252
+ acceptable input length for the model if that argument is not provided.
253
+ - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
254
+ max_length (`int`, *optional*):
255
+ Maximum length of the returned list and optionally padding length (see above).
256
+ pad_to_multiple_of (`int`, *optional*):
257
+ If set will pad the sequence to a multiple of the provided value.
258
+
259
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
260
+ 7.0 (Volta).
261
+ return_tensors (`str`, *optional*, defaults to `"pt"`):
262
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
263
+ """
264
+
265
+ tokenizer: PreTrainedTokenizerBase
266
+ padding: Union[bool, str, PaddingStrategy] = True
267
+ max_length: Optional[int] = None
268
+ pad_to_multiple_of: Optional[int] = None
269
+ return_tensors: str = "pt"
270
+
271
+ def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
272
+ batch = pad_without_fast_tokenizer_warning(
273
+ self.tokenizer,
274
+ features,
275
+ padding=self.padding,
276
+ max_length=self.max_length,
277
+ pad_to_multiple_of=self.pad_to_multiple_of,
278
+ return_tensors=self.return_tensors,
279
+ )
280
+ if "label" in batch:
281
+ batch["labels"] = batch["label"]
282
+ del batch["label"]
283
+ if "label_ids" in batch:
284
+ batch["labels"] = batch["label_ids"]
285
+ del batch["label_ids"]
286
+ return batch
287
+
288
+
289
@dataclass
class DataCollatorForTokenClassification(DataCollatorMixin):
    """
    Data collator that dynamically pads both the inputs and the labels of a
    token-classification batch.

    Args:
        tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
            The tokenizer used for encoding the data.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:

            - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
        max_length (`int`, *optional*):
            Maximum length of the returned list and optionally padding length (see above).
        pad_to_multiple_of (`int`, *optional*):
            If set will pad the sequence to a multiple of the provided value.

            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.0 (Volta).
        label_pad_token_id (`int`, *optional*, defaults to -100):
            The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions).
        return_tensors (`str`, *optional*, defaults to `"pt"`):
            The type of Tensor to return. Allowable values are "np", "pt" and "tf".
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        # Labels may arrive under either key; prefer "label" when present.
        label_key = "label" if "label" in features[0].keys() else "labels"
        raw_labels = [f[label_key] for f in features] if label_key in features[0].keys() else None

        # Pad everything except the ragged labels through the tokenizer.
        stripped = [{k: v for k, v in f.items() if k != label_key} for f in features]
        batch = pad_without_fast_tokenizer_warning(
            self.tokenizer,
            stripped,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        if raw_labels is None:
            return batch

        target_len = batch["input_ids"].shape[1]

        def as_list(tensor_or_iterable):
            # Labels can be tensors (preprocessed datasets) or plain sequences.
            if isinstance(tensor_or_iterable, torch.Tensor):
                return tensor_or_iterable.tolist()
            return list(tensor_or_iterable)

        # Pad each label row to the input length, on the tokenizer's padding side.
        if self.tokenizer.padding_side == "right":
            padded = [
                as_list(lab) + [self.label_pad_token_id] * (target_len - len(lab)) for lab in raw_labels
            ]
        else:
            padded = [
                [self.label_pad_token_id] * (target_len - len(lab)) + as_list(lab) for lab in raw_labels
            ]

        # NOTE: stored back under the original key (which may be "label").
        batch[label_key] = torch.tensor(padded, dtype=torch.int64)
        return batch

    def tf_call(self, features):
        import tensorflow as tf

        label_key = "label" if "label" in features[0].keys() else "labels"
        raw_labels = [f[label_key] for f in features] if label_key in features[0].keys() else None
        # Labels are still ragged at this point, so tensor conversion is deferred
        # until after they have been padded below.
        batch = pad_without_fast_tokenizer_warning(
            self.tokenizer,
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="tf" if raw_labels is None else None,
        )

        if raw_labels is None:
            return batch

        target_len = tf.convert_to_tensor(batch["input_ids"]).shape[1]
        pad_right = self.tokenizer.padding_side == "right"
        padded = []
        for lab in raw_labels:
            filler = [self.label_pad_token_id] * (target_len - len(lab))
            padded.append(list(lab) + filler if pad_right else filler + list(lab))
        batch["labels"] = padded

        return {k: tf.convert_to_tensor(v, dtype=tf.int64) for k, v in batch.items()}

    def numpy_call(self, features):
        label_key = "label" if "label" in features[0].keys() else "labels"
        raw_labels = [f[label_key] for f in features] if label_key in features[0].keys() else None
        # Labels are still ragged at this point; conversion happens after padding.
        batch = pad_without_fast_tokenizer_warning(
            self.tokenizer,
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="np" if raw_labels is None else None,
        )

        if raw_labels is None:
            return batch

        target_len = np.array(batch["input_ids"]).shape[1]
        pad_right = self.tokenizer.padding_side == "right"
        padded = []
        for lab in raw_labels:
            filler = [self.label_pad_token_id] * (target_len - len(lab))
            padded.append(list(lab) + filler if pad_right else filler + list(lab))
        batch["labels"] = padded

        return {k: np.array(v, dtype=np.int64) for k, v in batch.items()}
427
+
428
+
429
+ def _torch_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
430
+ """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
431
+ import torch
432
+
433
+ # Tensorize if necessary.
434
+ if isinstance(examples[0], (list, tuple, np.ndarray)):
435
+ examples = [torch.tensor(e, dtype=torch.long) for e in examples]
436
+
437
+ length_of_first = examples[0].size(0)
438
+
439
+ # Check if padding is necessary.
440
+
441
+ are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)
442
+ if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
443
+ if not isinstance(examples, torch.Tensor):
444
+ return torch.stack(examples, dim=0)
445
+
446
+ # If yes, check if we have a `pad_token`.
447
+ if tokenizer.pad_token is None:
448
+ raise ValueError(
449
+ "You are attempting to pad samples but the tokenizer you are using"
450
+ f" ({tokenizer.__class__.__name__}) does not have a pad token."
451
+ )
452
+
453
+ # Creating the full tensor and filling it with our data.
454
+ max_length = max(x.size(0) for x in examples)
455
+ if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
456
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
457
+ result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
458
+ for i, example in enumerate(examples):
459
+ if tokenizer.padding_side == "right":
460
+ result[i, : example.shape[0]] = example
461
+ else:
462
+ result[i, -example.shape[0] :] = example
463
+ return result
464
+
465
+
466
def _tf_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
    """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
    import tensorflow as tf

    # Tensorize raw python sequences first.
    if isinstance(examples[0], (list, tuple)):
        examples = [tf.convert_to_tensor(e, dtype=tf.int64) for e in examples]

    # Fast path: every row already has the same (suitably aligned) length.
    first_len = len(examples[0])
    uniform = all(len(e) == first_len for e in examples)
    if uniform and (pad_to_multiple_of is None or first_len % pad_to_multiple_of == 0):
        return tf.stack(examples, axis=0)

    # Padding is required, so the tokenizer must define a pad token.
    if tokenizer.pad_token is None:
        raise ValueError(
            "You are attempting to pad samples but the tokenizer you are using"
            f" ({tokenizer.__class__.__name__}) does not have a pad token."
        )

    target_len = max(len(e) for e in examples)
    if pad_to_multiple_of is not None and target_len % pad_to_multiple_of != 0:
        target_len = ((target_len // pad_to_multiple_of) + 1) * pad_to_multiple_of

    # Pad each example with tf.pad on the first axis only, then stack.
    rank = tf.rank(examples[0])
    paddings = np.zeros((rank, 2), dtype=np.int32)
    padded_rows = []
    for example in examples:
        if tokenizer.padding_side == "right":
            paddings[0, 1] = target_len - len(example)
        else:
            paddings[0, 0] = target_len - len(example)
        padded_rows.append(tf.pad(example, paddings, constant_values=tokenizer.pad_token_id))
    return tf.stack(padded_rows, axis=0)
502
+
503
+
504
+ def _numpy_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
505
+ """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
506
+ # Tensorize if necessary.
507
+ if isinstance(examples[0], (list, tuple)):
508
+ examples = [np.array(e, dtype=np.int64) for e in examples]
509
+
510
+ # Check if padding is necessary.
511
+ length_of_first = len(examples[0])
512
+ are_tensors_same_length = all(len(x) == length_of_first for x in examples)
513
+ if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
514
+ return np.stack(examples, axis=0)
515
+
516
+ # If yes, check if we have a `pad_token`.
517
+ if tokenizer.pad_token is None:
518
+ raise ValueError(
519
+ "You are attempting to pad samples but the tokenizer you are using"
520
+ f" ({tokenizer.__class__.__name__}) does not have a pad token."
521
+ )
522
+
523
+ # Creating the full tensor and filling it with our data.
524
+ max_length = max(len(x) for x in examples)
525
+ if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
526
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
527
+ result = np.full(shape=(len(examples), max_length), fill_value=tokenizer.pad_token_id, dtype=examples[0].dtype)
528
+ for i, example in enumerate(examples):
529
+ if tokenizer.padding_side == "right":
530
+ result[i, : example.shape[0]] = example
531
+ else:
532
+ result[i, -example.shape[0] :] = example
533
+ return result
534
+
535
+
536
@dataclass
class DataCollatorForMultipleChoice(DataCollatorMixin):
    """
    Data collator that dynamically pads a batch of nested examples for multiple choice, so that all choices
    of all examples have the same length.

    Args:
        tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
            The tokenizer used for encoding the data.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
            Select a strategy to pad the returned sequences according to the model's padding side and padding index
            among:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              is provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        max_length (`int`, *optional*):
            Maximum length of the returned list and optionally padding length (see above).
        pad_to_multiple_of (`int`, *optional*):
            Pad the sequence to a multiple of the provided value.

            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.0 (Volta).
        return_tensors (`str`, *optional*, defaults to `"pt"`):
            The type of Tensor to return. Allowable values are "np", "pt" and "tf".
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    return_tensors: str = "pt"

    def torch_call(self, examples: List[Dict[str, Any]]):  # Refactored implementation from the docs.
        import torch

        # Labels are per-example scalars (not nested per choice), so handle them separately.
        # Read them instead of popping so the caller's dicts are left unmodified
        # (the previous implementation mutated `examples` in place via `.pop`).
        label_name = "label" if "label" in examples[0].keys() else "labels"
        labels = [example[label_name] for example in examples]

        batch_size = len(examples)
        num_choices = len(examples[0]["input_ids"])

        # Go from e.g. 2 examples of 2 choices [{input_ids: [[1], [2]]}, {input_ids: [[3], [4]]}]
        # to 4 examples [{input_ids: [1]}, {input_ids: [2]}] + [{input_ids: [3]}, {input_ids: [4]}]
        flat_examples = sum(
            (
                [{k: v[i] for k, v in example.items() if k != label_name} for i in range(num_choices)]
                for example in examples
            ),
            start=[],
        )

        # Pad all choices of all examples as if you're padding any other batch of examples.
        batch = self.tokenizer.pad(
            flat_examples,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Reshape from B*C x L into B x C x L, and add the labels back in.
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch

    def tf_call(self, features):  # Implementation taken from the docs.
        import tensorflow as tf

        # Read labels without popping so the caller's feature dicts are not mutated.
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        # Flatten B examples of C choices into B*C single-choice examples,
        # excluding the scalar label field.
        flattened_features = [
            [{k: v[i] for k, v in feature.items() if k != label_name} for i in range(num_choices)]
            for feature in features
        ]
        flattened_features = sum(flattened_features, [])  # Sometimes written as list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="tf",
        )

        # Reshape from B*C x L into B x C x L, and add the labels back in.
        batch = {k: tf.reshape(v, (batch_size, num_choices, -1)) for k, v in batch.items()}
        batch["labels"] = tf.convert_to_tensor(labels, dtype=tf.int64)
        return batch
625
+
626
+
627
@dataclass
class DataCollatorForSeq2Seq:
    """
    Data collator that will dynamically pad the inputs received, as well as the labels.

    Args:
        tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
            The tokenizer used for encoding the data.
        model ([`PreTrainedModel`], *optional*):
            The model that is being trained. If set and has the *prepare_decoder_input_ids_from_labels*, use it to
            prepare the *decoder_input_ids*

            This is useful when using *label_smoothing* to avoid calculating loss twice.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:

            - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
        max_length (`int`, *optional*):
            Maximum length of the returned list and optionally padding length (see above).
        pad_to_multiple_of (`int`, *optional*):
            If set will pad the sequence to a multiple of the provided value.

            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.0 (Volta).
        label_pad_token_id (`int`, *optional*, defaults to -100):
            The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
        return_tensors (`str`, *optional*, defaults to `"pt"`):
            The type of Tensor to return. Allowable values are "np", "pt" and "tf".
    """

    tokenizer: PreTrainedTokenizerBase
    model: Optional[Any] = None
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def __call__(self, features, return_tensors=None):
        # Per-call override of the framework to tensorize into.
        if return_tensors is None:
            return_tensors = self.return_tensors

        # Labels may arrive under either key; prefer "label" when present.
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        # reconvert list[None] to None if necessary
        # this might occur when we pass {..., "labels": None}
        if labels is not None and all(label is None for label in labels):
            labels = None
        non_labels_features = [{k: v for k, v in feature.items() if k != label_name} for feature in features]

        # run through tokenizer without labels to ensure no side effects
        batch = pad_without_fast_tokenizer_warning(
            self.tokenizer,
            non_labels_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors=return_tensors,
        )

        # we have to pad the labels manually as we cannot rely on `tokenizer.pad` and we need them to be of the same length to return tensors
        no_padding = self.padding is False or self.padding == PaddingStrategy.DO_NOT_PAD
        if labels is not None:
            if no_padding:
                if isinstance(features[0][label_name], list):
                    batch["labels"] = list(labels)
                else:
                    # NOTE(review): concatenating with an empty list copies each label
                    # array; the empty operand is float64, so this presumably promotes
                    # integer labels to float — confirm this is intended.
                    batch["labels"] = [np.concatenate([label, []]) for label in labels]
            else:
                # When padding to max_length with an explicit max_length, every row is
                # padded to that length; otherwise to the longest label in the batch.
                max_padding = self.padding == PaddingStrategy.MAX_LENGTH and self.max_length is not None
                max_label_length = max(len(l) for l in labels) if not max_padding else self.max_length
                if self.pad_to_multiple_of is not None:
                    # Round the target length up to the next multiple.
                    max_label_length = (
                        (max_label_length + self.pad_to_multiple_of - 1)
                        // self.pad_to_multiple_of
                        * self.pad_to_multiple_of
                    )

                padding_side = self.tokenizer.padding_side
                # list labels are padded with python lists, array-like labels with numpy,
                # preserving the incoming container type until the tensorization below.
                if isinstance(features[0][label_name], list):
                    batch["labels"] = [
                        label + [self.label_pad_token_id] * (max_label_length - len(label))
                        if padding_side == "right"
                        else [self.label_pad_token_id] * (max_label_length - len(label)) + label
                        for label in labels
                    ]
                else:
                    batch["labels"] = [
                        np.concatenate(
                            [
                                label,
                                np.array([self.label_pad_token_id] * (max_label_length - len(label)), dtype=np.int64),
                            ]
                        )
                        if padding_side == "right"
                        else np.concatenate(
                            [
                                np.array([self.label_pad_token_id] * (max_label_length - len(label)), dtype=np.int64),
                                label,
                            ]
                        )
                        for label in labels
                    ]

        # reintroduce side effects via tokenizer that return respective datatypes for the `return_tensors` argument
        if batch.get("labels", None) is not None:
            if return_tensors == "pt":
                import torch

                batch["labels"] = torch.tensor(batch["labels"], dtype=torch.int64)
            elif return_tensors == "tf":
                import tensorflow as tf

                batch["labels"] = tf.constant(batch["labels"], dtype=tf.int64)
            else:
                batch["labels"] = np.array(batch["labels"], dtype=np.int64)
        else:
            # Keep an explicit None entry so downstream code can rely on the key existing.
            batch["labels"] = None

        # prepare decoder_input_ids
        if (
            labels is not None
            and self.model is not None
            and hasattr(self.model, "prepare_decoder_input_ids_from_labels")
        ):
            decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=batch["labels"])
            batch["decoder_input_ids"] = decoder_input_ids

        return batch
761
+
762
+
763
+ @dataclass
764
+ class DataCollatorForLanguageModeling(DataCollatorMixin):
765
+ """
766
+ Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they
767
+ are not all of the same length.
768
+
769
+ Args:
770
+ tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
771
+ The tokenizer used for encoding the data.
772
+ mlm (`bool`, *optional*, defaults to `True`):
773
+ Whether or not to use masked language modeling. If set to `False`, the labels are the same as the inputs
774
+ with the padding tokens ignored (by setting them to -100). Otherwise, the labels are -100 for non-masked
775
+ tokens and the value to predict for the masked token.
776
+ mlm_probability (`float`, *optional*, defaults to 0.15):
777
+ The probability with which to (randomly) mask tokens in the input, when `mlm` is set to `True`.
778
+ mask_replace_prob (`float`, *optional*, defaults to 0.8):
779
+ The probability with which masked tokens are replaced by the tokenizer's mask token (e.g., `[MASK]`).
780
+ Defaults to 0.8, meaning 80% of the masked tokens will be replaced with `[MASK]`.
781
+ Only works when `mlm` is set to `True`.
782
+ random_replace_prob (`float`, *optional*, defaults to 0.1):
783
+ The probability with which masked tokens are replaced by random tokens from the tokenizer's vocabulary.
784
+ Defaults to 0.1, meaning 10% of the masked tokens will be replaced with random tokens. The remaining
785
+ masked tokens (1 - mask_replace_prob - random_replace_prob) are left unchanged.
786
+ Only works when `mlm` is set to `True`.
787
+ pad_to_multiple_of (`int`, *optional*):
788
+ If set, will pad the sequence to a multiple of the provided value.
789
+ return_tensors (`str`):
790
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
791
+ seed (`int`, *optional*):
792
+ The seed to use for the random number generator for masking. If not provided, the global RNG will be used.
793
+
794
+ <Tip>
795
+
796
+ For best performance, this data collator should be used with a dataset having items that are dictionaries or
797
+ BatchEncoding, with the `"special_tokens_mask"` key, as returned by a [`PreTrainedTokenizer`] or a
798
+ [`PreTrainedTokenizerFast`] with the argument `return_special_tokens_mask=True`.
799
+
800
+ <Example Options and Expectations>
801
+
802
+ 1. Default Behavior:
803
+ - `mask_replace_prob=0.8`, `random_replace_prob=0.1`.
804
+ - Expect 80% of masked tokens replaced with `[MASK]`, 10% replaced with random tokens, and 10% left unchanged.
805
+
806
+ 2. All masked tokens replaced by `[MASK]`:
807
+ - `mask_replace_prob=1.0`, `random_replace_prob=0.0`.
808
+ - Expect all masked tokens to be replaced with `[MASK]`. No tokens are left unchanged or replaced with random tokens.
809
+
810
+ 3. No `[MASK]` replacement, only random tokens:
811
+ - `mask_replace_prob=0.0`, `random_replace_prob=1.0`.
812
+ - Expect all masked tokens to be replaced with random tokens. No `[MASK]` replacements or unchanged tokens.
813
+
814
+ 4. Balanced replacement:
815
+ - `mask_replace_prob=0.5`, `random_replace_prob=0.4`.
816
+ - Expect 50% of masked tokens replaced with `[MASK]`, 40% replaced with random tokens, and 10% left unchanged.
817
+
818
+ Note:
819
+ The sum of `mask_replace_prob` and `random_replace_prob` must not exceed 1. If their sum is less than 1, the
820
+ remaining proportion will consist of masked tokens left unchanged.
821
+
822
+ </Tip>
823
+ """
824
+
825
+ tokenizer: PreTrainedTokenizerBase
826
+ mlm: bool = True
827
+ mlm_probability: float = 0.15
828
+ mask_replace_prob: float = 0.8
829
+ random_replace_prob: float = 0.1
830
+ pad_to_multiple_of: Optional[int] = None
831
+ tf_experimental_compile: bool = False
832
+ return_tensors: str = "pt"
833
+ seed: Optional[int] = None
834
+
835
+ def __post_init__(self):
836
+ if self.mlm and self.tokenizer.mask_token is None:
837
+ raise ValueError(
838
+ "This tokenizer does not have a mask token which is necessary for masked language modeling. "
839
+ "You should pass `mlm=False` to train on causal language modeling instead."
840
+ )
841
+ if self.mlm_probability < 0 or self.mlm_probability > 1:
842
+ raise ValueError("mlm_probability should be between 0 and 1.")
843
+ if self.mask_replace_prob + self.random_replace_prob > 1:
844
+ raise ValueError("The sum of mask_replace_prob and random_replace_prob should not exceed 1")
845
+ if self.mask_replace_prob < 0 or self.mask_replace_prob > 1:
846
+ raise ValueError("mask_replace_prob should be between 0 and 1.")
847
+ if self.random_replace_prob < 0 or self.random_replace_prob > 1:
848
+ raise ValueError("random_replace_prob should be between 0 and 1.")
849
+
850
+ self.mlm_probability = float(self.mlm_probability)
851
+ self.mask_replace_prob = float(self.mask_replace_prob)
852
+ self.random_replace_prob = float(self.random_replace_prob)
853
+
854
+ if self.tf_experimental_compile:
855
+ import tensorflow as tf
856
+
857
+ self.tf_mask_tokens = tf.function(self.tf_mask_tokens, jit_compile=True)
858
+
859
+ self.generator = None
860
+
861
+ def get_generator(self, seed):
862
+ if self.return_tensors == "pt":
863
+ import torch
864
+
865
+ return torch.Generator().manual_seed(seed)
866
+ elif self.return_tensors == "tf":
867
+ import tensorflow as tf
868
+
869
+ return tf.random.Generator.from_seed(seed)
870
+ else:
871
+ import numpy as np
872
+
873
+ return np.random.default_rng(seed)
874
+
875
+ def create_rng(self):
876
+ if mp.current_process().name == "MainProcess":
877
+ # If we are in the main process, we create a generator object with the seed
878
+ self.generator = self.get_generator(self.seed)
879
+ else:
880
+ # If we are in a worker process (i.e using multiprocessing), we need to set a unique seed for each
881
+ # worker's generator, generated as the main seed + the worker's ID.
882
+ # (https://pytorch.org/docs/stable/data.html#randomness-in-multi-process-data-loading)
883
+ # Only PyTorch DataLoader allows us to access the worker ID, and so we check for this.
884
+ # For other frameworks, we will throw an error.
885
+ import torch
886
+
887
+ worker_info = torch.utils.data.get_worker_info()
888
+ if worker_info is None:
889
+ error_string = (
890
+ "Worker process information is not available for seeding the generator. This may be because",
891
+ "you are using multiprocessing without using a PyTorch DataLoader. The `seed` parameter can",
892
+ "only be used when using multiprocessing with a PyTorch DataLoader. Please either use a",
893
+ "single process or use a PyTorch DataLoader with multiple workers.",
894
+ )
895
+ raise ValueError(error_string)
896
+
897
+ self.generator = self.get_generator(self.seed + worker_info.id)
898
+
899
+ @staticmethod
900
+ def tf_bernoulli(shape, probability, generator=None):
901
+ import tensorflow as tf
902
+
903
+ prob_matrix = tf.fill(shape, probability)
904
+ # if generator exists, use it to generate the random numbers
905
+ # otherwise, use the global RNG
906
+ if generator:
907
+ return tf.cast(prob_matrix - generator.uniform(shape, 0, 1) >= 0, tf.bool)
908
+ else:
909
+ return tf.cast(prob_matrix - tf.random.uniform(shape, 0, 1) >= 0, tf.bool)
910
+
911
    def tf_mask_tokens(
        self, inputs: Any, vocab_size, mask_token_id, special_tokens_mask: Optional[Any] = None
    ) -> Tuple[Any, Any]:
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.

        The actual ratios are governed by `self.mask_replace_prob` and `self.random_replace_prob`
        (the 80/10/10 split is their default configuration).
        """
        import tensorflow as tf

        # NOTE(review): despite the Optional default, the code below applies `~` to
        # `special_tokens_mask` unconditionally, so callers must pass a boolean mask
        # (tf_call always does) — confirm None is never passed here.
        mask_token_id = tf.cast(mask_token_id, inputs.dtype)

        input_shape = tf.shape(inputs)
        # 1 for a special token, 0 for a normal token in the special tokens mask
        # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
        masked_indices = self.tf_bernoulli(input_shape, self.mlm_probability, self.generator) & ~special_tokens_mask
        # Replace unmasked indices with -100 in the labels since we only compute loss on masked tokens
        labels = tf.where(masked_indices, inputs, -100)

        # mask_replace_prob% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = self.tf_bernoulli(input_shape, self.mask_replace_prob, self.generator) & masked_indices

        inputs = tf.where(indices_replaced, mask_token_id, inputs)

        # Early exit: no random-token replacement configured, so we are done.
        if self.mask_replace_prob == 1 or self.random_replace_prob == 0:
            return inputs, labels

        remaining_prob = 1 - self.mask_replace_prob
        # scaling the random_replace_prob to the remaining probability for example if
        # mask_replace_prob = 0.8 and random_replace_prob = 0.1,
        # then random_replace_prob_scaled = 0.1 / 0.2 = 0.5
        random_replace_prob_scaled = self.random_replace_prob / remaining_prob
        # random_replace_prob% of the time, we replace masked input tokens with random word
        indices_random = (
            self.tf_bernoulli(input_shape, random_replace_prob_scaled, self.generator)
            & masked_indices
            & ~indices_replaced
        )

        # Draw replacement ids uniformly over the vocabulary, from the seeded
        # generator when one exists.
        if self.generator:
            random_words = self.generator.uniform(input_shape, maxval=vocab_size, dtype=inputs.dtype)
        else:
            random_words = tf.random.uniform(input_shape, maxval=vocab_size, dtype=inputs.dtype)

        inputs = tf.where(indices_random, random_words, inputs)

        # The rest of the time ((1-random_replace_prob-mask_replace_prob)% of the time) we keep the masked input tokens unchanged
        return inputs, labels
957
+
958
    def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
        """Collate `examples` into a TF batch and produce MLM or causal-LM labels."""
        import tensorflow as tf

        if self.seed and self.generator is None:
            # If we have a seed, we need to create a generator object. Subsequent calls to this function will use the same generator.
            # If no seed supplied, we will use the global RNG
            self.create_rng()

        # Handle dict or lists with proper padding and conversion to tensor.
        if isinstance(examples[0], Mapping):
            batch = pad_without_fast_tokenizer_warning(
                self.tokenizer, examples, return_tensors="tf", pad_to_multiple_of=self.pad_to_multiple_of
            )
        else:
            batch = {
                "input_ids": _tf_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
            }

        # If special token mask has been preprocessed, pop it from the dict.
        # It must not remain in the batch handed to the model.
        special_tokens_mask = batch.pop("special_tokens_mask", None)
        if self.mlm:
            if special_tokens_mask is None:
                # No precomputed mask: recompute it per sequence via the tokenizer.
                special_tokens_mask = [
                    self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
                    for val in batch["input_ids"].numpy().tolist()
                ]
                # Cannot directly create as bool
                special_tokens_mask = tf.cast(tf.convert_to_tensor(special_tokens_mask, dtype=tf.int64), tf.bool)
            else:
                special_tokens_mask = tf.cast(special_tokens_mask, tf.bool)
            batch["input_ids"], batch["labels"] = self.tf_mask_tokens(
                tf.cast(batch["input_ids"], tf.int64),
                special_tokens_mask=special_tokens_mask,
                mask_token_id=self.tokenizer.mask_token_id,
                vocab_size=len(self.tokenizer),
            )
        else:
            # Causal LM: labels are the inputs with padding positions ignored (-100).
            labels = batch["input_ids"]
            if self.tokenizer.pad_token_id is not None:
                # Replace self.tokenizer.pad_token_id with -100
                labels = tf.where(labels == self.tokenizer.pad_token_id, -100, labels)
            else:
                labels = tf.identity(labels)  # Makes a copy, just in case
            batch["labels"] = labels
        return batch
1003
+
1004
+ def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
1005
+ # Handle dict or lists with proper padding and conversion to tensor.
1006
+
1007
+ if self.seed and self.generator is None:
1008
+ # If we have a seed, we need to create a generator object. Subsequent calls to this function will use the same generator.
1009
+ # If no seed supplied, we will use the global RNG
1010
+ self.create_rng()
1011
+
1012
+ if isinstance(examples[0], Mapping):
1013
+ batch = pad_without_fast_tokenizer_warning(
1014
+ self.tokenizer, examples, return_tensors="pt", pad_to_multiple_of=self.pad_to_multiple_of
1015
+ )
1016
+ else:
1017
+ batch = {
1018
+ "input_ids": _torch_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
1019
+ }
1020
+
1021
+ # If special token mask has been preprocessed, pop it from the dict.
1022
+ special_tokens_mask = batch.pop("special_tokens_mask", None)
1023
+ if self.mlm:
1024
+ batch["input_ids"], batch["labels"] = self.torch_mask_tokens(
1025
+ batch["input_ids"], special_tokens_mask=special_tokens_mask
1026
+ )
1027
+ else:
1028
+ labels = batch["input_ids"].clone()
1029
+ if self.tokenizer.pad_token_id is not None:
1030
+ labels[labels == self.tokenizer.pad_token_id] = -100
1031
+ batch["labels"] = labels
1032
+ return batch
1033
+
1034
    def torch_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any] = None) -> Tuple[Any, Any]:
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.

        Returns ``(inputs, labels)`` where ``inputs`` is modified in place and
        ``labels`` equals the original ids at masked positions and -100
        elsewhere.

        NOTE(review): the draw order below matters — every ``torch.bernoulli``
        / ``torch.randint`` call advances ``self.generator`` (or the global
        RNG), so reordering statements changes outputs under a fixed seed.
        """
        import torch

        labels = inputs.clone()
        # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
        probability_matrix = torch.full(labels.shape, self.mlm_probability)
        if special_tokens_mask is None:
            # Recompute the mask from the tokenizer when the caller did not
            # preprocess one.
            special_tokens_mask = [
                self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
            ]
            special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)
        else:
            special_tokens_mask = special_tokens_mask.bool()

        # Special tokens are never masked.
        probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
        masked_indices = torch.bernoulli(probability_matrix, generator=self.generator).bool()
        labels[~masked_indices] = -100  # We only compute loss on masked tokens

        # mask_replace_prob% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = (
            torch.bernoulli(torch.full(labels.shape, self.mask_replace_prob), generator=self.generator).bool()
            & masked_indices
        )
        inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)

        # Early exit: nothing left to randomize beyond [MASK] replacement.
        if self.mask_replace_prob == 1 or self.random_replace_prob == 0:
            return inputs, labels

        remaining_prob = 1 - self.mask_replace_prob
        # scaling the random_replace_prob to the remaining probability for example if
        # mask_replace_prob = 0.8 and random_replace_prob = 0.1,
        # then random_replace_prob_scaled = 0.1 / 0.2 = 0.5
        random_replace_prob_scaled = self.random_replace_prob / remaining_prob

        # random_replace_prob% of the time, we replace masked input tokens with random word
        indices_random = (
            torch.bernoulli(torch.full(labels.shape, random_replace_prob_scaled), generator=self.generator).bool()
            & masked_indices
            & ~indices_replaced
        )
        random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long, generator=self.generator)
        inputs[indices_random] = random_words[indices_random]

        # The rest of the time ((1-random_replace_prob-mask_replace_prob)% of the time) we keep the masked input tokens unchanged
        return inputs, labels
1082
+
1083
+ def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
1084
+ # Handle dict or lists with proper padding and conversion to tensor.
1085
+
1086
+ if self.seed and self.generator is None:
1087
+ # If we have a seed, we need to create a generator object. Subsequent calls to this function will use the same generator.
1088
+ # If no seed supplied, we will use the global RNG
1089
+ self.create_rng()
1090
+
1091
+ if isinstance(examples[0], Mapping):
1092
+ batch = pad_without_fast_tokenizer_warning(
1093
+ self.tokenizer, examples, return_tensors="np", pad_to_multiple_of=self.pad_to_multiple_of
1094
+ )
1095
+ else:
1096
+ batch = {
1097
+ "input_ids": _numpy_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
1098
+ }
1099
+
1100
+ # If special token mask has been preprocessed, pop it from the dict.
1101
+ special_tokens_mask = batch.pop("special_tokens_mask", None)
1102
+ if self.mlm:
1103
+ batch["input_ids"], batch["labels"] = self.numpy_mask_tokens(
1104
+ batch["input_ids"], special_tokens_mask=special_tokens_mask
1105
+ )
1106
+ else:
1107
+ labels = np.copy(batch["input_ids"])
1108
+ if self.tokenizer.pad_token_id is not None:
1109
+ labels[labels == self.tokenizer.pad_token_id] = -100
1110
+ batch["labels"] = labels
1111
+ return batch
1112
+
1113
    def numpy_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any] = None) -> Tuple[Any, Any]:
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.

        NumPy twin of ``torch_mask_tokens``: ``inputs`` is modified in place
        and ``labels`` carries the original ids at masked positions, -100
        elsewhere. ``self.generator`` — presumably a ``np.random.Generator``
        here (it must expose ``.binomial`` and ``.integers``; TODO confirm
        against ``create_rng``) — is preferred over the global ``np.random``
        state, and draw order matters for seeded reproducibility.
        """
        labels = np.copy(inputs)
        # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
        probability_matrix = np.full(labels.shape, self.mlm_probability)
        if special_tokens_mask is None:
            special_tokens_mask = [
                self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
            ]
            special_tokens_mask = np.array(special_tokens_mask, dtype=bool)
        else:
            special_tokens_mask = special_tokens_mask.astype(bool)

        # Special tokens are never masked.
        probability_matrix[special_tokens_mask] = 0
        # Numpy doesn't have bernoulli, so we use a binomial with 1 trial
        if self.generator:
            masked_indices = self.generator.binomial(1, probability_matrix, size=probability_matrix.shape).astype(bool)
        else:
            masked_indices = np.random.binomial(1, probability_matrix, size=probability_matrix.shape).astype(bool)
        labels[~masked_indices] = -100  # We only compute loss on masked tokens

        # mask_replace_prob% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        if self.generator:
            indices_replaced = (
                self.generator.binomial(1, self.mask_replace_prob, size=labels.shape).astype(bool) & masked_indices
            )
        else:
            indices_replaced = (
                np.random.binomial(1, self.mask_replace_prob, size=labels.shape).astype(bool) & masked_indices
            )
        inputs[indices_replaced] = self.tokenizer.mask_token_id

        # Early exit: no random-word replacement requested.
        if self.mask_replace_prob == 1 or self.random_replace_prob == 0:
            return inputs, labels

        remaining_prob = 1 - self.mask_replace_prob
        # scaling the random_replace_prob to the remaining probability for example if
        # mask_replace_prob = 0.8 and random_replace_prob = 0.1,
        # then random_replace_prob_scaled = 0.1 / 0.2 = 0.5
        random_replace_prob_scaled = self.random_replace_prob / remaining_prob
        if self.generator:
            indices_random = (
                self.generator.binomial(1, random_replace_prob_scaled, size=labels.shape).astype(bool)
                & masked_indices
                & ~indices_replaced
            )
            # Only draw as many random ids as there are positions to fill.
            random_words = self.generator.integers(
                low=0, high=len(self.tokenizer), size=np.count_nonzero(indices_random), dtype=np.int64
            )
        else:
            indices_random = (
                np.random.binomial(1, random_replace_prob_scaled, size=labels.shape).astype(bool)
                & masked_indices
                & ~indices_replaced
            )
            random_words = np.random.randint(
                low=0, high=len(self.tokenizer), size=np.count_nonzero(indices_random), dtype=np.int64
            )
        inputs[indices_random] = random_words

        # The rest of the time (10% of the time) we keep the masked input tokens unchanged
        return inputs, labels
1177
+
1178
+
1179
    @dataclass
    class DataCollatorForWholeWordMask(DataCollatorForLanguageModeling):
        """
        Data collator used for language modeling that masks entire words.

        - collates batches of tensors, honoring their tokenizer's pad_token
        - preprocesses batches for masked language modeling

        <Tip>

        This collator relies on details of the implementation of subword tokenization by [`BertTokenizer`], specifically
        that subword tokens are prefixed with *##*. For tokenizers that do not adhere to this scheme, this collator will
        produce an output that is roughly equivalent to [`.DataCollatorForLanguageModeling`].

        </Tip>"""

        def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
            # Build a per-example 0/1 whole-word mask from the token strings,
            # then apply it via torch_mask_tokens.
            if isinstance(examples[0], Mapping):
                input_ids = [e["input_ids"] for e in examples]
            else:
                input_ids = examples
                examples = [{"input_ids": e} for e in examples]

            batch_input = _torch_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)

            mask_labels = []
            for e in examples:
                ref_tokens = []
                for id in tolist(e["input_ids"]):
                    token = self.tokenizer._convert_id_to_token(id)
                    ref_tokens.append(token)

                # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
                if "chinese_ref" in e:
                    ref_pos = tolist(e["chinese_ref"])
                    len_seq = len(e["input_ids"])
                    for i in range(len_seq):
                        if i in ref_pos:
                            ref_tokens[i] = "##" + ref_tokens[i]
                mask_labels.append(self._whole_word_mask(ref_tokens))
            batch_mask = _torch_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
            inputs, labels = self.torch_mask_tokens(batch_input, batch_mask)
            return {"input_ids": inputs, "labels": labels}

        def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
            # TensorFlow twin of torch_call; the whole-word mask itself is
            # computed with plain Python/`random` either way.
            import tensorflow as tf

            if isinstance(examples[0], Mapping):
                input_ids = [e["input_ids"] for e in examples]
            else:
                input_ids = examples
                examples = [{"input_ids": e} for e in examples]

            batch_input = _tf_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)

            mask_labels = []
            for e in examples:
                ref_tokens = []
                for id in tolist(e["input_ids"]):
                    token = self.tokenizer._convert_id_to_token(id)
                    ref_tokens.append(token)

                # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
                if "chinese_ref" in e:
                    ref_pos = tolist(e["chinese_ref"])
                    len_seq = len(e["input_ids"])
                    for i in range(len_seq):
                        if i in ref_pos:
                            ref_tokens[i] = "##" + ref_tokens[i]
                mask_labels.append(self._whole_word_mask(ref_tokens))
            batch_mask = _tf_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
            inputs, labels = self.tf_mask_tokens(tf.cast(batch_input, tf.int64), batch_mask)
            return {"input_ids": inputs, "labels": labels}

        def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
            # NumPy twin of torch_call.
            if isinstance(examples[0], Mapping):
                input_ids = [e["input_ids"] for e in examples]
            else:
                input_ids = examples
                examples = [{"input_ids": e} for e in examples]

            batch_input = _numpy_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)

            mask_labels = []
            for e in examples:
                ref_tokens = []
                for id in tolist(e["input_ids"]):
                    token = self.tokenizer._convert_id_to_token(id)
                    ref_tokens.append(token)

                # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
                if "chinese_ref" in e:
                    ref_pos = tolist(e["chinese_ref"])
                    len_seq = len(e["input_ids"])
                    for i in range(len_seq):
                        if i in ref_pos:
                            ref_tokens[i] = "##" + ref_tokens[i]
                mask_labels.append(self._whole_word_mask(ref_tokens))
            batch_mask = _numpy_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
            inputs, labels = self.numpy_mask_tokens(batch_input, batch_mask)
            return {"input_ids": inputs, "labels": labels}

        def _whole_word_mask(self, input_tokens: List[str], max_predictions: int = 512):
            """
            Get 0/1 labels for masked tokens with whole word mask proxy.

            Tokens starting with ``##`` are grouped with the preceding token so
            a whole word is always masked (or kept) together. Uses the global
            ``random`` module, not ``self.generator``.
            """
            if not isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)):
                warnings.warn(
                    "DataCollatorForWholeWordMask is only suitable for BertTokenizer-like tokenizers. "
                    "Please refer to the documentation for more information."
                )

            cand_indexes = []
            for i, token in enumerate(input_tokens):
                # [CLS]/[SEP] are never candidates for masking.
                if token == "[CLS]" or token == "[SEP]":
                    continue

                if len(cand_indexes) >= 1 and token.startswith("##"):
                    cand_indexes[-1].append(i)
                else:
                    cand_indexes.append([i])

            random.shuffle(cand_indexes)
            num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability))))
            masked_lms = []
            covered_indexes = set()
            for index_set in cand_indexes:
                if len(masked_lms) >= num_to_predict:
                    break
                # If adding a whole-word mask would exceed the maximum number of
                # predictions, then just skip this candidate.
                if len(masked_lms) + len(index_set) > num_to_predict:
                    continue
                for index in index_set:
                    covered_indexes.add(index)
                    masked_lms.append(index)

            if len(covered_indexes) != len(masked_lms):
                raise ValueError("Length of covered_indexes is not equal to length of masked_lms.")
            mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))]
            return mask_labels

        def torch_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
            """
            Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set
            'mask_labels' means we use whole word mask (wwm), we directly mask idxs according to it's ref.

            NOTE(review): unlike the parent class, this uses fixed 0.8/0.5
            probabilities and the global torch RNG (``self.mask_replace_prob``
            and ``self.generator`` are not consulted) — confirm intended.
            """
            import torch

            if self.tokenizer.mask_token is None:
                raise ValueError(
                    "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
                    " --mlm flag if you want to use this tokenizer."
                )
            labels = inputs.clone()
            # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)

            probability_matrix = mask_labels

            special_tokens_mask = [
                self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
            ]
            probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
            if self.tokenizer.pad_token is not None:
                padding_mask = labels.eq(self.tokenizer.pad_token_id)
                probability_matrix.masked_fill_(padding_mask, value=0.0)

            masked_indices = probability_matrix.bool()
            labels[~masked_indices] = -100  # We only compute loss on masked tokens

            # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
            indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
            inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)

            # 10% of the time, we replace masked input tokens with random word
            indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
            random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
            inputs[indices_random] = random_words[indices_random]

            # The rest of the time (10% of the time) we keep the masked input tokens unchanged
            return inputs, labels

        def tf_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
            """
            Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set
            'mask_labels' means we use whole word mask (wwm), we directly mask idxs according to it's ref.
            """
            import tensorflow as tf

            input_shape = tf.shape(inputs)
            if self.tokenizer.mask_token is None:
                raise ValueError(
                    "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
                    " --mlm flag if you want to use this tokenizer."
                )
            labels = tf.identity(inputs)
            # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)

            masked_indices = tf.cast(mask_labels, tf.bool)

            special_tokens_mask = [
                self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels
            ]
            masked_indices = masked_indices & ~tf.cast(special_tokens_mask, dtype=tf.bool)
            if self.tokenizer.pad_token is not None:
                padding_mask = inputs == self.tokenizer.pad_token_id
                masked_indices = masked_indices & ~padding_mask

            # Replace unmasked indices with -100 in the labels since we only compute loss on masked tokens
            labels = tf.where(masked_indices, inputs, -100)

            # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
            indices_replaced = self.tf_bernoulli(input_shape, 0.8) & masked_indices

            inputs = tf.where(indices_replaced, self.tokenizer.mask_token_id, inputs)

            # 10% of the time, we replace masked input tokens with random word
            indices_random = self.tf_bernoulli(input_shape, 0.5) & masked_indices & ~indices_replaced
            random_words = tf.random.uniform(input_shape, maxval=len(self.tokenizer), dtype=tf.int64)
            inputs = tf.where(indices_random, random_words, inputs)

            # The rest of the time (10% of the time) we keep the masked input tokens unchanged
            return inputs, labels

        def numpy_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
            """
            Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set
            'mask_labels' means we use whole word mask (wwm), we directly mask idxs according to it's ref.

            NOTE(review): uses the global ``np.random`` state with fixed
            0.8/0.5 probabilities — ``self.generator`` is not consulted here,
            unlike the parent class; confirm intended.
            """
            if self.tokenizer.mask_token is None:
                raise ValueError(
                    "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
                    " --mlm flag if you want to use this tokenizer."
                )
            labels = np.copy(inputs)
            # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)

            masked_indices = mask_labels.astype(bool)

            special_tokens_mask = [
                self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
            ]
            masked_indices[np.array(special_tokens_mask, dtype=bool)] = 0
            if self.tokenizer.pad_token is not None:
                padding_mask = labels == self.tokenizer.pad_token_id
                masked_indices[padding_mask] = 0

            labels[~masked_indices] = -100  # We only compute loss on masked tokens

            # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
            indices_replaced = np.random.binomial(1, 0.8, size=labels.shape).astype(bool) & masked_indices
            inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)

            # 10% of the time, we replace masked input tokens with random word
            # indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
            indices_random = (
                np.random.binomial(1, 0.5, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced
            )
            random_words = np.random.randint(low=0, high=len(self.tokenizer), size=labels.shape, dtype=np.int64)
            inputs[indices_random] = random_words[indices_random]

            # The rest of the time (10% of the time) we keep the masked input tokens unchanged
            return inputs, labels
1442
+
1443
+
1444
+ def tolist(x):
1445
+ if isinstance(x, list):
1446
+ return x
1447
+ elif hasattr(x, "numpy"): # Checks for TF tensors without needing the import
1448
+ x = x.numpy()
1449
+ return x.tolist()
1450
+
1451
+
1452
    @dataclass
    class DataCollatorForSOP(DataCollatorForLanguageModeling):
        """
        Data collator used for sentence order prediction task.

        - collates batches of tensors, honoring their tokenizer's pad_token
        - preprocesses batches for both masked language modeling and sentence order prediction

        Deprecated: emits a ``FutureWarning`` on construction; use
        ``DataCollatorForLanguageModeling`` instead.
        """

        def __init__(self, *args, **kwargs):
            # Deprecation shim only — note the parent __init__ is NOT called
            # here; presumably handled by the dataclass machinery upstream —
            # TODO confirm.
            warnings.warn(
                "DataCollatorForSOP is deprecated and will be removed in a future version, you can now use "
                "DataCollatorForLanguageModeling instead.",
                FutureWarning,
            )

        def __call__(self, examples: List[Dict[str, Any]]) -> Dict[str, Any]:
            """Collate examples carrying ``input_ids``, ``token_type_ids`` and
            ``sentence_order_label`` into an MLM + SOP training batch."""
            import torch
            from torch.nn.utils.rnn import pad_sequence

            input_ids = [example["input_ids"] for example in examples]
            input_ids = _torch_collate_batch(input_ids, self.tokenizer)
            input_ids, labels, attention_mask = self.mask_tokens(input_ids)

            token_type_ids = [example["token_type_ids"] for example in examples]
            # size of segment_ids varied because randomness, padding zero to the end as the original implementation
            token_type_ids = pad_sequence(token_type_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)

            sop_label_list = [example["sentence_order_label"] for example in examples]
            sentence_order_label = torch.stack(sop_label_list)

            return {
                "input_ids": input_ids,
                "labels": labels,
                "attention_mask": attention_mask,
                "token_type_ids": token_type_ids,
                "sentence_order_label": sentence_order_label,
            }

        def mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any]:
            """
            Prepare masked tokens inputs/labels/attention_mask for masked language modeling: 80% MASK, 10% random, 10%
            original. N-gram not applied yet.

            NOTE(review): uses the global torch RNG with fixed 0.8/0.5
            probabilities (``self.generator`` / ``mask_replace_prob`` are not
            consulted) — draw order matters for seeded reproducibility.
            """
            import torch

            if self.tokenizer.mask_token is None:
                raise ValueError(
                    "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
                    " --mlm flag if you want to use this tokenizer."
                )

            labels = inputs.clone()
            # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
            probability_matrix = torch.full(labels.shape, self.mlm_probability)
            special_tokens_mask = [
                self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
            ]
            probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
            if self.tokenizer.pad_token is not None:
                padding_mask = labels.eq(self.tokenizer.pad_token_id)
                probability_matrix.masked_fill_(padding_mask, value=0.0)
            masked_indices = torch.bernoulli(probability_matrix).bool()
            # probability be `1` (masked), however in albert model attention mask `0` means masked, revert the value
            attention_mask = (~masked_indices).float()
            if self.tokenizer.pad_token is not None:
                attention_padding_mask = labels.eq(self.tokenizer.pad_token_id)
                attention_mask.masked_fill_(attention_padding_mask, value=1.0)
            labels[~masked_indices] = -100  # We only compute loss on masked tokens, -100 is default for CE compute

            # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
            indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
            inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)

            # 10% of the time, we replace masked input tokens with random word
            indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
            random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
            inputs[indices_random] = random_words[indices_random]

            # The rest of the time (10% of the time) we keep the masked input tokens unchanged
            return inputs, labels, attention_mask
1533
+
1534
+
1535
+ @dataclass
1536
+ class DataCollatorForPermutationLanguageModeling(DataCollatorMixin):
1537
+ """
1538
+ Data collator used for permutation language modeling.
1539
+
1540
+ - collates batches of tensors, honoring their tokenizer's pad_token
1541
+ - preprocesses batches for permutation language modeling with procedures specific to XLNet
1542
+ """
1543
+
1544
+ tokenizer: PreTrainedTokenizerBase
1545
+ plm_probability: float = 1 / 6
1546
+ max_span_length: int = 5 # maximum length of a span of masked tokens
1547
+ return_tensors: str = "pt"
1548
+
1549
+ def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
1550
+ if isinstance(examples[0], Mapping):
1551
+ examples = [e["input_ids"] for e in examples]
1552
+ batch = _torch_collate_batch(examples, self.tokenizer)
1553
+ inputs, perm_mask, target_mapping, labels = self.torch_mask_tokens(batch)
1554
+ return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
1555
+
1556
+ def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
1557
+ if isinstance(examples[0], Mapping):
1558
+ examples = [e["input_ids"] for e in examples]
1559
+ batch = _tf_collate_batch(examples, self.tokenizer)
1560
+ inputs, perm_mask, target_mapping, labels = self.tf_mask_tokens(batch)
1561
+ return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
1562
+
1563
+ def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
1564
+ if isinstance(examples[0], Mapping):
1565
+ examples = [e["input_ids"] for e in examples]
1566
+ batch = _numpy_collate_batch(examples, self.tokenizer)
1567
+ inputs, perm_mask, target_mapping, labels = self.numpy_mask_tokens(batch)
1568
+ return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
1569
+
1570
+ def torch_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
1571
+ """
1572
+ The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
1573
+
1574
+ 0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1575
+ 1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
1576
+ 2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
1577
+ masked
1578
+ 3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
1579
+ span_length]` and mask tokens `start_index:start_index + span_length`
1580
+ 4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
1581
+ sequence to be processed), repeat from Step 1.
1582
+ """
1583
+ import torch
1584
+
1585
+ if self.tokenizer.mask_token is None:
1586
+ raise ValueError(
1587
+ "This tokenizer does not have a mask token which is necessary for permutation language modeling."
1588
+ " Please add a mask token if you want to use this tokenizer."
1589
+ )
1590
+
1591
+ if inputs.size(1) % 2 != 0:
1592
+ raise ValueError(
1593
+ "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
1594
+ " relevant comments in source code for details."
1595
+ )
1596
+
1597
+ labels = inputs.clone()
1598
+ # Creating the mask and target_mapping tensors
1599
+ masked_indices = torch.full(labels.shape, 0, dtype=torch.bool)
1600
+ target_mapping = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
1601
+
1602
+ for i in range(labels.size(0)):
1603
+ # Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1604
+ cur_len = 0
1605
+ max_len = labels.size(1)
1606
+
1607
+ while cur_len < max_len:
1608
+ # Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
1609
+ span_length = torch.randint(1, self.max_span_length + 1, (1,)).item()
1610
+ # Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
1611
+ context_length = int(span_length / self.plm_probability)
1612
+ # Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
1613
+ start_index = cur_len + torch.randint(context_length - span_length + 1, (1,)).item()
1614
+ masked_indices[i, start_index : start_index + span_length] = 1
1615
+ # Set `cur_len = cur_len + context_length`
1616
+ cur_len += context_length
1617
+
1618
+ # Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
1619
+ # the i-th predict corresponds to the i-th token.
1620
+ target_mapping[i] = torch.eye(labels.size(1))
1621
+
1622
+ special_tokens_mask = torch.tensor(
1623
+ [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
1624
+ dtype=torch.bool,
1625
+ )
1626
+ masked_indices.masked_fill_(special_tokens_mask, value=0.0)
1627
+ if self.tokenizer.pad_token is not None:
1628
+ padding_mask = labels.eq(self.tokenizer.pad_token_id)
1629
+ masked_indices.masked_fill_(padding_mask, value=0.0)
1630
+
1631
+ # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
1632
+ non_func_mask = ~(padding_mask | special_tokens_mask)
1633
+
1634
+ inputs[masked_indices] = self.tokenizer.mask_token_id
1635
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
1636
+
1637
+ perm_mask = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
1638
+
1639
+ for i in range(labels.size(0)):
1640
+ # Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
1641
+ # determine which tokens a given token can attend to (encoded in `perm_mask`).
1642
+ # Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
1643
+ # (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
1644
+ # we assume that reused length is half of sequence length and permutation length is equal to reused length.
1645
+ # This requires that the sequence length be even.
1646
+
1647
+ # Create a linear factorisation order
1648
+ perm_index = torch.arange(labels.size(1))
1649
+ # Split this into two halves, assuming that half the sequence is reused each time
1650
+ perm_index = perm_index.reshape((-1, labels.size(1) // 2)).transpose(0, 1)
1651
+ # Permute the two halves such that they do not cross over
1652
+ perm_index = perm_index[torch.randperm(labels.size(1) // 2)]
1653
+ # Flatten this out into the desired permuted factorisation order
1654
+ perm_index = torch.flatten(perm_index.transpose(0, 1))
1655
+ # Set the permutation indices of non-masked (non-functional) tokens to the
1656
+ # smallest index (-1) so that:
1657
+ # (1) They can be seen by all other positions
1658
+ # (2) They cannot see masked positions, so there won't be information leak
1659
+ perm_index.masked_fill_(~masked_indices[i] & non_func_mask[i], -1)
1660
+ # The logic for whether the i-th token can attend on the j-th token based on the factorisation order:
1661
+ # 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
1662
+ # 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
1663
+ perm_mask[i] = (
1664
+ perm_index.reshape((labels.size(1), 1)) <= perm_index.reshape((1, labels.size(1)))
1665
+ ) & masked_indices[i]
1666
+
1667
+ return inputs.long(), perm_mask, target_mapping, labels.long()
1668
+
1669
def tf_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
    """
    The masked tokens to be predicted for a particular sequence are determined by the following algorithm:

    0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
    1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
    2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
       masked
    3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
       span_length]` and mask tokens `start_index:start_index + span_length`
    4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
       sequence to be processed), repeat from Step 1.

    Returns `(inputs, perm_mask, target_mapping, labels)` as TF tensors; `labels` is -100 everywhere
    except at masked positions, so loss is only computed on masked tokens.
    """
    import tensorflow as tf

    if self.tokenizer.mask_token is None:
        raise ValueError(
            "This tokenizer does not have a mask token which is necessary for permutation language modeling."
            " Please add a mask token if you want to use this tokenizer."
        )

    if tf.shape(inputs)[1] % 2 != 0:
        raise ValueError(
            "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
            " relevant comments in source code for details."
        )

    labels = tf.identity(inputs)
    # Creating the mask and target_mapping tensors (built in numpy, converted to TF below)
    masked_indices = np.full(labels.shape.as_list(), 0, dtype=bool)
    labels_shape = tf.shape(labels)
    target_mapping = np.zeros((labels_shape[0], labels_shape[1], labels_shape[1]), dtype=np.float32)

    for i in range(len(labels)):
        # Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
        cur_len = 0
        max_len = tf.shape(labels)[1]

        while cur_len < max_len:
            # Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
            # NOTE(review): `randint` is `random.randint`, whose upper bound is inclusive, so spans of
            # `max_span_length + 1` tokens can occur — confirm this matches the documented interval.
            span_length = randint(1, self.max_span_length + 1)
            # Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
            context_length = int(span_length / self.plm_probability)
            # Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
            start_index = cur_len + randint(0, context_length - span_length + 1)
            masked_indices[i, start_index : start_index + span_length] = 1
            # Set `cur_len = cur_len + context_length`
            cur_len += context_length

        # Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
        # the i-th predict corresponds to the i-th token.
        target_mapping[i] = np.eye(labels_shape[1])
    masked_indices = tf.cast(tf.convert_to_tensor(masked_indices), dtype=tf.bool)
    target_mapping = tf.convert_to_tensor(target_mapping)
    special_tokens_mask = tf.convert_to_tensor(
        [
            self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
            for val in labels.numpy().tolist()
        ],
    )
    special_tokens_mask = tf.cast(special_tokens_mask, dtype=tf.bool)
    masked_indices = masked_indices & ~special_tokens_mask
    # BUG FIX: `padding_mask` was previously assigned only inside this `if` branch but is read
    # unconditionally below (`non_func_mask = ~(padding_mask | ...)`), raising NameError for
    # tokenizers that have no pad token. Default to an all-False mask in that case.
    if self.tokenizer.pad_token is not None:
        padding_mask = labels == self.tokenizer.pad_token_id
        masked_indices = masked_indices & ~padding_mask
    else:
        padding_mask = tf.zeros_like(labels, dtype=tf.bool)

    # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
    non_func_mask = ~(padding_mask | special_tokens_mask)

    inputs = tf.where(masked_indices, self.tokenizer.mask_token_id, inputs)
    labels = tf.where(masked_indices, labels, -100)  # We only compute loss on masked tokens

    perm_mask = []

    for i in range(len(labels)):
        # Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
        # determine which tokens a given token can attend to (encoded in `perm_mask`).
        # Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
        # (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
        # we assume that reused length is half of sequence length and permutation length is equal to reused length.
        # This requires that the sequence length be even.

        # Create a linear factorisation order
        # tf.range is the equivalent of torch.arange
        perm_index = tf.range(labels_shape[1])
        # Split this into two halves, assuming that half the sequence is reused each time
        perm_index = tf.transpose(tf.reshape(perm_index, (-1, labels_shape[1] // 2)))
        # Permute the two halves such that they do not cross over
        perm_index = tf.random.shuffle(perm_index)  # Shuffles along the first dimension
        # Flatten this out into the desired permuted factorisation order
        perm_index = tf.reshape(tf.transpose(perm_index), (-1,))
        # Set the permutation indices of non-masked (non-functional) tokens to the
        # smallest index (-1) so that:
        # (1) They can be seen by all other positions
        # (2) They cannot see masked positions, so there won't be information leak
        perm_index = tf.where(~masked_indices[i] & non_func_mask[i], -1, perm_index)
        # The logic for whether the i-th token can attend on the j-th token based on the factorisation order:
        # 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
        # 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
        perm_mask.append(
            (tf.reshape(perm_index, (labels_shape[1], 1)) <= tf.reshape(perm_index, (1, labels_shape[1])))
            & masked_indices[i]
        )
    perm_mask = tf.stack(perm_mask, axis=0)

    return tf.cast(inputs, tf.int64), tf.cast(perm_mask, tf.float32), target_mapping, tf.cast(labels, tf.int64)
1775
+
1776
def numpy_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
    """
    The masked tokens to be predicted for a particular sequence are determined by the following algorithm:

    0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
    1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
    2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
       masked
    3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
       span_length]` and mask tokens `start_index:start_index + span_length`
    4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
       sequence to be processed), repeat from Step 1.

    Returns `(inputs, perm_mask, target_mapping, labels)` as numpy arrays; `labels` is -100 everywhere
    except at masked positions, so loss is only computed on masked tokens.
    """
    if self.tokenizer.mask_token is None:
        raise ValueError(
            "This tokenizer does not have a mask token which is necessary for permutation language modeling."
            " Please add a mask token if you want to use this tokenizer."
        )

    if inputs.shape[1] % 2 != 0:
        raise ValueError(
            "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
            " relevant comments in source code for details."
        )

    labels = np.copy(inputs)
    # Creating the mask and target_mapping tensors
    masked_indices = np.full(labels.shape, 0, dtype=bool)
    target_mapping = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)

    for i in range(labels.shape[0]):
        # Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
        cur_len = 0
        max_len = labels.shape[1]

        while cur_len < max_len:
            # Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
            # NOTE(review): `randint` is `random.randint`, whose upper bound is inclusive, so spans of
            # `max_span_length + 1` tokens can occur — confirm this matches the documented interval.
            span_length = randint(1, self.max_span_length + 1)
            # Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
            context_length = int(span_length / self.plm_probability)
            # Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
            start_index = cur_len + randint(0, context_length - span_length + 1)
            masked_indices[i, start_index : start_index + span_length] = 1
            # Set `cur_len = cur_len + context_length`
            cur_len += context_length

        # Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
        # the i-th predict corresponds to the i-th token.
        target_mapping[i] = np.eye(labels.shape[1])

    special_tokens_mask = np.array(
        [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
        dtype=bool,
    )
    masked_indices[special_tokens_mask] = 0
    # BUG FIX: `padding_mask` was previously assigned only inside this `if` branch but is read
    # unconditionally below (`non_func_mask = ~(padding_mask | ...)`), raising NameError for
    # tokenizers that have no pad token. Default to an all-False mask in that case.
    padding_mask = np.zeros(labels.shape, dtype=bool)
    if self.tokenizer.pad_token is not None:
        padding_mask = labels == self.tokenizer.pad_token_id
        masked_indices[padding_mask] = 0

    # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
    non_func_mask = ~(padding_mask | special_tokens_mask)

    inputs[masked_indices] = self.tokenizer.mask_token_id
    labels[~masked_indices] = -100  # We only compute loss on masked tokens

    perm_mask = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)

    for i in range(labels.shape[0]):
        # Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
        # determine which tokens a given token can attend to (encoded in `perm_mask`).
        # Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
        # (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
        # we assume that reused length is half of sequence length and permutation length is equal to reused length.
        # This requires that the sequence length be even.

        # Create a linear factorisation order
        perm_index = np.arange(labels.shape[1])
        # Split this into two halves, assuming that half the sequence is reused each time
        perm_index = perm_index.reshape((-1, labels.shape[1] // 2)).T
        # Permute the two halves such that they do not cross over
        np.random.shuffle(perm_index)
        # Flatten this out into the desired permuted factorisation order
        perm_index = perm_index.T.flatten()
        # Set the permutation indices of non-masked (non-functional) tokens to the
        # smallest index (-1) so that:
        # (1) They can be seen by all other positions
        # (2) They cannot see masked positions, so there won't be information leak
        perm_index[~masked_indices[i] & non_func_mask[i]] = -1
        # The logic for whether the i-th token can attend on the j-th token based on the factorisation order:
        # 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
        # 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
        perm_mask[i] = (
            perm_index.reshape((labels.shape[1], 1)) <= perm_index.reshape((1, labels.shape[1]))
        ) & masked_indices[i]

    return inputs.astype(np.int64), perm_mask, target_mapping, labels.astype(np.int64)
1872
+
1873
+
1874
@dataclass
class DataCollatorWithFlattening(DefaultDataCollator):
    """
    Data collator for the padding-free approach. Does the following:

    - concatenates the entire mini batch into a single long sequence [1, total_tokens]
    - uses `separator_id` to separate sequences within the concatenated `labels`, default value is -100
    - no padding will be added, returns `input_ids`, `labels` and `position_ids`

    <Tip warning={true}>

    Using `DataCollatorWithFlattening` will flatten the entire mini batch into single long sequence.
    Make sure your attention computation is able to handle it!

    </Tip>
    """

    def __init__(self, *args, return_position_ids=True, separator_id=-100, **kwargs):
        super().__init__(*args, **kwargs)
        self.return_position_ids = return_position_ids
        self.separator_id = separator_id

    def __call__(self, features, return_tensors=None, separator_id=None):
        # Per-call overrides fall back to the instance-level settings.
        return_tensors = self.return_tensors if return_tensors is None else return_tensors
        separator_id = self.separator_id if separator_id is None else separator_id
        has_labels = "labels" in features[0]
        flattened = {"input_ids": [], "labels": []}
        if self.return_position_ids:
            flattened["position_ids"] = []
        for feature in features:
            ids = feature["input_ids"]
            flattened["input_ids"] += ids
            # Each sequence's first label is replaced by the separator so sequence
            # boundaries survive the concatenation.
            label_source = feature["labels"] if has_labels else ids
            flattened["labels"] += [separator_id] + label_source[1:]
            if self.return_position_ids:
                flattened["position_ids"] += list(range(len(ids)))
        return default_data_collator([flattened], return_tensors)
vllm/lib/python3.10/site-packages/transformers/data/datasets/__init__.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .glue import GlueDataset, GlueDataTrainingArguments
16
+ from .language_modeling import (
17
+ LineByLineTextDataset,
18
+ LineByLineWithRefDataset,
19
+ LineByLineWithSOPTextDataset,
20
+ TextDataset,
21
+ TextDatasetForNextSentencePrediction,
22
+ )
23
+ from .squad import SquadDataset, SquadDataTrainingArguments
vllm/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (537 Bytes). View file
 
vllm/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/glue.cpython-310.pyc ADDED
Binary file (4.85 kB). View file
 
vllm/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/language_modeling.cpython-310.pyc ADDED
Binary file (13 kB). View file
 
vllm/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/squad.cpython-310.pyc ADDED
Binary file (6.34 kB). View file
 
vllm/lib/python3.10/site-packages/transformers/data/datasets/glue.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import os
16
+ import time
17
+ import warnings
18
+ from dataclasses import dataclass, field
19
+ from enum import Enum
20
+ from typing import List, Optional, Union
21
+
22
+ import torch
23
+ from filelock import FileLock
24
+ from torch.utils.data import Dataset
25
+
26
+ from ...tokenization_utils_base import PreTrainedTokenizerBase
27
+ from ...utils import logging
28
+ from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
29
+ from ..processors.utils import InputFeatures
30
+
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+
35
@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command
    line.
    """

    # Which GLUE task to run; must be a key of `glue_processors` (normalized to lowercase below).
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    # Directory holding the raw task files; also used as the default cache location by `GlueDataset`.
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        # The `glue_processors` / `glue_output_modes` registries are keyed by lowercase
        # names; normalize so lookups succeed regardless of how the task was spelled.
        self.task_name = self.task_name.lower()
63
+
64
+
65
class Split(Enum):
    """Dataset split selector; the enum `value` is the split name used in cache-file names."""

    train = "train"
    dev = "dev"
    test = "test"
69
+
70
+
71
class GlueDataset(Dataset):
    """
    Torch dataset of GLUE `InputFeatures`, built from raw task files via the task's
    processor and cached on disk per (split, tokenizer class, max length, task).

    This will be superseded by a framework-agnostic approach soon.
    """

    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        # Accept either a `Split` member or its string name.
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file.
        # The cache key encodes everything that changes the features: split, tokenizer
        # class, max sequence length, and task name.
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                # NOTE(review): this unpickles the cache file; only load caches from a
                # trusted directory.
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        # Returns the (possibly RoBERTa-reordered) label list computed in __init__.
        return self.label_list
vllm/lib/python3.10/site-packages/transformers/data/datasets/language_modeling.py ADDED
@@ -0,0 +1,530 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import json
16
+ import os
17
+ import pickle
18
+ import random
19
+ import time
20
+ import warnings
21
+ from typing import Dict, List, Optional
22
+
23
+ import torch
24
+ from filelock import FileLock
25
+ from torch.utils.data import Dataset
26
+
27
+ from ...tokenization_utils import PreTrainedTokenizer
28
+ from ...utils import logging
29
+
30
+
31
+ logger = logging.get_logger(__name__)
32
+
33
+
34
# FutureWarning template shared by the legacy datasets below; "{0}" is filled with the
# URL of the example script that replaces the given dataset.
DEPRECATION_WARNING = (
    "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
    "library. You can have a look at this example script for pointers: {0}"
)
38
+
39
+
40
class TextDataset(Dataset):
    """
    Block-wise language-modeling dataset over a single text file: the whole file is
    tokenized once and cut into contiguous `block_size`-token examples, which are
    cached on disk next to the input file (or in `cache_dir`).

    This will be superseded by a framework-agnostic approach soon.
    """

    def __init__(
        self,
        tokenizer: PreTrainedTokenizer,
        file_path: str,
        block_size: int,
        overwrite_cache=False,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            DEPRECATION_WARNING.format(
                "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
            ),
            FutureWarning,
        )
        if os.path.isfile(file_path) is False:
            raise ValueError(f"Input file path {file_path} not found")

        # Reserve room for the special tokens that `build_inputs_with_special_tokens`
        # adds back to every block below.
        block_size = block_size - tokenizer.num_special_tokens_to_add(pair=False)

        directory, filename = os.path.split(file_path)
        # BUG FIX: the cache-file name previously ended with the literal string "(unknown)"
        # instead of the source file's name (`filename` was computed but unused), so two
        # different input files sharing a cache directory collided on one cache entry.
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else directory,
            f"cached_lm_{tokenizer.__class__.__name__}_{block_size}_{filename}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                start = time.time()
                # NOTE(review): this unpickles the cache file; only load caches from a
                # trusted directory.
                with open(cached_features_file, "rb") as handle:
                    self.examples = pickle.load(handle)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

            else:
                logger.info(f"Creating features from dataset file at {directory}")

                self.examples = []
                with open(file_path, encoding="utf-8") as f:
                    text = f.read()

                tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))

                for i in range(0, len(tokenized_text) - block_size + 1, block_size):  # Truncate in block of block_size
                    self.examples.append(
                        tokenizer.build_inputs_with_special_tokens(tokenized_text[i : i + block_size])
                    )
                # Note that we are losing the last truncated example here for the sake of simplicity (no padding)
                # If your dataset is small, first you should look for a bigger one :-) and second you
                # can change this behavior by adding (model specific) padding.

                start = time.time()
                with open(cached_features_file, "wb") as handle:
                    pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i) -> torch.Tensor:
        return torch.tensor(self.examples[i], dtype=torch.long)
111
+
112
+
113
class LineByLineTextDataset(Dataset):
    """
    Line-by-line language-modeling dataset: every non-blank line of the input file
    becomes one tokenized example, truncated to `block_size`. No on-disk caching.

    This will be superseded by a framework-agnostic approach soon.
    """

    def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int):
        warnings.warn(
            DEPRECATION_WARNING.format(
                "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
            ),
            FutureWarning,
        )
        if os.path.isfile(file_path) is False:
            raise ValueError(f"Input file path {file_path} not found")
        # Here, we do not cache the features, operating under the assumption
        # that we will soon use fast multithreaded tokenizers from the
        # `tokenizers` repo everywhere =)
        logger.info(f"Creating features from dataset file at {file_path}")

        with open(file_path, encoding="utf-8") as f:
            kept_lines = [ln for ln in f.read().splitlines() if len(ln) > 0 and not ln.isspace()]

        encoded = tokenizer(kept_lines, add_special_tokens=True, truncation=True, max_length=block_size)
        self.examples = [{"input_ids": torch.tensor(ids, dtype=torch.long)} for ids in encoded["input_ids"]]

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i) -> Dict[str, torch.tensor]:
        return self.examples[i]
144
+
145
+
146
class LineByLineWithRefDataset(Dataset):
    """
    Line-by-line MLM dataset with whole-word-masking reference info: line *i* of
    `file_path` is paired with the JSON segmentation on line *i* of `ref_path`
    (stored under the `"chinese_ref"` key of each example).

    This will be superseded by a framework-agnostic approach soon.
    """

    def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, ref_path: str):
        warnings.warn(
            DEPRECATION_WARNING.format(
                "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm_wwm.py"
            ),
            FutureWarning,
        )
        if os.path.isfile(file_path) is False:
            raise ValueError(f"Input file path {file_path} not found")
        if os.path.isfile(ref_path) is False:
            # BUG FIX: the message previously interpolated `file_path`, pointing the user
            # at the wrong (existing) file when the ref file was the one missing.
            raise ValueError(f"Ref file path {ref_path} not found")
        # Here, we do not cache the features, operating under the assumption
        # that we will soon use fast multithreaded tokenizers from the
        # `tokenizers` repo everywhere =)
        logger.info(f"Creating features from dataset file at {file_path}")
        logger.info(f"Use ref segment results at {ref_path}")
        with open(file_path, encoding="utf-8") as f:
            data = f.readlines()  # use this method to avoid delimiter '\u2029' to split a line
        data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]
        # Get ref inf from file
        with open(ref_path, encoding="utf-8") as f:
            ref = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
        # Lines and refs are paired positionally, so the counts must match exactly.
        if len(data) != len(ref):
            raise ValueError(
                f"Length of Input file should be equal to Ref file. But the length of {file_path} is {len(data)} "
                f"while length of {ref_path} is {len(ref)}"
            )

        batch_encoding = tokenizer(data, add_special_tokens=True, truncation=True, max_length=block_size)
        self.examples = batch_encoding["input_ids"]
        self.examples = [{"input_ids": torch.tensor(e, dtype=torch.long)} for e in self.examples]

        n = len(self.examples)
        for i in range(n):
            self.examples[i]["chinese_ref"] = torch.tensor(ref[i], dtype=torch.long)

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i) -> Dict[str, torch.tensor]:
        return self.examples[i]
192
+
193
+
194
+ class LineByLineWithSOPTextDataset(Dataset):
195
+ """
196
+ Dataset for sentence order prediction task, prepare sentence pairs for SOP task
197
+ """
198
+
199
def __init__(self, tokenizer: PreTrainedTokenizer, file_dir: str, block_size: int):
    """
    Walk every file in `file_dir` (dumps where articles are delimited by
    `<doc id=` / `</doc>` marker lines) and build SOP examples from each article.
    """
    warnings.warn(
        DEPRECATION_WARNING.format(
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
        ),
        FutureWarning,
    )
    if os.path.isdir(file_dir) is False:
        raise ValueError(f"{file_dir} is not a directory")
    logger.info(f"Creating features from dataset file folder at {file_dir}")
    self.examples = []
    # TODO: randomness could apply a random seed, ex. rng = random.Random(random_seed)
    # file path looks like ./dataset/wiki_1, ./dataset/wiki_2
    for file_name in os.listdir(file_dir):
        file_path = os.path.join(file_dir, file_name)
        if os.path.isfile(file_path) is False:
            raise ValueError(f"{file_path} is not a file")
        article_open = False  # True while we are between a `<doc id=` line and its `</doc>`
        with open(file_path, encoding="utf-8") as f:
            original_lines = f.readlines()
            article_lines = []
            for line in original_lines:
                if "<doc id=" in line:
                    article_open = True
                elif "</doc>" in line:
                    article_open = False
                    # Tokenize the buffered article; the first buffered line is skipped
                    # (presumably the article title — confirm against the dump format),
                    # as are blank lines. Note the comprehension's `line` shadows the
                    # outer loop variable.
                    document = [
                        tokenizer.convert_tokens_to_ids(tokenizer.tokenize(line))
                        for line in article_lines[1:]
                        if (len(line) > 0 and not line.isspace())
                    ]

                    examples = self.create_examples_from_document(document, block_size, tokenizer)
                    self.examples.extend(examples)
                    article_lines = []
                else:
                    if article_open:
                        article_lines.append(line)

    logger.info("Dataset parse finished.")
239
+
240
+ def create_examples_from_document(self, document, block_size, tokenizer, short_seq_prob=0.1):
241
+ """Creates examples for a single document."""
242
+
243
+ # Account for special tokens
244
+ max_num_tokens = block_size - tokenizer.num_special_tokens_to_add(pair=True)
245
+
246
+ # We *usually* want to fill up the entire sequence since we are padding
247
+ # to `block_size` anyways, so short sequences are generally wasted
248
+ # computation. However, we *sometimes*
249
+ # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
250
+ # sequences to minimize the mismatch between pretraining and fine-tuning.
251
+ # The `target_seq_length` is just a rough target however, whereas
252
+ # `block_size` is a hard limit.
253
+ target_seq_length = max_num_tokens
254
+ if random.random() < short_seq_prob:
255
+ target_seq_length = random.randint(2, max_num_tokens)
256
+
257
+ # We DON'T just concatenate all of the tokens from a document into a long
258
+ # sequence and choose an arbitrary split point because this would make the
259
+ # next sentence prediction task too easy. Instead, we split the input into
260
+ # segments "A" and "B" based on the actual "sentences" provided by the user
261
+ # input.
262
+ examples = []
263
+ current_chunk = [] # a buffer stored current working segments
264
+ current_length = 0
265
+ i = 0
266
+ while i < len(document):
267
+ segment = document[i] # get a segment
268
+ if not segment:
269
+ i += 1
270
+ continue
271
+ current_chunk.append(segment) # add a segment to current chunk
272
+ current_length += len(segment) # overall token length
273
+ # if current length goes to the target length or reaches the end of file, start building token a and b
274
+ if i == len(document) - 1 or current_length >= target_seq_length:
275
+ if current_chunk:
276
+ # `a_end` is how many segments from `current_chunk` go into the `A` (first) sentence.
277
+ a_end = 1
278
+ # if current chunk has more than 2 sentences, pick part of it `A` (first) sentence
279
+ if len(current_chunk) >= 2:
280
+ a_end = random.randint(1, len(current_chunk) - 1)
281
+ # token a
282
+ tokens_a = []
283
+ for j in range(a_end):
284
+ tokens_a.extend(current_chunk[j])
285
+
286
+ # token b
287
+ tokens_b = []
288
+ for j in range(a_end, len(current_chunk)):
289
+ tokens_b.extend(current_chunk[j])
290
+
291
+ if len(tokens_a) == 0 or len(tokens_b) == 0:
292
+ continue
293
+
294
+ # switch tokens_a and tokens_b randomly
295
+ if random.random() < 0.5:
296
+ is_next = False
297
+ tokens_a, tokens_b = tokens_b, tokens_a
298
+ else:
299
+ is_next = True
300
+
301
+ def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens):
302
+ """Truncates a pair of sequences to a maximum sequence length."""
303
+ while True:
304
+ total_length = len(tokens_a) + len(tokens_b)
305
+ if total_length <= max_num_tokens:
306
+ break
307
+ trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
308
+ if not (len(trunc_tokens) >= 1):
309
+ raise ValueError("Sequence length to be truncated must be no less than one")
310
+ # We want to sometimes truncate from the front and sometimes from the
311
+ # back to add more randomness and avoid biases.
312
+ if random.random() < 0.5:
313
+ del trunc_tokens[0]
314
+ else:
315
+ trunc_tokens.pop()
316
+
317
+ truncate_seq_pair(tokens_a, tokens_b, max_num_tokens)
318
+ if not (len(tokens_a) >= 1):
319
+ raise ValueError(f"Length of sequence a is {len(tokens_a)} which must be no less than 1")
320
+ if not (len(tokens_b) >= 1):
321
+ raise ValueError(f"Length of sequence b is {len(tokens_b)} which must be no less than 1")
322
+
323
+ # add special tokens
324
+ input_ids = tokenizer.build_inputs_with_special_tokens(tokens_a, tokens_b)
325
+ # add token type ids, 0 for sentence a, 1 for sentence b
326
+ token_type_ids = tokenizer.create_token_type_ids_from_sequences(tokens_a, tokens_b)
327
+
328
+ example = {
329
+ "input_ids": torch.tensor(input_ids, dtype=torch.long),
330
+ "token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
331
+ "sentence_order_label": torch.tensor(0 if is_next else 1, dtype=torch.long),
332
+ }
333
+ examples.append(example)
334
+ current_chunk = [] # clear current chunk
335
+ current_length = 0 # reset current text length
336
+ i += 1 # go to next line
337
+ return examples
338
+
339
+ def __len__(self):
340
+ return len(self.examples)
341
+
342
+ def __getitem__(self, i) -> Dict[str, torch.tensor]:
343
+ return self.examples[i]
344
+
345
+
346
class TextDatasetForNextSentencePrediction(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.

    Builds BERT-style next-sentence-prediction (NSP) examples from a text file
    containing one sentence per line, with blank lines delimiting documents.
    """

    def __init__(
        self,
        tokenizer: PreTrainedTokenizer,
        file_path: str,
        block_size: int,
        overwrite_cache=False,
        short_seq_probability=0.1,
        nsp_probability=0.5,
    ):
        warnings.warn(
            DEPRECATION_WARNING.format(
                "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
            ),
            FutureWarning,
        )
        if not os.path.isfile(file_path):
            raise ValueError(f"Input file path {file_path} not found")

        # Probability of targeting a shorter-than-max sequence, and probability
        # of sampling sentence B from a random document (label 1).
        self.short_seq_probability = short_seq_probability
        self.nsp_probability = nsp_probability

        directory, filename = os.path.split(file_path)
        # NOTE(review): the cache-file suffix is the literal string "(unknown)"
        # and `filename` is unused — this looks like a dump/extraction artifact;
        # confirm against upstream (which keys the cache on the source filename).
        cached_features_file = os.path.join(
            directory,
            f"cached_nsp_{tokenizer.__class__.__name__}_{block_size}_(unknown)",
        )

        self.tokenizer = tokenizer

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"

        # Input file format:
        # (1) One sentence per line. These should ideally be actual sentences, not
        # entire paragraphs or arbitrary spans of text. (Because we use the
        # sentence boundaries for the "next sentence prediction" task).
        # (2) Blank lines between documents. Document boundaries are needed so
        # that the "next sentence prediction" task doesn't span between documents.
        #
        # Example:
        # I am very happy.
        # Here is the second sentence.
        #
        # A new document.

        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                start = time.time()
                with open(cached_features_file, "rb") as handle:
                    self.examples = pickle.load(handle)
                # NOTE(review): mixes an f-string message with a lazy %-style
                # argument; the "%.3f" placeholder survives the f-string and is
                # filled in by logging's %-formatting.
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {directory}")

                # self.documents: one list of tokenized sentences per document.
                self.documents = [[]]
                with open(file_path, encoding="utf-8") as f:
                    while True:
                        line = f.readline()
                        if not line:
                            break
                        line = line.strip()

                        # Empty lines are used as document delimiters
                        if not line and len(self.documents[-1]) != 0:
                            self.documents.append([])
                        tokens = tokenizer.tokenize(line)
                        tokens = tokenizer.convert_tokens_to_ids(tokens)
                        # Only keep non-empty tokenizations (drops blank lines).
                        if tokens:
                            self.documents[-1].append(tokens)

                logger.info(f"Creating examples from {len(self.documents)} documents.")
                self.examples = []
                for doc_index, document in enumerate(self.documents):
                    self.create_examples_from_document(document, doc_index, block_size)

                start = time.time()
                with open(cached_features_file, "wb") as handle:
                    pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def create_examples_from_document(self, document: List[List[int]], doc_index: int, block_size: int):
        """Creates examples for a single document, appending to ``self.examples``."""

        # Room left after accounting for the special tokens of a sentence pair.
        max_num_tokens = block_size - self.tokenizer.num_special_tokens_to_add(pair=True)

        # We *usually* want to fill up the entire sequence since we are padding
        # to `block_size` anyways, so short sequences are generally wasted
        # computation. However, we *sometimes*
        # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
        # sequences to minimize the mismatch between pretraining and fine-tuning.
        # The `target_seq_length` is just a rough target however, whereas
        # `block_size` is a hard limit.
        target_seq_length = max_num_tokens
        if random.random() < self.short_seq_probability:
            target_seq_length = random.randint(2, max_num_tokens)

        current_chunk = []  # a buffer stored current working segments
        current_length = 0
        i = 0

        while i < len(document):
            segment = document[i]
            current_chunk.append(segment)
            current_length += len(segment)
            # Emit a pair when the soft target is reached or the document ends.
            if i == len(document) - 1 or current_length >= target_seq_length:
                if current_chunk:
                    # `a_end` is how many segments from `current_chunk` go into the `A`
                    # (first) sentence.
                    a_end = 1
                    if len(current_chunk) >= 2:
                        a_end = random.randint(1, len(current_chunk) - 1)

                    tokens_a = []
                    for j in range(a_end):
                        tokens_a.extend(current_chunk[j])

                    tokens_b = []

                    # Sample a *random* sentence B with probability nsp_probability
                    # (forced when the chunk has only one segment).
                    if len(current_chunk) == 1 or random.random() < self.nsp_probability:
                        is_random_next = True
                        target_b_length = target_seq_length - len(tokens_a)

                        # This should rarely go for more than one iteration for large
                        # corpora. However, just to be careful, we try to make sure that
                        # the random document is not the same as the document
                        # we're processing.
                        # NOTE(review): with a single document these 10 tries can all
                        # fail, and an empty trailing document would make the
                        # randint below raise — confirm inputs rule these out.
                        for _ in range(10):
                            random_document_index = random.randint(0, len(self.documents) - 1)
                            if random_document_index != doc_index:
                                break

                        random_document = self.documents[random_document_index]
                        random_start = random.randint(0, len(random_document) - 1)
                        for j in range(random_start, len(random_document)):
                            tokens_b.extend(random_document[j])
                            if len(tokens_b) >= target_b_length:
                                break
                        # We didn't actually use these segments so we "put them back" so
                        # they don't go to waste.
                        num_unused_segments = len(current_chunk) - a_end
                        i -= num_unused_segments
                    # Actual next
                    else:
                        is_random_next = False
                        for j in range(a_end, len(current_chunk)):
                            tokens_b.extend(current_chunk[j])

                    if not (len(tokens_a) >= 1):
                        raise ValueError(f"Length of sequence a is {len(tokens_a)} which must be no less than 1")
                    if not (len(tokens_b) >= 1):
                        raise ValueError(f"Length of sequence b is {len(tokens_b)} which must be no less than 1")

                    # add special tokens
                    input_ids = self.tokenizer.build_inputs_with_special_tokens(tokens_a, tokens_b)
                    # add token type ids, 0 for sentence a, 1 for sentence b
                    token_type_ids = self.tokenizer.create_token_type_ids_from_sequences(tokens_a, tokens_b)

                    example = {
                        "input_ids": torch.tensor(input_ids, dtype=torch.long),
                        "token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
                        "next_sentence_label": torch.tensor(1 if is_random_next else 0, dtype=torch.long),
                    }

                    self.examples.append(example)

                current_chunk = []
                current_length = 0

            i += 1

    def __len__(self):
        """Return the number of prepared NSP examples."""
        return len(self.examples)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        """Return the i-th example dict (input_ids, token_type_ids, next_sentence_label)."""
        return self.examples[i]
vllm/lib/python3.10/site-packages/transformers/data/datasets/squad.py ADDED
@@ -0,0 +1,229 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import os
16
+ import time
17
+ from dataclasses import dataclass, field
18
+ from enum import Enum
19
+ from typing import Dict, List, Optional, Union
20
+
21
+ import torch
22
+ from filelock import FileLock
23
+ from torch.utils.data import Dataset
24
+
25
+ from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
26
+ from ...tokenization_utils import PreTrainedTokenizer
27
+ from ...utils import logging
28
+ from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
29
+
30
+
31
+ logger = logging.get_logger(__name__)
32
+
33
+ MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
34
+ MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
35
+
36
+
37
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    # Fix: the help text was accidentally copy-pasted from
    # `null_score_diff_threshold`; this field controls the n-best list size.
    n_best_size: int = field(
        default=20, metadata={"help": "The total number of n-best predictions to generate."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
102
+
103
+
104
class Split(Enum):
    # Dataset split selector for `SquadDataset`; the string values are also
    # embedded in the cache-file names.
    train = "train"
    dev = "dev"
107
+
108
+
109
class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.

    Loads SQuAD examples (v1 or v2) from `args.data_dir`, converts them to
    features, and caches the result on disk.
    """

    # Training/eval arguments used throughout the dataset.
    args: SquadDataTrainingArguments
    # Converted features, one per (possibly chunked) example window.
    features: List[SquadFeatures]
    # Which split this dataset represents.
    mode: Split
    # Whether to emit per-token language ids (XLM-style models).
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,  # NOTE(review): accepted but never used
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        # v2 adds unanswerable questions; pick the matching processor.
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                # NOTE(review): torch.load unpickles arbitrary objects — only
                # safe because the cache file is produced locally below.
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                # NOTE(review): f-string mixed with a lazy %-arg; the "%.3f"
                # placeholder is filled by logging's %-formatting.
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        """Number of feature windows (not raw examples)."""
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        """Build the model-input dict for the i-th feature window."""
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        # These model families do not use token type ids.
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        # XLNet/XLM take extra inputs for answerability and language.
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        # Gold answer spans are only available (and needed) for training.
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
vllm/lib/python3.10/site-packages/transformers/data/metrics/__init__.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed under the Apache License, Version 2.0 (the "License");
2
+ # you may not use this file except in compliance with the License.
3
+ # You may obtain a copy of the License at
4
+ #
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ #
7
+ # Unless required by applicable law or agreed to in writing, software
8
+ # distributed under the License is distributed on an "AS IS" BASIS,
9
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ # See the License for the specific language governing permissions and
11
+ # limitations under the License.
12
+
13
+ import warnings
14
+
15
+ from ...utils import is_sklearn_available, requires_backends
16
+
17
+
18
+ if is_sklearn_available():
19
+ from scipy.stats import pearsonr, spearmanr
20
+ from sklearn.metrics import f1_score, matthews_corrcoef
21
+
22
+
23
+ DEPRECATION_WARNING = (
24
+ "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
25
+ "library. You can have a look at this example script for pointers: "
26
+ "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
27
+ )
28
+
29
+
30
def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the labels."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    matches = preds == labels
    return matches.mean()
34
+
35
+
36
def acc_and_f1(preds, labels):
    """Accuracy, F1, and their arithmetic mean."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    accuracy = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {"acc": accuracy, "f1": f1, "acc_and_f1": (accuracy + f1) / 2}
46
+
47
+
48
def pearson_and_spearman(preds, labels):
    """Pearson and Spearman correlations, plus their arithmetic mean."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson = pearsonr(preds, labels)[0]
    spearman = spearmanr(preds, labels)[0]
    return {"pearson": pearson, "spearmanr": spearman, "corr": (pearson + spearman) / 2}
58
+
59
+
60
def glue_compute_metrics(task_name, preds, labels):
    """Compute the metric(s) for a GLUE task.

    Args:
        task_name: GLUE task identifier (e.g. "cola", "mrpc", "sts-b").
        preds: model predictions, same length as `labels`.
        labels: gold labels.

    Returns:
        Dict of metric name -> value for the task.

    Raises:
        ValueError: if `preds` and `labels` have different lengths.
        KeyError: if `task_name` is unknown.
    """
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    # Fix: was a bare `assert`, which is stripped under `python -O`; raise
    # ValueError instead, consistent with `xnli_compute_metrics` below.
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    if task_name in ("mrpc", "qqp"):
        return acc_and_f1(preds, labels)
    if task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    if task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    if task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    # The remaining tasks all report plain accuracy.
    if task_name in ("sst-2", "qnli", "rte", "wnli", "hans"):
        return {"acc": simple_accuracy(preds, labels)}
    raise KeyError(task_name)
88
+
89
+
90
def xnli_compute_metrics(task_name, preds, labels):
    """Compute accuracy for the XNLI task; raise KeyError for any other task."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name != "xnli":
        raise KeyError(task_name)
    return {"acc": simple_accuracy(preds, labels)}
vllm/lib/python3.10/site-packages/transformers/data/metrics/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.53 kB). View file
 
vllm/lib/python3.10/site-packages/transformers/data/metrics/__pycache__/squad_metrics.cpython-310.pyc ADDED
Binary file (16 kB). View file
 
vllm/lib/python3.10/site-packages/transformers/data/metrics/squad_metrics.py ADDED
@@ -0,0 +1,779 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """
15
+ Very heavily inspired by the official evaluation script for SQuAD version 2.0 which was modified by XLNet authors to
16
+ update `find_best_threshold` scripts for SQuAD V2.0
17
+
18
+ In addition to basic functionality, we also compute additional statistics and plot precision-recall curves if an
19
+ additional na_prob.json file is provided. This file is expected to map question ID's to the model's predicted
20
+ probability that a question is unanswerable.
21
+ """
22
+
23
+ import collections
24
+ import json
25
+ import math
26
+ import re
27
+ import string
28
+
29
+ from ...models.bert import BasicTokenizer
30
+ from ...utils import logging
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+
36
def normalize_answer(s):
    """Lower-case, strip punctuation and articles, and collapse whitespace."""
    punctuation = set(string.punctuation)
    lowered = s.lower()
    # Drop punctuation character by character.
    no_punc = "".join(ch for ch in lowered if ch not in punctuation)
    # Replace the articles a/an/the with spaces.
    no_articles = re.sub(r"\b(a|an|the)\b", " ", no_punc, flags=re.UNICODE)
    # Collapse all runs of whitespace into single spaces.
    return " ".join(no_articles.split())
54
+
55
+
56
def get_tokens(s):
    """Whitespace tokens of the normalized answer; empty list for falsy input."""
    return normalize_answer(s).split() if s else []
60
+
61
+
62
def compute_exact(a_gold, a_pred):
    """1 if the normalized answers match exactly, else 0."""
    return 1 if normalize_answer(a_gold) == normalize_answer(a_pred) else 0
64
+
65
+
66
def compute_f1(a_gold, a_pred):
    """Token-level F1 between the gold and predicted answers."""
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
    if not gold_toks or not pred_toks:
        return int(gold_toks == pred_toks)
    overlap = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(overlap.values())
    if num_same == 0:
        return 0
    precision = num_same / len(pred_toks)
    recall = num_same / len(gold_toks)
    return 2 * precision * recall / (precision + recall)
80
+
81
+
82
def get_raw_scores(examples, preds):
    """
    Computes the exact and f1 scores from the examples and the model predictions.

    Args:
        examples: iterable of SQuAD examples exposing `qas_id` and `answers`
            (a list of dicts with a "text" key).
        preds: dict mapping `qas_id` -> predicted answer text.

    Returns:
        Two dicts keyed by `qas_id`: exact-match scores and F1 scores.
        Examples with no prediction are skipped with a warning.
    """
    exact_scores = {}
    f1_scores = {}

    for example in examples:
        qas_id = example.qas_id
        # Keep only gold answers that survive normalization (non-empty text).
        gold_answers = [answer["text"] for answer in example.answers if normalize_answer(answer["text"])]

        if not gold_answers:
            # For unanswerable questions, only correct answer is empty string
            gold_answers = [""]

        if qas_id not in preds:
            # Fix: use the module logger instead of a bare `print`, consistent
            # with the rest of this file.
            logger.warning("Missing prediction for %s", qas_id)
            continue

        prediction = preds[qas_id]
        # Score against the best-matching gold answer.
        exact_scores[qas_id] = max(compute_exact(a, prediction) for a in gold_answers)
        f1_scores[qas_id] = max(compute_f1(a, prediction) for a in gold_answers)

    return exact_scores, f1_scores
106
+
107
+
108
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Re-score questions whose no-answer probability exceeds the threshold.

    A question predicted "no answer" scores 1.0 only if it truly has no
    answer; all other questions keep their original score.
    """
    adjusted = {}
    for qid, score in scores.items():
        predicted_no_answer = na_probs[qid] > na_prob_thresh
        if predicted_no_answer:
            adjusted[qid] = float(not qid_to_has_ans[qid])
        else:
            adjusted[qid] = score
    return adjusted
117
+
118
+
119
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    """Aggregate per-question exact/F1 scores into percentage averages.

    If `qid_list` is given (and non-empty), only those question ids are
    aggregated; otherwise every entry of `exact_scores` is used.
    """
    if qid_list:
        total = len(qid_list)
        exact_sum = sum(exact_scores[k] for k in qid_list)
        f1_sum = sum(f1_scores[k] for k in qid_list)
    else:
        total = len(exact_scores)
        exact_sum = sum(exact_scores.values())
        f1_sum = sum(f1_scores.values())
    return collections.OrderedDict(
        [
            ("exact", 100.0 * exact_sum / total),
            ("f1", 100.0 * f1_sum / total),
            ("total", total),
        ]
    )
138
+
139
+
140
def merge_eval(main_eval, new_eval, prefix):
    """Copy every entry of `new_eval` into `main_eval` under a prefixed key."""
    main_eval.update({f"{prefix}_{key}": value for key, value in new_eval.items()})
143
+
144
+
145
def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans):
    """Sweep no-answer probabilities for the best threshold (SQuAD v2 variant).

    Returns:
        (best score as a percentage of len(scores), best threshold, mean raw
        score over answerable questions).
    """
    # Baseline: predicting "no answer" everywhere scores one point per truly
    # unanswerable question.
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    # Visit questions from lowest to highest no-answer probability; each step
    # flips one question from "predicted no-answer" to "predicted answered".
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):  # NOTE(review): `i` is unused
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            # Answering an unanswerable question costs a point; an empty
            # prediction is neutral.
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]

    # Accumulate the raw score over answerable questions only.
    has_ans_score, has_ans_cnt = 0, 0
    for qid in qid_list:
        if not qid_to_has_ans[qid]:
            continue
        has_ans_cnt += 1

        if qid not in scores:
            continue
        has_ans_score += scores[qid]

    # NOTE(review): divides by has_ans_cnt — raises ZeroDivisionError if no
    # question is answerable; confirm callers guarantee at least one.
    return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt
177
+
178
+
179
def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    """Record best thresholded exact/F1 scores (v2 variant) into ``main_eval``."""
    exact_best, exact_thr, exact_has_ans = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans)
    f1_best, f1_thr, f1_has_ans = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval.update(
        best_exact=exact_best,
        best_exact_thresh=exact_thr,
        best_f1=f1_best,
        best_f1_thresh=f1_thr,
        has_ans_exact=exact_has_ans,
        has_ans_f1=f1_has_ans,
    )
188
+
189
+
190
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Sweep no-answer probability thresholds; return (best score %, best threshold)."""
    # Baseline: predicting "no answer" everywhere scores one point per truly
    # unanswerable question.
    running = sum(1 for qid in qid_to_has_ans if not qid_to_has_ans[qid])
    best = running
    best_thresh = 0.0
    # Process questions in increasing no-answer probability.
    for qid in sorted(na_probs, key=na_probs.get):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            delta = scores[qid]
        else:
            # Answering an unanswerable question forfeits the baseline point,
            # unless the prediction was empty.
            delta = -1 if preds[qid] else 0
        running += delta
        if running > best:
            best = running
            best_thresh = na_probs[qid]
    return 100.0 * best / len(scores), best_thresh
211
+
212
+
213
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    """Record the best thresholded exact/F1 scores and their thresholds in ``main_eval``."""
    for metric, raw_scores in (("exact", exact_raw), ("f1", f1_raw)):
        best, thresh = find_best_thresh(preds, raw_scores, na_probs, qid_to_has_ans)
        main_eval[f"best_{metric}"] = best
        main_eval[f"best_{metric}_thresh"] = thresh
221
+
222
+
223
def squad_evaluate(examples, preds, no_answer_probs=None, no_answer_probability_threshold=1.0):
    """Compute SQuAD-style exact-match and F1 metrics, overall and split by
    answerable vs. unanswerable questions, plus best-threshold statistics.

    Args:
        examples: Iterable of example objects exposing ``qas_id`` and ``answers``.
        preds: Dict mapping ``qas_id`` to predicted answer text.
        no_answer_probs: Optional dict mapping ``qas_id`` to a no-answer
            probability; defaults to 0.0 for every prediction.
        no_answer_probability_threshold: Probability above which a question is
            treated as predicted unanswerable.

    Returns:
        ``OrderedDict`` of aggregate metrics.
    """
    # An example "has an answer" iff its gold answers list is non-empty.
    qas_id_to_has_answer = {example.qas_id: bool(example.answers) for example in examples}
    has_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if has_answer]
    no_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if not has_answer]

    if no_answer_probs is None:
        no_answer_probs = {k: 0.0 for k in preds}

    exact, f1 = get_raw_scores(examples, preds)

    # Zero out (or credit) scores for questions predicted unanswerable.
    exact_threshold = apply_no_ans_threshold(
        exact, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold
    )
    f1_threshold = apply_no_ans_threshold(f1, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold)

    evaluation = make_eval_dict(exact_threshold, f1_threshold)

    if has_answer_qids:
        has_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=has_answer_qids)
        merge_eval(evaluation, has_ans_eval, "HasAns")

    if no_answer_qids:
        no_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=no_answer_qids)
        merge_eval(evaluation, no_ans_eval, "NoAns")

    if no_answer_probs:
        find_all_best_thresh(evaluation, preds, exact, f1, no_answer_probs, qas_id_to_has_answer)

    return evaluation
252
+
253
+
254
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
    """Project the tokenized prediction back to the original text.

    Args:
        pred_text: Answer span as produced by the tokenizer (normalized).
        orig_text: Span of the original document text covering the prediction.
        do_lower_case: Whether the tokenizer lower-cases (used to re-tokenize
            ``orig_text`` consistently).
        verbose_logging: If True, log why the alignment heuristic bailed out.

    Returns:
        The best-effort original-cased answer text; falls back to ``orig_text``
        whenever the character alignment heuristic fails.
    """

    # When we created the data, we kept track of the alignment between original
    # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
    # now `orig_text` contains the span of our original text corresponding to the
    # span that we predicted.
    #
    # However, `orig_text` may contain extra characters that we don't want in
    # our prediction.
    #
    # For example, let's say:
    #   pred_text = steve smith
    #   orig_text = Steve Smith's
    #
    # We don't want to return `orig_text` because it contains the extra "'s".
    #
    # We don't want to return `pred_text` because it's already been normalized
    # (the SQuAD eval script also does punctuation stripping/lower casing but
    # our tokenizer does additional normalization like stripping accent
    # characters).
    #
    # What we really want to return is "Steve Smith".
    #
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
    # can fail in certain cases in which case we just return `orig_text`.

    def _strip_spaces(text):
        # Returns (text without spaces, map from stripped index -> original index).
        ns_chars = []
        ns_to_s_map = collections.OrderedDict()
        for i, c in enumerate(text):
            if c == " ":
                continue
            ns_to_s_map[len(ns_chars)] = i
            ns_chars.append(c)
        ns_text = "".join(ns_chars)
        return (ns_text, ns_to_s_map)

    # We first tokenize `orig_text`, strip whitespace from the result
    # and `pred_text`, and check if they are the same length. If they are
    # NOT the same length, the heuristic has failed. If they are the same
    # length, we assume the characters are one-to-one aligned.
    tokenizer = BasicTokenizer(do_lower_case=do_lower_case)

    tok_text = " ".join(tokenizer.tokenize(orig_text))

    start_position = tok_text.find(pred_text)
    if start_position == -1:
        if verbose_logging:
            logger.info(f"Unable to find text: '{pred_text}' in '{orig_text}'")
        return orig_text
    end_position = start_position + len(pred_text) - 1

    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)

    if len(orig_ns_text) != len(tok_ns_text):
        if verbose_logging:
            logger.info(f"Length not equal after stripping spaces: '{orig_ns_text}' vs '{tok_ns_text}'")
        return orig_text

    # We then project the characters in `pred_text` back to `orig_text` using
    # the character-to-character alignment.
    tok_s_to_ns_map = {}
    for i, tok_index in tok_ns_to_s_map.items():
        tok_s_to_ns_map[tok_index] = i

    orig_start_position = None
    if start_position in tok_s_to_ns_map:
        ns_start_position = tok_s_to_ns_map[start_position]
        if ns_start_position in orig_ns_to_s_map:
            orig_start_position = orig_ns_to_s_map[ns_start_position]

    if orig_start_position is None:
        if verbose_logging:
            logger.info("Couldn't map start position")
        return orig_text

    orig_end_position = None
    if end_position in tok_s_to_ns_map:
        ns_end_position = tok_s_to_ns_map[end_position]
        if ns_end_position in orig_ns_to_s_map:
            orig_end_position = orig_ns_to_s_map[ns_end_position]

    if orig_end_position is None:
        if verbose_logging:
            logger.info("Couldn't map end position")
        return orig_text

    output_text = orig_text[orig_start_position : (orig_end_position + 1)]
    return output_text
346
+
347
+
348
+ def _get_best_indexes(logits, n_best_size):
349
+ """Get the n-best logits from a list."""
350
+ index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
351
+
352
+ best_indexes = []
353
+ for i in range(len(index_and_score)):
354
+ if i >= n_best_size:
355
+ break
356
+ best_indexes.append(index_and_score[i][0])
357
+ return best_indexes
358
+
359
+
360
+ def _compute_softmax(scores):
361
+ """Compute softmax probability over raw logits."""
362
+ if not scores:
363
+ return []
364
+
365
+ max_score = None
366
+ for score in scores:
367
+ if max_score is None or score > max_score:
368
+ max_score = score
369
+
370
+ exp_scores = []
371
+ total_sum = 0.0
372
+ for score in scores:
373
+ x = math.exp(score - max_score)
374
+ exp_scores.append(x)
375
+ total_sum += x
376
+
377
+ probs = []
378
+ for score in exp_scores:
379
+ probs.append(score / total_sum)
380
+ return probs
381
+
382
+
383
def compute_predictions_logits(
    all_examples,
    all_features,
    all_results,
    n_best_size,
    max_answer_length,
    do_lower_case,
    output_prediction_file,
    output_nbest_file,
    output_null_log_odds_file,
    verbose_logging,
    version_2_with_negative,
    null_score_diff_threshold,
    tokenizer,
):
    """Write final predictions to the json file and log-odds of null if needed.

    Builds, for each example, the n-best answer spans from per-feature start/end
    logits, de-tokenizes them back to original text, and (for SQuAD v2) decides
    between the best non-null answer and the empty answer using the null-score
    margin against ``null_score_diff_threshold``. Output files are only written
    when the corresponding path argument is truthy.

    Returns:
        ``OrderedDict`` mapping ``qas_id`` to the predicted answer text.
    """
    if output_prediction_file:
        logger.info(f"Writing predictions to: {output_prediction_file}")
    if output_nbest_file:
        logger.info(f"Writing nbest to: {output_nbest_file}")
    if output_null_log_odds_file and version_2_with_negative:
        logger.info(f"Writing null_log_odds to: {output_null_log_odds_file}")

    # Group features by the example they were sliced from.
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)

    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result

    _PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
        "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]
    )

    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict()

    for example_index, example in enumerate(all_examples):
        features = example_index_to_features[example_index]

        prelim_predictions = []
        # keep track of the minimum score of null start+end of position 0
        score_null = 1000000  # large and positive
        min_null_feature_index = 0  # the paragraph slice with min null score
        null_start_logit = 0  # the start logit at the slice with min null score
        null_end_logit = 0  # the end logit at the slice with min null score
        for feature_index, feature in enumerate(features):
            result = unique_id_to_result[feature.unique_id]
            start_indexes = _get_best_indexes(result.start_logits, n_best_size)
            end_indexes = _get_best_indexes(result.end_logits, n_best_size)
            # if we could have irrelevant answers, get the min score of irrelevant
            if version_2_with_negative:
                feature_null_score = result.start_logits[0] + result.end_logits[0]
                if feature_null_score < score_null:
                    score_null = feature_null_score
                    min_null_feature_index = feature_index
                    null_start_logit = result.start_logits[0]
                    null_end_logit = result.end_logits[0]
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # We could hypothetically create invalid predictions, e.g., predict
                    # that the start of the span is in the question. We throw out all
                    # invalid predictions.
                    if start_index >= len(feature.tokens):
                        continue
                    if end_index >= len(feature.tokens):
                        continue
                    if start_index not in feature.token_to_orig_map:
                        continue
                    if end_index not in feature.token_to_orig_map:
                        continue
                    if not feature.token_is_max_context.get(start_index, False):
                        continue
                    if end_index < start_index:
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue
                    prelim_predictions.append(
                        _PrelimPrediction(
                            feature_index=feature_index,
                            start_index=start_index,
                            end_index=end_index,
                            start_logit=result.start_logits[start_index],
                            end_logit=result.end_logits[end_index],
                        )
                    )
        if version_2_with_negative:
            prelim_predictions.append(
                _PrelimPrediction(
                    feature_index=min_null_feature_index,
                    start_index=0,
                    end_index=0,
                    start_logit=null_start_logit,
                    end_logit=null_end_logit,
                )
            )
        prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True)

        _NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
            "NbestPrediction", ["text", "start_logit", "end_logit"]
        )

        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            feature = features[pred.feature_index]
            if pred.start_index > 0:  # this is a non-null prediction
                tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
                orig_doc_start = feature.token_to_orig_map[pred.start_index]
                orig_doc_end = feature.token_to_orig_map[pred.end_index]
                orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]

                tok_text = tokenizer.convert_tokens_to_string(tok_tokens)

                # tok_text = " ".join(tok_tokens)
                #
                # # De-tokenize WordPieces that have been split off.
                # tok_text = tok_text.replace(" ##", "")
                # tok_text = tok_text.replace("##", "")

                # Clean whitespace
                tok_text = tok_text.strip()
                tok_text = " ".join(tok_text.split())
                orig_text = " ".join(orig_tokens)

                final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
                if final_text in seen_predictions:
                    continue

                seen_predictions[final_text] = True
            else:
                final_text = ""
                seen_predictions[final_text] = True

            nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit))
        # if we didn't include the empty option in the n-best, include it
        if version_2_with_negative:
            if "" not in seen_predictions:
                nbest.append(_NbestPrediction(text="", start_logit=null_start_logit, end_logit=null_end_logit))

            # In very rare edge cases we could only have single null prediction.
            # So we just create a nonce prediction in this case to avoid failure.
            if len(nbest) == 1:
                nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))

        # In very rare edge cases we could have no valid predictions. So we
        # just create a nonce prediction in this case to avoid failure.
        if not nbest:
            nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))

        if len(nbest) < 1:
            raise ValueError("No valid predictions")

        total_scores = []
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append(entry.start_logit + entry.end_logit)
            if not best_non_null_entry:
                if entry.text:
                    best_non_null_entry = entry

        probs = _compute_softmax(total_scores)

        nbest_json = []
        for i, entry in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["probability"] = probs[i]
            output["start_logit"] = entry.start_logit
            output["end_logit"] = entry.end_logit
            nbest_json.append(output)

        if len(nbest_json) < 1:
            raise ValueError("No valid predictions")

        if not version_2_with_negative:
            all_predictions[example.qas_id] = nbest_json[0]["text"]
        else:
            # predict "" iff the null score - the score of best non-null > threshold
            score_diff = score_null - best_non_null_entry.start_logit - (best_non_null_entry.end_logit)
            scores_diff_json[example.qas_id] = score_diff
            if score_diff > null_score_diff_threshold:
                all_predictions[example.qas_id] = ""
            else:
                all_predictions[example.qas_id] = best_non_null_entry.text
        all_nbest_json[example.qas_id] = nbest_json

    if output_prediction_file:
        with open(output_prediction_file, "w") as writer:
            writer.write(json.dumps(all_predictions, indent=4) + "\n")

    if output_nbest_file:
        with open(output_nbest_file, "w") as writer:
            writer.write(json.dumps(all_nbest_json, indent=4) + "\n")

    if output_null_log_odds_file and version_2_with_negative:
        with open(output_null_log_odds_file, "w") as writer:
            writer.write(json.dumps(scores_diff_json, indent=4) + "\n")

    return all_predictions
588
+
589
+
590
def compute_predictions_log_probs(
    all_examples,
    all_features,
    all_results,
    n_best_size,
    max_answer_length,
    output_prediction_file,
    output_nbest_file,
    output_null_log_odds_file,
    start_n_top,
    end_n_top,
    version_2_with_negative,
    tokenizer,
    verbose_logging,
):
    """
    XLNet write prediction logic (more complex than Bert's). Write final predictions to the json file and log-odds of
    null if needed.

    Unlike the BERT variant, results carry top-k start/end indices and log
    probabilities (``start_top_index``/``end_top_index``, flattened as
    ``i * end_n_top + j``) plus a per-feature ``cls_logits`` null score, and the
    best non-null answer is always emitted (thresholding is left to the eval
    script).

    Requires utils_squad_evaluate.py
    """
    _PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
        "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_log_prob", "end_log_prob"]
    )

    _NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
        "NbestPrediction", ["text", "start_log_prob", "end_log_prob"]
    )

    logger.info(f"Writing predictions to: {output_prediction_file}")

    # Group features by the example they were sliced from.
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)

    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result

    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict()

    for example_index, example in enumerate(all_examples):
        features = example_index_to_features[example_index]

        prelim_predictions = []
        # keep track of the minimum score of null start+end of position 0
        score_null = 1000000  # large and positive

        for feature_index, feature in enumerate(features):
            result = unique_id_to_result[feature.unique_id]

            cur_null_score = result.cls_logits

            # if we could have irrelevant answers, get the min score of irrelevant
            score_null = min(score_null, cur_null_score)

            for i in range(start_n_top):
                for j in range(end_n_top):
                    start_log_prob = result.start_logits[i]
                    start_index = result.start_top_index[i]

                    # end candidates are stored flattened per start candidate
                    j_index = i * end_n_top + j

                    end_log_prob = result.end_logits[j_index]
                    end_index = result.end_top_index[j_index]

                    # We could hypothetically create invalid predictions, e.g., predict
                    # that the start of the span is in the question. We throw out all
                    # invalid predictions.
                    if start_index >= feature.paragraph_len - 1:
                        continue
                    if end_index >= feature.paragraph_len - 1:
                        continue

                    if not feature.token_is_max_context.get(start_index, False):
                        continue
                    if end_index < start_index:
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue

                    prelim_predictions.append(
                        _PrelimPrediction(
                            feature_index=feature_index,
                            start_index=start_index,
                            end_index=end_index,
                            start_log_prob=start_log_prob,
                            end_log_prob=end_log_prob,
                        )
                    )

        prelim_predictions = sorted(
            prelim_predictions, key=lambda x: (x.start_log_prob + x.end_log_prob), reverse=True
        )

        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            feature = features[pred.feature_index]

            # XLNet un-tokenizer
            # Let's keep it simple for now and see if we need all this later.
            #
            # tok_start_to_orig_index = feature.tok_start_to_orig_index
            # tok_end_to_orig_index = feature.tok_end_to_orig_index
            # start_orig_pos = tok_start_to_orig_index[pred.start_index]
            # end_orig_pos = tok_end_to_orig_index[pred.end_index]
            # paragraph_text = example.paragraph_text
            # final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()

            # Previously used Bert untokenizer
            tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
            orig_doc_start = feature.token_to_orig_map[pred.start_index]
            orig_doc_end = feature.token_to_orig_map[pred.end_index]
            orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]
            tok_text = tokenizer.convert_tokens_to_string(tok_tokens)

            # Clean whitespace
            tok_text = tok_text.strip()
            tok_text = " ".join(tok_text.split())
            orig_text = " ".join(orig_tokens)

            # XLM tokenizers expose do_lowercase_and_remove_accent instead of
            # do_lower_case; fall back accordingly.
            if hasattr(tokenizer, "do_lower_case"):
                do_lower_case = tokenizer.do_lower_case
            else:
                do_lower_case = tokenizer.do_lowercase_and_remove_accent

            final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)

            if final_text in seen_predictions:
                continue

            seen_predictions[final_text] = True

            nbest.append(
                _NbestPrediction(text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob)
            )

        # In very rare edge cases we could have no valid predictions. So we
        # just create a nonce prediction in this case to avoid failure.
        if not nbest:
            nbest.append(_NbestPrediction(text="", start_log_prob=-1e6, end_log_prob=-1e6))

        total_scores = []
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append(entry.start_log_prob + entry.end_log_prob)
            if not best_non_null_entry:
                best_non_null_entry = entry

        probs = _compute_softmax(total_scores)

        nbest_json = []
        for i, entry in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["probability"] = probs[i]
            output["start_log_prob"] = entry.start_log_prob
            output["end_log_prob"] = entry.end_log_prob
            nbest_json.append(output)

        if len(nbest_json) < 1:
            raise ValueError("No valid predictions")
        if best_non_null_entry is None:
            raise ValueError("No valid predictions")

        score_diff = score_null
        scores_diff_json[example.qas_id] = score_diff
        # note(zhiliny): always predict best_non_null_entry
        # and the evaluation script will search for the best threshold
        all_predictions[example.qas_id] = best_non_null_entry.text

        all_nbest_json[example.qas_id] = nbest_json

    with open(output_prediction_file, "w") as writer:
        writer.write(json.dumps(all_predictions, indent=4) + "\n")

    with open(output_nbest_file, "w") as writer:
        writer.write(json.dumps(all_nbest_json, indent=4) + "\n")

    if version_2_with_negative:
        with open(output_null_log_odds_file, "w") as writer:
            writer.write(json.dumps(scores_diff_json, indent=4) + "\n")

    return all_predictions
vllm/lib/python3.10/site-packages/transformers/data/processors/__init__.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
16
+ from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
17
+ from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
18
+ from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
vllm/lib/python3.10/site-packages/transformers/data/processors/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (733 Bytes). View file
 
vllm/lib/python3.10/site-packages/transformers/data/processors/__pycache__/glue.cpython-310.pyc ADDED
Binary file (17.7 kB). View file
 
vllm/lib/python3.10/site-packages/transformers/data/processors/__pycache__/squad.cpython-310.pyc ADDED
Binary file (20 kB). View file
 
vllm/lib/python3.10/site-packages/transformers/data/processors/__pycache__/utils.cpython-310.pyc ADDED
Binary file (12.1 kB). View file
 
vllm/lib/python3.10/site-packages/transformers/data/processors/__pycache__/xnli.cpython-310.pyc ADDED
Binary file (2.51 kB). View file
 
vllm/lib/python3.10/site-packages/transformers/data/processors/glue.py ADDED
@@ -0,0 +1,643 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """GLUE processors and helpers"""
17
+
18
+ import os
19
+ import warnings
20
+ from dataclasses import asdict
21
+ from enum import Enum
22
+ from typing import List, Optional, Union
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer
25
+ from ...utils import is_tf_available, logging
26
+ from .utils import DataProcessor, InputExample, InputFeatures
27
+
28
+
29
+ if is_tf_available():
30
+ import tensorflow as tf
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+ DEPRECATION_WARNING = (
35
+ "This {0} will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
36
+ "library. You can have a look at this example script for pointers: "
37
+ "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
38
+ )
39
+
40
+
41
def glue_convert_examples_to_features(
    examples: Union[List[InputExample], "tf.data.Dataset"],
    tokenizer: PreTrainedTokenizer,
    max_length: Optional[int] = None,
    task=None,
    label_list=None,
    output_mode=None,
):
    """
    Loads a data file into a list of `InputFeatures`

    Args:
        examples: List of `InputExamples` or `tf.data.Dataset` containing the examples.
        tokenizer: Instance of a tokenizer that will tokenize the examples
        max_length: Maximum example length. Defaults to the tokenizer's max_len
        task: GLUE task
        label_list: List of labels. Can be obtained from the processor using the `processor.get_labels()` method
        output_mode: String indicating the output mode. Either `regression` or `classification`

    Returns:
        If the `examples` input is a `tf.data.Dataset`, will return a `tf.data.Dataset` containing the task-specific
        features. If the input is a list of `InputExamples`, will return a list of task-specific `InputFeatures` which
        can be fed to the model.

    """
    warnings.warn(DEPRECATION_WARNING.format("function"), FutureWarning)
    # Dispatch to the TF path only when TF is installed and a tf.data.Dataset was passed.
    is_tf_dataset = is_tf_available() and isinstance(examples, tf.data.Dataset)
    if not is_tf_dataset:
        return _glue_convert_examples_to_features(
            examples, tokenizer, max_length=max_length, task=task, label_list=label_list, output_mode=output_mode
        )
    if task is None:
        raise ValueError("When calling glue_convert_examples_to_features from TF, the task parameter is required.")
    return _tf_glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task)
74
+
75
+
76
if is_tf_available():

    def _tf_glue_convert_examples_to_features(
        examples: tf.data.Dataset,
        tokenizer: PreTrainedTokenizer,
        task=str,
        max_length: Optional[int] = None,
    ) -> tf.data.Dataset:
        """
        Convert a ``tf.data.Dataset`` of tensor dicts into a task-specific
        features dataset, via the list-based converter.

        Returns:
            A `tf.data.Dataset` containing the task-specific features.

        """
        # NOTE(review): the default ``task=str`` binds the built-in type, not a
        # task name — looks unintentional; the only visible caller always
        # passes ``task=task`` explicitly. Confirm before relying on the default.
        processor = glue_processors[task]()
        # Materialize tensor dicts into InputExamples, remapping TFDS labels.
        examples = [processor.tfds_map(processor.get_example_from_tensor_dict(example)) for example in examples]
        features = glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task)
        # STS-B is a regression task; every other GLUE task has integer labels.
        label_type = tf.float32 if task == "sts-b" else tf.int64

        def gen():
            # Yield (inputs, label) pairs, dropping absent (None) fields.
            for ex in features:
                d = {k: v for k, v in asdict(ex).items() if v is not None}
                label = d.pop("label")
                yield (d, label)

        input_names = tokenizer.model_input_names

        return tf.data.Dataset.from_generator(
            gen,
            ({k: tf.int32 for k in input_names}, label_type),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
107
+
108
+
109
def _glue_convert_examples_to_features(
    examples: List[InputExample],
    tokenizer: PreTrainedTokenizer,
    max_length: Optional[int] = None,
    task=None,
    label_list=None,
    output_mode=None,
):
    """Framework-agnostic path: tokenize a list of ``InputExample``s and pair
    each encoding with its mapped label as an ``InputFeatures``."""
    if max_length is None:
        max_length = tokenizer.model_max_length

    # Fill in label_list / output_mode from the task registry when a task is given.
    if task is not None:
        processor = glue_processors[task]()
        if label_list is None:
            label_list = processor.get_labels()
            logger.info(f"Using label list {label_list} for task {task}")
        if output_mode is None:
            output_mode = glue_output_modes[task]
            logger.info(f"Using output mode {output_mode} for task {task}")

    label_map = {label: i for i, label in enumerate(label_list)}

    def _example_label(example: InputExample) -> Union[int, float, None]:
        # Unlabeled (e.g. test-set) examples keep a None label.
        if example.label is None:
            return None
        if output_mode == "classification":
            return label_map[example.label]
        if output_mode == "regression":
            return float(example.label)
        raise KeyError(output_mode)

    labels = [_example_label(example) for example in examples]

    batch_encoding = tokenizer(
        [(example.text_a, example.text_b) for example in examples],
        max_length=max_length,
        padding="max_length",
        truncation=True,
    )

    features = [
        InputFeatures(**{key: batch_encoding[key][i] for key in batch_encoding}, label=labels[i])
        for i in range(len(examples))
    ]

    # Log the first few converted examples for sanity checking.
    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example.guid}")
        logger.info(f"features: {features[i]}")

    return features
162
+
163
+
164
class OutputMode(Enum):
    """Kinds of GLUE task outputs: discrete class labels, or a real-valued
    regression score (the `sts-b` task, per `glue_output_modes`)."""

    classification = "classification"
    regression = "regression"
167
+
168
+
169
class MrpcProcessor(DataProcessor):
    """Processor for the MRPC data set (GLUE version)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            guid=tensor_dict["idx"].numpy(),
            text_a=tensor_dict["sentence1"].numpy().decode("utf-8"),
            text_b=tensor_dict["sentence2"].numpy().decode("utf-8"),
            label=str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info(f"LOOKING AT {os.path.join(data_dir, 'train.tsv')}")
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Builds `InputExample`s from TSV rows, skipping the header row."""
        is_test = set_type == "test"
        return [
            InputExample(
                guid=f"{set_type}-{idx}",
                text_a=row[3],
                text_b=row[4],
                label=None if is_test else row[0],
            )
            for idx, row in enumerate(lines)
            if idx != 0
        ]
214
+
215
+
216
class MnliProcessor(DataProcessor):
    """Processor for the MultiNLI data set (GLUE version)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            guid=tensor_dict["idx"].numpy(),
            text_a=tensor_dict["premise"].numpy().decode("utf-8"),
            text_b=tensor_dict["hypothesis"].numpy().decode("utf-8"),
            label=str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test_matched")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Builds `InputExample`s from TSV rows, skipping the header row.

        Labels are hidden for any "test*" split (matched or mismatched).
        """
        hide_labels = set_type.startswith("test")
        return [
            InputExample(
                guid=f"{set_type}-{row[0]}",
                text_a=row[8],
                text_b=row[9],
                label=None if hide_labels else row[-1],
            )
            for idx, row in enumerate(lines)
            if idx != 0
        ]
260
+
261
+
262
class MnliMismatchedProcessor(MnliProcessor):
    """Processor for the MultiNLI Mismatched data set (GLUE version).

    Inherits everything from `MnliProcessor` except which dev/test files are read.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)

    def get_dev_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "dev_mismatched.tsv")
        return self._create_examples(self._read_tsv(path), "dev_mismatched")

    def get_test_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "test_mismatched.tsv")
        return self._create_examples(self._read_tsv(path), "test_mismatched")
276
+
277
+
278
class ColaProcessor(DataProcessor):
    """Processor for the CoLA data set (GLUE version)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            guid=tensor_dict["idx"].numpy(),
            text_a=tensor_dict["sentence"].numpy().decode("utf-8"),
            text_b=None,
            label=str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Builds `InputExample`s.

        The test split has a header row and a different column layout than
        train/dev, which are header-less.
        """
        is_test = set_type == "test"
        rows = lines[1:] if is_test else lines
        text_col = 1 if is_test else 3
        return [
            InputExample(
                guid=f"{set_type}-{idx}",
                text_a=row[text_col],
                text_b=None,
                label=None if is_test else row[1],
            )
            for idx, row in enumerate(rows)
        ]
323
+
324
+
325
class Sst2Processor(DataProcessor):
    """Processor for the SST-2 data set (GLUE version)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            guid=tensor_dict["idx"].numpy(),
            text_a=tensor_dict["sentence"].numpy().decode("utf-8"),
            text_b=None,
            label=str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Builds `InputExample`s from TSV rows, skipping the header row.

        The sentence column differs between the test split and train/dev.
        """
        is_test = set_type == "test"
        text_col = 1 if is_test else 0
        return [
            InputExample(
                guid=f"{set_type}-{idx}",
                text_a=row[text_col],
                text_b=None,
                label=None if is_test else row[1],
            )
            for idx, row in enumerate(lines)
            if idx != 0
        ]
369
+
370
+
371
class StsbProcessor(DataProcessor):
    """Processor for the STS-B data set (GLUE version)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            guid=tensor_dict["idx"].numpy(),
            text_a=tensor_dict["sentence1"].numpy().decode("utf-8"),
            text_b=tensor_dict["sentence2"].numpy().decode("utf-8"),
            label=str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

    def get_labels(self):
        """See base class."""
        # Regression task: no discrete label set.
        return [None]

    def _create_examples(self, lines, set_type):
        """Builds `InputExample`s from TSV rows, skipping the header row."""
        is_test = set_type == "test"
        return [
            InputExample(
                guid=f"{set_type}-{row[0]}",
                text_a=row[7],
                text_b=row[8],
                label=None if is_test else row[-1],
            )
            for idx, row in enumerate(lines)
            if idx != 0
        ]
415
+
416
+
417
class QqpProcessor(DataProcessor):
    """Processor for the QQP data set (GLUE version)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            guid=tensor_dict["idx"].numpy(),
            text_a=tensor_dict["question1"].numpy().decode("utf-8"),
            text_b=tensor_dict["question2"].numpy().decode("utf-8"),
            label=str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Builds `InputExample`s from TSV rows, skipping the header row.

        Rows that are too short (missing question/label columns) are dropped.
        """
        is_test = set_type == "test"
        col_a, col_b = (1, 2) if is_test else (3, 4)
        examples = []
        for idx, row in enumerate(lines):
            if idx == 0:
                continue
            guid = f"{set_type}-{row[0]}"
            try:
                text_a = row[col_a]
                text_b = row[col_b]
                label = None if is_test else row[5]
            except IndexError:
                # Malformed row: skip it, matching the original best-effort behavior.
                continue
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
467
+
468
+
469
class QnliProcessor(DataProcessor):
    """Processor for the QNLI data set (GLUE version)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            guid=tensor_dict["idx"].numpy(),
            text_a=tensor_dict["question"].numpy().decode("utf-8"),
            text_b=tensor_dict["sentence"].numpy().decode("utf-8"),
            label=str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

    def get_labels(self):
        """See base class."""
        return ["entailment", "not_entailment"]

    def _create_examples(self, lines, set_type):
        """Builds `InputExample`s from TSV rows, skipping the header row."""
        is_test = set_type == "test"
        return [
            InputExample(
                guid=f"{set_type}-{row[0]}",
                text_a=row[1],
                text_b=row[2],
                label=None if is_test else row[-1],
            )
            for idx, row in enumerate(lines)
            if idx != 0
        ]
513
+
514
+
515
class RteProcessor(DataProcessor):
    """Processor for the RTE data set (GLUE version)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            guid=tensor_dict["idx"].numpy(),
            text_a=tensor_dict["sentence1"].numpy().decode("utf-8"),
            text_b=tensor_dict["sentence2"].numpy().decode("utf-8"),
            label=str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

    def get_labels(self):
        """See base class."""
        return ["entailment", "not_entailment"]

    def _create_examples(self, lines, set_type):
        """Builds `InputExample`s from TSV rows, skipping the header row."""
        is_test = set_type == "test"
        return [
            InputExample(
                guid=f"{set_type}-{row[0]}",
                text_a=row[1],
                text_b=row[2],
                label=None if is_test else row[-1],
            )
            for idx, row in enumerate(lines)
            if idx != 0
        ]
559
+
560
+
561
class WnliProcessor(DataProcessor):
    """Processor for the WNLI data set (GLUE version)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            guid=tensor_dict["idx"].numpy(),
            text_a=tensor_dict["sentence1"].numpy().decode("utf-8"),
            text_b=tensor_dict["sentence2"].numpy().decode("utf-8"),
            label=str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Builds `InputExample`s from TSV rows, skipping the header row."""
        is_test = set_type == "test"
        return [
            InputExample(
                guid=f"{set_type}-{row[0]}",
                text_a=row[1],
                text_b=row[2],
                label=None if is_test else row[-1],
            )
            for idx, row in enumerate(lines)
            if idx != 0
        ]
605
+
606
+
607
# Number of model output labels per GLUE task.
# sts-b is a regression task (see `glue_output_modes`), hence a single output.
glue_tasks_num_labels = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}
618
+
619
# Maps each GLUE task name to its `DataProcessor` subclass
# (looked up by `_glue_convert_examples_to_features`).
glue_processors = {
    "cola": ColaProcessor,
    "mnli": MnliProcessor,
    "mnli-mm": MnliMismatchedProcessor,
    "mrpc": MrpcProcessor,
    "sst-2": Sst2Processor,
    "sts-b": StsbProcessor,
    "qqp": QqpProcessor,
    "qnli": QnliProcessor,
    "rte": RteProcessor,
    "wnli": WnliProcessor,
}
631
+
632
# Maps each GLUE task name to its output mode
# ("classification" or "regression"); see `OutputMode`.
glue_output_modes = {
    "cola": "classification",
    "mnli": "classification",
    "mnli-mm": "classification",
    "mrpc": "classification",
    "sst-2": "classification",
    "sts-b": "regression",
    "qqp": "classification",
    "qnli": "classification",
    "rte": "classification",
    "wnli": "classification",
}
vllm/lib/python3.10/site-packages/transformers/data/processors/squad.py ADDED
@@ -0,0 +1,845 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import json
16
+ import os
17
+ from functools import partial
18
+ from multiprocessing import Pool, cpu_count
19
+
20
+ import numpy as np
21
+ from tqdm import tqdm
22
+
23
+ from ...models.bert.tokenization_bert import whitespace_tokenize
24
+ from ...tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase, TruncationStrategy
25
+ from ...utils import is_tf_available, is_torch_available, logging
26
+ from .utils import DataProcessor
27
+
28
+
29
+ # Store the tokenizers which insert 2 separators tokens
30
+ MULTI_SEP_TOKENS_TOKENIZERS_SET = {"roberta", "camembert", "bart", "mpnet"}
31
+
32
+
33
+ if is_torch_available():
34
+ import torch
35
+ from torch.utils.data import TensorDataset
36
+
37
+ if is_tf_available():
38
+ import tensorflow as tf
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+
43
+ def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):
44
+ """Returns tokenized answer spans that better match the annotated answer."""
45
+ tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
46
+
47
+ for new_start in range(input_start, input_end + 1):
48
+ for new_end in range(input_end, new_start - 1, -1):
49
+ text_span = " ".join(doc_tokens[new_start : (new_end + 1)])
50
+ if text_span == tok_answer_text:
51
+ return (new_start, new_end)
52
+
53
+ return (input_start, input_end)
54
+
55
+
56
+ def _check_is_max_context(doc_spans, cur_span_index, position):
57
+ """Check if this is the 'max context' doc span for the token."""
58
+ best_score = None
59
+ best_span_index = None
60
+ for span_index, doc_span in enumerate(doc_spans):
61
+ end = doc_span.start + doc_span.length - 1
62
+ if position < doc_span.start:
63
+ continue
64
+ if position > end:
65
+ continue
66
+ num_left_context = position - doc_span.start
67
+ num_right_context = end - position
68
+ score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
69
+ if best_score is None or score > best_score:
70
+ best_score = score
71
+ best_span_index = span_index
72
+
73
+ return cur_span_index == best_span_index
74
+
75
+
76
+ def _new_check_is_max_context(doc_spans, cur_span_index, position):
77
+ """Check if this is the 'max context' doc span for the token."""
78
+ # if len(doc_spans) == 1:
79
+ # return True
80
+ best_score = None
81
+ best_span_index = None
82
+ for span_index, doc_span in enumerate(doc_spans):
83
+ end = doc_span["start"] + doc_span["length"] - 1
84
+ if position < doc_span["start"]:
85
+ continue
86
+ if position > end:
87
+ continue
88
+ num_left_context = position - doc_span["start"]
89
+ num_right_context = end - position
90
+ score = min(num_left_context, num_right_context) + 0.01 * doc_span["length"]
91
+ if best_score is None or score > best_score:
92
+ best_score = score
93
+ best_span_index = span_index
94
+
95
+ return cur_span_index == best_span_index
96
+
97
+
98
+ def _is_whitespace(c):
99
+ if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
100
+ return True
101
+ return False
102
+
103
+
104
def squad_convert_example_to_features(
    example, max_seq_length, doc_stride, max_query_length, padding_strategy, is_training
):
    """Convert a single SQuAD example into one or more feature spans.

    Long contexts are split into overlapping windows of at most
    `max_seq_length` tokens, advancing `doc_stride` document tokens per span;
    one `SquadFeatures` is produced per window. Relies on the module-level
    `tokenizer` global installed by `squad_convert_example_to_features_init`
    (this function runs inside multiprocessing Pool workers).

    Returns an empty list when, in training mode, the annotated answer text
    cannot be located in the document tokens.
    """
    features = []
    if is_training and not example.is_impossible:
        # Get start and end position
        start_position = example.start_position
        end_position = example.end_position

        # If the answer cannot be found in the text, then skip this example.
        actual_text = " ".join(example.doc_tokens[start_position : (end_position + 1)])
        cleaned_answer_text = " ".join(whitespace_tokenize(example.answer_text))
        if actual_text.find(cleaned_answer_text) == -1:
            logger.warning(f"Could not find answer: '{actual_text}' vs. '{cleaned_answer_text}'")
            return []

    # Sub-tokenize the document while keeping a bidirectional mapping between
    # original word indices and sub-token indices.
    tok_to_orig_index = []
    orig_to_tok_index = []
    all_doc_tokens = []
    for i, token in enumerate(example.doc_tokens):
        orig_to_tok_index.append(len(all_doc_tokens))
        # Byte-level BPE tokenizers need a leading space to tokenize
        # mid-sentence words consistently.
        if tokenizer.__class__.__name__ in [
            "RobertaTokenizer",
            "LongformerTokenizer",
            "BartTokenizer",
            "RobertaTokenizerFast",
            "LongformerTokenizerFast",
            "BartTokenizerFast",
        ]:
            sub_tokens = tokenizer.tokenize(token, add_prefix_space=True)
        else:
            sub_tokens = tokenizer.tokenize(token)
        for sub_token in sub_tokens:
            tok_to_orig_index.append(i)
            all_doc_tokens.append(sub_token)

    if is_training and not example.is_impossible:
        # Project the word-level answer span onto sub-token indices.
        tok_start_position = orig_to_tok_index[example.start_position]
        if example.end_position < len(example.doc_tokens) - 1:
            tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
        else:
            tok_end_position = len(all_doc_tokens) - 1

        (tok_start_position, tok_end_position) = _improve_answer_span(
            all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.answer_text
        )

    spans = []

    truncated_query = tokenizer.encode(
        example.question_text, add_special_tokens=False, truncation=True, max_length=max_query_length
    )

    # Tokenizers who insert 2 SEP tokens in-between <context> & <question> need to have special handling
    # in the way they compute mask of added tokens.
    tokenizer_type = type(tokenizer).__name__.replace("Tokenizer", "").lower()
    sequence_added_tokens = (
        tokenizer.model_max_length - tokenizer.max_len_single_sentence + 1
        if tokenizer_type in MULTI_SEP_TOKENS_TOKENIZERS_SET
        else tokenizer.model_max_length - tokenizer.max_len_single_sentence
    )
    sequence_pair_added_tokens = tokenizer.model_max_length - tokenizer.max_len_sentences_pair

    # Produce one encoded span per doc_stride window until the whole document
    # is covered (or the encoder stops reporting overflowing tokens).
    span_doc_tokens = all_doc_tokens
    while len(spans) * doc_stride < len(all_doc_tokens):
        # Define the side we want to truncate / pad and the text/pair sorting
        if tokenizer.padding_side == "right":
            texts = truncated_query
            pairs = span_doc_tokens
            truncation = TruncationStrategy.ONLY_SECOND.value
        else:
            texts = span_doc_tokens
            pairs = truncated_query
            truncation = TruncationStrategy.ONLY_FIRST.value

        encoded_dict = tokenizer.encode_plus(  # TODO(thom) update this logic
            texts,
            pairs,
            truncation=truncation,
            padding=padding_strategy,
            max_length=max_seq_length,
            return_overflowing_tokens=True,
            stride=max_seq_length - doc_stride - len(truncated_query) - sequence_pair_added_tokens,
            return_token_type_ids=True,
        )

        paragraph_len = min(
            len(all_doc_tokens) - len(spans) * doc_stride,
            max_seq_length - len(truncated_query) - sequence_pair_added_tokens,
        )

        # Strip padding to recover the real tokens of this span.
        if tokenizer.pad_token_id in encoded_dict["input_ids"]:
            if tokenizer.padding_side == "right":
                non_padded_ids = encoded_dict["input_ids"][: encoded_dict["input_ids"].index(tokenizer.pad_token_id)]
            else:
                last_padding_id_position = (
                    len(encoded_dict["input_ids"]) - 1 - encoded_dict["input_ids"][::-1].index(tokenizer.pad_token_id)
                )
                non_padded_ids = encoded_dict["input_ids"][last_padding_id_position + 1 :]

        else:
            non_padded_ids = encoded_dict["input_ids"]

        tokens = tokenizer.convert_ids_to_tokens(non_padded_ids)

        # Map each context position in this span back to its original word index.
        token_to_orig_map = {}
        for i in range(paragraph_len):
            index = len(truncated_query) + sequence_added_tokens + i if tokenizer.padding_side == "right" else i
            token_to_orig_map[index] = tok_to_orig_index[len(spans) * doc_stride + i]

        encoded_dict["paragraph_len"] = paragraph_len
        encoded_dict["tokens"] = tokens
        encoded_dict["token_to_orig_map"] = token_to_orig_map
        encoded_dict["truncated_query_with_special_tokens_length"] = len(truncated_query) + sequence_added_tokens
        encoded_dict["token_is_max_context"] = {}
        encoded_dict["start"] = len(spans) * doc_stride
        encoded_dict["length"] = paragraph_len

        spans.append(encoded_dict)

        if "overflowing_tokens" not in encoded_dict or (
            "overflowing_tokens" in encoded_dict and len(encoded_dict["overflowing_tokens"]) == 0
        ):
            break
        span_doc_tokens = encoded_dict["overflowing_tokens"]

    # Mark, for every context token, whether this span gives it maximal context.
    for doc_span_index in range(len(spans)):
        for j in range(spans[doc_span_index]["paragraph_len"]):
            is_max_context = _new_check_is_max_context(spans, doc_span_index, doc_span_index * doc_stride + j)
            index = (
                j
                if tokenizer.padding_side == "left"
                else spans[doc_span_index]["truncated_query_with_special_tokens_length"] + j
            )
            spans[doc_span_index]["token_is_max_context"][index] = is_max_context

    for span in spans:
        # Identify the position of the CLS token
        cls_index = span["input_ids"].index(tokenizer.cls_token_id)

        # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer)
        # Original TF implementation also keep the classification token (set to 0)
        p_mask = np.ones_like(span["token_type_ids"])
        if tokenizer.padding_side == "right":
            p_mask[len(truncated_query) + sequence_added_tokens :] = 0
        else:
            p_mask[-len(span["tokens"]) : -(len(truncated_query) + sequence_added_tokens)] = 0

        # NOTE(review): span["input_ids"] is a Python list, so the `==`
        # comparison below is a single bool, not an element-wise mask; with
        # atleast_1d this selects no pad positions. Pads appear to be masked
        # via the special-tokens mask instead — confirm intended.
        pad_token_indices = np.where(np.atleast_1d(span["input_ids"] == tokenizer.pad_token_id))
        special_token_indices = np.asarray(
            tokenizer.get_special_tokens_mask(span["input_ids"], already_has_special_tokens=True)
        ).nonzero()

        p_mask[pad_token_indices] = 1
        p_mask[special_token_indices] = 1

        # Set the cls index to 0: the CLS index can be used for impossible answers
        p_mask[cls_index] = 0

        span_is_impossible = example.is_impossible
        start_position = 0
        end_position = 0
        if is_training and not span_is_impossible:
            # For training, if our document chunk does not contain an annotation
            # we throw it out, since there is nothing to predict.
            doc_start = span["start"]
            doc_end = span["start"] + span["length"] - 1
            out_of_span = False

            if not (tok_start_position >= doc_start and tok_end_position <= doc_end):
                out_of_span = True

            if out_of_span:
                # Answer lies outside this window: point both positions at CLS.
                start_position = cls_index
                end_position = cls_index
                span_is_impossible = True
            else:
                if tokenizer.padding_side == "left":
                    doc_offset = 0
                else:
                    doc_offset = len(truncated_query) + sequence_added_tokens

                start_position = tok_start_position - doc_start + doc_offset
                end_position = tok_end_position - doc_start + doc_offset

        features.append(
            SquadFeatures(
                span["input_ids"],
                span["attention_mask"],
                span["token_type_ids"],
                cls_index,
                p_mask.tolist(),
                example_index=0,  # Can not set unique_id and example_index here. They will be set after multiple processing.
                unique_id=0,
                paragraph_len=span["paragraph_len"],
                token_is_max_context=span["token_is_max_context"],
                tokens=span["tokens"],
                token_to_orig_map=span["token_to_orig_map"],
                start_position=start_position,
                end_position=end_position,
                is_impossible=span_is_impossible,
                qas_id=example.qas_id,
            )
        )
    return features
309
+
310
+
311
def squad_convert_example_to_features_init(tokenizer_for_convert: PreTrainedTokenizerBase):
    """Install `tokenizer_for_convert` as the module-level `tokenizer` global.

    Used as the `initializer` of the multiprocessing Pool in
    `squad_convert_examples_to_features`, so that worker processes can reach
    the tokenizer from `squad_convert_example_to_features` without it being
    pickled with every task.
    """
    global tokenizer
    tokenizer = tokenizer_for_convert
314
+
315
+
316
def squad_convert_examples_to_features(
    examples,
    tokenizer,
    max_seq_length,
    doc_stride,
    max_query_length,
    is_training,
    padding_strategy="max_length",
    return_dataset=False,
    threads=1,
    tqdm_enabled=True,
):
    """
    Converts a list of examples into a list of features that can be directly given as input to a model. It is
    model-dependant and takes advantage of many of the tokenizer's features to create the model's inputs.

    Args:
        examples: list of [`~data.processors.squad.SquadExample`]
        tokenizer: an instance of a child of [`PreTrainedTokenizer`]
        max_seq_length: The maximum sequence length of the inputs.
        doc_stride: The stride used when the context is too large and is split across several features.
        max_query_length: The maximum length of the query.
        is_training: whether to create features for model evaluation or model training.
        padding_strategy: Default to "max_length". Which padding strategy to use
        return_dataset: Default False. Either 'pt' or 'tf'.
            if 'pt': returns a torch.data.TensorDataset, if 'tf': returns a tf.data.Dataset
        threads: multiple processing threads.
        tqdm_enabled: whether to display the tqdm progress bars.

    Returns:
        list of [`~data.processors.squad.SquadFeatures`]

    Example:

    ```python
    processor = SquadV2Processor()
    examples = processor.get_dev_examples(data_dir)

    features = squad_convert_examples_to_features(
        examples=examples,
        tokenizer=tokenizer,
        max_seq_length=args.max_seq_length,
        doc_stride=args.doc_stride,
        max_query_length=args.max_query_length,
        is_training=not evaluate,
    )
    ```"""
    # Defining helper methods
    features = []

    threads = min(threads, cpu_count())
    # Workers receive the tokenizer once via the initializer (it is stored in a
    # module-level global by squad_convert_example_to_features_init) instead of
    # being pickled with every task.
    with Pool(threads, initializer=squad_convert_example_to_features_init, initargs=(tokenizer,)) as p:
        annotate_ = partial(
            squad_convert_example_to_features,
            max_seq_length=max_seq_length,
            doc_stride=doc_stride,
            max_query_length=max_query_length,
            padding_strategy=padding_strategy,
            is_training=is_training,
        )
        # One example can span several features; `features` is a list of lists here.
        features = list(
            tqdm(
                p.imap(annotate_, examples, chunksize=32),
                total=len(examples),
                desc="convert squad examples to features",
                disable=not tqdm_enabled,
            )
        )

    # Flatten the per-example lists and assign example_index/unique_id, which
    # could not be set inside the worker processes.
    new_features = []
    unique_id = 1000000000
    example_index = 0
    for example_features in tqdm(
        features, total=len(features), desc="add example index and unique id", disable=not tqdm_enabled
    ):
        if not example_features:
            continue
        for example_feature in example_features:
            example_feature.example_index = example_index
            example_feature.unique_id = unique_id
            new_features.append(example_feature)
            unique_id += 1
        example_index += 1
    features = new_features
    del new_features
    if return_dataset == "pt":
        if not is_torch_available():
            raise RuntimeError("PyTorch must be installed to return a PyTorch dataset.")

        # Convert to Tensors and build dataset
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
        all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
        all_is_impossible = torch.tensor([f.is_impossible for f in features], dtype=torch.float)

        if not is_training:
            # Evaluation needs a feature index to map predictions back to features.
            all_feature_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
            dataset = TensorDataset(
                all_input_ids, all_attention_masks, all_token_type_ids, all_feature_index, all_cls_index, all_p_mask
            )
        else:
            all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
            all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
            dataset = TensorDataset(
                all_input_ids,
                all_attention_masks,
                all_token_type_ids,
                all_start_positions,
                all_end_positions,
                all_cls_index,
                all_p_mask,
                all_is_impossible,
            )

        return features, dataset
    elif return_dataset == "tf":
        if not is_tf_available():
            raise RuntimeError("TensorFlow must be installed to return a TensorFlow dataset.")

        def gen():
            # Yields (inputs, targets) pairs; the inputs dict shape depends on
            # whether the tokenizer produced token_type_ids for this model.
            for i, ex in enumerate(features):
                if ex.token_type_ids is None:
                    yield (
                        {
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "feature_index": i,
                            "qas_id": ex.qas_id,
                        },
                        {
                            "start_positions": ex.start_position,
                            "end_positions": ex.end_position,
                            "cls_index": ex.cls_index,
                            "p_mask": ex.p_mask,
                            "is_impossible": ex.is_impossible,
                        },
                    )
                else:
                    yield (
                        {
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                            "feature_index": i,
                            "qas_id": ex.qas_id,
                        },
                        {
                            "start_positions": ex.start_position,
                            "end_positions": ex.end_position,
                            "cls_index": ex.cls_index,
                            "p_mask": ex.p_mask,
                            "is_impossible": ex.is_impossible,
                        },
                    )

        # Why have we split the batch into a tuple? PyTorch just has a list of tensors.
        if "token_type_ids" in tokenizer.model_input_names:
            train_types = (
                {
                    "input_ids": tf.int32,
                    "attention_mask": tf.int32,
                    "token_type_ids": tf.int32,
                    "feature_index": tf.int64,
                    "qas_id": tf.string,
                },
                {
                    "start_positions": tf.int64,
                    "end_positions": tf.int64,
                    "cls_index": tf.int64,
                    "p_mask": tf.int32,
                    "is_impossible": tf.int32,
                },
            )

            train_shapes = (
                {
                    "input_ids": tf.TensorShape([None]),
                    "attention_mask": tf.TensorShape([None]),
                    "token_type_ids": tf.TensorShape([None]),
                    "feature_index": tf.TensorShape([]),
                    "qas_id": tf.TensorShape([]),
                },
                {
                    "start_positions": tf.TensorShape([]),
                    "end_positions": tf.TensorShape([]),
                    "cls_index": tf.TensorShape([]),
                    "p_mask": tf.TensorShape([None]),
                    "is_impossible": tf.TensorShape([]),
                },
            )
        else:
            train_types = (
                {"input_ids": tf.int32, "attention_mask": tf.int32, "feature_index": tf.int64, "qas_id": tf.string},
                {
                    "start_positions": tf.int64,
                    "end_positions": tf.int64,
                    "cls_index": tf.int64,
                    "p_mask": tf.int32,
                    "is_impossible": tf.int32,
                },
            )

            train_shapes = (
                {
                    "input_ids": tf.TensorShape([None]),
                    "attention_mask": tf.TensorShape([None]),
                    "feature_index": tf.TensorShape([]),
                    "qas_id": tf.TensorShape([]),
                },
                {
                    "start_positions": tf.TensorShape([]),
                    "end_positions": tf.TensorShape([]),
                    "cls_index": tf.TensorShape([]),
                    "p_mask": tf.TensorShape([None]),
                    "is_impossible": tf.TensorShape([]),
                },
            )

        return tf.data.Dataset.from_generator(gen, train_types, train_shapes)
    else:
        # Default: plain list of SquadFeatures.
        return features
539
+
540
+
541
class SquadProcessor(DataProcessor):
    """
    Processor for the SQuAD data set. overridden by SquadV1Processor and SquadV2Processor, used by the version 1.1 and
    version 2.0 of SQuAD, respectively.
    """

    # Set by the concrete subclasses (SquadV1Processor / SquadV2Processor).
    train_file = None
    dev_file = None

    def _get_example_from_tensor_dict(self, tensor_dict, evaluate=False):
        """Build a single SquadExample from a tfds tensor dict, decoding byte tensors to str."""
        if not evaluate:
            # Training keeps only the first gold answer; `answers` stays empty.
            answer = tensor_dict["answers"]["text"][0].numpy().decode("utf-8")
            answer_start = tensor_dict["answers"]["answer_start"][0].numpy()
            answers = []
        else:
            # Evaluation keeps every gold answer for metric computation.
            answers = [
                {"answer_start": start.numpy(), "text": text.numpy().decode("utf-8")}
                for start, text in zip(tensor_dict["answers"]["answer_start"], tensor_dict["answers"]["text"])
            ]

            answer = None
            answer_start = None

        return SquadExample(
            qas_id=tensor_dict["id"].numpy().decode("utf-8"),
            question_text=tensor_dict["question"].numpy().decode("utf-8"),
            context_text=tensor_dict["context"].numpy().decode("utf-8"),
            answer_text=answer,
            start_position_character=answer_start,
            title=tensor_dict["title"].numpy().decode("utf-8"),
            answers=answers,
        )

    def get_examples_from_dataset(self, dataset, evaluate=False):
        """
        Creates a list of [`~data.processors.squad.SquadExample`] using a TFDS dataset.

        Args:
            dataset: The tfds dataset loaded from *tensorflow_datasets.load("squad")*
            evaluate: Boolean specifying if in evaluation mode or in training mode

        Returns:
            List of SquadExample

        Examples:

        ```python
        >>> import tensorflow_datasets as tfds

        >>> dataset = tfds.load("squad")

        >>> training_examples = get_examples_from_dataset(dataset, evaluate=False)
        >>> evaluation_examples = get_examples_from_dataset(dataset, evaluate=True)
        ```"""

        if evaluate:
            dataset = dataset["validation"]
        else:
            dataset = dataset["train"]

        examples = []
        for tensor_dict in tqdm(dataset):
            examples.append(self._get_example_from_tensor_dict(tensor_dict, evaluate=evaluate))

        return examples

    def _load_examples(self, data_dir, filename, default_file, set_type):
        """Shared loader for the train/dev entry points: read the SQuAD JSON and build examples.

        Args:
            data_dir: Directory containing the data file (``None`` is treated as "").
            filename: Optional override for the file name.
            default_file: The class-level default file name (``train_file`` or ``dev_file``).
            set_type: "train" or "dev", forwarded to `_create_examples`.

        Raises:
            ValueError: if called on the abstract base class (``default_file is None``).
        """
        if data_dir is None:
            data_dir = ""

        if default_file is None:
            raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor")

        with open(
            os.path.join(data_dir, default_file if filename is None else filename), "r", encoding="utf-8"
        ) as reader:
            input_data = json.load(reader)["data"]
        return self._create_examples(input_data, set_type)

    def get_train_examples(self, data_dir, filename=None):
        """
        Returns the training examples from the data directory.

        Args:
            data_dir: Directory containing the data files used for training and evaluating.
            filename: None by default, specify this if the training file has a different name than the original one
                which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively.

        """
        return self._load_examples(data_dir, filename, self.train_file, "train")

    def get_dev_examples(self, data_dir, filename=None):
        """
        Returns the evaluation example from the data directory.

        Args:
            data_dir: Directory containing the data files used for training and evaluating.
            filename: None by default, specify this if the evaluation file has a different name than the original one
                which is `dev-v1.1.json` and `dev-v2.0.json` for squad versions 1.1 and 2.0 respectively.
        """
        return self._load_examples(data_dir, filename, self.dev_file, "dev")

    def _create_examples(self, input_data, set_type):
        """Turn the parsed SQuAD JSON ("data" list) into a flat list of SquadExample."""
        is_training = set_type == "train"
        examples = []
        for entry in tqdm(input_data):
            title = entry["title"]
            for paragraph in entry["paragraphs"]:
                context_text = paragraph["context"]
                for qa in paragraph["qas"]:
                    qas_id = qa["id"]
                    question_text = qa["question"]
                    start_position_character = None
                    answer_text = None
                    answers = []

                    # SQuAD v1 files have no "is_impossible" key; default to False.
                    is_impossible = qa.get("is_impossible", False)
                    if not is_impossible:
                        if is_training:
                            # Training uses only the first gold answer.
                            answer = qa["answers"][0]
                            answer_text = answer["text"]
                            start_position_character = answer["answer_start"]
                        else:
                            answers = qa["answers"]

                    example = SquadExample(
                        qas_id=qas_id,
                        question_text=question_text,
                        context_text=context_text,
                        answer_text=answer_text,
                        start_position_character=start_position_character,
                        title=title,
                        is_impossible=is_impossible,
                        answers=answers,
                    )
                    examples.append(example)
        return examples
685
+
686
+
687
class SquadV1Processor(SquadProcessor):
    """Processor for SQuAD version 1.1 data files (`train-v1.1.json` / `dev-v1.1.json`)."""

    train_file = "train-v1.1.json"
    dev_file = "dev-v1.1.json"
690
+
691
+
692
class SquadV2Processor(SquadProcessor):
    """Processor for SQuAD version 2.0 data files (`train-v2.0.json` / `dev-v2.0.json`)."""

    train_file = "train-v2.0.json"
    dev_file = "dev-v2.0.json"
695
+
696
+
697
class SquadExample:
    """
    A single training/test example for the Squad dataset, as loaded from disk.

    Args:
        qas_id: The example's unique identifier
        question_text: The question string
        context_text: The context string
        answer_text: The answer string
        start_position_character: The character position of the start of the answer
        title: The title of the example
        answers: None by default (stored as an empty list), this is used during evaluation. Holds answers as well as
            their start positions.
        is_impossible: False by default, set to True if the example has no possible answer.
    """

    def __init__(
        self,
        qas_id,
        question_text,
        context_text,
        answer_text,
        start_position_character,
        title,
        answers=None,
        is_impossible=False,
    ):
        self.qas_id = qas_id
        self.question_text = question_text
        self.context_text = context_text
        self.answer_text = answer_text
        self.title = title
        self.is_impossible = is_impossible
        # Fix: the original default was the mutable `answers=[]`, which is shared by
        # every instance built with the default — mutating one example's answers
        # would leak into all of them. Use a None sentinel and build a fresh list.
        self.answers = [] if answers is None else answers

        self.start_position, self.end_position = 0, 0

        doc_tokens = []
        char_to_word_offset = []
        prev_is_whitespace = True

        # Split on whitespace so that different tokens may be attributed to their original position.
        for c in self.context_text:
            if _is_whitespace(c):
                prev_is_whitespace = True
            else:
                if prev_is_whitespace:
                    doc_tokens.append(c)
                else:
                    doc_tokens[-1] += c
                prev_is_whitespace = False
            # Maps every character of the context to its word index.
            char_to_word_offset.append(len(doc_tokens) - 1)

        self.doc_tokens = doc_tokens
        self.char_to_word_offset = char_to_word_offset

        # Word-level start/end positions are only computed when an answer span is
        # provided (i.e. for answerable examples); otherwise they stay (0, 0).
        if start_position_character is not None and not is_impossible:
            self.start_position = char_to_word_offset[start_position_character]
            self.end_position = char_to_word_offset[
                min(start_position_character + len(answer_text) - 1, len(char_to_word_offset) - 1)
            ]
758
+
759
+
760
class SquadFeatures:
    """
    Single squad example features to be fed to a model. Those features are model-specific and can be crafted from
    [`~data.processors.squad.SquadExample`] using the
    :method:*~transformers.data.processors.squad.squad_convert_examples_to_features* method.

    Args:
        input_ids: Indices of input sequence tokens in the vocabulary.
        attention_mask: Mask to avoid performing attention on padding token indices.
        token_type_ids: Segment token indices to indicate first and second portions of the inputs.
        cls_index: the index of the CLS token.
        p_mask: Mask identifying tokens that can be answers vs. tokens that cannot.
            Mask with 1 for tokens than cannot be in the answer and 0 for token that can be in an answer
        example_index: the index of the example
        unique_id: The unique Feature identifier
        paragraph_len: The length of the context
        token_is_max_context:
            List of booleans identifying which tokens have their maximum context in this feature object. If a token
            does not have their maximum context in this feature object, it means that another feature object has more
            information related to that token and should be prioritized over this feature for that token.
        tokens: list of tokens corresponding to the input ids
        token_to_orig_map: mapping between the tokens and the original text, needed in order to identify the answer.
        start_position: start of the answer token index
        end_position: end of the answer token index
        is_impossible: whether the example has no possible answer.
        qas_id: the question/answer identifier of the originating example.
        encoding: optionally store the BatchEncoding with the fast-tokenizer alignment methods.
    """

    def __init__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        cls_index,
        p_mask,
        example_index,
        unique_id,
        paragraph_len,
        token_is_max_context,
        tokens,
        token_to_orig_map,
        start_position,
        end_position,
        is_impossible,
        qas_id: str = None,
        encoding: BatchEncoding = None,
    ):
        # Model inputs.
        self.input_ids, self.attention_mask = input_ids, attention_mask
        self.token_type_ids = token_type_ids
        self.cls_index, self.p_mask = cls_index, p_mask

        # Bookkeeping used to map predictions back onto the original text.
        self.example_index, self.unique_id = example_index, unique_id
        self.paragraph_len = paragraph_len
        self.token_is_max_context = token_is_max_context
        self.tokens, self.token_to_orig_map = tokens, token_to_orig_map

        # Supervision targets and example identity.
        self.start_position, self.end_position = start_position, end_position
        self.is_impossible = is_impossible
        self.qas_id = qas_id

        # Optional fast-tokenizer BatchEncoding, kept for its alignment helpers.
        self.encoding = encoding
825
+
826
+
827
class SquadResult:
    """
    Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset.

    Args:
        unique_id: The unique identifier corresponding to that example.
        start_logits: The logits corresponding to the start of the answer
        end_logits: The logits corresponding to the end of the answer
        start_top_index: optional top start-index predictions; when truthy, the three
            optional attributes below are stored on the instance.
        end_top_index: optional top end-index predictions.
        cls_logits: optional CLS logits.
    """

    def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None):
        self.unique_id = unique_id
        self.start_logits = start_logits
        self.end_logits = end_logits

        # NOTE: this mirrors the original truthiness check (not `is not None`): a
        # falsy start_top_index leaves the three optional attributes entirely unset.
        if not start_top_index:
            return
        self.start_top_index = start_top_index
        self.end_top_index = end_top_index
        self.cls_logits = cls_logits
vllm/lib/python3.10/site-packages/transformers/data/processors/utils.py ADDED
@@ -0,0 +1,349 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import csv
18
+ import dataclasses
19
+ import json
20
+ from dataclasses import dataclass
21
+ from typing import List, Optional, Union
22
+
23
+ from ...utils import is_tf_available, is_torch_available, logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
@dataclass
class InputExample:
    """
    A single training/test example for simple sequence classification.

    Args:
        guid: Unique id for the example.
        text_a: string. The untokenized text of the first sequence. For single
            sequence tasks, only this sequence must be specified.
        text_b: (Optional) string. The untokenized text of the second sequence.
            Only must be specified for sequence pair tasks.
        label: (Optional) string. The label of the example. This should be
            specified for train and dev examples, but not for test examples.
    """

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        # Pretty-printed (indent=2) with a trailing newline, matching the historical format.
        payload = dataclasses.asdict(self)
        return json.dumps(payload, indent=2) + "\n"
52
+
53
+
54
@dataclass(frozen=True)
class InputFeatures:
    """
    A single set of features of data. Property names are the same names as the corresponding inputs to a model.

    Args:
        input_ids: Indices of input sequence tokens in the vocabulary.
        attention_mask: Mask to avoid performing attention on padding token indices.
            Mask values selected in `[0, 1]`: Usually `1` for tokens that are NOT MASKED, `0` for MASKED (padded)
            tokens.
        token_type_ids: (Optional) Segment token indices to indicate first and second
            portions of the inputs. Only some models use them.
        label: (Optional) Label corresponding to the input. Int for classification problems,
            float for regression problems.
    """

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        # Compact single-line JSON terminated by a newline.
        feature_dict = dataclasses.asdict(self)
        return json.dumps(feature_dict) + "\n"
78
+
79
+
80
class DataProcessor:
    """Base class for data converters for sequence classification data sets."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """
        Gets an example from a dict with tensorflow tensors.

        Args:
            tensor_dict: Keys and values should match the corresponding Glue
                tensorflow_dataset examples.
        """
        raise NotImplementedError()

    def get_train_examples(self, data_dir):
        """Gets a collection of [`InputExample`] for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of [`InputExample`] for the dev set."""
        raise NotImplementedError()

    def get_test_examples(self, data_dir):
        """Gets a collection of [`InputExample`] for the test set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    def tfds_map(self, example):
        """
        Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. This method converts
        examples to the correct format.
        """
        label_list = self.get_labels()
        if len(label_list) > 1:
            # tfds encodes the label as an integer index into the label list.
            example.label = label_list[int(example.label)]
        return example

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file."""
        # utf-8-sig transparently strips a leading BOM when one is present.
        with open(input_file, "r", encoding="utf-8-sig") as handle:
            rows = csv.reader(handle, delimiter="\t", quotechar=quotechar)
            return [row for row in rows]
123
+
124
+
125
class SingleSentenceClassificationProcessor(DataProcessor):
    """Generic processor for a single sentence classification data set."""

    def __init__(self, labels=None, examples=None, mode="classification", verbose=False):
        # `mode` is either "classification" or "regression" (see get_features).
        self.labels = [] if labels is None else labels
        self.examples = [] if examples is None else examples
        self.mode = mode
        self.verbose = verbose

    def __len__(self):
        """Number of stored examples."""
        return len(self.examples)

    def __getitem__(self, idx):
        """Indexing returns an example; slicing returns a new processor sharing the labels."""
        if isinstance(idx, slice):
            return SingleSentenceClassificationProcessor(labels=self.labels, examples=self.examples[idx])
        return self.examples[idx]

    @classmethod
    def create_from_csv(
        cls, file_name, split_name="", column_label=0, column_text=1, column_id=None, skip_first_row=False, **kwargs
    ):
        """Alternate constructor: build a processor directly from a TSV/CSV file."""
        processor = cls(**kwargs)
        processor.add_examples_from_csv(
            file_name,
            split_name=split_name,
            column_label=column_label,
            column_text=column_text,
            column_id=column_id,
            skip_first_row=skip_first_row,
            overwrite_labels=True,
            overwrite_examples=True,
        )
        return processor

    @classmethod
    def create_from_examples(cls, texts_or_text_and_labels, labels=None, **kwargs):
        """Alternate constructor: build a processor from in-memory texts (or (text, label) pairs)."""
        processor = cls(**kwargs)
        processor.add_examples(texts_or_text_and_labels, labels=labels)
        return processor

    def add_examples_from_csv(
        self,
        file_name,
        split_name="",
        column_label=0,
        column_text=1,
        column_id=None,
        skip_first_row=False,
        overwrite_labels=False,
        overwrite_examples=False,
    ):
        """Read a tab-separated file and append its rows as examples (see add_examples)."""
        lines = self._read_tsv(file_name)
        if skip_first_row:
            lines = lines[1:]
        texts = []
        labels = []
        ids = []
        for i, line in enumerate(lines):
            texts.append(line[column_text])
            labels.append(line[column_label])
            if column_id is not None:
                ids.append(line[column_id])
            else:
                # Synthesize a guid from the split name and row index.
                guid = f"{split_name}-{i}" if split_name else str(i)
                ids.append(guid)

        return self.add_examples(
            texts, labels, ids, overwrite_labels=overwrite_labels, overwrite_examples=overwrite_examples
        )

    def add_examples(
        self, texts_or_text_and_labels, labels=None, ids=None, overwrite_labels=False, overwrite_examples=False
    ):
        """Append (or replace) examples; the stored label set is updated accordingly.

        Returns:
            The full list of stored examples after the update.

        Raises:
            ValueError: if `labels` or `ids` is given with a length different from the texts.
        """
        if labels is not None and len(texts_or_text_and_labels) != len(labels):
            raise ValueError(
                f"Text and labels have mismatched lengths {len(texts_or_text_and_labels)} and {len(labels)}"
            )
        if ids is not None and len(texts_or_text_and_labels) != len(ids):
            raise ValueError(f"Text and ids have mismatched lengths {len(texts_or_text_and_labels)} and {len(ids)}")
        if ids is None:
            ids = [None] * len(texts_or_text_and_labels)
        if labels is None:
            labels = [None] * len(texts_or_text_and_labels)
        examples = []
        added_labels = set()
        for text_or_text_and_label, label, guid in zip(texts_or_text_and_labels, labels, ids):
            # An item may be a bare text or a (text, label) pair when no explicit label is given.
            if isinstance(text_or_text_and_label, (tuple, list)) and label is None:
                text, label = text_or_text_and_label
            else:
                text = text_or_text_and_label
            added_labels.add(label)
            examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label))

        # Update examples
        if overwrite_examples:
            self.examples = examples
        else:
            self.examples.extend(examples)

        # Update labels
        if overwrite_labels:
            self.labels = list(added_labels)
        else:
            self.labels = list(set(self.labels).union(added_labels))

        return self.examples

    def get_features(
        self,
        tokenizer,
        max_length=None,
        pad_on_left=False,
        pad_token=0,
        mask_padding_with_zero=True,
        return_tensors=None,
    ):
        """
        Convert examples in a list of `InputFeatures`

        Args:
            tokenizer: Instance of a tokenizer that will tokenize the examples
            max_length: Maximum example length
            pad_on_left: If set to `True`, the examples will be padded on the left rather than on the right (default)
            pad_token: Padding token
            mask_padding_with_zero: If set to `True`, the attention mask will be filled by `1` for actual values
                and by `0` for padded values. If set to `False`, inverts it (`1` for padded values, `0` for actual
                values)

        Returns:
            If the `examples` input is a `tf.data.Dataset`, will return a `tf.data.Dataset` containing the
            task-specific features. If the input is a list of `InputExamples`, will return a list of task-specific
            `InputFeatures` which can be fed to the model.

        """
        if max_length is None:
            # NOTE(review): `tokenizer.max_len` is the legacy attribute name — confirm the
            # tokenizers used with this path still expose it (newer ones use `model_max_length`).
            max_length = tokenizer.max_len

        label_map = {label: i for i, label in enumerate(self.labels)}

        all_input_ids = []
        for ex_index, example in enumerate(self.examples):
            if ex_index % 10000 == 0:
                logger.info(f"Tokenizing example {ex_index}")

            input_ids = tokenizer.encode(
                example.text_a,
                add_special_tokens=True,
                max_length=min(max_length, tokenizer.max_len),
            )
            all_input_ids.append(input_ids)

        # Dynamic padding: pad to the longest example in this batch, not to max_length.
        batch_length = max(len(input_ids) for input_ids in all_input_ids)

        features = []
        for ex_index, (input_ids, example) in enumerate(zip(all_input_ids, self.examples)):
            if ex_index % 10000 == 0:
                logger.info(f"Writing example {ex_index}/{len(self.examples)}")
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = batch_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
            else:
                input_ids = input_ids + ([pad_token] * padding_length)
                attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)

            if len(input_ids) != batch_length:
                raise ValueError(f"Error with input length {len(input_ids)} vs {batch_length}")
            if len(attention_mask) != batch_length:
                raise ValueError(f"Error with input length {len(attention_mask)} vs {batch_length}")

            if self.mode == "classification":
                label = label_map[example.label]
            elif self.mode == "regression":
                label = float(example.label)
            else:
                raise ValueError(self.mode)

            if ex_index < 5 and self.verbose:
                logger.info("*** Example ***")
                logger.info(f"guid: {example.guid}")
                logger.info(f"input_ids: {' '.join([str(x) for x in input_ids])}")
                logger.info(f"attention_mask: {' '.join([str(x) for x in attention_mask])}")
                logger.info(f"label: {example.label} (id = {label})")

            features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, label=label))

        if return_tensors is None:
            return features
        elif return_tensors == "tf":
            if not is_tf_available():
                raise RuntimeError("return_tensors set to 'tf' but TensorFlow 2.0 can't be imported")
            import tensorflow as tf

            def gen():
                for ex in features:
                    yield ({"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label)

            dataset = tf.data.Dataset.from_generator(
                gen,
                ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                ({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])),
            )
            return dataset
        elif return_tensors == "pt":
            if not is_torch_available():
                raise RuntimeError("return_tensors set to 'pt' but PyTorch can't be imported")
            import torch
            from torch.utils.data import TensorDataset

            all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
            all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
            if self.mode == "classification":
                all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
            elif self.mode == "regression":
                all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

            dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels)
            return dataset
        else:
            raise ValueError("return_tensors should be one of 'tf' or 'pt'")
vllm/lib/python3.10/site-packages/transformers/data/processors/xnli.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """XNLI utils (dataset loading and evaluation)"""
17
+
18
+ import os
19
+
20
+ from ...utils import logging
21
+ from .utils import DataProcessor, InputExample
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
class XnliProcessor(DataProcessor):
    """
    Processor for the XNLI dataset. Adapted from
    https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/run_classifier.py#L207
    """

    def __init__(self, language, train_language=None):
        """
        Args:
            language: language code of the evaluation data (e.g. ``"fr"``).
            train_language: language code of the training data; when ``None``,
                ``language`` is used for training as well.
        """
        self.language = language
        self.train_language = train_language

    @staticmethod
    def _check_fields(text_a, text_b, label, split):
        """Raise ``TypeError`` if any field of a parsed TSV row is not a string.

        ``split`` ("Training"/"Test") is interpolated into the message so the
        error identifies which dataset split contained the malformed row.
        """
        if not isinstance(text_a, str):
            raise TypeError(f"{split} input {text_a} is not a string")
        if not isinstance(text_b, str):
            raise TypeError(f"{split} input {text_b} is not a string")
        if not isinstance(label, str):
            raise TypeError(f"{split} label {label} is not a string")

    def get_train_examples(self, data_dir):
        """See base class."""
        lg = self.language if self.train_language is None else self.train_language
        lines = self._read_tsv(os.path.join(data_dir, f"XNLI-MT-1.0/multinli/multinli.train.{lg}.tsv"))
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                # Skip the TSV header row.
                continue
            guid = f"train-{i}"
            text_a = line[0]
            text_b = line[1]
            # The MultiNLI training TSV uses "contradictory" where XNLI uses
            # "contradiction"; normalize to the XNLI label set.
            label = "contradiction" if line[2] == "contradictory" else line[2]
            self._check_fields(text_a, text_b, label, "Training")
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples

    def get_test_examples(self, data_dir):
        """See base class."""
        lines = self._read_tsv(os.path.join(data_dir, "XNLI-1.0/xnli.test.tsv"))
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                # Skip the TSV header row.
                continue
            language = line[0]
            if language != self.language:
                # The XNLI test file mixes all languages; keep only ours.
                continue
            guid = f"test-{i}"
            text_a = line[6]
            text_b = line[7]
            label = line[1]
            # BUG FIX: these errors previously said "Training input"/"Training
            # label" even though they report malformed *test* rows.
            self._check_fields(text_a, text_b, label, "Test")
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]
84
+
85
+
86
# Task registries for XNLI, keyed by task name.
# Processor class handling data loading for the task.
xnli_processors = {"xnli": XnliProcessor}

# XNLI is a classification (not regression) task.
xnli_output_modes = {"xnli": "classification"}

# Three labels: contradiction / entailment / neutral.
xnli_tasks_num_labels = {"xnli": 3}