ZTWHHH commited on
Commit
6874fce
·
verified ·
1 Parent(s): 30fed2b

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/builtin_types.h +64 -0
  2. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/common_functions.h +65 -0
  3. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cooperative_groups/details/coalesced_reduce.h +95 -0
  4. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cooperative_groups/details/coalesced_scan.h +174 -0
  5. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cooperative_groups/details/invoke.h +189 -0
  6. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cooperative_groups/details/memory.h +135 -0
  7. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cooperative_groups/details/partitioning.h +159 -0
  8. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/crt/host_runtime.h +306 -0
  9. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/crt/math_functions.hpp +0 -0
  10. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/crt/sm_70_rt.hpp +192 -0
  11. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cudaTypedefs.h +0 -0
  12. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cuda_awbarrier.h +280 -0
  13. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cuda_pipeline_primitives.h +148 -0
  14. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cuda_texture_types.h +76 -0
  15. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cupti_activity.h +0 -0
  16. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cupti_driver_cbid.h +767 -0
  17. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cupti_result.h +346 -0
  18. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cupti_sass_metrics.h +436 -0
  19. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/driver_types.h +0 -0
  20. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/generated_cudaVDPAU_meta.h +46 -0
  21. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/generated_cuda_runtime_api_meta.h +2288 -0
  22. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/generated_cuda_vdpau_interop_meta.h +38 -0
  23. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/generated_cudart_removed_meta.h +162 -0
  24. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/sm_20_atomic_functions.hpp +92 -0
  25. deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/vector_types.h +449 -0
  26. evalkit_tf433/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/configuration_bert_generation.cpython-310.pyc +0 -0
  27. evalkit_tf433/lib/python3.10/site-packages/transformers/models/bert_generation/configuration_bert_generation.py +119 -0
  28. evalkit_tf433/lib/python3.10/site-packages/transformers/models/bert_generation/tokenization_bert_generation.py +181 -0
  29. evalkit_tf433/lib/python3.10/site-packages/transformers/models/byt5/__init__.py +28 -0
  30. evalkit_tf433/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/__init__.cpython-310.pyc +0 -0
  31. evalkit_tf433/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/tokenization_byt5.cpython-310.pyc +0 -0
  32. evalkit_tf433/lib/python3.10/site-packages/transformers/models/byt5/convert_byt5_original_tf_checkpoint_to_pytorch.py +60 -0
  33. evalkit_tf433/lib/python3.10/site-packages/transformers/models/byt5/tokenization_byt5.py +250 -0
  34. evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/__init__.py +88 -0
  35. evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/__init__.cpython-310.pyc +0 -0
  36. evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/configuration_chinese_clip.cpython-310.pyc +0 -0
  37. evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/convert_chinese_clip_original_pytorch_to_hf.cpython-310.pyc +0 -0
  38. evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/feature_extraction_chinese_clip.cpython-310.pyc +0 -0
  39. evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/image_processing_chinese_clip.cpython-310.pyc +0 -0
  40. evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/modeling_chinese_clip.cpython-310.pyc +0 -0
  41. evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/processing_chinese_clip.cpython-310.pyc +0 -0
  42. evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/configuration_chinese_clip.py +462 -0
  43. evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/convert_chinese_clip_original_pytorch_to_hf.py +134 -0
  44. evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/feature_extraction_chinese_clip.py +33 -0
  45. evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/image_processing_chinese_clip.py +311 -0
  46. evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/modeling_chinese_clip.py +1581 -0
  47. evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/processing_chinese_clip.py +141 -0
  48. evalkit_tf433/lib/python3.10/site-packages/transformers/models/cvt/__init__.py +81 -0
  49. evalkit_tf433/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/convert_cvt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  50. evalkit_tf433/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/modeling_tf_cvt.cpython-310.pyc +0 -0
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/builtin_types.h ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*******************************************************************************
51
+ * *
52
+ * *
53
+ * *
54
+ *******************************************************************************/
55
+
56
+ #include "device_types.h"
57
+ #if !defined(__CUDACC_RTC__)
58
+ #define EXCLUDE_FROM_RTC
59
+ #include "driver_types.h"
60
+ #undef EXCLUDE_FROM_RTC
61
+ #endif /* !__CUDACC_RTC__ */
62
+ #include "surface_types.h"
63
+ #include "texture_types.h"
64
+ #include "vector_types.h"
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/common_functions.h ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
51
+ #if defined(_MSC_VER)
52
+ #pragma message("common_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
53
+ #else
54
+ #warning "common_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead."
55
+ #endif
56
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
57
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H_WRAPPER__
58
+ #endif
59
+
60
+ #include "crt/common_functions.h"
61
+
62
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H_WRAPPER__)
63
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
64
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H_WRAPPER__
65
+ #endif
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cooperative_groups/details/coalesced_reduce.h ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_COALESCED_REDUCE_H_
50
+ #define _CG_COALESCED_REDUCE_H_
51
+
52
+ #include "info.h"
53
+ #include "helpers.h"
54
+ #include "cooperative_groups.h"
55
+ #include "partitioning.h"
56
+ #include "coalesced_scan.h"
57
+
58
+ _CG_BEGIN_NAMESPACE
59
+
60
+ namespace details {
61
+
62
+ template <typename TyVal, typename TyOp, unsigned int TySize, typename ParentT>
63
+ _CG_QUALIFIER auto coalesced_reduce(const __single_warp_thread_block_tile<TySize, ParentT>& group,
64
+ TyVal&& val,
65
+ TyOp&& op) -> decltype(op(val, val)) {
66
+ auto out = val;
67
+ for (int mask = TySize >> 1; mask > 0; mask >>= 1) {
68
+ out = op(out, group.shfl_xor(out, mask));
69
+ }
70
+
71
+ return out;
72
+ }
73
+
74
+ template <typename TyVal, typename TyOp>
75
+ _CG_QUALIFIER auto coalesced_reduce(const coalesced_group& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
76
+ if (group.size() == 32) {
77
+ // Full coalesced group can go through faster path by being treated as a tile of size 32
78
+ auto tile = details::tiled_partition_internal<32, void>();
79
+ return coalesced_reduce(tile, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
80
+ }
81
+ else {
82
+ auto scan_result =
83
+ inclusive_scan_non_contiguous(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
84
+ unsigned int group_mask = _coalesced_group_data_access::get_mask(group);
85
+ unsigned int last_thread_id = 31 - __clz(group_mask);
86
+ return details::tile::shuffle_dispatch<TyVal>::shfl(
87
+ _CG_STL_NAMESPACE::forward<TyVal>(scan_result), group_mask, last_thread_id, 32);
88
+ }
89
+ }
90
+
91
+ } // details
92
+
93
+ _CG_END_NAMESPACE
94
+
95
+ #endif // _CG_COALESCED_REDUCE_H_
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cooperative_groups/details/coalesced_scan.h ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_COALESCED_SCAN_H_
50
+ #define _CG_COALESCED_SCAN_H_
51
+
52
+ #include "info.h"
53
+ #include "helpers.h"
54
+ #include "cooperative_groups.h"
55
+ #include "partitioning.h"
56
+ #include "functional.h"
57
+
58
+ _CG_BEGIN_NAMESPACE
59
+
60
+ namespace details {
61
+
62
+ template <typename TyGroup, typename TyVal, typename TyOp>
63
+ _CG_QUALIFIER auto inclusive_scan_contiguous(const TyGroup& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
64
+ auto out = val;
65
+ for (int mask = 1; mask < group.size(); mask <<= 1) {
66
+ auto tmp = group.shfl_up(out, mask);
67
+ if (mask <= group.thread_rank()) {
68
+ out = op(out, tmp);
69
+ }
70
+ }
71
+
72
+ return out;
73
+ }
74
+
75
+ template <typename TyGroup, typename TyVal, typename TyOp>
76
+ _CG_QUALIFIER auto inclusive_scan_non_contiguous(const TyGroup& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
77
+ const unsigned int groupSize = group.size();
78
+ auto out = val;
79
+
80
+ const unsigned int mask = details::_coalesced_group_data_access::get_mask(group);
81
+ unsigned int lanemask = details::lanemask32_lt() & mask;
82
+ unsigned int srcLane = details::laneid();
83
+
84
+ const unsigned int base = __ffs(mask)-1; /* lane with rank == 0 */
85
+ const unsigned int rank = __popc(lanemask);
86
+
87
+ for (unsigned int i = 1, j = 1; i < groupSize; i <<= 1) {
88
+ if (i <= rank) {
89
+ srcLane -= j;
90
+ j = i; /* maximum possible lane */
91
+
92
+ unsigned int begLane = base + rank - i; /* minimum possible lane */
93
+
94
+ /* Next source lane is in the range [ begLane .. srcLane ]
95
+ * If begLane < srcLane then do a binary search.
96
+ */
97
+ while (begLane < srcLane) {
98
+ const unsigned int halfLane = (begLane + srcLane) >> 1;
99
+ const unsigned int halfMask = lanemask >> halfLane;
100
+ const unsigned int d = __popc(halfMask);
101
+ if (d < i) {
102
+ srcLane = halfLane - 1; /* halfLane too large */
103
+ }
104
+ else if ((i < d) || !(halfMask & 0x01)) {
105
+ begLane = halfLane + 1; /* halfLane too small */
106
+ }
107
+ else {
108
+ begLane = srcLane = halfLane; /* happen to hit */
109
+ }
110
+ }
111
+ }
112
+
113
+ auto tmp = details::tile::shuffle_dispatch<TyVal>::shfl(out, mask, srcLane, 32);
114
+ if (i <= rank) {
115
+ out = op(out, tmp);
116
+ }
117
+ }
118
+ return out;
119
+ }
120
+
121
+ template <unsigned int TySize, typename ParentT, typename TyVal, typename TyOp>
122
+ _CG_QUALIFIER auto coalesced_inclusive_scan(const __single_warp_thread_block_tile<TySize, ParentT>& group,
123
+ TyVal&& val,
124
+ TyOp&& op) -> decltype(op(val, val)) {
125
+ return inclusive_scan_contiguous(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
126
+ }
127
+
128
+ template <typename TyVal, typename TyOp>
129
+ _CG_QUALIFIER auto coalesced_inclusive_scan(const coalesced_group& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
130
+ if (group.size() == 32) {
131
+ return inclusive_scan_contiguous(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
132
+ }
133
+ else {
134
+ return inclusive_scan_non_contiguous(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
135
+ }
136
+ }
137
+
138
+ template <bool IntegralOptimized>
139
+ struct scan_choose_convertion;
140
+
141
+ template<>
142
+ struct scan_choose_convertion<true> {
143
+ template <typename TyGroup, typename TyRes, typename TyVal>
144
+ _CG_STATIC_QUALIFIER details::remove_qual<TyVal> convert_inclusive_to_exclusive(const TyGroup& group, TyRes& result, TyVal&& val) {
145
+ return result - val;
146
+ }
147
+ };
148
+
149
+ template<>
150
+ struct scan_choose_convertion<false> {
151
+ template <typename TyGroup, typename TyRes, typename TyVal>
152
+ _CG_STATIC_QUALIFIER details::remove_qual<TyVal> convert_inclusive_to_exclusive(const TyGroup& group, TyRes& result, TyVal&& val) {
153
+ auto ret = group.shfl_up(result, 1);
154
+ if (group.thread_rank() == 0) {
155
+ return {};
156
+ }
157
+ else {
158
+ return ret;
159
+ }
160
+ }
161
+ };
162
+
163
+ template <typename TyGroup, typename TyRes, typename TyVal, typename TyFn>
164
+ _CG_QUALIFIER auto convert_inclusive_to_exclusive(const TyGroup& group, TyRes& result, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
165
+ using conversion = scan_choose_convertion<_CG_STL_NAMESPACE::is_same<remove_qual<TyFn>, cooperative_groups::plus<remove_qual<TyVal>>>::value
166
+ && _CG_STL_NAMESPACE::is_integral<remove_qual<TyVal>>::value>;
167
+ return conversion::convert_inclusive_to_exclusive(group, result, _CG_STL_NAMESPACE::forward<TyVal>(val));
168
+ }
169
+
170
+ } // details
171
+
172
+ _CG_END_NAMESPACE
173
+
174
+ #endif // _CG_COALESCED_SCAN_H_
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cooperative_groups/details/invoke.h ADDED
@@ -0,0 +1,189 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2022 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CG_INVOKE_H
51
+ #define _CG_INVOKE_H
52
+
53
+ #include "info.h"
54
+ #include "helpers.h"
55
+
56
+ #if defined(_CG_CPP11_FEATURES)
57
+
58
+ _CG_BEGIN_NAMESPACE
59
+
60
+ namespace details {
61
+
62
+ template <typename Group>
63
+ struct _elect_group_supported : _CG_STL_NAMESPACE::false_type {};
64
+ #ifdef _CG_HAS_INSTR_ELECT
65
+ template<>
66
+ struct _elect_group_supported<coalesced_group> : _CG_STL_NAMESPACE::true_type {};
67
+ template<unsigned int Size, typename Parent>
68
+ struct _elect_group_supported<thread_block_tile<Size, Parent>> :
69
+ _CG_STL_NAMESPACE::integral_constant<bool, (Size <= 32)> {};
70
+ #endif
71
+
72
+ template <typename Group>
73
+ struct elect_group_supported : public _elect_group_supported<details::remove_qual<Group>> {};
74
+
75
+ template<typename Group>
76
+ _CG_STATIC_QUALIFIER bool elect_one(const Group& group, unsigned int mask, unsigned int& leader_lane) {
77
+ int is_leader = 0;
78
+ #ifdef _CG_HAS_INSTR_ELECT
79
+ asm("{\n\t"
80
+ " .reg .pred p;\n\t"
81
+ " elect.sync %0|p, %2;\n\t"
82
+ " @p mov.s32 %1, 1;\n\t"
83
+ "}"
84
+ : "+r"(leader_lane), "+r"(is_leader) : "r" (mask));
85
+ #endif
86
+ return is_leader;
87
+ }
88
+
89
+ template<bool UseElect>
90
+ struct invoke_one_impl {};
91
+
92
+ template<>
93
+ struct invoke_one_impl<true> {
94
+ template<typename Group, typename Fn, typename... Args>
95
+ _CG_STATIC_QUALIFIER void invoke_one(const Group& group, Fn&& fn, Args&&... args) {
96
+ auto mask = details::_coalesced_group_data_access::get_mask(group);
97
+ unsigned int leader_lane = 0;
98
+
99
+ if (elect_one(group, mask, leader_lane)) {
100
+ _CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...);
101
+ }
102
+ }
103
+
104
+ template<typename Group, typename Fn, typename... Args>
105
+ _CG_STATIC_QUALIFIER auto invoke_one_broadcast(const Group& group, Fn&& fn, Args&&... args)
106
+ -> typename _CG_STL_NAMESPACE::remove_reference<
107
+ decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...))>::type {
108
+
109
+ using ResultType = decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...));
110
+ details::remove_qual<ResultType> result;
111
+ auto mask = details::_coalesced_group_data_access::get_mask(group);
112
+ unsigned int leader_lane = 0;
113
+
114
+ if (elect_one(group, mask, leader_lane)) {
115
+ result = _CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...);
116
+ }
117
+
118
+ // Need to use low level api instead of group.shfl, because elect_one returns lane id, not group rank.
119
+ return tile::shuffle_dispatch<ResultType>::shfl(result, mask, leader_lane, 32);
120
+ }
121
+ };
122
+
123
+ template<>
124
+ struct invoke_one_impl<false> {
125
+ template<typename Group, typename Fn, typename... Args>
126
+ _CG_STATIC_QUALIFIER void invoke_one(const Group& group, Fn&& fn, Args&&... args) {
127
+ if (group.thread_rank() == 0) {
128
+ _CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...);
129
+ }
130
+ }
131
+
132
+ template<typename Group, typename Fn, typename... Args>
133
+ _CG_STATIC_QUALIFIER auto invoke_one_broadcast(const Group& group, Fn&& fn, Args&&... args)
134
+ -> typename _CG_STL_NAMESPACE::remove_reference<
135
+ decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...))>::type {
136
+
137
+ using ResultType = decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...));
138
+ details::remove_qual<ResultType> result;
139
+
140
+ if (group.thread_rank() == 0) {
141
+ result = _CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...);
142
+ }
143
+
144
+ return group.shfl(result, 0);
145
+ }
146
+ };
147
+
148
+
149
+ }; // namespace details
150
+
151
+ template<typename Group, typename Fn, typename... Args>
152
+ _CG_QUALIFIER void invoke_one(const Group& group, Fn&& fn, Args&&... args) {
153
+ using impl = details::invoke_one_impl<details::elect_group_supported<Group>::value>;
154
+ impl::invoke_one(group, _CG_STL_NAMESPACE::forward<Fn>(fn), _CG_STL_NAMESPACE::forward<Args>(args)...);
155
+ }
156
+
157
+ template<typename Fn, typename... Args>
158
+ _CG_QUALIFIER auto invoke_one_broadcast(const coalesced_group& group, Fn&& fn, Args&&... args)
159
+ -> typename _CG_STL_NAMESPACE::remove_reference<
160
+ decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...))>::type {
161
+
162
+ using ResultType = decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...));
163
+ static_assert(!_CG_STL_NAMESPACE::is_same<ResultType, void>::value,
164
+ "For invocables returning void invoke_one should be used instead");
165
+ using impl = details::invoke_one_impl<details::elect_group_supported<coalesced_group>::value>;
166
+ return impl::invoke_one_broadcast(group,
167
+ _CG_STL_NAMESPACE::forward<Fn>(fn),
168
+ _CG_STL_NAMESPACE::forward<Args>(args)...);
169
+ }
170
+
171
+ template<unsigned int Size, typename Parent, typename Fn, typename... Args>
172
+ _CG_QUALIFIER auto invoke_one_broadcast(const thread_block_tile<Size, Parent>& group, Fn&& fn, Args&&... args)
173
+ -> typename _CG_STL_NAMESPACE::remove_reference<
174
+ decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...))>::type {
175
+
176
+ using ResultType = decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...));
177
+ static_assert(!_CG_STL_NAMESPACE::is_same<ResultType, void>::value,
178
+ "For invocables returning void invoke_one should be used instead");
179
+ using impl = details::invoke_one_impl<details::elect_group_supported<thread_block_tile<Size, Parent>>::value>;
180
+ return impl::invoke_one_broadcast(group,
181
+ _CG_STL_NAMESPACE::forward<Fn>(fn),
182
+ _CG_STL_NAMESPACE::forward<Args>(args)...);
183
+ }
184
+
185
+ _CG_END_NAMESPACE
186
+
187
+ #endif //_CG_CPP11_FEATURES
188
+
189
+ #endif // _CG_INVOKE_H
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cooperative_groups/details/memory.h ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2022 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _COOPERATIVE_GROUPS_MEMORY_H_
50
+ # define _COOPERATIVE_GROUPS_MEMORY_H_
51
+
52
+ #include "info.h"
53
+
54
+ _CG_BEGIN_NAMESPACE
55
+
56
+ #if defined(_CG_CPP11_FEATURES)
57
+ namespace details {
58
+ _CG_STATIC_CONST_DECL int scratch_num_reserved_bytes = 12;
59
+
60
+ #if defined(_CG_HAS_RESERVED_SHARED)
61
+ _CG_STATIC_QUALIFIER void* reserved_shared_ptr()
62
+ {
63
+ void *ptr;
64
+ asm ("{\n\t"
65
+ " .reg .u32 start;\n\t"
66
+ " .reg .u64 extended;\n\t"
67
+ " mov.u32 start, %%reserved_smem_offset_1;\n\t"
68
+ " cvt.u64.u32 extended, start;\n\t"
69
+ " cvta.shared.u64 %0, extended;\n\t"
70
+ "}"
71
+ : "=" _CG_ASM_PTR_CONSTRAINT(ptr));
72
+ return ptr;
73
+ }
74
+ #endif
75
+
76
+ struct multi_warp_scratch {
77
+ // One barrier per possible size of the group.
78
+ _CG_STATIC_CONST_DECL unsigned int memory_barriers_count = 5;
79
+ _CG_STATIC_CONST_DECL size_t sync_memory_size = memory_barriers_count * sizeof(barrier_t);
80
+
81
+ using communication_type = unsigned long long;
82
+ _CG_STATIC_CONST_DECL size_t communication_size = sizeof(communication_type);
83
+
84
+ // Layout of the scratch space:
85
+ barrier_t barriers[memory_barriers_count];
86
+ char reserved[scratch_num_reserved_bytes]; // Reserve 12 bytes for future use
87
+ communication_type communication_memory[default_max_block_size / 32];
88
+
89
+ _CG_STATIC_CONSTEXPR_QUALIFIER unsigned int scratch_size_needed(unsigned int max_block_size) {
90
+ // One slot of collectives memory per warp.
91
+ return scratch_num_reserved_bytes + sync_memory_size + max_block_size / 32 * communication_size;
92
+ }
93
+
94
+ _CG_QUALIFIER void init_barriers(unsigned int thread_rank) {
95
+ if (thread_rank < memory_barriers_count) {
96
+ barriers[thread_rank] = 0;
97
+ }
98
+ }
99
+ };
100
+
101
+ #if defined(_CG_HAS_RESERVED_SHARED)
102
+ // CG can expect at least 288 bytes available in reserved shared
103
+ static_assert(sizeof(multi_warp_scratch) <= 288, "multi-warp scratch size is too large");
104
+ #endif
105
+
106
+ // Make sure the structure can fit into the user provided memory
107
+ static_assert(sizeof(multi_warp_scratch) <= multi_warp_scratch::scratch_size_needed(default_max_block_size),
108
+ "multi-warp scratch size is too large");
109
+
110
+
111
+ _CG_QUALIFIER multi_warp_scratch* get_scratch_ptr(void* user_scratch) {
112
+ void *ptr;
113
+ #if defined(_CG_HAS_RESERVED_SHARED)
114
+ ptr = reserved_shared_ptr();
115
+ #else
116
+ ptr = user_scratch;
117
+ #endif
118
+ return static_cast<multi_warp_scratch*>(ptr);
119
+
120
+ }
121
+
122
+ }
123
+
124
+ template <unsigned int MaxBlockSize = details::default_max_block_size>
125
+ struct __align__(details::multi_warp_scratch::communication_size) block_tile_memory {
126
+ private:
127
+ #if !defined(_CG_HAS_RESERVED_SHARED)
128
+ char scratch[details::multi_warp_scratch::scratch_size_needed(MaxBlockSize)];
129
+ #endif
130
+ };
131
+ #endif
132
+
133
+ _CG_END_NAMESPACE
134
+
135
+ #endif /* !_COOPERATIVE_GROUPS_MEMORY_H_ */
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cooperative_groups/details/partitioning.h ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CG_PARTITIONING_H
51
+ #define _CG_PARTITIONING_H
52
+
53
+ #include "info.h"
54
+ #include "helpers.h"
55
+
56
+ _CG_BEGIN_NAMESPACE
57
+
58
+ namespace details {
59
+
60
+ template <typename TyGroup>
61
+ _CG_STATIC_QUALIFIER coalesced_group _binary_partition(const TyGroup &tile, bool pred) {
62
+ const unsigned int fullMask = ~0u;
63
+
64
+ unsigned int thisMask = _coalesced_group_data_access::get_mask(tile);
65
+ unsigned int predMask = pred ? 0 : fullMask;
66
+ unsigned int setMask = __ballot_sync(thisMask, pred);
67
+
68
+ if (setMask == thisMask || setMask == 0) {
69
+ coalesced_group subTile = _coalesced_group_data_access::construct_from_mask<coalesced_group>(thisMask);
70
+ _coalesced_group_data_access::modify_meta_group(subTile, 0, 1);
71
+ return subTile;
72
+ }
73
+ else {
74
+ unsigned int subMask = thisMask & (setMask ^ predMask);
75
+ coalesced_group subTile = _coalesced_group_data_access::construct_from_mask<coalesced_group>(subMask);
76
+ _coalesced_group_data_access::modify_meta_group(subTile, pred, 2);
77
+ return subTile;
78
+ }
79
+ }
80
+
81
+ #if defined(_CG_HAS_MATCH_COLLECTIVE) && defined(_CG_CPP11_FEATURES)
82
+ template <typename TyPredicate>
83
+ struct _labeled_partition_dispatch {
84
+ template <typename TyGroup>
85
+ _CG_QUALIFIER coalesced_group operator()(const TyGroup &tile, TyPredicate pred) {
86
+ unsigned int thisMask = _coalesced_group_data_access::get_mask(tile);
87
+ unsigned int thisBias = __ffs(thisMask) - 1; // Subtract 1 to index properly from [1-32]
88
+ unsigned int subMask = __match_any_sync(thisMask, pred);
89
+
90
+ coalesced_group subTile = _coalesced_group_data_access::construct_from_mask<coalesced_group>(subMask);
91
+
92
+ int leaderLaneId = subTile.shfl(details::laneid(), 0);
93
+
94
+ bool isLeader = !subTile.thread_rank();
95
+ unsigned int leaderMask = __ballot_sync(thisMask, isLeader);
96
+ unsigned int tileRank = __fns(leaderMask, leaderLaneId, 0) - thisBias;
97
+
98
+ _coalesced_group_data_access::modify_meta_group(subTile, tileRank, __popc(leaderMask));
99
+
100
+ return subTile;
101
+ }
102
+ };
103
+
104
+ template <>
105
+ struct _labeled_partition_dispatch<bool> {
106
+ template <typename TyGroup>
107
+ _CG_QUALIFIER coalesced_group operator()(const TyGroup &tile, bool pred) {
108
+ return _binary_partition(tile, pred);
109
+ }
110
+ };
111
+
112
+ template <typename TyPredicate>
113
+ struct _labeled_partition_dispatch<TyPredicate*> {
114
+ template <typename TyGroup>
115
+ _CG_QUALIFIER coalesced_group operator()(const TyGroup &tile, TyPredicate* pred) {
116
+ auto impl = _labeled_partition_dispatch<unsigned long long>();
117
+ return impl(tile, reinterpret_cast<unsigned long long>(pred));
118
+ }
119
+ };
120
+ #endif
121
+ }; // namespace details
122
+
123
+ _CG_STATIC_QUALIFIER coalesced_group binary_partition(const coalesced_group &tile, bool pred) {
124
+ return details::_binary_partition(tile, pred);
125
+ }
126
+
127
+ template <unsigned int Size, typename ParentT>
128
+ _CG_STATIC_QUALIFIER coalesced_group binary_partition(const thread_block_tile<Size, ParentT> &tile, bool pred) {
129
+ #ifdef _CG_CPP11_FEATURES
130
+ static_assert(Size <= 32, "Binary partition is available only for tiles of size smaller or equal to 32");
131
+ #endif
132
+ return details::_binary_partition(tile, pred);
133
+ }
134
+
135
+
136
+ #if defined(_CG_HAS_MATCH_COLLECTIVE) && defined(_CG_CPP11_FEATURES)
137
+ template <typename TyPredicate>
138
+ _CG_STATIC_QUALIFIER coalesced_group labeled_partition(const coalesced_group &tile, TyPredicate pred) {
139
+ static_assert(_CG_STL_NAMESPACE::is_integral<TyPredicate>::value ||
140
+ _CG_STL_NAMESPACE::is_pointer<TyPredicate>::value,
141
+ "labeled_partition predicate must be an integral or pointer type");
142
+ auto dispatch = details::_labeled_partition_dispatch<details::remove_qual<TyPredicate>>();
143
+ return dispatch(tile, pred);
144
+ }
145
+
146
+ template <typename TyPredicate, unsigned int Size, typename ParentT>
147
+ _CG_STATIC_QUALIFIER coalesced_group labeled_partition(const thread_block_tile<Size, ParentT> &tile, TyPredicate pred) {
148
+ static_assert(_CG_STL_NAMESPACE::is_integral<TyPredicate>::value ||
149
+ _CG_STL_NAMESPACE::is_pointer<TyPredicate>::value,
150
+ "labeled_partition predicate must be an integral or pointer type");
151
+ static_assert(Size <= 32, "Labeled partition is available only for tiles of size smaller or equal to 32");
152
+ auto dispatch = details::_labeled_partition_dispatch<details::remove_qual<TyPredicate>>();
153
+ return dispatch(tile, pred);
154
+ }
155
+ #endif
156
+
157
+ _CG_END_NAMESPACE
158
+
159
+ #endif // _CG_PARTITIONING_H
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/crt/host_runtime.h ADDED
@@ -0,0 +1,306 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * NVIDIA_COPYRIGHT_BEGIN
3
+ *
4
+ * Copyright (c) 2008-2023, NVIDIA CORPORATION. All rights reserved.
5
+ *
6
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
7
+ * and proprietary rights in and to this software, related documentation
8
+ * and any modifications thereto. Any use, reproduction, disclosure or
9
+ * distribution of this software and related documentation without an express
10
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
11
+ *
12
+ * NVIDIA_COPYRIGHT_END
13
+ */
14
+
15
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
16
+ #if defined(_MSC_VER)
17
+ #pragma message("crt/device_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
18
+ #else
19
+ #warning "crt/device_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
20
+ #endif
21
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
22
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_RUNTIME_H__
23
+ #endif
24
+
25
+ #if !defined(__CUDA_INTERNAL_COMPILATION__)
26
+
27
+ #define __CUDA_INTERNAL_COMPILATION__
28
+ #define __text__
29
+ #define __surf__
30
+ #define __name__shadow_var(c, cpp) \
31
+ #c
32
+ #define __name__text_var(c, cpp) \
33
+ #cpp
34
+ #define __host__shadow_var(c, cpp) \
35
+ cpp
36
+ #define __text_var(c, cpp) \
37
+ cpp
38
+ #define __device_fun(fun) \
39
+ #fun
40
+ #define __device_var(var) \
41
+ #var
42
+ #define __device__text_var(c, cpp) \
43
+ #c
44
+ #define __device__shadow_var(c, cpp) \
45
+ #c
46
+
47
+ #if defined(_WIN32) && !defined(_WIN64)
48
+
49
+ #define __pad__(f) \
50
+ f
51
+
52
+ #else /* _WIN32 && !_WIN64 */
53
+
54
+ #define __pad__(f)
55
+
56
+ #endif /* _WIN32 && !_WIN64 */
57
+
58
+ #include "builtin_types.h"
59
+ #include "storage_class.h"
60
+
61
+ #else /* !__CUDA_INTERNAL_COMPILATION__ */
62
+
63
+ template <typename T>
64
+ static inline T *__cudaAddressOf(T &val)
65
+ {
66
+ return (T *)((void *)(&(const_cast<char &>(reinterpret_cast<const volatile char &>(val)))));
67
+ }
68
+
69
+ #define __cudaRegisterBinary(X) \
70
+ __cudaFatCubinHandle = __cudaRegisterFatBinary((void*)&__fatDeviceText); \
71
+ { void (*callback_fp)(void **) = (void (*)(void **))(X); (*callback_fp)(__cudaFatCubinHandle); __cudaRegisterFatBinaryEnd(__cudaFatCubinHandle); }\
72
+ atexit(__cudaUnregisterBinaryUtil)
73
+
74
+ #define __cudaRegisterVariable(handle, var, ext, size, constant, global) \
75
+ __cudaRegisterVar(handle, (char*)&__host##var, (char*)__device##var, __name##var, ext, size, constant, global)
76
+ #define __cudaRegisterManagedVariable(handle, var, ext, size, constant, global) \
77
+ __cudaRegisterManagedVar(handle, (void **)&__host##var, (char*)__device##var, __name##var, ext, size, constant, global)
78
+
79
+ #define __cudaRegisterGlobalTexture(handle, tex, dim, norm, ext) \
80
+ __cudaRegisterTexture(handle, (const struct textureReference*)&tex, (const void**)(void*)__device##tex, __name##tex, dim, norm, ext)
81
+ #define __cudaRegisterGlobalSurface(handle, surf, dim, ext) \
82
+ __cudaRegisterSurface(handle, (const struct surfaceReference*)&surf, (const void**)(void*)__device##surf, __name##surf, dim, ext)
83
+ #define __cudaRegisterEntry(handle, funptr, fun, thread_limit) \
84
+ __cudaRegisterFunction(handle, (const char*)funptr, (char*)__device_fun(fun), #fun, -1, (uint3*)0, (uint3*)0, (dim3*)0, (dim3*)0, (int*)0)
85
+
86
+ extern "C" cudaError_t CUDARTAPI __cudaPopCallConfiguration(
87
+ dim3 *gridDim,
88
+ dim3 *blockDim,
89
+ size_t *sharedMem,
90
+ void *stream
91
+ );
92
+
93
+ #define __cudaLaunchPrologue(size) \
94
+ void * __args_arr[size]; \
95
+ int __args_idx = 0
96
+
97
+ #define __cudaSetupArg(arg, offset) \
98
+ __args_arr[__args_idx] = (void *)__cudaAddressOf(arg); ++__args_idx
99
+
100
+ #define __cudaSetupArgSimple(arg, offset) \
101
+ __args_arr[__args_idx] = (void *)(char *)&arg; ++__args_idx
102
+
103
+ #if defined(__GNUC__)
104
+ #define __NV_ATTR_UNUSED_FOR_LAUNCH __attribute__((unused))
105
+ #else /* !__GNUC__ */
106
+ #define __NV_ATTR_UNUSED_FOR_LAUNCH
107
+ #endif /* __GNUC__ */
108
+
109
+ #ifdef __NV_LEGACY_LAUNCH
110
+ /* the use of __args_idx in the expression below avoids host compiler warning about it being an
111
+ unused variable when the launch has no arguments */
112
+ #define __cudaLaunch(fun) \
113
+ { volatile static char *__f __NV_ATTR_UNUSED_FOR_LAUNCH; __f = fun; \
114
+ dim3 __gridDim, __blockDim;\
115
+ size_t __sharedMem; \
116
+ cudaStream_t __stream; \
117
+ if (__cudaPopCallConfiguration(&__gridDim, &__blockDim, &__sharedMem, &__stream) != cudaSuccess) \
118
+ return; \
119
+ if (__args_idx == 0) {\
120
+ (void)cudaLaunchKernel(fun, __gridDim, __blockDim, &__args_arr[__args_idx], __sharedMem, __stream);\
121
+ } else { \
122
+ (void)cudaLaunchKernel(fun, __gridDim, __blockDim, &__args_arr[0], __sharedMem, __stream);\
123
+ }\
124
+ }
125
+ #else /* !__NV_LEGACY_LAUNCH */
126
+ #define __cudaLaunch(fun) \
127
+ { volatile static char *__f __NV_ATTR_UNUSED_FOR_LAUNCH; __f = fun; \
128
+ static cudaKernel_t __handle = 0; \
129
+ volatile static bool __tmp __NV_ATTR_UNUSED_FOR_LAUNCH = (__cudaGetKernel(&__handle, (const void *)fun) == cudaSuccess); \
130
+ dim3 __gridDim, __blockDim;\
131
+ size_t __sharedMem; \
132
+ cudaStream_t __stream; \
133
+ if (__cudaPopCallConfiguration(&__gridDim, &__blockDim, &__sharedMem, &__stream) != cudaSuccess) \
134
+ return; \
135
+ if (__args_idx == 0) {\
136
+ (void)__cudaLaunchKernel_helper(__handle, __gridDim, __blockDim, &__args_arr[__args_idx], __sharedMem, __stream);\
137
+ } else { \
138
+ (void)__cudaLaunchKernel_helper(__handle, __gridDim, __blockDim, &__args_arr[0], __sharedMem, __stream);\
139
+ }\
140
+ }
141
+ #endif /* __NV_LEGACY_LAUNCH */
142
+
143
+ #if defined(__GNUC__)
144
+ #define __nv_dummy_param_ref(param) \
145
+ { volatile static void **__ref __attribute__((unused)); __ref = (volatile void **)param; }
146
+ #else /* __GNUC__ */
147
+ #define __nv_dummy_param_ref(param) \
148
+ { volatile static void **__ref; __ref = (volatile void **)param; }
149
+ #endif /* __GNUC__ */
150
+
151
+ static void ____nv_dummy_param_ref(void *param) __nv_dummy_param_ref(param)
152
+
153
+ #define __REGISTERFUNCNAME_CORE(X) __cudaRegisterLinkedBinary##X
154
+ #define __REGISTERFUNCNAME(X) __REGISTERFUNCNAME_CORE(X)
155
+
156
+ extern "C" {
157
+ void __REGISTERFUNCNAME( __NV_MODULE_ID ) ( void (*)(void **), void *, void *, void (*)(void *));
158
+ }
159
+
160
+ #define __TO_STRING_CORE(X) #X
161
+ #define __TO_STRING(X) __TO_STRING_CORE(X)
162
+
163
+ extern "C" {
164
+ #if defined(_WIN32)
165
+ #pragma data_seg("__nv_module_id")
166
+ static const __declspec(allocate("__nv_module_id")) unsigned char __module_id_str[] = __TO_STRING(__NV_MODULE_ID);
167
+ #pragma data_seg()
168
+ #elif defined(__APPLE__)
169
+ static const unsigned char __module_id_str[] __attribute__((section ("__NV_CUDA,__nv_module_id"))) = __TO_STRING(__NV_MODULE_ID);
170
+ #else
171
+ static const unsigned char __module_id_str[] __attribute__((section ("__nv_module_id"))) = __TO_STRING(__NV_MODULE_ID);
172
+ #endif
173
+
174
+ #undef __FATIDNAME_CORE
175
+ #undef __FATIDNAME
176
+ #define __FATIDNAME_CORE(X) __fatbinwrap##X
177
+ #define __FATIDNAME(X) __FATIDNAME_CORE(X)
178
+
179
+ #define ____cudaRegisterLinkedBinary(X) \
180
+ { __REGISTERFUNCNAME(__NV_MODULE_ID) (( void (*)(void **))(X), (void *)&__FATIDNAME(__NV_MODULE_ID), (void *)&__module_id_str, (void (*)(void *))&____nv_dummy_param_ref); }
181
+
182
+ }
183
+
184
+ extern "C" {
185
+ extern void** CUDARTAPI __cudaRegisterFatBinary(
186
+ void *fatCubin
187
+ );
188
+
189
+ extern void CUDARTAPI __cudaRegisterFatBinaryEnd(
190
+ void **fatCubinHandle
191
+ );
192
+
193
+ extern void CUDARTAPI __cudaUnregisterFatBinary(
194
+ void **fatCubinHandle
195
+ );
196
+
197
+ extern void CUDARTAPI __cudaRegisterVar(
198
+ void **fatCubinHandle,
199
+ char *hostVar,
200
+ char *deviceAddress,
201
+ const char *deviceName,
202
+ int ext,
203
+ size_t size,
204
+ int constant,
205
+ int global
206
+ );
207
+
208
+ extern void CUDARTAPI __cudaRegisterManagedVar(
209
+ void **fatCubinHandle,
210
+ void **hostVarPtrAddress,
211
+ char *deviceAddress,
212
+ const char *deviceName,
213
+ int ext,
214
+ size_t size,
215
+ int constant,
216
+ int global
217
+ );
218
+
219
+ extern char CUDARTAPI __cudaInitModule(
220
+ void **fatCubinHandle
221
+ );
222
+
223
+ extern void CUDARTAPI __cudaRegisterTexture(
224
+ void **fatCubinHandle,
225
+ const struct textureReference *hostVar,
226
+ const void **deviceAddress,
227
+ const char *deviceName,
228
+ int dim,
229
+ int norm,
230
+ int ext
231
+ );
232
+
233
+ extern void CUDARTAPI __cudaRegisterSurface(
234
+ void **fatCubinHandle,
235
+ const struct surfaceReference *hostVar,
236
+ const void **deviceAddress,
237
+ const char *deviceName,
238
+ int dim,
239
+ int ext
240
+ );
241
+
242
+ extern void CUDARTAPI __cudaRegisterFunction(
243
+ void **fatCubinHandle,
244
+ const char *hostFun,
245
+ char *deviceFun,
246
+ const char *deviceName,
247
+ int thread_limit,
248
+ uint3 *tid,
249
+ uint3 *bid,
250
+ dim3 *bDim,
251
+ dim3 *gDim,
252
+ int *wSize
253
+ );
254
+
255
+ #if defined(__APPLE__)
256
+ extern "C" int atexit(void (*)(void));
257
+
258
+ #elif defined(__GNUC__) && !defined(__ANDROID__) && !defined(__HORIZON__)
259
+ extern int atexit(void(*)(void)) throw();
260
+
261
+ #elif defined(__HORIZON__)
262
+
263
+ // __TEMP_WAR__ 200132570 HOS : Disable atexit call until it works
264
+ #define atexit(p)
265
+
266
+ #else /* __GNUC__ && !__ANDROID__ */
267
+ extern int __cdecl atexit(void(__cdecl *)(void));
268
+ #endif
269
+
270
+ }
271
+
272
+ static void **__cudaFatCubinHandle;
273
+
274
+ static void __cdecl __cudaUnregisterBinaryUtil(void)
275
+ {
276
+ ____nv_dummy_param_ref((void *)&__cudaFatCubinHandle);
277
+ __cudaUnregisterFatBinary(__cudaFatCubinHandle);
278
+ }
279
+
280
+ static char __nv_init_managed_rt_with_module(void **handle)
281
+ {
282
+ return __cudaInitModule(handle);
283
+ }
284
+
285
+ #include "common_functions.h"
286
+
287
+ #pragma pack()
288
+
289
+ #if defined(_WIN32)
290
+
291
+ #pragma warning(disable: 4099)
292
+
293
+ #if !defined(_WIN64)
294
+
295
+ #pragma warning(disable: 4408)
296
+
297
+ #endif /* !_WIN64 */
298
+
299
+ #endif /* _WIN32 */
300
+
301
+ #endif /* !__CUDA_INTERNAL_COMPILATION__ */
302
+
303
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_RUNTIME_H__)
304
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
305
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_RUNTIME_H__
306
+ #endif
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/crt/math_functions.hpp ADDED
The diff for this file is too large to render. See raw diff
 
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/crt/sm_70_rt.hpp ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2017-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
51
+ #if defined(_MSC_VER)
52
+ #pragma message("crt/sm_70_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
53
+ #else
54
+ #warning "crt/sm_70_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
55
+ #endif
56
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
57
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_HPP__
58
+ #endif
59
+
60
+ #if !defined(__SM_70_RT_HPP__)
61
+ #define __SM_70_RT_HPP__
62
+
63
+ #if defined(__CUDACC_RTC__)
64
+ #define __SM_70_RT_DECL__ __host__ __device__
65
+ #else /* !__CUDACC_RTC__ */
66
+ #define __SM_70_RT_DECL__ static __device__ __inline__
67
+ #endif /* __CUDACC_RTC__ */
68
+
69
+ #if defined(__cplusplus) && defined(__CUDACC__)
70
+
71
+ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
72
+
73
+ /*******************************************************************************
74
+ * *
75
+ * *
76
+ * *
77
+ *******************************************************************************/
78
+
79
+ #include "builtin_types.h"
80
+ #include "device_types.h"
81
+ #include "host_defines.h"
82
+
83
+ /*******************************************************************************
84
+ * *
85
+ * Below are implementations of SM-7.0 builtin functions which are included as *
86
+ * source (instead of being built in to the compiler) *
87
+ * *
88
+ *******************************************************************************/
89
+
90
+ //
91
+ // __match_any_sync
92
+ //
93
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned value) {
94
+ return __match32_any_sync(mask, value);
95
+ }
96
+
97
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, int value) {
98
+ return __match32_any_sync(mask, value);
99
+ }
100
+
101
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned long value) {
102
+ return (sizeof(long) == sizeof(long long)) ?
103
+ __match64_any_sync(mask, (unsigned long long)value):
104
+ __match32_any_sync(mask, (unsigned)value);
105
+ }
106
+
107
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, long value) {
108
+ return (sizeof(long) == sizeof(long long)) ?
109
+ __match64_any_sync(mask, (unsigned long long)value):
110
+ __match32_any_sync(mask, (unsigned)value);
111
+ }
112
+
113
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned long long value) {
114
+ return __match64_any_sync(mask, value);
115
+ }
116
+
117
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, long long value) {
118
+ return __match64_any_sync(mask, value);
119
+ }
120
+
121
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, float value) {
122
+ return __match32_any_sync(mask, __float_as_uint(value));
123
+ }
124
+
125
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, double value) {
126
+ return __match64_any_sync(mask, __double_as_longlong(value));
127
+ }
128
+
129
+ //
130
+ // __match_all_sync
131
+ //
132
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned value, int *pred) {
133
+ return __match32_all_sync(mask, value, pred);
134
+ }
135
+
136
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, int value, int *pred) {
137
+ return __match32_all_sync(mask, value, pred);
138
+ }
139
+
140
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned long value, int *pred) {
141
+ return (sizeof(long) == sizeof(long long)) ?
142
+ __match64_all_sync(mask, (unsigned long long)value, pred):
143
+ __match32_all_sync(mask, (unsigned)value, pred);
144
+ }
145
+
146
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, long value, int *pred) {
147
+ return (sizeof(long) == sizeof(long long)) ?
148
+ __match64_all_sync(mask, (unsigned long long)value, pred):
149
+ __match32_all_sync(mask, (unsigned)value, pred);
150
+ }
151
+
152
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned long long value, int *pred) {
153
+ return __match64_all_sync(mask, value, pred);
154
+ }
155
+
156
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, long long value, int *pred) {
157
+ return __match64_all_sync(mask, value, pred);
158
+ }
159
+
160
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, float value, int *pred) {
161
+ return __match32_all_sync(mask, __float_as_uint(value), pred);
162
+ }
163
+
164
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, double value, int *pred) {
165
+ return __match64_all_sync(mask, __double_as_longlong(value), pred);
166
+ }
167
+
168
+ __SM_70_RT_DECL__ void __nanosleep(unsigned int ns) {
169
+ asm volatile("nanosleep.u32 %0;" :: "r"(ns));
170
+ }
171
+
172
+
173
+ extern "C" __device__ __device_builtin__
174
+ unsigned short __usAtomicCAS(unsigned short *, unsigned short, unsigned short);
175
+
176
+ __SM_70_RT_DECL__ unsigned short int atomicCAS(unsigned short int *address, unsigned short int compare, unsigned short int val) {
177
+ return __usAtomicCAS(address, compare, val);
178
+ }
179
+
180
+
181
+ #endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 700 */
182
+
183
+ #endif /* __cplusplus && __CUDACC__ */
184
+
185
+ #undef __SM_70_RT_DECL__
186
+
187
+ #endif /* !__SM_70_RT_HPP__ */
188
+
189
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_HPP__)
190
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
191
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_HPP__
192
+ #endif
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cudaTypedefs.h ADDED
The diff for this file is too large to render. See raw diff
 
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cuda_awbarrier.h ADDED
@@ -0,0 +1,280 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CUDA_AWBARRIER_H_
51
+ # define _CUDA_AWBARRIER_H_
52
+
53
+ # include "cuda_awbarrier_primitives.h"
54
+
55
+ # if !defined(_CUDA_AWBARRIER_SM_TARGET)
56
+ # error This file requires compute capability 7.0 or greater.
57
+ # endif
58
+
59
+ # if !defined(_CUDA_AWBARRIER_CPLUSPLUS_11_OR_LATER)
60
+ # error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
61
+ -std=c++11 compiler option.
62
+ # endif
63
+
64
+ _CUDA_AWBARRIER_BEGIN_NAMESPACE
65
+
66
+ class awbarrier {
67
+ public:
68
+ class arrival_token {
69
+ public:
70
+ arrival_token() = default;
71
+ ~arrival_token() = default;
72
+ _CUDA_AWBARRIER_QUALIFIER uint32_t pending_count() const;
73
+ private:
74
+ _CUDA_AWBARRIER_QUALIFIER arrival_token(uint64_t token);
75
+ uint64_t token;
76
+ friend awbarrier;
77
+ };
78
+ awbarrier() = default;
79
+ awbarrier(const awbarrier&) = delete;
80
+ awbarrier& operator=(const awbarrier&) = delete;
81
+ ~awbarrier() = default;
82
+
83
+ _CUDA_AWBARRIER_QUALIFIER arrival_token arrive();
84
+ _CUDA_AWBARRIER_QUALIFIER arrival_token arrive_and_drop();
85
+ _CUDA_AWBARRIER_QUALIFIER bool timed_wait(arrival_token token, uint32_t hint_cycles);
86
+ _CUDA_AWBARRIER_QUALIFIER bool timed_wait_parity(bool phase, uint32_t hint_cycles);
87
+ _CUDA_AWBARRIER_QUALIFIER void wait(arrival_token token);
88
+ _CUDA_AWBARRIER_QUALIFIER void arrive_and_wait();
89
+ _CUDA_AWBARRIER_QUALIFIER bool try_wait(arrival_token token, uint32_t maxSleepNanosec);
90
+ _CUDA_AWBARRIER_QUALIFIER bool try_wait_parity(bool phase, uint32_t maxSleepNanosec);
91
+ _CUDA_AWBARRIER_STATIC_QUALIFIER __host__ constexpr uint32_t max();
92
+
93
+ private:
94
+ uint64_t barrier;
95
+ friend _CUDA_AWBARRIER_QUALIFIER void init(awbarrier* barrier, uint32_t expected_count);
96
+ friend _CUDA_AWBARRIER_QUALIFIER void inval(awbarrier* barrier);
97
+ friend class pipeline;
98
+ };
99
+
100
+ _CUDA_AWBARRIER_QUALIFIER
101
+ uint32_t awbarrier::arrival_token::pending_count() const
102
+ {
103
+ const uint32_t pending_count = _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_token_pending_count(this->token);
104
+ #if (__CUDA_ARCH__ >= 900)
105
+ return pending_count;
106
+ #else
107
+ return (pending_count >> 15);
108
+ #endif
109
+ }
110
+
111
+ _CUDA_AWBARRIER_QUALIFIER
112
+ awbarrier::arrival_token::arrival_token(uint64_t token)
113
+ : token(token)
114
+ {
115
+ }
116
+
117
+ _CUDA_AWBARRIER_QUALIFIER
118
+ void init(awbarrier* barrier, uint32_t expected_count)
119
+ {
120
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
121
+ _CUDA_AWBARRIER_ASSERT(expected_count > 0 && expected_count <= _CUDA_AWBARRIER_MAX_COUNT);
122
+
123
+ #if (__CUDA_ARCH__ >= 900)
124
+ const uint32_t init_count = expected_count;
125
+ #else
126
+ const uint32_t init_count = (expected_count << 15) + expected_count;
127
+ #endif
128
+
129
+ _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_init(&barrier->barrier, init_count);
130
+ }
131
+
132
+ _CUDA_AWBARRIER_QUALIFIER
133
+ void inval(awbarrier* barrier)
134
+ {
135
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
136
+
137
+ _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_inval(&barrier->barrier);
138
+ }
139
+
140
+ _CUDA_AWBARRIER_QUALIFIER
141
+ awbarrier::arrival_token awbarrier::arrive()
142
+ {
143
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
144
+
145
+ #if (__CUDA_ARCH__ < 900)
146
+ const uint32_t arrive_count = 1 << 15;
147
+ const uint64_t token = _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop_no_complete<false>(&this->barrier, arrive_count);
148
+ (void)
149
+ #else
150
+ const uint64_t token =
151
+ #endif
152
+ _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop<false>(&this->barrier);
153
+
154
+ return arrival_token(token);
155
+ }
156
+
157
+ _CUDA_AWBARRIER_QUALIFIER
158
+ awbarrier::arrival_token awbarrier::arrive_and_drop()
159
+ {
160
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
161
+
162
+ #if (__CUDA_ARCH__ < 900)
163
+ const uint32_t arrive_count = 1 << 15;
164
+ const uint64_t token = _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop_no_complete<true>(&this->barrier, arrive_count);
165
+ (void)
166
+ #else
167
+ const uint64_t token =
168
+ #endif
169
+ _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop<true>(&this->barrier);
170
+
171
+ return arrival_token(token);
172
+ }
173
+
174
+ _CUDA_AWBARRIER_QUALIFIER
175
+ bool awbarrier::timed_wait(arrival_token token, uint32_t hint_cycles)
176
+ {
177
+ constexpr uint64_t max_busy_wait_cycles = 1024;
178
+ constexpr uint32_t max_sleep_ns = 1 << 20;
179
+
180
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
181
+
182
+ if (_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait(&this->barrier, token.token)) {
183
+ return true;
184
+ }
185
+
186
+ uint64_t start_cycles = clock64();
187
+ uint64_t elapsed_cycles = 0;
188
+ uint32_t sleep_ns = 32;
189
+ while (elapsed_cycles < hint_cycles) {
190
+ if (_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait(&this->barrier, token.token)) {
191
+ return true;
192
+ }
193
+
194
+ if (elapsed_cycles > max_busy_wait_cycles) {
195
+ __nanosleep(sleep_ns);
196
+ if (sleep_ns < max_sleep_ns) {
197
+ sleep_ns *= 2;
198
+ }
199
+ }
200
+
201
+ elapsed_cycles = clock64() - start_cycles;
202
+ }
203
+
204
+ return false;
205
+ }
206
+
207
+ _CUDA_AWBARRIER_QUALIFIER
208
+ bool awbarrier::timed_wait_parity(bool phase, uint32_t hint_cycles)
209
+ {
210
+ constexpr uint64_t max_busy_wait_cycles = 1024;
211
+ constexpr uint32_t max_sleep_ns = 1 << 20;
212
+
213
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
214
+
215
+ if (_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait_parity(&this->barrier, phase)) {
216
+ return true;
217
+ }
218
+
219
+ uint64_t start_cycles = clock64();
220
+ uint64_t elapsed_cycles = 0;
221
+ uint32_t sleep_ns = 32;
222
+ while (elapsed_cycles < hint_cycles) {
223
+ if (_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait_parity(&this->barrier, phase)) {
224
+ return true;
225
+ }
226
+
227
+ if (elapsed_cycles > max_busy_wait_cycles) {
228
+ __nanosleep(sleep_ns);
229
+ if (sleep_ns < max_sleep_ns) {
230
+ sleep_ns *= 2;
231
+ }
232
+ }
233
+
234
+ elapsed_cycles = clock64() - start_cycles;
235
+ }
236
+
237
+ return false;
238
+ }
239
+
240
+ _CUDA_AWBARRIER_QUALIFIER
241
+ bool awbarrier::try_wait(arrival_token token, uint32_t maxSleepNanosec)
242
+ {
243
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
244
+
245
+ return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_try_wait(&this->barrier, token.token, maxSleepNanosec);
246
+ }
247
+
248
+ _CUDA_AWBARRIER_QUALIFIER
249
+ bool awbarrier::try_wait_parity(bool phase, uint32_t maxSleepNanosec)
250
+ {
251
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
252
+
253
+ return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_try_wait_parity(&this->barrier, phase, maxSleepNanosec);
254
+ }
255
+
256
+ _CUDA_AWBARRIER_QUALIFIER
257
+ void awbarrier::wait(arrival_token token)
258
+ {
259
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
260
+
261
+ while (!timed_wait(token, ~0u));
262
+ }
263
+
264
+ _CUDA_AWBARRIER_QUALIFIER
265
+ void awbarrier::arrive_and_wait()
266
+ {
267
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
268
+
269
+ this->wait(this->arrive());
270
+ }
271
+
272
+ _CUDA_AWBARRIER_QUALIFIER __host__
273
+ constexpr uint32_t awbarrier::max()
274
+ {
275
+ return _CUDA_AWBARRIER_MAX_COUNT;
276
+ }
277
+
278
+ _CUDA_AWBARRIER_END_NAMESPACE
279
+
280
+ #endif /* !_CUDA_AWBARRIER_H_ */
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cuda_pipeline_primitives.h ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CUDA_PIPELINE_PRIMITIVES_H_
51
+ # define _CUDA_PIPELINE_PRIMITIVES_H_
52
+
53
+ # include "cuda_pipeline_helpers.h"
54
+
55
+ _CUDA_PIPELINE_STATIC_QUALIFIER
56
+ void __pipeline_memcpy_async(void* __restrict__ dst_shared, const void* __restrict__ src_global, size_t size_and_align,
57
+ size_t zfill = 0)
58
+ {
59
+ _CUDA_PIPELINE_ASSERT(size_and_align == 4 || size_and_align == 8 || size_and_align == 16);
60
+ _CUDA_PIPELINE_ASSERT(zfill <= size_and_align);
61
+ _CUDA_PIPELINE_ASSERT(__isShared(dst_shared));
62
+ _CUDA_PIPELINE_ASSERT(__isGlobal(src_global));
63
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst_shared) & (size_and_align - 1)));
64
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src_global) & (size_and_align - 1)));
65
+
66
+ switch (size_and_align) {
67
+ case 16:
68
+ switch (zfill) {
69
+ case 0: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 16>(dst_shared, src_global); return;
70
+ case 1: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 15>(dst_shared, src_global); return;
71
+ case 2: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 14>(dst_shared, src_global); return;
72
+ case 3: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 13>(dst_shared, src_global); return;
73
+ case 4: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 12>(dst_shared, src_global); return;
74
+ case 5: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 11>(dst_shared, src_global); return;
75
+ case 6: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 10>(dst_shared, src_global); return;
76
+ case 7: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 9>(dst_shared, src_global); return;
77
+ case 8: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 8>(dst_shared, src_global); return;
78
+ case 9: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 7>(dst_shared, src_global); return;
79
+ case 10: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 6>(dst_shared, src_global); return;
80
+ case 11: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 5>(dst_shared, src_global); return;
81
+ case 12: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 4>(dst_shared, src_global); return;
82
+ case 13: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 3>(dst_shared, src_global); return;
83
+ case 14: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 2>(dst_shared, src_global); return;
84
+ case 15: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 1>(dst_shared, src_global); return;
85
+ case 16: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 0>(dst_shared, src_global); return;
86
+ default: _CUDA_PIPELINE_ABORT(); return;
87
+ }
88
+ case 8:
89
+ switch (zfill) {
90
+ case 0: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 8>(dst_shared, src_global); return;
91
+ case 1: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 7>(dst_shared, src_global); return;
92
+ case 2: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 6>(dst_shared, src_global); return;
93
+ case 3: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 5>(dst_shared, src_global); return;
94
+ case 4: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 4>(dst_shared, src_global); return;
95
+ case 5: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 3>(dst_shared, src_global); return;
96
+ case 6: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 2>(dst_shared, src_global); return;
97
+ case 7: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 1>(dst_shared, src_global); return;
98
+ case 8: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 0>(dst_shared, src_global); return;
99
+ default: _CUDA_PIPELINE_ABORT(); return;
100
+ }
101
+ case 4:
102
+ switch (zfill) {
103
+ case 0: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 4>(dst_shared, src_global); return;
104
+ case 1: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 3>(dst_shared, src_global); return;
105
+ case 2: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 2>(dst_shared, src_global); return;
106
+ case 3: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 1>(dst_shared, src_global); return;
107
+ case 4: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 0>(dst_shared, src_global); return;
108
+ default: _CUDA_PIPELINE_ABORT(); return;
109
+ }
110
+ default:
111
+ _CUDA_PIPELINE_ABORT();
112
+ return;
113
+ }
114
+ }
115
+
116
+ _CUDA_PIPELINE_STATIC_QUALIFIER
117
+ void __pipeline_commit()
118
+ {
119
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_commit();
120
+ }
121
+
122
+ _CUDA_PIPELINE_STATIC_QUALIFIER
123
+ void __pipeline_wait_prior(size_t prior)
124
+ {
125
+ switch (prior) {
126
+ case 0 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<0>(); return;
127
+ case 1 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<1>(); return;
128
+ case 2 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<2>(); return;
129
+ case 3 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<3>(); return;
130
+ case 4 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<4>(); return;
131
+ case 5 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<5>(); return;
132
+ case 6 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<6>(); return;
133
+ case 7 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<7>(); return;
134
+ default : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<8>(); return;
135
+ }
136
+ }
137
+
138
+ # if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
139
+ # include "cuda_awbarrier_primitives.h"
140
+
141
+ _CUDA_PIPELINE_STATIC_QUALIFIER
142
+ void __pipeline_arrive_on(__mbarrier_t* barrier)
143
+ {
144
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_arrive_on(barrier);
145
+ }
146
+ # endif
147
+
148
+ #endif /* !_CUDA_PIPELINE_PRIMITIVES_H_ */
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cuda_texture_types.h ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_TEXTURE_TYPES_H__)
51
+ #define __CUDA_TEXTURE_TYPES_H__
52
+
53
+ #if defined(__cplusplus) && defined(__CUDACC__)
54
+
55
+ /*******************************************************************************
56
+ * *
57
+ * *
58
+ * *
59
+ *******************************************************************************/
60
+
61
+ #if !defined(__CUDACC_RTC__)
62
+ #define EXCLUDE_FROM_RTC
63
+ #include "channel_descriptor.h"
64
+ #undef EXCLUDE_FROM_RTC
65
+ #endif /* !__CUDACC_RTC__ */
66
+ #include "cuda_runtime_api.h"
67
+
68
+ /*******************************************************************************
69
+ * *
70
+ * *
71
+ * *
72
+ *******************************************************************************/
73
+
74
+ #endif /* __cplusplus && __CUDACC__ */
75
+
76
+ #endif /* !__CUDA_TEXTURE_TYPES_H__ */
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cupti_activity.h ADDED
The diff for this file is too large to render. See raw diff
 
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cupti_driver_cbid.h ADDED
@@ -0,0 +1,767 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ // *************************************************************************
3
+ // Definitions of indices for API functions, unique across entire API
4
+ // *************************************************************************
5
+
6
+ // This file is generated. Any changes you make will be lost during the next clean build.
7
+ // CUDA public interface, for type definitions and cu* function prototypes
8
+
9
+ typedef enum CUpti_driver_api_trace_cbid_enum {
10
+ CUPTI_DRIVER_TRACE_CBID_INVALID = 0,
11
+ CUPTI_DRIVER_TRACE_CBID_cuInit = 1,
12
+ CUPTI_DRIVER_TRACE_CBID_cuDriverGetVersion = 2,
13
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGet = 3,
14
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetCount = 4,
15
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetName = 5,
16
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceComputeCapability = 6,
17
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceTotalMem = 7,
18
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetProperties = 8,
19
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetAttribute = 9,
20
+ CUPTI_DRIVER_TRACE_CBID_cuCtxCreate = 10,
21
+ CUPTI_DRIVER_TRACE_CBID_cuCtxDestroy = 11,
22
+ CUPTI_DRIVER_TRACE_CBID_cuCtxAttach = 12,
23
+ CUPTI_DRIVER_TRACE_CBID_cuCtxDetach = 13,
24
+ CUPTI_DRIVER_TRACE_CBID_cuCtxPushCurrent = 14,
25
+ CUPTI_DRIVER_TRACE_CBID_cuCtxPopCurrent = 15,
26
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetDevice = 16,
27
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSynchronize = 17,
28
+ CUPTI_DRIVER_TRACE_CBID_cuModuleLoad = 18,
29
+ CUPTI_DRIVER_TRACE_CBID_cuModuleLoadData = 19,
30
+ CUPTI_DRIVER_TRACE_CBID_cuModuleLoadDataEx = 20,
31
+ CUPTI_DRIVER_TRACE_CBID_cuModuleLoadFatBinary = 21,
32
+ CUPTI_DRIVER_TRACE_CBID_cuModuleUnload = 22,
33
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetFunction = 23,
34
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetGlobal = 24,
35
+ CUPTI_DRIVER_TRACE_CBID_cu64ModuleGetGlobal = 25,
36
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetTexRef = 26,
37
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetInfo = 27,
38
+ CUPTI_DRIVER_TRACE_CBID_cu64MemGetInfo = 28,
39
+ CUPTI_DRIVER_TRACE_CBID_cuMemAlloc = 29,
40
+ CUPTI_DRIVER_TRACE_CBID_cu64MemAlloc = 30,
41
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocPitch = 31,
42
+ CUPTI_DRIVER_TRACE_CBID_cu64MemAllocPitch = 32,
43
+ CUPTI_DRIVER_TRACE_CBID_cuMemFree = 33,
44
+ CUPTI_DRIVER_TRACE_CBID_cu64MemFree = 34,
45
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAddressRange = 35,
46
+ CUPTI_DRIVER_TRACE_CBID_cu64MemGetAddressRange = 36,
47
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocHost = 37,
48
+ CUPTI_DRIVER_TRACE_CBID_cuMemFreeHost = 38,
49
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostAlloc = 39,
50
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostGetDevicePointer = 40,
51
+ CUPTI_DRIVER_TRACE_CBID_cu64MemHostGetDevicePointer = 41,
52
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostGetFlags = 42,
53
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD = 43,
54
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyHtoD = 44,
55
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH = 45,
56
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoH = 46,
57
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD = 47,
58
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoD = 48,
59
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA = 49,
60
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoA = 50,
61
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD = 51,
62
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyAtoD = 52,
63
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA = 53,
64
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH = 54,
65
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA = 55,
66
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D = 56,
67
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned = 57,
68
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D = 58,
69
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy3D = 59,
70
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync = 60,
71
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyHtoDAsync = 61,
72
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync = 62,
73
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoHAsync = 63,
74
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync = 64,
75
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoDAsync = 65,
76
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync = 66,
77
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync = 67,
78
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync = 68,
79
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync = 69,
80
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy3DAsync = 70,
81
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8 = 71,
82
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD8 = 72,
83
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16 = 73,
84
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD16 = 74,
85
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32 = 75,
86
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD32 = 76,
87
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8 = 77,
88
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D8 = 78,
89
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16 = 79,
90
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D16 = 80,
91
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32 = 81,
92
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D32 = 82,
93
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetBlockShape = 83,
94
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetSharedSize = 84,
95
+ CUPTI_DRIVER_TRACE_CBID_cuFuncGetAttribute = 85,
96
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetCacheConfig = 86,
97
+ CUPTI_DRIVER_TRACE_CBID_cuArrayCreate = 87,
98
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetDescriptor = 88,
99
+ CUPTI_DRIVER_TRACE_CBID_cuArrayDestroy = 89,
100
+ CUPTI_DRIVER_TRACE_CBID_cuArray3DCreate = 90,
101
+ CUPTI_DRIVER_TRACE_CBID_cuArray3DGetDescriptor = 91,
102
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefCreate = 92,
103
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefDestroy = 93,
104
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetArray = 94,
105
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress = 95,
106
+ CUPTI_DRIVER_TRACE_CBID_cu64TexRefSetAddress = 96,
107
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D = 97,
108
+ CUPTI_DRIVER_TRACE_CBID_cu64TexRefSetAddress2D = 98,
109
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFormat = 99,
110
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddressMode = 100,
111
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFilterMode = 101,
112
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFlags = 102,
113
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddress = 103,
114
+ CUPTI_DRIVER_TRACE_CBID_cu64TexRefGetAddress = 104,
115
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetArray = 105,
116
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddressMode = 106,
117
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFilterMode = 107,
118
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFormat = 108,
119
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFlags = 109,
120
+ CUPTI_DRIVER_TRACE_CBID_cuParamSetSize = 110,
121
+ CUPTI_DRIVER_TRACE_CBID_cuParamSeti = 111,
122
+ CUPTI_DRIVER_TRACE_CBID_cuParamSetf = 112,
123
+ CUPTI_DRIVER_TRACE_CBID_cuParamSetv = 113,
124
+ CUPTI_DRIVER_TRACE_CBID_cuParamSetTexRef = 114,
125
+ CUPTI_DRIVER_TRACE_CBID_cuLaunch = 115,
126
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchGrid = 116,
127
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchGridAsync = 117,
128
+ CUPTI_DRIVER_TRACE_CBID_cuEventCreate = 118,
129
+ CUPTI_DRIVER_TRACE_CBID_cuEventRecord = 119,
130
+ CUPTI_DRIVER_TRACE_CBID_cuEventQuery = 120,
131
+ CUPTI_DRIVER_TRACE_CBID_cuEventSynchronize = 121,
132
+ CUPTI_DRIVER_TRACE_CBID_cuEventDestroy = 122,
133
+ CUPTI_DRIVER_TRACE_CBID_cuEventElapsedTime = 123,
134
+ CUPTI_DRIVER_TRACE_CBID_cuStreamCreate = 124,
135
+ CUPTI_DRIVER_TRACE_CBID_cuStreamQuery = 125,
136
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSynchronize = 126,
137
+ CUPTI_DRIVER_TRACE_CBID_cuStreamDestroy = 127,
138
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsUnregisterResource = 128,
139
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsSubResourceGetMappedArray = 129,
140
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedPointer = 130,
141
+ CUPTI_DRIVER_TRACE_CBID_cu64GraphicsResourceGetMappedPointer = 131,
142
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceSetMapFlags = 132,
143
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsMapResources = 133,
144
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsUnmapResources = 134,
145
+ CUPTI_DRIVER_TRACE_CBID_cuGetExportTable = 135,
146
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetLimit = 136,
147
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetLimit = 137,
148
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDevice = 138,
149
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreate = 139,
150
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D10RegisterResource = 140,
151
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10RegisterResource = 141,
152
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10UnregisterResource = 142,
153
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10MapResources = 143,
154
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10UnmapResources = 144,
155
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceSetMapFlags = 145,
156
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedArray = 146,
157
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPointer = 147,
158
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedSize = 148,
159
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPitch = 149,
160
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetSurfaceDimensions = 150,
161
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDevice = 151,
162
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreate = 152,
163
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D11RegisterResource = 153,
164
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDevice = 154,
165
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreate = 155,
166
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D9RegisterResource = 156,
167
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDirect3DDevice = 157,
168
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9RegisterResource = 158,
169
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9UnregisterResource = 159,
170
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9MapResources = 160,
171
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9UnmapResources = 161,
172
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceSetMapFlags = 162,
173
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetSurfaceDimensions = 163,
174
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedArray = 164,
175
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPointer = 165,
176
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedSize = 166,
177
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPitch = 167,
178
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9Begin = 168,
179
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9End = 169,
180
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9RegisterVertexBuffer = 170,
181
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9MapVertexBuffer = 171,
182
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9UnmapVertexBuffer = 172,
183
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9UnregisterVertexBuffer = 173,
184
+ CUPTI_DRIVER_TRACE_CBID_cuGLCtxCreate = 174,
185
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsGLRegisterBuffer = 175,
186
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsGLRegisterImage = 176,
187
+ CUPTI_DRIVER_TRACE_CBID_cuWGLGetDevice = 177,
188
+ CUPTI_DRIVER_TRACE_CBID_cuGLInit = 178,
189
+ CUPTI_DRIVER_TRACE_CBID_cuGLRegisterBufferObject = 179,
190
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject = 180,
191
+ CUPTI_DRIVER_TRACE_CBID_cuGLUnmapBufferObject = 181,
192
+ CUPTI_DRIVER_TRACE_CBID_cuGLUnregisterBufferObject = 182,
193
+ CUPTI_DRIVER_TRACE_CBID_cuGLSetBufferObjectMapFlags = 183,
194
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObjectAsync = 184,
195
+ CUPTI_DRIVER_TRACE_CBID_cuGLUnmapBufferObjectAsync = 185,
196
+ CUPTI_DRIVER_TRACE_CBID_cuVDPAUGetDevice = 186,
197
+ CUPTI_DRIVER_TRACE_CBID_cuVDPAUCtxCreate = 187,
198
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsVDPAURegisterVideoSurface = 188,
199
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsVDPAURegisterOutputSurface = 189,
200
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetSurfRef = 190,
201
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefCreate = 191,
202
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefDestroy = 192,
203
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefSetFormat = 193,
204
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefSetArray = 194,
205
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefGetFormat = 195,
206
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefGetArray = 196,
207
+ CUPTI_DRIVER_TRACE_CBID_cu64DeviceTotalMem = 197,
208
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedPointer = 198,
209
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedSize = 199,
210
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedPitch = 200,
211
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetSurfaceDimensions = 201,
212
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetSurfaceDimensions = 202,
213
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedPointer = 203,
214
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedSize = 204,
215
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedPitch = 205,
216
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9MapVertexBuffer = 206,
217
+ CUPTI_DRIVER_TRACE_CBID_cu64GLMapBufferObject = 207,
218
+ CUPTI_DRIVER_TRACE_CBID_cu64GLMapBufferObjectAsync = 208,
219
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDevices = 209,
220
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreateOnDevice = 210,
221
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDevices = 211,
222
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreateOnDevice = 212,
223
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDevices = 213,
224
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreateOnDevice = 214,
225
+ CUPTI_DRIVER_TRACE_CBID_cu64MemHostAlloc = 215,
226
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8Async = 216,
227
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD8Async = 217,
228
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16Async = 218,
229
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD16Async = 219,
230
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32Async = 220,
231
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD32Async = 221,
232
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8Async = 222,
233
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D8Async = 223,
234
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16Async = 224,
235
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D16Async = 225,
236
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32Async = 226,
237
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D32Async = 227,
238
+ CUPTI_DRIVER_TRACE_CBID_cu64ArrayCreate = 228,
239
+ CUPTI_DRIVER_TRACE_CBID_cu64ArrayGetDescriptor = 229,
240
+ CUPTI_DRIVER_TRACE_CBID_cu64Array3DCreate = 230,
241
+ CUPTI_DRIVER_TRACE_CBID_cu64Array3DGetDescriptor = 231,
242
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2D = 232,
243
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2DUnaligned = 233,
244
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2DAsync = 234,
245
+ CUPTI_DRIVER_TRACE_CBID_cuCtxCreate_v2 = 235,
246
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreate_v2 = 236,
247
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreate_v2 = 237,
248
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreate_v2 = 238,
249
+ CUPTI_DRIVER_TRACE_CBID_cuGLCtxCreate_v2 = 239,
250
+ CUPTI_DRIVER_TRACE_CBID_cuVDPAUCtxCreate_v2 = 240,
251
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetGlobal_v2 = 241,
252
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetInfo_v2 = 242,
253
+ CUPTI_DRIVER_TRACE_CBID_cuMemAlloc_v2 = 243,
254
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocPitch_v2 = 244,
255
+ CUPTI_DRIVER_TRACE_CBID_cuMemFree_v2 = 245,
256
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAddressRange_v2 = 246,
257
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostGetDevicePointer_v2 = 247,
258
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy_v2 = 248,
259
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8_v2 = 249,
260
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16_v2 = 250,
261
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32_v2 = 251,
262
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8_v2 = 252,
263
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16_v2 = 253,
264
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32_v2 = 254,
265
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress_v2 = 255,
266
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D_v2 = 256,
267
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddress_v2 = 257,
268
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedPointer_v2 = 258,
269
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceTotalMem_v2 = 259,
270
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPointer_v2 = 260,
271
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedSize_v2 = 261,
272
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPitch_v2 = 262,
273
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetSurfaceDimensions_v2 = 263,
274
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetSurfaceDimensions_v2 = 264,
275
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPointer_v2 = 265,
276
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedSize_v2 = 266,
277
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPitch_v2 = 267,
278
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9MapVertexBuffer_v2 = 268,
279
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject_v2 = 269,
280
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObjectAsync_v2 = 270,
281
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostAlloc_v2 = 271,
282
+ CUPTI_DRIVER_TRACE_CBID_cuArrayCreate_v2 = 272,
283
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetDescriptor_v2 = 273,
284
+ CUPTI_DRIVER_TRACE_CBID_cuArray3DCreate_v2 = 274,
285
+ CUPTI_DRIVER_TRACE_CBID_cuArray3DGetDescriptor_v2 = 275,
286
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD_v2 = 276,
287
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync_v2 = 277,
288
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH_v2 = 278,
289
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync_v2 = 279,
290
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD_v2 = 280,
291
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync_v2 = 281,
292
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH_v2 = 282,
293
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync_v2 = 283,
294
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD_v2 = 284,
295
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA_v2 = 285,
296
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA_v2 = 286,
297
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D_v2 = 287,
298
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned_v2 = 288,
299
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync_v2 = 289,
300
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D_v2 = 290,
301
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync_v2 = 291,
302
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA_v2 = 292,
303
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync_v2 = 293,
304
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocHost_v2 = 294,
305
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitEvent = 295,
306
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetApiVersion = 296,
307
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDirect3DDevice = 297,
308
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDirect3DDevice = 298,
309
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetCacheConfig = 299,
310
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetCacheConfig = 300,
311
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostRegister = 301,
312
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostUnregister = 302,
313
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetCurrent = 303,
314
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetCurrent = 304,
315
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy = 305,
316
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAsync = 306,
317
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel = 307,
318
+ CUPTI_DRIVER_TRACE_CBID_cuProfilerStart = 308,
319
+ CUPTI_DRIVER_TRACE_CBID_cuProfilerStop = 309,
320
+ CUPTI_DRIVER_TRACE_CBID_cuPointerGetAttribute = 310,
321
+ CUPTI_DRIVER_TRACE_CBID_cuProfilerInitialize = 311,
322
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceCanAccessPeer = 312,
323
+ CUPTI_DRIVER_TRACE_CBID_cuCtxEnablePeerAccess = 313,
324
+ CUPTI_DRIVER_TRACE_CBID_cuCtxDisablePeerAccess = 314,
325
+ CUPTI_DRIVER_TRACE_CBID_cuMemPeerRegister = 315,
326
+ CUPTI_DRIVER_TRACE_CBID_cuMemPeerUnregister = 316,
327
+ CUPTI_DRIVER_TRACE_CBID_cuMemPeerGetDevicePointer = 317,
328
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeer = 318,
329
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeerAsync = 319,
330
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeer = 320,
331
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeerAsync = 321,
332
+ CUPTI_DRIVER_TRACE_CBID_cuCtxDestroy_v2 = 322,
333
+ CUPTI_DRIVER_TRACE_CBID_cuCtxPushCurrent_v2 = 323,
334
+ CUPTI_DRIVER_TRACE_CBID_cuCtxPopCurrent_v2 = 324,
335
+ CUPTI_DRIVER_TRACE_CBID_cuEventDestroy_v2 = 325,
336
+ CUPTI_DRIVER_TRACE_CBID_cuStreamDestroy_v2 = 326,
337
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D_v3 = 327,
338
+ CUPTI_DRIVER_TRACE_CBID_cuIpcGetMemHandle = 328,
339
+ CUPTI_DRIVER_TRACE_CBID_cuIpcOpenMemHandle = 329,
340
+ CUPTI_DRIVER_TRACE_CBID_cuIpcCloseMemHandle = 330,
341
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetByPCIBusId = 331,
342
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetPCIBusId = 332,
343
+ CUPTI_DRIVER_TRACE_CBID_cuGLGetDevices = 333,
344
+ CUPTI_DRIVER_TRACE_CBID_cuIpcGetEventHandle = 334,
345
+ CUPTI_DRIVER_TRACE_CBID_cuIpcOpenEventHandle = 335,
346
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetSharedMemConfig = 336,
347
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetSharedMemConfig = 337,
348
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetSharedMemConfig = 338,
349
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectCreate = 339,
350
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectDestroy = 340,
351
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetResourceDesc = 341,
352
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetTextureDesc = 342,
353
+ CUPTI_DRIVER_TRACE_CBID_cuSurfObjectCreate = 343,
354
+ CUPTI_DRIVER_TRACE_CBID_cuSurfObjectDestroy = 344,
355
+ CUPTI_DRIVER_TRACE_CBID_cuSurfObjectGetResourceDesc = 345,
356
+ CUPTI_DRIVER_TRACE_CBID_cuStreamAddCallback = 346,
357
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayCreate = 347,
358
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayGetLevel = 348,
359
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayDestroy = 349,
360
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmappedArray = 350,
361
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapFilterMode = 351,
362
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapLevelBias = 352,
363
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapLevelClamp = 353,
364
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMaxAnisotropy = 354,
365
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmappedArray = 355,
366
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapFilterMode = 356,
367
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapLevelBias = 357,
368
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapLevelClamp = 358,
369
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMaxAnisotropy = 359,
370
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedMipmappedArray = 360,
371
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetResourceViewDesc = 361,
372
+ CUPTI_DRIVER_TRACE_CBID_cuLinkCreate = 362,
373
+ CUPTI_DRIVER_TRACE_CBID_cuLinkAddData = 363,
374
+ CUPTI_DRIVER_TRACE_CBID_cuLinkAddFile = 364,
375
+ CUPTI_DRIVER_TRACE_CBID_cuLinkComplete = 365,
376
+ CUPTI_DRIVER_TRACE_CBID_cuLinkDestroy = 366,
377
+ CUPTI_DRIVER_TRACE_CBID_cuStreamCreateWithPriority = 367,
378
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetPriority = 368,
379
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetFlags = 369,
380
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetStreamPriorityRange = 370,
381
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocManaged = 371,
382
+ CUPTI_DRIVER_TRACE_CBID_cuGetErrorString = 372,
383
+ CUPTI_DRIVER_TRACE_CBID_cuGetErrorName = 373,
384
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxActiveBlocksPerMultiprocessor = 374,
385
+ CUPTI_DRIVER_TRACE_CBID_cuCompilePtx = 375,
386
+ CUPTI_DRIVER_TRACE_CBID_cuBinaryFree = 376,
387
+ CUPTI_DRIVER_TRACE_CBID_cuStreamAttachMemAsync = 377,
388
+ CUPTI_DRIVER_TRACE_CBID_cuPointerSetAttribute = 378,
389
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostRegister_v2 = 379,
390
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceSetMapFlags_v2 = 380,
391
+ CUPTI_DRIVER_TRACE_CBID_cuLinkCreate_v2 = 381,
392
+ CUPTI_DRIVER_TRACE_CBID_cuLinkAddData_v2 = 382,
393
+ CUPTI_DRIVER_TRACE_CBID_cuLinkAddFile_v2 = 383,
394
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxPotentialBlockSize = 384,
395
+ CUPTI_DRIVER_TRACE_CBID_cuGLGetDevices_v2 = 385,
396
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxRetain = 386,
397
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxRelease = 387,
398
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxSetFlags = 388,
399
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxReset = 389,
400
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsEGLRegisterImage = 390,
401
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetFlags = 391,
402
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxGetState = 392,
403
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerConnect = 393,
404
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerDisconnect = 394,
405
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerAcquireFrame = 395,
406
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerReleaseFrame = 396,
407
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD_v2_ptds = 397,
408
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH_v2_ptds = 398,
409
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD_v2_ptds = 399,
410
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA_v2_ptds = 400,
411
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD_v2_ptds = 401,
412
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA_v2_ptds = 402,
413
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH_v2_ptds = 403,
414
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA_v2_ptds = 404,
415
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D_v2_ptds = 405,
416
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned_v2_ptds = 406,
417
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D_v2_ptds = 407,
418
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy_ptds = 408,
419
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeer_ptds = 409,
420
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeer_ptds = 410,
421
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8_v2_ptds = 411,
422
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16_v2_ptds = 412,
423
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32_v2_ptds = 413,
424
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8_v2_ptds = 414,
425
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16_v2_ptds = 415,
426
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32_v2_ptds = 416,
427
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject_v2_ptds = 417,
428
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAsync_ptsz = 418,
429
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync_v2_ptsz = 419,
430
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync_v2_ptsz = 420,
431
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync_v2_ptsz = 421,
432
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync_v2_ptsz = 422,
433
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync_v2_ptsz = 423,
434
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync_v2_ptsz = 424,
435
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync_v2_ptsz = 425,
436
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeerAsync_ptsz = 426,
437
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeerAsync_ptsz = 427,
438
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8Async_ptsz = 428,
439
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16Async_ptsz = 429,
440
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32Async_ptsz = 430,
441
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8Async_ptsz = 431,
442
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16Async_ptsz = 432,
443
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32Async_ptsz = 433,
444
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetPriority_ptsz = 434,
445
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetFlags_ptsz = 435,
446
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitEvent_ptsz = 436,
447
+ CUPTI_DRIVER_TRACE_CBID_cuStreamAddCallback_ptsz = 437,
448
+ CUPTI_DRIVER_TRACE_CBID_cuStreamAttachMemAsync_ptsz = 438,
449
+ CUPTI_DRIVER_TRACE_CBID_cuStreamQuery_ptsz = 439,
450
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSynchronize_ptsz = 440,
451
+ CUPTI_DRIVER_TRACE_CBID_cuEventRecord_ptsz = 441,
452
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel_ptsz = 442,
453
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsMapResources_ptsz = 443,
454
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsUnmapResources_ptsz = 444,
455
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObjectAsync_v2_ptsz = 445,
456
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerConnect = 446,
457
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerDisconnect = 447,
458
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerPresentFrame = 448,
459
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedEglFrame = 449,
460
+ CUPTI_DRIVER_TRACE_CBID_cuPointerGetAttributes = 450,
461
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags = 451,
462
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxPotentialBlockSizeWithFlags = 452,
463
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerReturnFrame = 453,
464
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetP2PAttribute = 454,
465
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetBorderColor = 455,
466
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetBorderColor = 456,
467
+ CUPTI_DRIVER_TRACE_CBID_cuMemAdvise = 457,
468
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32 = 458,
469
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32_ptsz = 459,
470
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32 = 460,
471
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32_ptsz = 461,
472
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp = 462,
473
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp_ptsz = 463,
474
+ CUPTI_DRIVER_TRACE_CBID_cuNVNbufferGetPointer = 464,
475
+ CUPTI_DRIVER_TRACE_CBID_cuNVNtextureGetArray = 465,
476
+ CUPTI_DRIVER_TRACE_CBID_cuNNSetAllocator = 466,
477
+ CUPTI_DRIVER_TRACE_CBID_cuMemPrefetchAsync = 467,
478
+ CUPTI_DRIVER_TRACE_CBID_cuMemPrefetchAsync_ptsz = 468,
479
+ CUPTI_DRIVER_TRACE_CBID_cuEventCreateFromNVNSync = 469,
480
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerConnectWithFlags = 470,
481
+ CUPTI_DRIVER_TRACE_CBID_cuMemRangeGetAttribute = 471,
482
+ CUPTI_DRIVER_TRACE_CBID_cuMemRangeGetAttributes = 472,
483
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64 = 473,
484
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64_ptsz = 474,
485
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64 = 475,
486
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64_ptsz = 476,
487
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchCooperativeKernel = 477,
488
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchCooperativeKernel_ptsz = 478,
489
+ CUPTI_DRIVER_TRACE_CBID_cuEventCreateFromEGLSync = 479,
490
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchCooperativeKernelMultiDevice = 480,
491
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetAttribute = 481,
492
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetUuid = 482,
493
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCtx = 483,
494
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCtx_ptsz = 484,
495
+ CUPTI_DRIVER_TRACE_CBID_cuImportExternalMemory = 485,
496
+ CUPTI_DRIVER_TRACE_CBID_cuExternalMemoryGetMappedBuffer = 486,
497
+ CUPTI_DRIVER_TRACE_CBID_cuExternalMemoryGetMappedMipmappedArray = 487,
498
+ CUPTI_DRIVER_TRACE_CBID_cuDestroyExternalMemory = 488,
499
+ CUPTI_DRIVER_TRACE_CBID_cuImportExternalSemaphore = 489,
500
+ CUPTI_DRIVER_TRACE_CBID_cuSignalExternalSemaphoresAsync = 490,
501
+ CUPTI_DRIVER_TRACE_CBID_cuSignalExternalSemaphoresAsync_ptsz = 491,
502
+ CUPTI_DRIVER_TRACE_CBID_cuWaitExternalSemaphoresAsync = 492,
503
+ CUPTI_DRIVER_TRACE_CBID_cuWaitExternalSemaphoresAsync_ptsz = 493,
504
+ CUPTI_DRIVER_TRACE_CBID_cuDestroyExternalSemaphore = 494,
505
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture = 495,
506
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture_ptsz = 496,
507
+ CUPTI_DRIVER_TRACE_CBID_cuStreamEndCapture = 497,
508
+ CUPTI_DRIVER_TRACE_CBID_cuStreamEndCapture_ptsz = 498,
509
+ CUPTI_DRIVER_TRACE_CBID_cuStreamIsCapturing = 499,
510
+ CUPTI_DRIVER_TRACE_CBID_cuStreamIsCapturing_ptsz = 500,
511
+ CUPTI_DRIVER_TRACE_CBID_cuGraphCreate = 501,
512
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddKernelNode = 502,
513
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeGetParams = 503,
514
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemcpyNode = 504,
515
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemcpyNodeGetParams = 505,
516
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemsetNode = 506,
517
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemsetNodeGetParams = 507,
518
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemsetNodeSetParams = 508,
519
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetType = 509,
520
+ CUPTI_DRIVER_TRACE_CBID_cuGraphGetRootNodes = 510,
521
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetDependencies = 511,
522
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetDependentNodes = 512,
523
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiate = 513,
524
+ CUPTI_DRIVER_TRACE_CBID_cuGraphLaunch = 514,
525
+ CUPTI_DRIVER_TRACE_CBID_cuGraphLaunch_ptsz = 515,
526
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecDestroy = 516,
527
+ CUPTI_DRIVER_TRACE_CBID_cuGraphDestroy = 517,
528
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddDependencies = 518,
529
+ CUPTI_DRIVER_TRACE_CBID_cuGraphRemoveDependencies = 519,
530
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemcpyNodeSetParams = 520,
531
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeSetParams = 521,
532
+ CUPTI_DRIVER_TRACE_CBID_cuGraphDestroyNode = 522,
533
+ CUPTI_DRIVER_TRACE_CBID_cuGraphClone = 523,
534
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeFindInClone = 524,
535
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddChildGraphNode = 525,
536
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddEmptyNode = 526,
537
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchHostFunc = 527,
538
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchHostFunc_ptsz = 528,
539
+ CUPTI_DRIVER_TRACE_CBID_cuGraphChildGraphNodeGetGraph = 529,
540
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddHostNode = 530,
541
+ CUPTI_DRIVER_TRACE_CBID_cuGraphHostNodeGetParams = 531,
542
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetLuid = 532,
543
+ CUPTI_DRIVER_TRACE_CBID_cuGraphHostNodeSetParams = 533,
544
+ CUPTI_DRIVER_TRACE_CBID_cuGraphGetNodes = 534,
545
+ CUPTI_DRIVER_TRACE_CBID_cuGraphGetEdges = 535,
546
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo = 536,
547
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_ptsz = 537,
548
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecKernelNodeSetParams = 538,
549
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture_v2 = 539,
550
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture_v2_ptsz = 540,
551
+ CUPTI_DRIVER_TRACE_CBID_cuThreadExchangeStreamCaptureMode = 541,
552
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetNvSciSyncAttributes = 542,
553
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyAvailableDynamicSMemPerBlock = 543,
554
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxRelease_v2 = 544,
555
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxReset_v2 = 545,
556
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxSetFlags_v2 = 546,
557
+ CUPTI_DRIVER_TRACE_CBID_cuMemAddressReserve = 547,
558
+ CUPTI_DRIVER_TRACE_CBID_cuMemAddressFree = 548,
559
+ CUPTI_DRIVER_TRACE_CBID_cuMemCreate = 549,
560
+ CUPTI_DRIVER_TRACE_CBID_cuMemRelease = 550,
561
+ CUPTI_DRIVER_TRACE_CBID_cuMemMap = 551,
562
+ CUPTI_DRIVER_TRACE_CBID_cuMemUnmap = 552,
563
+ CUPTI_DRIVER_TRACE_CBID_cuMemSetAccess = 553,
564
+ CUPTI_DRIVER_TRACE_CBID_cuMemExportToShareableHandle = 554,
565
+ CUPTI_DRIVER_TRACE_CBID_cuMemImportFromShareableHandle = 555,
566
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAllocationGranularity = 556,
567
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAllocationPropertiesFromHandle = 557,
568
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAccess = 558,
569
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSetFlags = 559,
570
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSetFlags_ptsz = 560,
571
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecUpdate = 561,
572
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecMemcpyNodeSetParams = 562,
573
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecMemsetNodeSetParams = 563,
574
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecHostNodeSetParams = 564,
575
+ CUPTI_DRIVER_TRACE_CBID_cuMemRetainAllocationHandle = 565,
576
+ CUPTI_DRIVER_TRACE_CBID_cuFuncGetModule = 566,
577
+ CUPTI_DRIVER_TRACE_CBID_cuIpcOpenMemHandle_v2 = 567,
578
+ CUPTI_DRIVER_TRACE_CBID_cuCtxResetPersistingL2Cache = 568,
579
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeCopyAttributes = 569,
580
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeGetAttribute = 570,
581
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeSetAttribute = 571,
582
+ CUPTI_DRIVER_TRACE_CBID_cuStreamCopyAttributes = 572,
583
+ CUPTI_DRIVER_TRACE_CBID_cuStreamCopyAttributes_ptsz = 573,
584
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetAttribute = 574,
585
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetAttribute_ptsz = 575,
586
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSetAttribute = 576,
587
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSetAttribute_ptsz = 577,
588
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiate_v2 = 578,
589
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetTexture1DLinearMaxWidth = 579,
590
+ CUPTI_DRIVER_TRACE_CBID_cuGraphUpload = 580,
591
+ CUPTI_DRIVER_TRACE_CBID_cuGraphUpload_ptsz = 581,
592
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetSparseProperties = 582,
593
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayGetSparseProperties = 583,
594
+ CUPTI_DRIVER_TRACE_CBID_cuMemMapArrayAsync = 584,
595
+ CUPTI_DRIVER_TRACE_CBID_cuMemMapArrayAsync_ptsz = 585,
596
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecChildGraphNodeSetParams = 586,
597
+ CUPTI_DRIVER_TRACE_CBID_cuEventRecordWithFlags = 587,
598
+ CUPTI_DRIVER_TRACE_CBID_cuEventRecordWithFlags_ptsz = 588,
599
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddEventRecordNode = 589,
600
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddEventWaitNode = 590,
601
+ CUPTI_DRIVER_TRACE_CBID_cuGraphEventRecordNodeGetEvent = 591,
602
+ CUPTI_DRIVER_TRACE_CBID_cuGraphEventWaitNodeGetEvent = 592,
603
+ CUPTI_DRIVER_TRACE_CBID_cuGraphEventRecordNodeSetEvent = 593,
604
+ CUPTI_DRIVER_TRACE_CBID_cuGraphEventWaitNodeSetEvent = 594,
605
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecEventRecordNodeSetEvent = 595,
606
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecEventWaitNodeSetEvent = 596,
607
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetPlane = 597,
608
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocAsync = 598,
609
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocAsync_ptsz = 599,
610
+ CUPTI_DRIVER_TRACE_CBID_cuMemFreeAsync = 600,
611
+ CUPTI_DRIVER_TRACE_CBID_cuMemFreeAsync_ptsz = 601,
612
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolTrimTo = 602,
613
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolSetAttribute = 603,
614
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolGetAttribute = 604,
615
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolSetAccess = 605,
616
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetDefaultMemPool = 606,
617
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolCreate = 607,
618
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolDestroy = 608,
619
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceSetMemPool = 609,
620
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetMemPool = 610,
621
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocFromPoolAsync = 611,
622
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocFromPoolAsync_ptsz = 612,
623
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolExportToShareableHandle = 613,
624
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolImportFromShareableHandle = 614,
625
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolExportPointer = 615,
626
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolImportPointer = 616,
627
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolGetAccess = 617,
628
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddExternalSemaphoresSignalNode = 618,
629
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresSignalNodeGetParams = 619,
630
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresSignalNodeSetParams = 620,
631
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddExternalSemaphoresWaitNode = 621,
632
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresWaitNodeGetParams = 622,
633
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresWaitNodeSetParams = 623,
634
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecExternalSemaphoresSignalNodeSetParams = 624,
635
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecExternalSemaphoresWaitNodeSetParams = 625,
636
+ CUPTI_DRIVER_TRACE_CBID_cuGetProcAddress = 626,
637
+ CUPTI_DRIVER_TRACE_CBID_cuFlushGPUDirectRDMAWrites = 627,
638
+ CUPTI_DRIVER_TRACE_CBID_cuGraphDebugDotPrint = 628,
639
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_v2 = 629,
640
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_v2_ptsz = 630,
641
+ CUPTI_DRIVER_TRACE_CBID_cuStreamUpdateCaptureDependencies = 631,
642
+ CUPTI_DRIVER_TRACE_CBID_cuStreamUpdateCaptureDependencies_ptsz = 632,
643
+ CUPTI_DRIVER_TRACE_CBID_cuUserObjectCreate = 633,
644
+ CUPTI_DRIVER_TRACE_CBID_cuUserObjectRetain = 634,
645
+ CUPTI_DRIVER_TRACE_CBID_cuUserObjectRelease = 635,
646
+ CUPTI_DRIVER_TRACE_CBID_cuGraphRetainUserObject = 636,
647
+ CUPTI_DRIVER_TRACE_CBID_cuGraphReleaseUserObject = 637,
648
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemAllocNode = 638,
649
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemFreeNode = 639,
650
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGraphMemTrim = 640,
651
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetGraphMemAttribute = 641,
652
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceSetGraphMemAttribute = 642,
653
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiateWithFlags = 643,
654
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetExecAffinitySupport = 644,
655
+ CUPTI_DRIVER_TRACE_CBID_cuCtxCreate_v3 = 645,
656
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetExecAffinity = 646,
657
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetUuid_v2 = 647,
658
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemAllocNodeGetParams = 648,
659
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemFreeNodeGetParams = 649,
660
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeSetEnabled = 650,
661
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetEnabled = 651,
662
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchKernelEx = 652,
663
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchKernelEx_ptsz = 653,
664
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetMemoryRequirements = 654,
665
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayGetMemoryRequirements = 655,
666
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiateWithParams = 656,
667
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiateWithParams_ptsz = 657,
668
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecGetFlags = 658,
669
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32_v2 = 659,
670
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32_v2_ptsz = 660,
671
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64_v2 = 661,
672
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64_v2_ptsz = 662,
673
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32_v2 = 663,
674
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32_v2_ptsz = 664,
675
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64_v2 = 665,
676
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64_v2_ptsz = 666,
677
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp_v2 = 667,
678
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp_v2_ptsz = 668,
679
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddBatchMemOpNode = 669,
680
+ CUPTI_DRIVER_TRACE_CBID_cuGraphBatchMemOpNodeGetParams = 670,
681
+ CUPTI_DRIVER_TRACE_CBID_cuGraphBatchMemOpNodeSetParams = 671,
682
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecBatchMemOpNodeSetParams = 672,
683
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetLoadingMode = 673,
684
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetHandleForAddressRange = 674,
685
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxPotentialClusterSize = 675,
686
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxActiveClusters = 676,
687
+ CUPTI_DRIVER_TRACE_CBID_cuGetProcAddress_v2 = 677,
688
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryLoadData = 678,
689
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryLoadFromFile = 679,
690
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryUnload = 680,
691
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetKernel = 681,
692
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetModule = 682,
693
+ CUPTI_DRIVER_TRACE_CBID_cuKernelGetFunction = 683,
694
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetGlobal = 684,
695
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetManaged = 685,
696
+ CUPTI_DRIVER_TRACE_CBID_cuKernelGetAttribute = 686,
697
+ CUPTI_DRIVER_TRACE_CBID_cuKernelSetAttribute = 687,
698
+ CUPTI_DRIVER_TRACE_CBID_cuKernelSetCacheConfig = 688,
699
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddKernelNode_v2 = 689,
700
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeGetParams_v2 = 690,
701
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeSetParams_v2 = 691,
702
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecKernelNodeSetParams_v2 = 692,
703
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetId = 693,
704
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetId_ptsz = 694,
705
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetId = 695,
706
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecUpdate_v2 = 696,
707
+ CUPTI_DRIVER_TRACE_CBID_cuTensorMapEncodeTiled = 697,
708
+ CUPTI_DRIVER_TRACE_CBID_cuTensorMapEncodeIm2col = 698,
709
+ CUPTI_DRIVER_TRACE_CBID_cuTensorMapReplaceAddress = 699,
710
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetUnifiedFunction = 700,
711
+ CUPTI_DRIVER_TRACE_CBID_cuCoredumpGetAttribute = 701,
712
+ CUPTI_DRIVER_TRACE_CBID_cuCoredumpGetAttributeGlobal = 702,
713
+ CUPTI_DRIVER_TRACE_CBID_cuCoredumpSetAttribute = 703,
714
+ CUPTI_DRIVER_TRACE_CBID_cuCoredumpSetAttributeGlobal = 704,
715
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetFlags = 705,
716
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastCreate = 706,
717
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastAddDevice = 707,
718
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastBindMem = 708,
719
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastBindAddr = 709,
720
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastUnbind = 710,
721
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastGetGranularity = 711,
722
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddNode = 712,
723
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeSetParams = 713,
724
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecNodeSetParams = 714,
725
+ CUPTI_DRIVER_TRACE_CBID_cuMemAdvise_v2 = 715,
726
+ CUPTI_DRIVER_TRACE_CBID_cuMemPrefetchAsync_v2 = 716,
727
+ CUPTI_DRIVER_TRACE_CBID_cuMemPrefetchAsync_v2_ptsz = 717,
728
+ CUPTI_DRIVER_TRACE_CBID_cuFuncGetName = 718,
729
+ CUPTI_DRIVER_TRACE_CBID_cuKernelGetName = 719,
730
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCaptureToGraph = 720,
731
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCaptureToGraph_ptsz = 721,
732
+ CUPTI_DRIVER_TRACE_CBID_cuGraphConditionalHandleCreate = 722,
733
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddNode_v2 = 723,
734
+ CUPTI_DRIVER_TRACE_CBID_cuGraphGetEdges_v2 = 724,
735
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetDependencies_v2 = 725,
736
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetDependentNodes_v2 = 726,
737
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddDependencies_v2 = 727,
738
+ CUPTI_DRIVER_TRACE_CBID_cuGraphRemoveDependencies_v2 = 728,
739
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_v3 = 729,
740
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_v3_ptsz = 730,
741
+ CUPTI_DRIVER_TRACE_CBID_cuStreamUpdateCaptureDependencies_v2 = 731,
742
+ CUPTI_DRIVER_TRACE_CBID_cuStreamUpdateCaptureDependencies_v2_ptsz = 732,
743
+ CUPTI_DRIVER_TRACE_CBID_cuFuncGetParamInfo = 733,
744
+ CUPTI_DRIVER_TRACE_CBID_cuKernelGetParamInfo = 734,
745
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceRegisterAsyncNotification = 735,
746
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceUnregisterAsyncNotification = 736,
747
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetFunctionCount = 737,
748
+ CUPTI_DRIVER_TRACE_CBID_cuModuleEnumerateFunctions = 738,
749
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetKernelCount = 739,
750
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryEnumerateKernels = 740,
751
+ CUPTI_DRIVER_TRACE_CBID_cuFuncIsLoaded = 741,
752
+ CUPTI_DRIVER_TRACE_CBID_cuFuncLoad = 742,
753
+ CUPTI_DRIVER_TRACE_CBID_cuGreenCtxCreate = 743,
754
+ CUPTI_DRIVER_TRACE_CBID_cuGreenCtxDestroy = 744,
755
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetDevResource = 745,
756
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetDevResource = 746,
757
+ CUPTI_DRIVER_TRACE_CBID_cuGreenCtxGetDevResource = 747,
758
+ CUPTI_DRIVER_TRACE_CBID_cuDevResourceGenerateDesc = 748,
759
+ CUPTI_DRIVER_TRACE_CBID_cuGreenCtxRecordEvent = 749,
760
+ CUPTI_DRIVER_TRACE_CBID_cuGreenCtxWaitEvent = 750,
761
+ CUPTI_DRIVER_TRACE_CBID_cuDevSmResourceSplitByCount = 751,
762
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetGreenCtx = 752,
763
+ CUPTI_DRIVER_TRACE_CBID_cuCtxFromGreenCtx = 753,
764
+ CUPTI_DRIVER_TRACE_CBID_SIZE = 754,
765
+ CUPTI_DRIVER_TRACE_CBID_FORCE_INT = 0x7fffffff
766
+ } CUpti_driver_api_trace_cbid;
767
+
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cupti_result.h ADDED
@@ -0,0 +1,346 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2010-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_RESULT_H_)
51
+ #define _CUPTI_RESULT_H_
52
+
53
+ #ifndef CUPTIAPI
54
+ #ifdef _WIN32
55
+ #define CUPTIAPI __stdcall
56
+ #else
57
+ #define CUPTIAPI
58
+ #endif
59
+ #endif
60
+
61
+ #if defined(__cplusplus)
62
+ extern "C" {
63
+ #endif
64
+
65
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
66
+ #pragma GCC visibility push(default)
67
+ #endif
68
+
69
+ /**
70
+ * \defgroup CUPTI_RESULT_API CUPTI Result Codes
71
+ * Error and result codes returned by CUPTI functions.
72
+ * @{
73
+ */
74
+
75
+ /**
76
+ * \brief CUPTI result codes.
77
+ *
78
+ * Error and result codes returned by CUPTI functions.
79
+ */
80
+ typedef enum {
81
+ /**
82
+ * No error.
83
+ */
84
+ CUPTI_SUCCESS = 0,
85
+ /**
86
+ * One or more of the parameters is invalid.
87
+ */
88
+ CUPTI_ERROR_INVALID_PARAMETER = 1,
89
+ /**
90
+ * The device does not correspond to a valid CUDA device.
91
+ */
92
+ CUPTI_ERROR_INVALID_DEVICE = 2,
93
+ /**
94
+ * The context is NULL or not valid.
95
+ */
96
+ CUPTI_ERROR_INVALID_CONTEXT = 3,
97
+ /**
98
+ * The event domain id is invalid.
99
+ */
100
+ CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID = 4,
101
+ /**
102
+ * The event id is invalid.
103
+ */
104
+ CUPTI_ERROR_INVALID_EVENT_ID = 5,
105
+ /**
106
+ * The event name is invalid.
107
+ */
108
+ CUPTI_ERROR_INVALID_EVENT_NAME = 6,
109
+ /**
110
+ * The current operation cannot be performed due to dependency on
111
+ * other factors.
112
+ */
113
+ CUPTI_ERROR_INVALID_OPERATION = 7,
114
+ /**
115
+ * Unable to allocate enough memory to perform the requested
116
+ * operation.
117
+ */
118
+ CUPTI_ERROR_OUT_OF_MEMORY = 8,
119
+ /**
120
+ * An error occurred on the performance monitoring hardware.
121
+ */
122
+ CUPTI_ERROR_HARDWARE = 9,
123
+ /**
124
+ * The output buffer size is not sufficient to return all
125
+ * requested data.
126
+ */
127
+ CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT = 10,
128
+ /**
129
+ * API is not implemented.
130
+ */
131
+ CUPTI_ERROR_API_NOT_IMPLEMENTED = 11,
132
+ /**
133
+ * The maximum limit is reached.
134
+ */
135
+ CUPTI_ERROR_MAX_LIMIT_REACHED = 12,
136
+ /**
137
+ * The object is not yet ready to perform the requested operation.
138
+ */
139
+ CUPTI_ERROR_NOT_READY = 13,
140
+ /**
141
+ * The current operation is not compatible with the current state
142
+ * of the object
143
+ */
144
+ CUPTI_ERROR_NOT_COMPATIBLE = 14,
145
+ /**
146
+ * CUPTI is unable to initialize its connection to the CUDA
147
+ * driver.
148
+ */
149
+ CUPTI_ERROR_NOT_INITIALIZED = 15,
150
+ /**
151
+ * The metric id is invalid.
152
+ */
153
+ CUPTI_ERROR_INVALID_METRIC_ID = 16,
154
+ /**
155
+ * The metric name is invalid.
156
+ */
157
+ CUPTI_ERROR_INVALID_METRIC_NAME = 17,
158
+ /**
159
+ * The queue is empty.
160
+ */
161
+ CUPTI_ERROR_QUEUE_EMPTY = 18,
162
+ /**
163
+ * Invalid handle (internal?).
164
+ */
165
+ CUPTI_ERROR_INVALID_HANDLE = 19,
166
+ /**
167
+ * Invalid stream.
168
+ */
169
+ CUPTI_ERROR_INVALID_STREAM = 20,
170
+ /**
171
+ * Invalid kind.
172
+ */
173
+ CUPTI_ERROR_INVALID_KIND = 21,
174
+ /**
175
+ * Invalid event value.
176
+ */
177
+ CUPTI_ERROR_INVALID_EVENT_VALUE = 22,
178
+ /**
179
+ * CUPTI is disabled due to conflicts with other enabled profilers
180
+ */
181
+ CUPTI_ERROR_DISABLED = 23,
182
+ /**
183
+ * Invalid module.
184
+ */
185
+ CUPTI_ERROR_INVALID_MODULE = 24,
186
+ /**
187
+ * Invalid metric value.
188
+ */
189
+ CUPTI_ERROR_INVALID_METRIC_VALUE = 25,
190
+ /**
191
+ * The performance monitoring hardware is in use by other client.
192
+ */
193
+ CUPTI_ERROR_HARDWARE_BUSY = 26,
194
+ /**
195
+ * The attempted operation is not supported on the current
196
+ * system or device.
197
+ */
198
+ CUPTI_ERROR_NOT_SUPPORTED = 27,
199
+ /**
200
+ * Unified memory profiling is not supported on the system.
201
+ * Potential reason could be unsupported OS or architecture.
202
+ */
203
+ CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED = 28,
204
+ /**
205
+ * Unified memory profiling is not supported on the device
206
+ */
207
+ CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_DEVICE = 29,
208
+ /**
209
+ * Unified memory profiling is not supported on a multi-GPU
210
+ * configuration without P2P support between any pair of devices
211
+ */
212
+ CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_NON_P2P_DEVICES = 30,
213
+ /**
214
+ * Unified memory profiling is not supported under the
215
+ * Multi-Process Service (MPS) environment. CUDA 7.5 removes this
216
+ * restriction.
217
+ */
218
+ CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_WITH_MPS = 31,
219
+ /**
220
+ * In CUDA 9.0, devices with compute capability 7.0 don't
221
+ * support CDP tracing
222
+ */
223
+ CUPTI_ERROR_CDP_TRACING_NOT_SUPPORTED = 32,
224
+ /**
225
+ * Profiling on virtualized GPU is not supported.
226
+ */
227
+ CUPTI_ERROR_VIRTUALIZED_DEVICE_NOT_SUPPORTED = 33,
228
+ /**
229
+ * Profiling results might be incorrect for CUDA applications
230
+ * compiled with nvcc version older than 9.0 for devices with
231
+ * compute capability 6.0 and 6.1.
232
+ * Profiling session will continue and CUPTI will notify it using this error code.
233
+ * User is advised to recompile the application code with nvcc version 9.0 or later.
234
+ * Ignore this warning if code is already compiled with the recommended nvcc version.
235
+ */
236
+ CUPTI_ERROR_CUDA_COMPILER_NOT_COMPATIBLE = 34,
237
+ /**
238
+ * User doesn't have sufficient privileges which are required to
239
+ * start the profiling session.
240
+ * One possible reason for this may be that the NVIDIA driver or your system
241
+ * administrator may have restricted access to the NVIDIA GPU performance counters.
242
+ * To learn how to resolve this issue and find more information, please visit
243
+ * https://developer.nvidia.com/CUPTI_ERROR_INSUFFICIENT_PRIVILEGES
244
+ */
245
+ CUPTI_ERROR_INSUFFICIENT_PRIVILEGES = 35,
246
+ /**
247
+ * Legacy CUPTI Profiling API i.e. event API from the header cupti_events.h and
248
+ * metric API from the header cupti_metrics.h are not compatible with the
249
+ * Profiling API in the header cupti_profiler_target.h and Perfworks metrics API
250
+ * in the headers nvperf_host.h and nvperf_target.h.
251
+ */
252
+ CUPTI_ERROR_OLD_PROFILER_API_INITIALIZED = 36,
253
+ /**
254
+ * Missing definition of the OpenACC API routine in the linked OpenACC library.
255
+ *
256
+ * One possible reason is that OpenACC library is linked statically in the
257
+ * user application, which might not have the definition of all the OpenACC
258
+ * API routines needed for the OpenACC profiling, as compiler might ignore
259
+ * definitions for the functions not used in the application. This issue
260
+ * can be mitigated by linking the OpenACC library dynamically.
261
+ */
262
+ CUPTI_ERROR_OPENACC_UNDEFINED_ROUTINE = 37,
263
+ /**
264
+ * Legacy CUPTI Profiling API i.e. event API from the header cupti_events.h and
265
+ * metric API from the header cupti_metrics.h are not supported on devices with
266
+ * compute capability 7.5 and higher (i.e. Turing and later GPU architectures).
267
+ * These API will be deprecated in a future CUDA release. These are replaced by
268
+ * Profiling API in the header cupti_profiler_target.h and Perfworks metrics API
269
+ * in the headers nvperf_host.h and nvperf_target.h.
270
+ */
271
+ CUPTI_ERROR_LEGACY_PROFILER_NOT_SUPPORTED = 38,
272
+ /**
273
+ * CUPTI doesn't allow multiple callback subscribers. Only a single subscriber
274
+ * can be registered at a time.
275
+ * Same error code is used when application is launched using NVIDIA tools
276
+ * like nvprof, Visual Profiler, Nsight Systems, Nsight Compute, cuda-gdb and
277
+ * cuda-memcheck.
278
+ */
279
+ CUPTI_ERROR_MULTIPLE_SUBSCRIBERS_NOT_SUPPORTED = 39,
280
+ /**
281
+ * Profiling on virtualized GPU is not allowed by hypervisor.
282
+ */
283
+ CUPTI_ERROR_VIRTUALIZED_DEVICE_INSUFFICIENT_PRIVILEGES = 40,
284
+ /**
285
+ * Profiling and tracing are not allowed when confidential computing mode
286
+ * is enabled.
287
+ */
288
+ CUPTI_ERROR_CONFIDENTIAL_COMPUTING_NOT_SUPPORTED = 41,
289
+ /**
290
+ * CUPTI does not support NVIDIA Crypto Mining Processors (CMP).
291
+ * For more information, please visit https://developer.nvidia.com/ERR_NVCMPGPU
292
+ */
293
+ CUPTI_ERROR_CMP_DEVICE_NOT_SUPPORTED = 42,
294
+ /**
295
+ * An unknown internal error has occurred.
296
+ */
297
+ CUPTI_ERROR_UNKNOWN = 999,
298
+ CUPTI_ERROR_FORCE_INT = 0x7fffffff
299
+ } CUptiResult;
300
+
301
+ /**
302
+ * \brief Get the descriptive string for a CUptiResult.
303
+ *
304
+ * Return the descriptive string for a CUptiResult in \p *str.
305
+ * \note \b Thread-safety: this function is thread safe.
306
+ *
307
+ * \param result The result to get the string for
308
+ * \param str Returns the string
309
+ *
310
+ * \retval CUPTI_SUCCESS on success
311
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p str is NULL or \p
312
+ * result is not a valid CUptiResult
313
+ */
314
+ CUptiResult CUPTIAPI cuptiGetResultString(CUptiResult result, const char **str);
315
+
316
+ /**
317
+ * @brief Get the descriptive message corresponding to error codes returned
318
+ * by CUPTI.
319
+ *
320
+ * Return the descriptive error message for a CUptiResult in \p *str.
321
+ * \note \b Thread-safety: this function is thread safe.
322
+ *
323
+ * \param result The result to get the descriptive error message for
324
+ * \param str Returns the error message string
325
+ *
326
+ * \retval CUPTI_SUCCESS on success
327
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p str is NULL or \p
328
+ * result is not a valid CUptiResult
329
+ *
330
+ */
331
+
332
+ CUptiResult CUPTIAPI cuptiGetErrorMessage(CUptiResult result, const char **str);
333
+
334
+ /** @} */ /* END CUPTI_RESULT_API */
335
+
336
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
337
+ #pragma GCC visibility pop
338
+ #endif
339
+
340
+ #if defined(__cplusplus)
341
+ }
342
+ #endif
343
+
344
+ #endif /*_CUPTI_RESULT_H_*/
345
+
346
+
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/cupti_sass_metrics.h ADDED
@@ -0,0 +1,436 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_SASS_METRICS_H_)
51
+ #define _CUPTI_SASS_METRICS_H_
52
+
53
+ #include <cuda.h>
54
+ #include <cupti_result.h>
55
+ #include <cupti_profiler_target.h>
56
+
57
+ #ifdef __cplusplus
58
+ extern "C" {
59
+ #endif
60
+
61
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
62
+ #pragma GCC visibility push(default)
63
+ #endif
64
+
65
+ /**
66
+ * \defgroup CUPTI_SASS_METRICS_API CUPTI SASS Metrics API
67
+ * Functions, types, and enums that implement the CUPTI SASS Metrics API.
68
+ * @{
69
+ */
70
+
71
+ typedef enum
72
+ {
73
+ /// SASS metric data will be collected at GPU level.
74
+ /// In CUpti_SassMetricsGetDataProperties_Params struct the numOfInstances will be equal to 1
75
+ CUPTI_SASS_METRICS_OUTPUT_GRANULARITY_GPU = 0,
76
+
77
+ /// SASS metric data will be collected at SM level
78
+ /// In CUpti_SassMetricsGetDataProperties_Params struct the numOfInstances will be equal to number of SMs in the GPU
79
+ CUPTI_SASS_METRICS_OUTPUT_GRANULARITY_SM = 1,
80
+
81
+ /// SASS metric data will be collected at SM sub-partition level
82
+ /// In CUpti_SassMetricsGetDataProperties_Params struct the numOfInstances will be equal to number of SM sub-partitions in the GPU
83
+ CUPTI_SASS_METRICS_OUTPUT_GRANULARITY_SMSP = 2,
84
+
85
+ CUPTI_SASS_METRICS_OUTPUT_GRANULARITY_INVALID
86
+ } CUpti_SassMetrics_OutputGranularity;
87
+
88
+ typedef struct CUpti_SassMetrics_MetricDetails
89
+ {
90
+ /// unique ID for the SASS metric
91
+ uint64_t metricId;
92
+ /// metric name
93
+ const char* pMetricName;
94
+ /// metric description
95
+ const char* pMetricDescription;
96
+ } CUpti_SassMetrics_MetricDetails;
97
+
98
+ /**
99
+ * \brief Params for cuptiSassMetricsGetNumOfMetrics
100
+ */
101
+ typedef struct CUpti_SassMetrics_GetNumOfMetrics_Params
102
+ {
103
+ /// [in] should be equal to CUpti_SassMetrics_GetNumOfMetrics_Params_STRUCT_SIZE
104
+ size_t structSize;
105
+ /// [in] assign to NULL
106
+ void* pPriv;
107
+ /// [in] chip name for which metrics will be queried
108
+ const char* pChipName;
109
+ /// [out] number of metrics supported for the queried chip
110
+ size_t numOfMetrics;
111
+ } CUpti_SassMetrics_GetNumOfMetrics_Params;
112
+
113
+ #define CUpti_SassMetrics_GetNumOfMetrics_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetrics_GetNumOfMetrics_Params, numOfMetrics)
114
+
115
+ /**
116
+ * \brief Get the number of supported SASS metrics for the chip.
117
+ *
118
+ * \param pParams A pointer to \ref CUpti_SassMetrics_GetNumOfMetrics_Params
119
+ *
120
+ * \retval CUPTI_SUCCESS
121
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
122
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric collection
123
+ */
124
+ CUptiResult CUPTIAPI cuptiSassMetricsGetNumOfMetrics(CUpti_SassMetrics_GetNumOfMetrics_Params* pParams);
125
+
126
+ /**
127
+ * \brief Params for cuptiSassMetricsGetMetrics
128
+ */
129
+ typedef struct CUpti_SassMetrics_GetMetrics_Params
130
+ {
131
+ /// [in] should be equal to CUpti_SassMetrics_GetMetrics_Params_STRUCT_SIZE
132
+ size_t structSize;
133
+ /// [in] assign to NULL
134
+ void* pPriv;
135
+ /// [in] chip name for which metrics will be queried
136
+ const char* pChipName;
137
+ /// [in] number of metrics supported for the queried chip (can be queried using cuptiSassMetricsGetNumOfMetrics())
138
+ size_t numOfMetrics;
139
+ /// [out] list of metrics supported for queried chip
140
+ CUpti_SassMetrics_MetricDetails* pMetricsList;
141
+ } CUpti_SassMetrics_GetMetrics_Params;
142
+ #define CUpti_SassMetrics_GetMetrics_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetrics_GetMetrics_Params, pMetricsList)
143
+
144
+ /**
145
+ * \brief Get the list of all supported SASS metrics for the chip.
146
+ *
147
+ * \param pParams A pointer to \ref CUpti_SassMetrics_GetMetrics_Params
148
+ *
149
+ * \retval CUPTI_SUCCESS
150
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
151
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric collection
152
+ */
153
+ CUptiResult CUPTIAPI cuptiSassMetricsGetMetrics(CUpti_SassMetrics_GetMetrics_Params* pParams);
154
+
155
+ /**
156
+ * \brief Params for cuptiSassMetricsGetProperties
157
+ */
158
+ typedef struct CUpti_SassMetrics_GetProperties_Params
159
+ {
160
+ /// [in] should be equal to CUpti_SassMetrics_GetProperties_Params_STRUCT_SIZE
161
+ size_t structSize;
162
+ /// [in] assign to NULL
163
+ void* pPriv;
164
+ /// [in] chip name for which metric will be queried
165
+ const char* pChipName;
166
+ /// [in] metric name
167
+ const char* pMetricName;
168
+ /// [out] returns the metric ID and the metric description
169
+ CUpti_SassMetrics_MetricDetails metric;
170
+ } CUpti_SassMetrics_GetProperties_Params;
171
+ #define CUpti_SassMetrics_GetProperties_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetrics_GetProperties_Params, metric)
172
+
173
+ /**
174
+ * \brief Get metric properties for the queried metric.
175
+ * For a given metric the results will be put in CUpti_SassMetrics_MetricDetails which
176
+ * stores metric ID, description of the metric.
177
+ *
178
+ * \param pParams A pointer to \ref CUpti_SassMetrics_GetProperties_Params
179
+ *
180
+ * \retval CUPTI_SUCCESS
181
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
182
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric data collection
183
+ */
184
+ CUptiResult CUPTIAPI cuptiSassMetricsGetProperties(CUpti_SassMetrics_GetProperties_Params *pParams);
185
+
186
+ typedef struct CUpti_SassMetrics_Config
187
+ {
188
+ /// [in] unique id for the SASS metric, can be queried using cuptiSassMetricsGetProperties()
189
+ uint64_t metricId;
190
+ /// [in] CUpti_SassMetrics_OutputGranularity
191
+ uint8_t outputGranularity;
192
+ } CUpti_SassMetrics_Config;
193
+
194
+ /**
195
+ * \brief Params for cuptiSassMetricsSetConfig
196
+ */
197
+ typedef struct CUpti_SassMetricsSetConfig_Params
198
+ {
199
+ /// [in] equal to CUpti_SassMetricsSetConfig_Params_STRUCT_SIZE
200
+ size_t structSize;
201
+ /// [in] assign to NULL
202
+ void* pPriv;
203
+ /// [in] num of metric configs, will be equal to number of metrics queried
204
+ size_t numOfMetricConfig;
205
+ /// [in] list of metric config generated for given sass metrics
206
+ CUpti_SassMetrics_Config* pConfigs;
207
+ /// [in] device index for which config will be set, user can call this once for
208
+ /// the device on which the the SASS metric data will be collected
209
+ uint32_t deviceIndex;
210
+ } CUpti_SassMetricsSetConfig_Params;
211
+ #define CUpti_SassMetricsSetConfig_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetricsSetConfig_Params, deviceIndex)
212
+
213
+ /**
214
+ * \brief Set config for the SASS metric data collection for a device.
215
+ * User need to call this API before calling any of the SASS metric data collection APIs.
216
+ * Each set config API call need to be followed by cuptiSassPatchingUnSetConfig API
217
+ * before calling the cuptiSassMetricsSetConfig() API again for the same device.
218
+ *
219
+ * \param pParams A pointer to \ref CUpti_SassMetricsSetConfig_Params
220
+ *
221
+ * \retval CUPTI_SUCCESS
222
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
223
+ * \retval CUPTI_ERROR_INVALID_CONTEXT if any cuda context has not been created prior to this API call
224
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this is called multiple times for the device without calling unset config API
225
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric data collection
226
+ */
227
+ CUptiResult CUPTIAPI cuptiSassMetricsSetConfig(CUpti_SassMetricsSetConfig_Params *pParams);
228
+
229
+ /**
230
+ * \brief Params for cuptiSassMetricsUnsetConfig
231
+ */
232
+ typedef struct CUpti_SassMetricsUnsetConfig_Params
233
+ {
234
+ /// [in] equal to CUpti_SassMetricsUnsetConfig_Params_STRUCT_SIZE
235
+ size_t structSize;
236
+ /// [in] assign to NULL
237
+ void* pPriv;
238
+ /// [in] device index for which SASS metric data collection config will get reset, user need to call this API for
239
+ /// all the devices on which the the SASS metric data collection have been configured.
240
+ uint32_t deviceIndex;
241
+ } CUpti_SassMetricsUnsetConfig_Params;
242
+ #define CUpti_SassMetricsUnsetConfig_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetricsUnsetConfig_Params, deviceIndex)
243
+
244
+ /**
245
+ * \brief Unset config API will reset the SASS metric data collection configuration for the device.
246
+ * Once this API called CUPTI will deallocate all the memory allocated and remove all
247
+ * the configuration for SASS metric data collection. User can only call this API for a device where
248
+ * cuptiSassMetricsSetConfig() API has been called earlier for the device.
249
+ *
250
+ * \param pParams A pointer to \ref CUpti_SassMetricsSetConfig_Params
251
+ *
252
+ * \retval CUPTI_SUCCESS
253
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
254
+ * \retval CUPTI_ERROR_INVALID_CONTEXT if any cuda context has not been created prior to this API call
255
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this is called multiple times for the device without calling set config API
256
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric data collection
257
+ */
258
+ CUptiResult CUPTIAPI cuptiSassMetricsUnsetConfig(CUpti_SassMetricsUnsetConfig_Params *pParams);
259
+
260
+ /**
261
+ * \brief Params for cuptiSassMetricsEnable
262
+ */
263
+ typedef struct CUpti_SassMetricsEnable_Params
264
+ {
265
+ /// [in] equal to CUpti_SassMetricsEnable_Params_STRUCT_SIZE
266
+ size_t structSize;
267
+ /// [in] assign to NULL
268
+ void* pPriv;
269
+ /// [in] CUDA context on which SASS metric data collection will be enabled.
270
+ /// If set NULL, default context will be consider for SASS metric data collection.
271
+ CUcontext ctx;
272
+ /// [in] if false, all the functions will patched regardless of their execution with cuptiSassMetricsEnable() API call.
273
+ /// when this parameter is set to true, metric data collection for the function will be done at the very first execution in the enable/disble
274
+ /// range.
275
+ uint8_t enableLazyPatching;
276
+ } CUpti_SassMetricsEnable_Params;
277
+ #define CUpti_SassMetricsEnable_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetricsEnable_Params, enableLazyPatching)
278
+
279
+ /**
280
+ * \brief Sass metric data collection enable API will mark the start of a range, between which kernel
281
+ * will be profiled for SASS metrics.
282
+ *
283
+ * \param pParams A pointer to \ref CUpti_SassMetricsEnable_Params
284
+ *
285
+ * \retval CUPTI_SUCCESS
286
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
287
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric data collection
288
+ * \retval CUPTI_ERROR_INVALID_CONTEXT if any cuda context has not been created prior to this API call
289
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called multiple times for a cuda context without calling
290
+ * cuptiSassMetricsDisable() API or called before cuptiSassMetricsSetConfig() API call.
291
+ */
292
+ CUptiResult CUPTIAPI cuptiSassMetricsEnable(CUpti_SassMetricsEnable_Params* pParams);
293
+
294
+ /**
295
+ * \brief Params for cuptiSassMetricsDisable
296
+ */
297
+ typedef struct CUpti_SassMetricsDisable_Params
298
+ {
299
+ /// [in] equal to CUpti_SassMetricsDisable_Params_STRUCT_SIZE
300
+ size_t structSize;
301
+ /// [in] assign to NULL
302
+ void* pPriv;
303
+ /// [in] CUDA context on which SASS metric data collection will be disabled.
304
+ /// If set NULL, default context will be consider for SASS metric data collection.
305
+ CUcontext ctx;
306
+ /// [out] Num of dropped SASS records will be equal to numOfPatchedInstructions * numOfInstances.
307
+ /// Number of dropped records will be zero when data is flushed prior to calling the disable API.
308
+ size_t numOfDroppedRecords;
309
+ } CUpti_SassMetricsDisable_Params;
310
+ #define CUpti_SassMetricsDisable_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetricsDisable_Params, numOfDroppedRecords)
311
+
312
+ /**
313
+ * \brief SASS metric data collection disable API will mark the end of a range, any kernel launched after this
314
+ * API call will not be profiled for the SASS metrics.
315
+ *
316
+ * \param pParams A pointer to \ref CUpti_SassMetricsDisable_Params
317
+ *
318
+ * \retval CUPTI_SUCCESS
319
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
320
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric data collection
321
+ * \retval CUPTI_ERROR_INVALID_CONTEXT if any cuda context has not been created prior to this API call
322
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called multiple times for a cuda context without calling
323
+ * cuptiSassMetricsEnable() API or called before cuptiSassMetricsSetConfig() API call.
324
+ */
325
+ CUptiResult CUPTIAPI cuptiSassMetricsDisable(CUpti_SassMetricsDisable_Params* pParams);
326
+
327
+ /**
328
+ * \brief Params for cuptiSassMetricsGetDataProperties
329
+ */
330
+ typedef struct CUpti_SassMetricsGetDataProperties_Params
331
+ {
332
+ /// [in] equal to CUpti_SassMetricsGetDataProperties_Params_STRUCT_SIZE
333
+ size_t structSize;
334
+ /// [in] assign to NULL
335
+ void* pPriv;
336
+ /// [in] CUDA context on which SASS metric data collection was enabled.
337
+ /// If set NULL, default context will be consider for SASS metric data collection.
338
+ CUcontext ctx;
339
+ /// [out] total number of SASS records has been collected
340
+ size_t numOfPatchedInstructionRecords;
341
+ /// [out] number of instances for each metric value per instruction.
342
+ /// This will depend on CUpti_SassPatching_OutputGranularity level set for the metric config.
343
+ size_t numOfInstances;
344
+ } CUpti_SassMetricsGetDataProperties_Params;
345
+
346
+ #define CUpti_SassMetricsGetDataProperties_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetricsGetDataProperties_Params, numOfInstances)
347
+ /**
348
+ * \brief SASS metric data properties API will give the data regarding number of instances of a metric
349
+ * value and number of SASS instruction data has been collected. The number of instances of a metric
350
+ * will vary as per user set the output granularity level with CUpti_SassMetrics_OutputGranularity value.
351
+ * User need to allocate memory for retriving the SASS data using cuptiSassMetricsFlushData() API.
352
+ *
353
+ * \param pParams A pointer to \ref CUpti_SassMetricsGetDataProperties_Params
354
+ *
355
+ * \retval CUPTI_SUCCESS
356
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
357
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric data collection
358
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called outside the enable/disable range.
359
+ */
360
+ CUptiResult CUPTIAPI cuptiSassMetricsGetDataProperties(CUpti_SassMetricsGetDataProperties_Params* pParams);
361
+
362
+ typedef struct CUpti_SassMetrics_InstanceValue
363
+ {
364
+ // unique id of the metric
365
+ uint64_t metricId;
366
+ // metric value
367
+ uint64_t value;
368
+ } CUpti_SassMetrics_InstanceValue;
369
+ #define CUpti_SassMetrics_InstanceValue_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetrics_InstanceValue, value)
370
+
371
+ typedef struct CUpti_SassMetrics_Data
372
+ {
373
+ /// [in] equal to CUpti_SassMetricsFlushData_Params_STRUCT_SIZE
374
+ size_t structSize;
375
+ /// [in] assign to NULL
376
+ void* pPriv;
377
+ /// [out] Unique cubin id
378
+ uint32_t cubinCrc;
379
+ /// [out] function's unique symbol index in the module.
380
+ uint32_t functionIndex;
381
+ /// [out] The function name
382
+ const char* functionName;
383
+ /// [out] pc offset for the function in a module
384
+ uint32_t pcOffset;
385
+ /// [out] array of size equal to number of instances per metric, which contains the metric ID and metric value.
386
+ CUpti_SassMetrics_InstanceValue* pInstanceValues;
387
+ } CUpti_SassMetrics_Data;
388
+
389
+ /**
390
+ * \brief Params for cuptiSassMetricsFlushData
391
+ */
392
+ typedef struct CUpti_SassMetricsFlushData_Params
393
+ {
394
+ /// [in] equal to CUpti_SassMetricsFlushData_Params_STRUCT_SIZE
395
+ size_t structSize;
396
+ /// [in] assign to NULL
397
+ void* pPriv;
398
+ /// [in] CUDA context on which SASS metric data collection was enabled.
399
+ /// If set NULL, default context will be consider for SASS metric data collection.
400
+ CUcontext ctx;
401
+ /// [in] number of patched instruction record will be retrived, user can call cuptiSassMetricsGetDataProperties()
402
+ /// for getting total number of records available.
403
+ size_t numOfPatchedInstructionRecords;
404
+ /// [in] number of patched instruction record instances for a metric, user can call cuptiSassMetricsGetDataProperties()
405
+ /// for getting total number of instances for each record per metric available.
406
+ size_t numOfInstances;
407
+ /// [out]
408
+ CUpti_SassMetrics_Data* pMetricsData;
409
+ } CUpti_SassMetricsFlushData_Params;
410
+ #define CUpti_SassMetricsFlushData_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetricsFlushData_Params, numOfInstances)
411
+
412
+ /**
413
+ * \brief Flush SASS metrics data from CUPTI internal buffer to the user buffer.
414
+ * User needs to allocate the buffer for retrieving the data. The number of records collected
415
+ * can be queried using the API cuptiSassMetricsGetDataProperties().
416
+ *
417
+ * \param pParams A pointer to \ref CUpti_SassMetricsFlushData_Params
418
+ *
419
+ * \retval CUPTI_SUCCESS
420
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
421
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric data collection.
422
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called outside the enable/disable range.
423
+ */
424
+ CUptiResult CUPTIAPI cuptiSassMetricsFlushData(CUpti_SassMetricsFlushData_Params* pParams);
425
+
426
+ /** @} */ /* END CUPTI_SASS_METRICS_API */
427
+
428
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
429
+ #pragma GCC visibility pop
430
+ #endif
431
+
432
+ #ifdef __cplusplus
433
+ } /* extern "C" */
434
+ #endif
435
+
436
+ #endif // _CUPTI_SASS_METRICS_H_
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/driver_types.h ADDED
The diff for this file is too large to render. See raw diff
 
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/generated_cudaVDPAU_meta.h ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is generated. Any changes you make will be lost during the next clean build.
2
+
3
+ // Dependent includes
4
+ #include <vdpau/vdpau.h>
5
+
6
+ // CUDA public interface, for type definitions and cu* function prototypes
7
+ #include "cudaVDPAU.h"
8
+
9
+
10
+ // *************************************************************************
11
+ // Definitions of structs to hold parameters for each function
12
+ // *************************************************************************
13
+
14
+ typedef struct cuVDPAUGetDevice_params_st {
15
+ CUdevice *pDevice;
16
+ VdpDevice vdpDevice;
17
+ VdpGetProcAddress *vdpGetProcAddress;
18
+ } cuVDPAUGetDevice_params;
19
+
20
+ typedef struct cuVDPAUCtxCreate_v2_params_st {
21
+ CUcontext *pCtx;
22
+ unsigned int flags;
23
+ CUdevice device;
24
+ VdpDevice vdpDevice;
25
+ VdpGetProcAddress *vdpGetProcAddress;
26
+ } cuVDPAUCtxCreate_v2_params;
27
+
28
+ typedef struct cuGraphicsVDPAURegisterVideoSurface_params_st {
29
+ CUgraphicsResource *pCudaResource;
30
+ VdpVideoSurface vdpSurface;
31
+ unsigned int flags;
32
+ } cuGraphicsVDPAURegisterVideoSurface_params;
33
+
34
+ typedef struct cuGraphicsVDPAURegisterOutputSurface_params_st {
35
+ CUgraphicsResource *pCudaResource;
36
+ VdpOutputSurface vdpSurface;
37
+ unsigned int flags;
38
+ } cuGraphicsVDPAURegisterOutputSurface_params;
39
+
40
+ typedef struct cuVDPAUCtxCreate_params_st {
41
+ CUcontext *pCtx;
42
+ unsigned int flags;
43
+ CUdevice device;
44
+ VdpDevice vdpDevice;
45
+ VdpGetProcAddress *vdpGetProcAddress;
46
+ } cuVDPAUCtxCreate_params;
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/generated_cuda_runtime_api_meta.h ADDED
@@ -0,0 +1,2288 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is generated. Any changes you make will be lost during the next clean build.
2
+
3
+ // CUDA public interface, for type definitions and api function prototypes
4
+ #include "cuda_runtime_api.h"
5
+
6
+ // *************************************************************************
7
+ // Definitions of structs to hold parameters for each function
8
+ // *************************************************************************
9
+
10
+ // Currently used parameter trace structures
11
+ typedef struct cudaDeviceSetLimit_v3020_params_st {
12
+ enum cudaLimit limit;
13
+ size_t value;
14
+ } cudaDeviceSetLimit_v3020_params;
15
+
16
+ typedef struct cudaDeviceGetLimit_v3020_params_st {
17
+ size_t *pValue;
18
+ enum cudaLimit limit;
19
+ } cudaDeviceGetLimit_v3020_params;
20
+
21
+ typedef struct cudaDeviceGetTexture1DLinearMaxWidth_v11010_params_st {
22
+ size_t *maxWidthInElements;
23
+ const struct cudaChannelFormatDesc *fmtDesc;
24
+ int device;
25
+ } cudaDeviceGetTexture1DLinearMaxWidth_v11010_params;
26
+
27
+ typedef struct cudaDeviceGetCacheConfig_v3020_params_st {
28
+ enum cudaFuncCache *pCacheConfig;
29
+ } cudaDeviceGetCacheConfig_v3020_params;
30
+
31
+ typedef struct cudaDeviceGetStreamPriorityRange_v5050_params_st {
32
+ int *leastPriority;
33
+ int *greatestPriority;
34
+ } cudaDeviceGetStreamPriorityRange_v5050_params;
35
+
36
+ typedef struct cudaDeviceSetCacheConfig_v3020_params_st {
37
+ enum cudaFuncCache cacheConfig;
38
+ } cudaDeviceSetCacheConfig_v3020_params;
39
+
40
+ typedef struct cudaDeviceGetByPCIBusId_v4010_params_st {
41
+ int *device;
42
+ const char *pciBusId;
43
+ } cudaDeviceGetByPCIBusId_v4010_params;
44
+
45
+ typedef struct cudaDeviceGetPCIBusId_v4010_params_st {
46
+ char *pciBusId;
47
+ int len;
48
+ int device;
49
+ } cudaDeviceGetPCIBusId_v4010_params;
50
+
51
+ typedef struct cudaIpcGetEventHandle_v4010_params_st {
52
+ cudaIpcEventHandle_t *handle;
53
+ cudaEvent_t event;
54
+ } cudaIpcGetEventHandle_v4010_params;
55
+
56
+ typedef struct cudaIpcOpenEventHandle_v4010_params_st {
57
+ cudaEvent_t *event;
58
+ cudaIpcEventHandle_t handle;
59
+ } cudaIpcOpenEventHandle_v4010_params;
60
+
61
+ typedef struct cudaIpcGetMemHandle_v4010_params_st {
62
+ cudaIpcMemHandle_t *handle;
63
+ void *devPtr;
64
+ } cudaIpcGetMemHandle_v4010_params;
65
+
66
+ typedef struct cudaIpcOpenMemHandle_v4010_params_st {
67
+ void **devPtr;
68
+ cudaIpcMemHandle_t handle;
69
+ unsigned int flags;
70
+ } cudaIpcOpenMemHandle_v4010_params;
71
+
72
+ typedef struct cudaIpcCloseMemHandle_v4010_params_st {
73
+ void *devPtr;
74
+ } cudaIpcCloseMemHandle_v4010_params;
75
+
76
+ typedef struct cudaDeviceFlushGPUDirectRDMAWrites_v11030_params_st {
77
+ enum cudaFlushGPUDirectRDMAWritesTarget target;
78
+ enum cudaFlushGPUDirectRDMAWritesScope scope;
79
+ } cudaDeviceFlushGPUDirectRDMAWrites_v11030_params;
80
+
81
+ typedef struct cudaDeviceGetSharedMemConfig_v4020_params_st {
82
+ enum cudaSharedMemConfig *pConfig;
83
+ } cudaDeviceGetSharedMemConfig_v4020_params;
84
+
85
+ typedef struct cudaDeviceSetSharedMemConfig_v4020_params_st {
86
+ enum cudaSharedMemConfig config;
87
+ } cudaDeviceSetSharedMemConfig_v4020_params;
88
+
89
+ typedef struct cudaGetErrorName_v6050_params_st {
90
+ cudaError_t error;
91
+ } cudaGetErrorName_v6050_params;
92
+
93
+ typedef struct cudaGetErrorString_v3020_params_st {
94
+ cudaError_t error;
95
+ } cudaGetErrorString_v3020_params;
96
+
97
+ typedef struct cudaGetDeviceCount_v3020_params_st {
98
+ int *count;
99
+ } cudaGetDeviceCount_v3020_params;
100
+
101
+ typedef struct cudaGetDeviceProperties_v2_v12000_params_st {
102
+ struct cudaDeviceProp *prop;
103
+ int device;
104
+ } cudaGetDeviceProperties_v2_v12000_params;
105
+
106
+ typedef struct cudaDeviceGetAttribute_v5000_params_st {
107
+ int *value;
108
+ enum cudaDeviceAttr attr;
109
+ int device;
110
+ } cudaDeviceGetAttribute_v5000_params;
111
+
112
+ typedef struct cudaDeviceGetDefaultMemPool_v11020_params_st {
113
+ cudaMemPool_t *memPool;
114
+ int device;
115
+ } cudaDeviceGetDefaultMemPool_v11020_params;
116
+
117
+ typedef struct cudaDeviceSetMemPool_v11020_params_st {
118
+ int device;
119
+ cudaMemPool_t memPool;
120
+ } cudaDeviceSetMemPool_v11020_params;
121
+
122
+ typedef struct cudaDeviceGetMemPool_v11020_params_st {
123
+ cudaMemPool_t *memPool;
124
+ int device;
125
+ } cudaDeviceGetMemPool_v11020_params;
126
+
127
+ typedef struct cudaDeviceGetNvSciSyncAttributes_v10020_params_st {
128
+ void *nvSciSyncAttrList;
129
+ int device;
130
+ int flags;
131
+ } cudaDeviceGetNvSciSyncAttributes_v10020_params;
132
+
133
+ typedef struct cudaDeviceGetP2PAttribute_v8000_params_st {
134
+ int *value;
135
+ enum cudaDeviceP2PAttr attr;
136
+ int srcDevice;
137
+ int dstDevice;
138
+ } cudaDeviceGetP2PAttribute_v8000_params;
139
+
140
+ typedef struct cudaChooseDevice_v3020_params_st {
141
+ int *device;
142
+ const struct cudaDeviceProp *prop;
143
+ } cudaChooseDevice_v3020_params;
144
+
145
+ typedef struct cudaInitDevice_v12000_params_st {
146
+ int device;
147
+ unsigned int deviceFlags;
148
+ unsigned int flags;
149
+ } cudaInitDevice_v12000_params;
150
+
151
+ typedef struct cudaSetDevice_v3020_params_st {
152
+ int device;
153
+ } cudaSetDevice_v3020_params;
154
+
155
+ typedef struct cudaGetDevice_v3020_params_st {
156
+ int *device;
157
+ } cudaGetDevice_v3020_params;
158
+
159
+ typedef struct cudaSetValidDevices_v3020_params_st {
160
+ int *device_arr;
161
+ int len;
162
+ } cudaSetValidDevices_v3020_params;
163
+
164
+ typedef struct cudaSetDeviceFlags_v3020_params_st {
165
+ unsigned int flags;
166
+ } cudaSetDeviceFlags_v3020_params;
167
+
168
+ typedef struct cudaGetDeviceFlags_v7000_params_st {
169
+ unsigned int *flags;
170
+ } cudaGetDeviceFlags_v7000_params;
171
+
172
+ typedef struct cudaStreamCreate_v3020_params_st {
173
+ cudaStream_t *pStream;
174
+ } cudaStreamCreate_v3020_params;
175
+
176
+ typedef struct cudaStreamCreateWithFlags_v5000_params_st {
177
+ cudaStream_t *pStream;
178
+ unsigned int flags;
179
+ } cudaStreamCreateWithFlags_v5000_params;
180
+
181
+ typedef struct cudaStreamCreateWithPriority_v5050_params_st {
182
+ cudaStream_t *pStream;
183
+ unsigned int flags;
184
+ int priority;
185
+ } cudaStreamCreateWithPriority_v5050_params;
186
+
187
+ typedef struct cudaStreamGetPriority_ptsz_v7000_params_st {
188
+ cudaStream_t hStream;
189
+ int *priority;
190
+ } cudaStreamGetPriority_ptsz_v7000_params;
191
+
192
+ typedef struct cudaStreamGetFlags_ptsz_v7000_params_st {
193
+ cudaStream_t hStream;
194
+ unsigned int *flags;
195
+ } cudaStreamGetFlags_ptsz_v7000_params;
196
+
197
+ typedef struct cudaStreamGetId_ptsz_v12000_params_st {
198
+ cudaStream_t hStream;
199
+ unsigned long long *streamId;
200
+ } cudaStreamGetId_ptsz_v12000_params;
201
+
202
+ typedef struct cudaStreamCopyAttributes_ptsz_v11000_params_st {
203
+ cudaStream_t dst;
204
+ cudaStream_t src;
205
+ } cudaStreamCopyAttributes_ptsz_v11000_params;
206
+
207
+ typedef struct cudaStreamGetAttribute_ptsz_v11000_params_st {
208
+ cudaStream_t hStream;
209
+ cudaStreamAttrID attr;
210
+ cudaStreamAttrValue *value_out;
211
+ } cudaStreamGetAttribute_ptsz_v11000_params;
212
+
213
+ typedef struct cudaStreamSetAttribute_ptsz_v11000_params_st {
214
+ cudaStream_t hStream;
215
+ cudaStreamAttrID attr;
216
+ const cudaStreamAttrValue *value;
217
+ } cudaStreamSetAttribute_ptsz_v11000_params;
218
+
219
+ typedef struct cudaStreamDestroy_v5050_params_st {
220
+ cudaStream_t stream;
221
+ } cudaStreamDestroy_v5050_params;
222
+
223
+ typedef struct cudaStreamWaitEvent_ptsz_v7000_params_st {
224
+ cudaStream_t stream;
225
+ cudaEvent_t event;
226
+ unsigned int flags;
227
+ } cudaStreamWaitEvent_ptsz_v7000_params;
228
+
229
+ typedef struct cudaStreamAddCallback_ptsz_v7000_params_st {
230
+ cudaStream_t stream;
231
+ cudaStreamCallback_t callback;
232
+ void *userData;
233
+ unsigned int flags;
234
+ } cudaStreamAddCallback_ptsz_v7000_params;
235
+
236
+ typedef struct cudaStreamSynchronize_ptsz_v7000_params_st {
237
+ cudaStream_t stream;
238
+ } cudaStreamSynchronize_ptsz_v7000_params;
239
+
240
+ typedef struct cudaStreamQuery_ptsz_v7000_params_st {
241
+ cudaStream_t stream;
242
+ } cudaStreamQuery_ptsz_v7000_params;
243
+
244
+ typedef struct cudaStreamAttachMemAsync_ptsz_v7000_params_st {
245
+ cudaStream_t stream;
246
+ void *devPtr;
247
+ size_t length;
248
+ unsigned int flags;
249
+ } cudaStreamAttachMemAsync_ptsz_v7000_params;
250
+
251
+ typedef struct cudaStreamBeginCapture_ptsz_v10000_params_st {
252
+ cudaStream_t stream;
253
+ enum cudaStreamCaptureMode mode;
254
+ } cudaStreamBeginCapture_ptsz_v10000_params;
255
+
256
+ typedef struct cudaStreamBeginCaptureToGraph_ptsz_v12030_params_st {
257
+ cudaStream_t stream;
258
+ cudaGraph_t graph;
259
+ const cudaGraphNode_t *dependencies;
260
+ const cudaGraphEdgeData *dependencyData;
261
+ size_t numDependencies;
262
+ enum cudaStreamCaptureMode mode;
263
+ } cudaStreamBeginCaptureToGraph_ptsz_v12030_params;
264
+
265
+ typedef struct cudaThreadExchangeStreamCaptureMode_v10010_params_st {
266
+ enum cudaStreamCaptureMode *mode;
267
+ } cudaThreadExchangeStreamCaptureMode_v10010_params;
268
+
269
+ typedef struct cudaStreamEndCapture_ptsz_v10000_params_st {
270
+ cudaStream_t stream;
271
+ cudaGraph_t *pGraph;
272
+ } cudaStreamEndCapture_ptsz_v10000_params;
273
+
274
+ typedef struct cudaStreamIsCapturing_ptsz_v10000_params_st {
275
+ cudaStream_t stream;
276
+ enum cudaStreamCaptureStatus *pCaptureStatus;
277
+ } cudaStreamIsCapturing_ptsz_v10000_params;
278
+
279
+ typedef struct cudaStreamGetCaptureInfo_v2_ptsz_v11030_params_st {
280
+ cudaStream_t stream;
281
+ enum cudaStreamCaptureStatus *captureStatus_out;
282
+ unsigned long long *id_out;
283
+ cudaGraph_t *graph_out;
284
+ const cudaGraphNode_t **dependencies_out;
285
+ size_t *numDependencies_out;
286
+ } cudaStreamGetCaptureInfo_v2_ptsz_v11030_params;
287
+
288
+ typedef struct cudaStreamGetCaptureInfo_v3_ptsz_v12030_params_st {
289
+ cudaStream_t stream;
290
+ enum cudaStreamCaptureStatus *captureStatus_out;
291
+ unsigned long long *id_out;
292
+ cudaGraph_t *graph_out;
293
+ const cudaGraphNode_t **dependencies_out;
294
+ const cudaGraphEdgeData **edgeData_out;
295
+ size_t *numDependencies_out;
296
+ } cudaStreamGetCaptureInfo_v3_ptsz_v12030_params;
297
+
298
+ typedef struct cudaStreamUpdateCaptureDependencies_ptsz_v11030_params_st {
299
+ cudaStream_t stream;
300
+ cudaGraphNode_t *dependencies;
301
+ size_t numDependencies;
302
+ unsigned int flags;
303
+ } cudaStreamUpdateCaptureDependencies_ptsz_v11030_params;
304
+
305
+ typedef struct cudaStreamUpdateCaptureDependencies_v2_ptsz_v12030_params_st {
306
+ cudaStream_t stream;
307
+ cudaGraphNode_t *dependencies;
308
+ const cudaGraphEdgeData *dependencyData;
309
+ size_t numDependencies;
310
+ unsigned int flags;
311
+ } cudaStreamUpdateCaptureDependencies_v2_ptsz_v12030_params;
312
+
313
+ typedef struct cudaEventCreate_v3020_params_st {
314
+ cudaEvent_t *event;
315
+ } cudaEventCreate_v3020_params;
316
+
317
+ typedef struct cudaEventCreateWithFlags_v3020_params_st {
318
+ cudaEvent_t *event;
319
+ unsigned int flags;
320
+ } cudaEventCreateWithFlags_v3020_params;
321
+
322
+ typedef struct cudaEventRecord_ptsz_v7000_params_st {
323
+ cudaEvent_t event;
324
+ cudaStream_t stream;
325
+ } cudaEventRecord_ptsz_v7000_params;
326
+
327
+ typedef struct cudaEventRecordWithFlags_ptsz_v11010_params_st {
328
+ cudaEvent_t event;
329
+ cudaStream_t stream;
330
+ unsigned int flags;
331
+ } cudaEventRecordWithFlags_ptsz_v11010_params;
332
+
333
+ typedef struct cudaEventQuery_v3020_params_st {
334
+ cudaEvent_t event;
335
+ } cudaEventQuery_v3020_params;
336
+
337
+ typedef struct cudaEventSynchronize_v3020_params_st {
338
+ cudaEvent_t event;
339
+ } cudaEventSynchronize_v3020_params;
340
+
341
+ typedef struct cudaEventDestroy_v3020_params_st {
342
+ cudaEvent_t event;
343
+ } cudaEventDestroy_v3020_params;
344
+
345
+ typedef struct cudaEventElapsedTime_v3020_params_st {
346
+ float *ms;
347
+ cudaEvent_t start;
348
+ cudaEvent_t end;
349
+ } cudaEventElapsedTime_v3020_params;
350
+
351
+ typedef struct cudaImportExternalMemory_v10000_params_st {
352
+ cudaExternalMemory_t *extMem_out;
353
+ const struct cudaExternalMemoryHandleDesc *memHandleDesc;
354
+ } cudaImportExternalMemory_v10000_params;
355
+
356
+ typedef struct cudaExternalMemoryGetMappedBuffer_v10000_params_st {
357
+ void **devPtr;
358
+ cudaExternalMemory_t extMem;
359
+ const struct cudaExternalMemoryBufferDesc *bufferDesc;
360
+ } cudaExternalMemoryGetMappedBuffer_v10000_params;
361
+
362
+ typedef struct cudaExternalMemoryGetMappedMipmappedArray_v10000_params_st {
363
+ cudaMipmappedArray_t *mipmap;
364
+ cudaExternalMemory_t extMem;
365
+ const struct cudaExternalMemoryMipmappedArrayDesc *mipmapDesc;
366
+ } cudaExternalMemoryGetMappedMipmappedArray_v10000_params;
367
+
368
+ typedef struct cudaDestroyExternalMemory_v10000_params_st {
369
+ cudaExternalMemory_t extMem;
370
+ } cudaDestroyExternalMemory_v10000_params;
371
+
372
+ typedef struct cudaImportExternalSemaphore_v10000_params_st {
373
+ cudaExternalSemaphore_t *extSem_out;
374
+ const struct cudaExternalSemaphoreHandleDesc *semHandleDesc;
375
+ } cudaImportExternalSemaphore_v10000_params;
376
+
377
+ typedef struct cudaSignalExternalSemaphoresAsync_v2_ptsz_v11020_params_st {
378
+ const cudaExternalSemaphore_t *extSemArray;
379
+ const struct cudaExternalSemaphoreSignalParams *paramsArray;
380
+ unsigned int numExtSems;
381
+ cudaStream_t stream;
382
+ } cudaSignalExternalSemaphoresAsync_v2_ptsz_v11020_params;
383
+
384
+ typedef struct cudaWaitExternalSemaphoresAsync_v2_ptsz_v11020_params_st {
385
+ const cudaExternalSemaphore_t *extSemArray;
386
+ const struct cudaExternalSemaphoreWaitParams *paramsArray;
387
+ unsigned int numExtSems;
388
+ cudaStream_t stream;
389
+ } cudaWaitExternalSemaphoresAsync_v2_ptsz_v11020_params;
390
+
391
+ typedef struct cudaDestroyExternalSemaphore_v10000_params_st {
392
+ cudaExternalSemaphore_t extSem;
393
+ } cudaDestroyExternalSemaphore_v10000_params;
394
+
395
+ typedef struct cudaLaunchKernel_ptsz_v7000_params_st {
396
+ const void *func;
397
+ dim3 gridDim;
398
+ dim3 blockDim;
399
+ void **args;
400
+ size_t sharedMem;
401
+ cudaStream_t stream;
402
+ } cudaLaunchKernel_ptsz_v7000_params;
403
+
404
+ typedef struct cudaLaunchKernelExC_ptsz_v11060_params_st {
405
+ const cudaLaunchConfig_t *config;
406
+ const void *func;
407
+ void **args;
408
+ } cudaLaunchKernelExC_ptsz_v11060_params;
409
+
410
+ typedef struct cudaLaunchCooperativeKernel_ptsz_v9000_params_st {
411
+ const void *func;
412
+ dim3 gridDim;
413
+ dim3 blockDim;
414
+ void **args;
415
+ size_t sharedMem;
416
+ cudaStream_t stream;
417
+ } cudaLaunchCooperativeKernel_ptsz_v9000_params;
418
+
419
+ typedef struct cudaLaunchCooperativeKernelMultiDevice_v9000_params_st {
420
+ struct cudaLaunchParams *launchParamsList;
421
+ unsigned int numDevices;
422
+ unsigned int flags;
423
+ } cudaLaunchCooperativeKernelMultiDevice_v9000_params;
424
+
425
+ typedef struct cudaFuncSetCacheConfig_v3020_params_st {
426
+ const void *func;
427
+ enum cudaFuncCache cacheConfig;
428
+ } cudaFuncSetCacheConfig_v3020_params;
429
+
430
+ typedef struct cudaFuncGetAttributes_v3020_params_st {
431
+ struct cudaFuncAttributes *attr;
432
+ const void *func;
433
+ } cudaFuncGetAttributes_v3020_params;
434
+
435
+ typedef struct cudaFuncSetAttribute_v9000_params_st {
436
+ const void *func;
437
+ enum cudaFuncAttribute attr;
438
+ int value;
439
+ } cudaFuncSetAttribute_v9000_params;
440
+
441
+ typedef struct cudaFuncGetName_v12030_params_st {
442
+ const char **name;
443
+ const void *func;
444
+ } cudaFuncGetName_v12030_params;
445
+
446
+ typedef struct cudaFuncGetParamInfo_v12040_params_st {
447
+ const void *func;
448
+ size_t paramIndex;
449
+ size_t *paramOffset;
450
+ size_t *paramSize;
451
+ } cudaFuncGetParamInfo_v12040_params;
452
+
453
+ typedef struct cudaLaunchHostFunc_ptsz_v10000_params_st {
454
+ cudaStream_t stream;
455
+ cudaHostFn_t fn;
456
+ void *userData;
457
+ } cudaLaunchHostFunc_ptsz_v10000_params;
458
+
459
+ typedef struct cudaFuncSetSharedMemConfig_v4020_params_st {
460
+ const void *func;
461
+ enum cudaSharedMemConfig config;
462
+ } cudaFuncSetSharedMemConfig_v4020_params;
463
+
464
+ typedef struct cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6050_params_st {
465
+ int *numBlocks;
466
+ const void *func;
467
+ int blockSize;
468
+ size_t dynamicSMemSize;
469
+ } cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6050_params;
470
+
471
+ typedef struct cudaOccupancyAvailableDynamicSMemPerBlock_v10200_params_st {
472
+ size_t *dynamicSmemSize;
473
+ const void *func;
474
+ int numBlocks;
475
+ int blockSize;
476
+ } cudaOccupancyAvailableDynamicSMemPerBlock_v10200_params;
477
+
478
+ typedef struct cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000_params_st {
479
+ int *numBlocks;
480
+ const void *func;
481
+ int blockSize;
482
+ size_t dynamicSMemSize;
483
+ unsigned int flags;
484
+ } cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000_params;
485
+
486
+ typedef struct cudaOccupancyMaxPotentialClusterSize_v11070_params_st {
487
+ int *clusterSize;
488
+ const void *func;
489
+ const cudaLaunchConfig_t *launchConfig;
490
+ } cudaOccupancyMaxPotentialClusterSize_v11070_params;
491
+
492
+ typedef struct cudaOccupancyMaxActiveClusters_v11070_params_st {
493
+ int *numClusters;
494
+ const void *func;
495
+ const cudaLaunchConfig_t *launchConfig;
496
+ } cudaOccupancyMaxActiveClusters_v11070_params;
497
+
498
+ typedef struct cudaMallocManaged_v6000_params_st {
499
+ void **devPtr;
500
+ size_t size;
501
+ unsigned int flags;
502
+ } cudaMallocManaged_v6000_params;
503
+
504
+ typedef struct cudaMalloc_v3020_params_st {
505
+ void **devPtr;
506
+ size_t size;
507
+ } cudaMalloc_v3020_params;
508
+
509
+ typedef struct cudaMallocHost_v3020_params_st {
510
+ void **ptr;
511
+ size_t size;
512
+ } cudaMallocHost_v3020_params;
513
+
514
+ typedef struct cudaMallocPitch_v3020_params_st {
515
+ void **devPtr;
516
+ size_t *pitch;
517
+ size_t width;
518
+ size_t height;
519
+ } cudaMallocPitch_v3020_params;
520
+
521
+ typedef struct cudaMallocArray_v3020_params_st {
522
+ cudaArray_t *array;
523
+ const struct cudaChannelFormatDesc *desc;
524
+ size_t width;
525
+ size_t height;
526
+ unsigned int flags;
527
+ } cudaMallocArray_v3020_params;
528
+
529
+ typedef struct cudaFree_v3020_params_st {
530
+ void *devPtr;
531
+ } cudaFree_v3020_params;
532
+
533
+ typedef struct cudaFreeHost_v3020_params_st {
534
+ void *ptr;
535
+ } cudaFreeHost_v3020_params;
536
+
537
+ typedef struct cudaFreeArray_v3020_params_st {
538
+ cudaArray_t array;
539
+ } cudaFreeArray_v3020_params;
540
+
541
+ typedef struct cudaFreeMipmappedArray_v5000_params_st {
542
+ cudaMipmappedArray_t mipmappedArray;
543
+ } cudaFreeMipmappedArray_v5000_params;
544
+
545
+ typedef struct cudaHostAlloc_v3020_params_st {
546
+ void **pHost;
547
+ size_t size;
548
+ unsigned int flags;
549
+ } cudaHostAlloc_v3020_params;
550
+
551
+ typedef struct cudaHostRegister_v4000_params_st {
552
+ void *ptr;
553
+ size_t size;
554
+ unsigned int flags;
555
+ } cudaHostRegister_v4000_params;
556
+
557
+ typedef struct cudaHostUnregister_v4000_params_st {
558
+ void *ptr;
559
+ } cudaHostUnregister_v4000_params;
560
+
561
+ typedef struct cudaHostGetDevicePointer_v3020_params_st {
562
+ void **pDevice;
563
+ void *pHost;
564
+ unsigned int flags;
565
+ } cudaHostGetDevicePointer_v3020_params;
566
+
567
+ typedef struct cudaHostGetFlags_v3020_params_st {
568
+ unsigned int *pFlags;
569
+ void *pHost;
570
+ } cudaHostGetFlags_v3020_params;
571
+
572
+ typedef struct cudaMalloc3D_v3020_params_st {
573
+ struct cudaPitchedPtr *pitchedDevPtr;
574
+ struct cudaExtent extent;
575
+ } cudaMalloc3D_v3020_params;
576
+
577
+ typedef struct cudaMalloc3DArray_v3020_params_st {
578
+ cudaArray_t *array;
579
+ const struct cudaChannelFormatDesc *desc;
580
+ struct cudaExtent extent;
581
+ unsigned int flags;
582
+ } cudaMalloc3DArray_v3020_params;
583
+
584
+ typedef struct cudaMallocMipmappedArray_v5000_params_st {
585
+ cudaMipmappedArray_t *mipmappedArray;
586
+ const struct cudaChannelFormatDesc *desc;
587
+ struct cudaExtent extent;
588
+ unsigned int numLevels;
589
+ unsigned int flags;
590
+ } cudaMallocMipmappedArray_v5000_params;
591
+
592
+ typedef struct cudaGetMipmappedArrayLevel_v5000_params_st {
593
+ cudaArray_t *levelArray;
594
+ cudaMipmappedArray_const_t mipmappedArray;
595
+ unsigned int level;
596
+ } cudaGetMipmappedArrayLevel_v5000_params;
597
+
598
+ typedef struct cudaMemcpy3D_ptds_v7000_params_st {
599
+ const struct cudaMemcpy3DParms *p;
600
+ } cudaMemcpy3D_ptds_v7000_params;
601
+
602
+ typedef struct cudaMemcpy3DPeer_ptds_v7000_params_st {
603
+ const struct cudaMemcpy3DPeerParms *p;
604
+ } cudaMemcpy3DPeer_ptds_v7000_params;
605
+
606
+ typedef struct cudaMemcpy3DAsync_ptsz_v7000_params_st {
607
+ const struct cudaMemcpy3DParms *p;
608
+ cudaStream_t stream;
609
+ } cudaMemcpy3DAsync_ptsz_v7000_params;
610
+
611
+ typedef struct cudaMemcpy3DPeerAsync_ptsz_v7000_params_st {
612
+ const struct cudaMemcpy3DPeerParms *p;
613
+ cudaStream_t stream;
614
+ } cudaMemcpy3DPeerAsync_ptsz_v7000_params;
615
+
616
+ typedef struct cudaMemGetInfo_v3020_params_st {
617
+ size_t *free;
618
+ size_t *total;
619
+ } cudaMemGetInfo_v3020_params;
620
+
621
+ typedef struct cudaArrayGetInfo_v4010_params_st {
622
+ struct cudaChannelFormatDesc *desc;
623
+ struct cudaExtent *extent;
624
+ unsigned int *flags;
625
+ cudaArray_t array;
626
+ } cudaArrayGetInfo_v4010_params;
627
+
628
+ typedef struct cudaArrayGetPlane_v11020_params_st {
629
+ cudaArray_t *pPlaneArray;
630
+ cudaArray_t hArray;
631
+ unsigned int planeIdx;
632
+ } cudaArrayGetPlane_v11020_params;
633
+
634
+ typedef struct cudaArrayGetMemoryRequirements_v11060_params_st {
635
+ struct cudaArrayMemoryRequirements *memoryRequirements;
636
+ cudaArray_t array;
637
+ int device;
638
+ } cudaArrayGetMemoryRequirements_v11060_params;
639
+
640
+ typedef struct cudaMipmappedArrayGetMemoryRequirements_v11060_params_st {
641
+ struct cudaArrayMemoryRequirements *memoryRequirements;
642
+ cudaMipmappedArray_t mipmap;
643
+ int device;
644
+ } cudaMipmappedArrayGetMemoryRequirements_v11060_params;
645
+
646
+ typedef struct cudaArrayGetSparseProperties_v11010_params_st {
647
+ struct cudaArraySparseProperties *sparseProperties;
648
+ cudaArray_t array;
649
+ } cudaArrayGetSparseProperties_v11010_params;
650
+
651
+ typedef struct cudaMipmappedArrayGetSparseProperties_v11010_params_st {
652
+ struct cudaArraySparseProperties *sparseProperties;
653
+ cudaMipmappedArray_t mipmap;
654
+ } cudaMipmappedArrayGetSparseProperties_v11010_params;
655
+
656
+ typedef struct cudaMemcpy_ptds_v7000_params_st {
657
+ void *dst;
658
+ const void *src;
659
+ size_t count;
660
+ enum cudaMemcpyKind kind;
661
+ } cudaMemcpy_ptds_v7000_params;
662
+
663
+ typedef struct cudaMemcpyPeer_v4000_params_st {
664
+ void *dst;
665
+ int dstDevice;
666
+ const void *src;
667
+ int srcDevice;
668
+ size_t count;
669
+ } cudaMemcpyPeer_v4000_params;
670
+
671
+ typedef struct cudaMemcpy2D_ptds_v7000_params_st {
672
+ void *dst;
673
+ size_t dpitch;
674
+ const void *src;
675
+ size_t spitch;
676
+ size_t width;
677
+ size_t height;
678
+ enum cudaMemcpyKind kind;
679
+ } cudaMemcpy2D_ptds_v7000_params;
680
+
681
+ typedef struct cudaMemcpy2DToArray_ptds_v7000_params_st {
682
+ cudaArray_t dst;
683
+ size_t wOffset;
684
+ size_t hOffset;
685
+ const void *src;
686
+ size_t spitch;
687
+ size_t width;
688
+ size_t height;
689
+ enum cudaMemcpyKind kind;
690
+ } cudaMemcpy2DToArray_ptds_v7000_params;
691
+
692
+ typedef struct cudaMemcpy2DFromArray_ptds_v7000_params_st {
693
+ void *dst;
694
+ size_t dpitch;
695
+ cudaArray_const_t src;
696
+ size_t wOffset;
697
+ size_t hOffset;
698
+ size_t width;
699
+ size_t height;
700
+ enum cudaMemcpyKind kind;
701
+ } cudaMemcpy2DFromArray_ptds_v7000_params;
702
+
703
+ typedef struct cudaMemcpy2DArrayToArray_ptds_v7000_params_st {
704
+ cudaArray_t dst;
705
+ size_t wOffsetDst;
706
+ size_t hOffsetDst;
707
+ cudaArray_const_t src;
708
+ size_t wOffsetSrc;
709
+ size_t hOffsetSrc;
710
+ size_t width;
711
+ size_t height;
712
+ enum cudaMemcpyKind kind;
713
+ } cudaMemcpy2DArrayToArray_ptds_v7000_params;
714
+
715
+ typedef struct cudaMemcpyToSymbol_ptds_v7000_params_st {
716
+ const void *symbol;
717
+ const void *src;
718
+ size_t count;
719
+ size_t offset;
720
+ enum cudaMemcpyKind kind;
721
+ } cudaMemcpyToSymbol_ptds_v7000_params;
722
+
723
+ typedef struct cudaMemcpyFromSymbol_ptds_v7000_params_st {
724
+ void *dst;
725
+ const void *symbol;
726
+ size_t count;
727
+ size_t offset;
728
+ enum cudaMemcpyKind kind;
729
+ } cudaMemcpyFromSymbol_ptds_v7000_params;
730
+
731
+ typedef struct cudaMemcpyAsync_ptsz_v7000_params_st {
732
+ void *dst;
733
+ const void *src;
734
+ size_t count;
735
+ enum cudaMemcpyKind kind;
736
+ cudaStream_t stream;
737
+ } cudaMemcpyAsync_ptsz_v7000_params;
738
+
739
+ typedef struct cudaMemcpyPeerAsync_v4000_params_st {
740
+ void *dst;
741
+ int dstDevice;
742
+ const void *src;
743
+ int srcDevice;
744
+ size_t count;
745
+ cudaStream_t stream;
746
+ } cudaMemcpyPeerAsync_v4000_params;
747
+
748
+ typedef struct cudaMemcpy2DAsync_ptsz_v7000_params_st {
749
+ void *dst;
750
+ size_t dpitch;
751
+ const void *src;
752
+ size_t spitch;
753
+ size_t width;
754
+ size_t height;
755
+ enum cudaMemcpyKind kind;
756
+ cudaStream_t stream;
757
+ } cudaMemcpy2DAsync_ptsz_v7000_params;
758
+
759
+ typedef struct cudaMemcpy2DToArrayAsync_ptsz_v7000_params_st {
760
+ cudaArray_t dst;
761
+ size_t wOffset;
762
+ size_t hOffset;
763
+ const void *src;
764
+ size_t spitch;
765
+ size_t width;
766
+ size_t height;
767
+ enum cudaMemcpyKind kind;
768
+ cudaStream_t stream;
769
+ } cudaMemcpy2DToArrayAsync_ptsz_v7000_params;
770
+
771
+ typedef struct cudaMemcpy2DFromArrayAsync_ptsz_v7000_params_st {
772
+ void *dst;
773
+ size_t dpitch;
774
+ cudaArray_const_t src;
775
+ size_t wOffset;
776
+ size_t hOffset;
777
+ size_t width;
778
+ size_t height;
779
+ enum cudaMemcpyKind kind;
780
+ cudaStream_t stream;
781
+ } cudaMemcpy2DFromArrayAsync_ptsz_v7000_params;
782
+
783
+ typedef struct cudaMemcpyToSymbolAsync_ptsz_v7000_params_st {
784
+ const void *symbol;
785
+ const void *src;
786
+ size_t count;
787
+ size_t offset;
788
+ enum cudaMemcpyKind kind;
789
+ cudaStream_t stream;
790
+ } cudaMemcpyToSymbolAsync_ptsz_v7000_params;
791
+
792
+ typedef struct cudaMemcpyFromSymbolAsync_ptsz_v7000_params_st {
793
+ void *dst;
794
+ const void *symbol;
795
+ size_t count;
796
+ size_t offset;
797
+ enum cudaMemcpyKind kind;
798
+ cudaStream_t stream;
799
+ } cudaMemcpyFromSymbolAsync_ptsz_v7000_params;
800
+
801
+ typedef struct cudaMemset_ptds_v7000_params_st {
802
+ void *devPtr;
803
+ int value;
804
+ size_t count;
805
+ } cudaMemset_ptds_v7000_params;
806
+
807
+ typedef struct cudaMemset2D_ptds_v7000_params_st {
808
+ void *devPtr;
809
+ size_t pitch;
810
+ int value;
811
+ size_t width;
812
+ size_t height;
813
+ } cudaMemset2D_ptds_v7000_params;
814
+
815
+ typedef struct cudaMemset3D_ptds_v7000_params_st {
816
+ struct cudaPitchedPtr pitchedDevPtr;
817
+ int value;
818
+ struct cudaExtent extent;
819
+ } cudaMemset3D_ptds_v7000_params;
820
+
821
+ typedef struct cudaMemsetAsync_ptsz_v7000_params_st {
822
+ void *devPtr;
823
+ int value;
824
+ size_t count;
825
+ cudaStream_t stream;
826
+ } cudaMemsetAsync_ptsz_v7000_params;
827
+
828
+ typedef struct cudaMemset2DAsync_ptsz_v7000_params_st {
829
+ void *devPtr;
830
+ size_t pitch;
831
+ int value;
832
+ size_t width;
833
+ size_t height;
834
+ cudaStream_t stream;
835
+ } cudaMemset2DAsync_ptsz_v7000_params;
836
+
837
+ typedef struct cudaMemset3DAsync_ptsz_v7000_params_st {
838
+ struct cudaPitchedPtr pitchedDevPtr;
839
+ int value;
840
+ struct cudaExtent extent;
841
+ cudaStream_t stream;
842
+ } cudaMemset3DAsync_ptsz_v7000_params;
843
+
844
+ typedef struct cudaGetSymbolAddress_v3020_params_st {
845
+ void **devPtr;
846
+ const void *symbol;
847
+ } cudaGetSymbolAddress_v3020_params;
848
+
849
+ typedef struct cudaGetSymbolSize_v3020_params_st {
850
+ size_t *size;
851
+ const void *symbol;
852
+ } cudaGetSymbolSize_v3020_params;
853
+
854
+ typedef struct cudaMemPrefetchAsync_ptsz_v8000_params_st {
855
+ const void *devPtr;
856
+ size_t count;
857
+ int dstDevice;
858
+ cudaStream_t stream;
859
+ } cudaMemPrefetchAsync_ptsz_v8000_params;
860
+
861
+ typedef struct cudaMemPrefetchAsync_v2_ptsz_v12020_params_st {
862
+ const void *devPtr;
863
+ size_t count;
864
+ struct cudaMemLocation location;
865
+ unsigned int flags;
866
+ cudaStream_t stream;
867
+ } cudaMemPrefetchAsync_v2_ptsz_v12020_params;
868
+
869
+ typedef struct cudaMemAdvise_v8000_params_st {
870
+ const void *devPtr;
871
+ size_t count;
872
+ enum cudaMemoryAdvise advice;
873
+ int device;
874
+ } cudaMemAdvise_v8000_params;
875
+
876
+ typedef struct cudaMemAdvise_v2_v12020_params_st {
877
+ const void *devPtr;
878
+ size_t count;
879
+ enum cudaMemoryAdvise advice;
880
+ struct cudaMemLocation location;
881
+ } cudaMemAdvise_v2_v12020_params;
882
+
883
+ typedef struct cudaMemRangeGetAttribute_v8000_params_st {
884
+ void *data;
885
+ size_t dataSize;
886
+ enum cudaMemRangeAttribute attribute;
887
+ const void *devPtr;
888
+ size_t count;
889
+ } cudaMemRangeGetAttribute_v8000_params;
890
+
891
+ typedef struct cudaMemRangeGetAttributes_v8000_params_st {
892
+ void **data;
893
+ size_t *dataSizes;
894
+ enum cudaMemRangeAttribute *attributes;
895
+ size_t numAttributes;
896
+ const void *devPtr;
897
+ size_t count;
898
+ } cudaMemRangeGetAttributes_v8000_params;
899
+
900
+ typedef struct cudaMemcpyToArray_ptds_v7000_params_st {
901
+ cudaArray_t dst;
902
+ size_t wOffset;
903
+ size_t hOffset;
904
+ const void *src;
905
+ size_t count;
906
+ enum cudaMemcpyKind kind;
907
+ } cudaMemcpyToArray_ptds_v7000_params;
908
+
909
+ typedef struct cudaMemcpyFromArray_ptds_v7000_params_st {
910
+ void *dst;
911
+ cudaArray_const_t src;
912
+ size_t wOffset;
913
+ size_t hOffset;
914
+ size_t count;
915
+ enum cudaMemcpyKind kind;
916
+ } cudaMemcpyFromArray_ptds_v7000_params;
917
+
918
+ typedef struct cudaMemcpyArrayToArray_ptds_v7000_params_st {
919
+ cudaArray_t dst;
920
+ size_t wOffsetDst;
921
+ size_t hOffsetDst;
922
+ cudaArray_const_t src;
923
+ size_t wOffsetSrc;
924
+ size_t hOffsetSrc;
925
+ size_t count;
926
+ enum cudaMemcpyKind kind;
927
+ } cudaMemcpyArrayToArray_ptds_v7000_params;
928
+
929
+ typedef struct cudaMemcpyToArrayAsync_ptsz_v7000_params_st {
930
+ cudaArray_t dst;
931
+ size_t wOffset;
932
+ size_t hOffset;
933
+ const void *src;
934
+ size_t count;
935
+ enum cudaMemcpyKind kind;
936
+ cudaStream_t stream;
937
+ } cudaMemcpyToArrayAsync_ptsz_v7000_params;
938
+
939
+ typedef struct cudaMemcpyFromArrayAsync_ptsz_v7000_params_st {
940
+ void *dst;
941
+ cudaArray_const_t src;
942
+ size_t wOffset;
943
+ size_t hOffset;
944
+ size_t count;
945
+ enum cudaMemcpyKind kind;
946
+ cudaStream_t stream;
947
+ } cudaMemcpyFromArrayAsync_ptsz_v7000_params;
948
+
949
+ typedef struct cudaMallocAsync_ptsz_v11020_params_st {
950
+ void **devPtr;
951
+ size_t size;
952
+ cudaStream_t hStream;
953
+ } cudaMallocAsync_ptsz_v11020_params;
954
+
955
+ typedef struct cudaFreeAsync_ptsz_v11020_params_st {
956
+ void *devPtr;
957
+ cudaStream_t hStream;
958
+ } cudaFreeAsync_ptsz_v11020_params;
959
+
960
+ typedef struct cudaMemPoolTrimTo_v11020_params_st {
961
+ cudaMemPool_t memPool;
962
+ size_t minBytesToKeep;
963
+ } cudaMemPoolTrimTo_v11020_params;
964
+
965
+ typedef struct cudaMemPoolSetAttribute_v11020_params_st {
966
+ cudaMemPool_t memPool;
967
+ enum cudaMemPoolAttr attr;
968
+ void *value;
969
+ } cudaMemPoolSetAttribute_v11020_params;
970
+
971
+ typedef struct cudaMemPoolGetAttribute_v11020_params_st {
972
+ cudaMemPool_t memPool;
973
+ enum cudaMemPoolAttr attr;
974
+ void *value;
975
+ } cudaMemPoolGetAttribute_v11020_params;
976
+
977
+ typedef struct cudaMemPoolSetAccess_v11020_params_st {
978
+ cudaMemPool_t memPool;
979
+ const struct cudaMemAccessDesc *descList;
980
+ size_t count;
981
+ } cudaMemPoolSetAccess_v11020_params;
982
+
983
+ typedef struct cudaMemPoolGetAccess_v11020_params_st {
984
+ enum cudaMemAccessFlags *flags;
985
+ cudaMemPool_t memPool;
986
+ struct cudaMemLocation *location;
987
+ } cudaMemPoolGetAccess_v11020_params;
988
+
989
+ typedef struct cudaMemPoolCreate_v11020_params_st {
990
+ cudaMemPool_t *memPool;
991
+ const struct cudaMemPoolProps *poolProps;
992
+ } cudaMemPoolCreate_v11020_params;
993
+
994
+ typedef struct cudaMemPoolDestroy_v11020_params_st {
995
+ cudaMemPool_t memPool;
996
+ } cudaMemPoolDestroy_v11020_params;
997
+
998
+ typedef struct cudaMallocFromPoolAsync_ptsz_v11020_params_st {
999
+ void **ptr;
1000
+ size_t size;
1001
+ cudaMemPool_t memPool;
1002
+ cudaStream_t stream;
1003
+ } cudaMallocFromPoolAsync_ptsz_v11020_params;
1004
+
1005
+ typedef struct cudaMemPoolExportToShareableHandle_v11020_params_st {
1006
+ void *shareableHandle;
1007
+ cudaMemPool_t memPool;
1008
+ enum cudaMemAllocationHandleType handleType;
1009
+ unsigned int flags;
1010
+ } cudaMemPoolExportToShareableHandle_v11020_params;
1011
+
1012
+ typedef struct cudaMemPoolImportFromShareableHandle_v11020_params_st {
1013
+ cudaMemPool_t *memPool;
1014
+ void *shareableHandle;
1015
+ enum cudaMemAllocationHandleType handleType;
1016
+ unsigned int flags;
1017
+ } cudaMemPoolImportFromShareableHandle_v11020_params;
1018
+
1019
+ typedef struct cudaMemPoolExportPointer_v11020_params_st {
1020
+ struct cudaMemPoolPtrExportData *exportData;
1021
+ void *ptr;
1022
+ } cudaMemPoolExportPointer_v11020_params;
1023
+
1024
+ typedef struct cudaMemPoolImportPointer_v11020_params_st {
1025
+ void **ptr;
1026
+ cudaMemPool_t memPool;
1027
+ struct cudaMemPoolPtrExportData *exportData;
1028
+ } cudaMemPoolImportPointer_v11020_params;
1029
+
1030
+ typedef struct cudaPointerGetAttributes_v4000_params_st {
1031
+ struct cudaPointerAttributes *attributes;
1032
+ const void *ptr;
1033
+ } cudaPointerGetAttributes_v4000_params;
1034
+
1035
+ typedef struct cudaDeviceCanAccessPeer_v4000_params_st {
1036
+ int *canAccessPeer;
1037
+ int device;
1038
+ int peerDevice;
1039
+ } cudaDeviceCanAccessPeer_v4000_params;
1040
+
1041
+ typedef struct cudaDeviceEnablePeerAccess_v4000_params_st {
1042
+ int peerDevice;
1043
+ unsigned int flags;
1044
+ } cudaDeviceEnablePeerAccess_v4000_params;
1045
+
1046
+ typedef struct cudaDeviceDisablePeerAccess_v4000_params_st {
1047
+ int peerDevice;
1048
+ } cudaDeviceDisablePeerAccess_v4000_params;
1049
+
1050
+ typedef struct cudaGraphicsUnregisterResource_v3020_params_st {
1051
+ cudaGraphicsResource_t resource;
1052
+ } cudaGraphicsUnregisterResource_v3020_params;
1053
+
1054
+ typedef struct cudaGraphicsResourceSetMapFlags_v3020_params_st {
1055
+ cudaGraphicsResource_t resource;
1056
+ unsigned int flags;
1057
+ } cudaGraphicsResourceSetMapFlags_v3020_params;
1058
+
1059
+ typedef struct cudaGraphicsMapResources_v3020_params_st {
1060
+ int count;
1061
+ cudaGraphicsResource_t *resources;
1062
+ cudaStream_t stream;
1063
+ } cudaGraphicsMapResources_v3020_params;
1064
+
1065
+ typedef struct cudaGraphicsUnmapResources_v3020_params_st {
1066
+ int count;
1067
+ cudaGraphicsResource_t *resources;
1068
+ cudaStream_t stream;
1069
+ } cudaGraphicsUnmapResources_v3020_params;
1070
+
1071
+ typedef struct cudaGraphicsResourceGetMappedPointer_v3020_params_st {
1072
+ void **devPtr;
1073
+ size_t *size;
1074
+ cudaGraphicsResource_t resource;
1075
+ } cudaGraphicsResourceGetMappedPointer_v3020_params;
1076
+
1077
+ typedef struct cudaGraphicsSubResourceGetMappedArray_v3020_params_st {
1078
+ cudaArray_t *array;
1079
+ cudaGraphicsResource_t resource;
1080
+ unsigned int arrayIndex;
1081
+ unsigned int mipLevel;
1082
+ } cudaGraphicsSubResourceGetMappedArray_v3020_params;
1083
+
1084
+ typedef struct cudaGraphicsResourceGetMappedMipmappedArray_v5000_params_st {
1085
+ cudaMipmappedArray_t *mipmappedArray;
1086
+ cudaGraphicsResource_t resource;
1087
+ } cudaGraphicsResourceGetMappedMipmappedArray_v5000_params;
1088
+
1089
+ typedef struct cudaGetChannelDesc_v3020_params_st {
1090
+ struct cudaChannelFormatDesc *desc;
1091
+ cudaArray_const_t array;
1092
+ } cudaGetChannelDesc_v3020_params;
1093
+
1094
+ typedef struct cudaCreateChannelDesc_v3020_params_st {
1095
+ int x;
1096
+ int y;
1097
+ int z;
1098
+ int w;
1099
+ enum cudaChannelFormatKind f;
1100
+ } cudaCreateChannelDesc_v3020_params;
1101
+
1102
+ typedef struct cudaCreateTextureObject_v5000_params_st {
1103
+ cudaTextureObject_t *pTexObject;
1104
+ const struct cudaResourceDesc *pResDesc;
1105
+ const struct cudaTextureDesc *pTexDesc;
1106
+ const struct cudaResourceViewDesc *pResViewDesc;
1107
+ } cudaCreateTextureObject_v5000_params;
1108
+
1109
+ typedef struct cudaDestroyTextureObject_v5000_params_st {
1110
+ cudaTextureObject_t texObject;
1111
+ } cudaDestroyTextureObject_v5000_params;
1112
+
1113
+ typedef struct cudaGetTextureObjectResourceDesc_v5000_params_st {
1114
+ struct cudaResourceDesc *pResDesc;
1115
+ cudaTextureObject_t texObject;
1116
+ } cudaGetTextureObjectResourceDesc_v5000_params;
1117
+
1118
+ typedef struct cudaGetTextureObjectTextureDesc_v5000_params_st {
1119
+ struct cudaTextureDesc *pTexDesc;
1120
+ cudaTextureObject_t texObject;
1121
+ } cudaGetTextureObjectTextureDesc_v5000_params;
1122
+
1123
+ typedef struct cudaGetTextureObjectResourceViewDesc_v5000_params_st {
1124
+ struct cudaResourceViewDesc *pResViewDesc;
1125
+ cudaTextureObject_t texObject;
1126
+ } cudaGetTextureObjectResourceViewDesc_v5000_params;
1127
+
1128
+ typedef struct cudaCreateSurfaceObject_v5000_params_st {
1129
+ cudaSurfaceObject_t *pSurfObject;
1130
+ const struct cudaResourceDesc *pResDesc;
1131
+ } cudaCreateSurfaceObject_v5000_params;
1132
+
1133
+ typedef struct cudaDestroySurfaceObject_v5000_params_st {
1134
+ cudaSurfaceObject_t surfObject;
1135
+ } cudaDestroySurfaceObject_v5000_params;
1136
+
1137
+ typedef struct cudaGetSurfaceObjectResourceDesc_v5000_params_st {
1138
+ struct cudaResourceDesc *pResDesc;
1139
+ cudaSurfaceObject_t surfObject;
1140
+ } cudaGetSurfaceObjectResourceDesc_v5000_params;
1141
+
1142
+ typedef struct cudaDriverGetVersion_v3020_params_st {
1143
+ int *driverVersion;
1144
+ } cudaDriverGetVersion_v3020_params;
1145
+
1146
+ typedef struct cudaRuntimeGetVersion_v3020_params_st {
1147
+ int *runtimeVersion;
1148
+ } cudaRuntimeGetVersion_v3020_params;
1149
+
1150
+ typedef struct cudaGraphCreate_v10000_params_st {
1151
+ cudaGraph_t *pGraph;
1152
+ unsigned int flags;
1153
+ } cudaGraphCreate_v10000_params;
1154
+
1155
+ typedef struct cudaGraphAddKernelNode_v10000_params_st {
1156
+ cudaGraphNode_t *pGraphNode;
1157
+ cudaGraph_t graph;
1158
+ const cudaGraphNode_t *pDependencies;
1159
+ size_t numDependencies;
1160
+ const struct cudaKernelNodeParams *pNodeParams;
1161
+ } cudaGraphAddKernelNode_v10000_params;
1162
+
1163
+ typedef struct cudaGraphKernelNodeGetParams_v10000_params_st {
1164
+ cudaGraphNode_t node;
1165
+ struct cudaKernelNodeParams *pNodeParams;
1166
+ } cudaGraphKernelNodeGetParams_v10000_params;
1167
+
1168
+ typedef struct cudaGraphKernelNodeSetParams_v10000_params_st {
1169
+ cudaGraphNode_t node;
1170
+ const struct cudaKernelNodeParams *pNodeParams;
1171
+ } cudaGraphKernelNodeSetParams_v10000_params;
1172
+
1173
+ typedef struct cudaGraphKernelNodeCopyAttributes_v11000_params_st {
1174
+ cudaGraphNode_t hSrc;
1175
+ cudaGraphNode_t hDst;
1176
+ } cudaGraphKernelNodeCopyAttributes_v11000_params;
1177
+
1178
+ typedef struct cudaGraphKernelNodeGetAttribute_v11000_params_st {
1179
+ cudaGraphNode_t hNode;
1180
+ cudaKernelNodeAttrID attr;
1181
+ cudaKernelNodeAttrValue *value_out;
1182
+ } cudaGraphKernelNodeGetAttribute_v11000_params;
1183
+
1184
+ typedef struct cudaGraphKernelNodeSetAttribute_v11000_params_st {
1185
+ cudaGraphNode_t hNode;
1186
+ cudaKernelNodeAttrID attr;
1187
+ const cudaKernelNodeAttrValue *value;
1188
+ } cudaGraphKernelNodeSetAttribute_v11000_params;
1189
+
1190
+ typedef struct cudaGraphAddMemcpyNode_v10000_params_st {
1191
+ cudaGraphNode_t *pGraphNode;
1192
+ cudaGraph_t graph;
1193
+ const cudaGraphNode_t *pDependencies;
1194
+ size_t numDependencies;
1195
+ const struct cudaMemcpy3DParms *pCopyParams;
1196
+ } cudaGraphAddMemcpyNode_v10000_params;
1197
+
1198
+ typedef struct cudaGraphAddMemcpyNodeToSymbol_v11010_params_st {
1199
+ cudaGraphNode_t *pGraphNode;
1200
+ cudaGraph_t graph;
1201
+ const cudaGraphNode_t *pDependencies;
1202
+ size_t numDependencies;
1203
+ const void *symbol;
1204
+ const void *src;
1205
+ size_t count;
1206
+ size_t offset;
1207
+ enum cudaMemcpyKind kind;
1208
+ } cudaGraphAddMemcpyNodeToSymbol_v11010_params;
1209
+
1210
+ typedef struct cudaGraphAddMemcpyNodeFromSymbol_v11010_params_st {
1211
+ cudaGraphNode_t *pGraphNode;
1212
+ cudaGraph_t graph;
1213
+ const cudaGraphNode_t *pDependencies;
1214
+ size_t numDependencies;
1215
+ void *dst;
1216
+ const void *symbol;
1217
+ size_t count;
1218
+ size_t offset;
1219
+ enum cudaMemcpyKind kind;
1220
+ } cudaGraphAddMemcpyNodeFromSymbol_v11010_params;
1221
+
1222
+ typedef struct cudaGraphAddMemcpyNode1D_v11010_params_st {
1223
+ cudaGraphNode_t *pGraphNode;
1224
+ cudaGraph_t graph;
1225
+ const cudaGraphNode_t *pDependencies;
1226
+ size_t numDependencies;
1227
+ void *dst;
1228
+ const void *src;
1229
+ size_t count;
1230
+ enum cudaMemcpyKind kind;
1231
+ } cudaGraphAddMemcpyNode1D_v11010_params;
1232
+
1233
+ typedef struct cudaGraphMemcpyNodeGetParams_v10000_params_st {
1234
+ cudaGraphNode_t node;
1235
+ struct cudaMemcpy3DParms *pNodeParams;
1236
+ } cudaGraphMemcpyNodeGetParams_v10000_params;
1237
+
1238
+ typedef struct cudaGraphMemcpyNodeSetParams_v10000_params_st {
1239
+ cudaGraphNode_t node;
1240
+ const struct cudaMemcpy3DParms *pNodeParams;
1241
+ } cudaGraphMemcpyNodeSetParams_v10000_params;
1242
+
1243
+ typedef struct cudaGraphMemcpyNodeSetParamsToSymbol_v11010_params_st {
1244
+ cudaGraphNode_t node;
1245
+ const void *symbol;
1246
+ const void *src;
1247
+ size_t count;
1248
+ size_t offset;
1249
+ enum cudaMemcpyKind kind;
1250
+ } cudaGraphMemcpyNodeSetParamsToSymbol_v11010_params;
1251
+
1252
+ typedef struct cudaGraphMemcpyNodeSetParamsFromSymbol_v11010_params_st {
1253
+ cudaGraphNode_t node;
1254
+ void *dst;
1255
+ const void *symbol;
1256
+ size_t count;
1257
+ size_t offset;
1258
+ enum cudaMemcpyKind kind;
1259
+ } cudaGraphMemcpyNodeSetParamsFromSymbol_v11010_params;
1260
+
1261
+ typedef struct cudaGraphMemcpyNodeSetParams1D_v11010_params_st {
1262
+ cudaGraphNode_t node;
1263
+ void *dst;
1264
+ const void *src;
1265
+ size_t count;
1266
+ enum cudaMemcpyKind kind;
1267
+ } cudaGraphMemcpyNodeSetParams1D_v11010_params;
1268
+
1269
+ typedef struct cudaGraphAddMemsetNode_v10000_params_st {
1270
+ cudaGraphNode_t *pGraphNode;
1271
+ cudaGraph_t graph;
1272
+ const cudaGraphNode_t *pDependencies;
1273
+ size_t numDependencies;
1274
+ const struct cudaMemsetParams *pMemsetParams;
1275
+ } cudaGraphAddMemsetNode_v10000_params;
1276
+
1277
+ typedef struct cudaGraphMemsetNodeGetParams_v10000_params_st {
1278
+ cudaGraphNode_t node;
1279
+ struct cudaMemsetParams *pNodeParams;
1280
+ } cudaGraphMemsetNodeGetParams_v10000_params;
1281
+
1282
+ typedef struct cudaGraphMemsetNodeSetParams_v10000_params_st {
1283
+ cudaGraphNode_t node;
1284
+ const struct cudaMemsetParams *pNodeParams;
1285
+ } cudaGraphMemsetNodeSetParams_v10000_params;
1286
+
1287
+ typedef struct cudaGraphAddHostNode_v10000_params_st {
1288
+ cudaGraphNode_t *pGraphNode;
1289
+ cudaGraph_t graph;
1290
+ const cudaGraphNode_t *pDependencies;
1291
+ size_t numDependencies;
1292
+ const struct cudaHostNodeParams *pNodeParams;
1293
+ } cudaGraphAddHostNode_v10000_params;
1294
+
1295
+ typedef struct cudaGraphHostNodeGetParams_v10000_params_st {
1296
+ cudaGraphNode_t node;
1297
+ struct cudaHostNodeParams *pNodeParams;
1298
+ } cudaGraphHostNodeGetParams_v10000_params;
1299
+
1300
+ typedef struct cudaGraphHostNodeSetParams_v10000_params_st {
1301
+ cudaGraphNode_t node;
1302
+ const struct cudaHostNodeParams *pNodeParams;
1303
+ } cudaGraphHostNodeSetParams_v10000_params;
1304
+
1305
+ typedef struct cudaGraphAddChildGraphNode_v10000_params_st {
1306
+ cudaGraphNode_t *pGraphNode;
1307
+ cudaGraph_t graph;
1308
+ const cudaGraphNode_t *pDependencies;
1309
+ size_t numDependencies;
1310
+ cudaGraph_t childGraph;
1311
+ } cudaGraphAddChildGraphNode_v10000_params;
1312
+
1313
+ typedef struct cudaGraphChildGraphNodeGetGraph_v10000_params_st {
1314
+ cudaGraphNode_t node;
1315
+ cudaGraph_t *pGraph;
1316
+ } cudaGraphChildGraphNodeGetGraph_v10000_params;
1317
+
1318
+ typedef struct cudaGraphAddEmptyNode_v10000_params_st {
1319
+ cudaGraphNode_t *pGraphNode;
1320
+ cudaGraph_t graph;
1321
+ const cudaGraphNode_t *pDependencies;
1322
+ size_t numDependencies;
1323
+ } cudaGraphAddEmptyNode_v10000_params;
1324
+
1325
+ typedef struct cudaGraphAddEventRecordNode_v11010_params_st {
1326
+ cudaGraphNode_t *pGraphNode;
1327
+ cudaGraph_t graph;
1328
+ const cudaGraphNode_t *pDependencies;
1329
+ size_t numDependencies;
1330
+ cudaEvent_t event;
1331
+ } cudaGraphAddEventRecordNode_v11010_params;
1332
+
1333
+ typedef struct cudaGraphEventRecordNodeGetEvent_v11010_params_st {
1334
+ cudaGraphNode_t node;
1335
+ cudaEvent_t *event_out;
1336
+ } cudaGraphEventRecordNodeGetEvent_v11010_params;
1337
+
1338
+ typedef struct cudaGraphEventRecordNodeSetEvent_v11010_params_st {
1339
+ cudaGraphNode_t node;
1340
+ cudaEvent_t event;
1341
+ } cudaGraphEventRecordNodeSetEvent_v11010_params;
1342
+
1343
+ typedef struct cudaGraphAddEventWaitNode_v11010_params_st {
1344
+ cudaGraphNode_t *pGraphNode;
1345
+ cudaGraph_t graph;
1346
+ const cudaGraphNode_t *pDependencies;
1347
+ size_t numDependencies;
1348
+ cudaEvent_t event;
1349
+ } cudaGraphAddEventWaitNode_v11010_params;
1350
+
1351
+ typedef struct cudaGraphEventWaitNodeGetEvent_v11010_params_st {
1352
+ cudaGraphNode_t node;
1353
+ cudaEvent_t *event_out;
1354
+ } cudaGraphEventWaitNodeGetEvent_v11010_params;
1355
+
1356
+ typedef struct cudaGraphEventWaitNodeSetEvent_v11010_params_st {
1357
+ cudaGraphNode_t node;
1358
+ cudaEvent_t event;
1359
+ } cudaGraphEventWaitNodeSetEvent_v11010_params;
1360
+
1361
+ typedef struct cudaGraphAddExternalSemaphoresSignalNode_v11020_params_st {
1362
+ cudaGraphNode_t *pGraphNode;
1363
+ cudaGraph_t graph;
1364
+ const cudaGraphNode_t *pDependencies;
1365
+ size_t numDependencies;
1366
+ const struct cudaExternalSemaphoreSignalNodeParams *nodeParams;
1367
+ } cudaGraphAddExternalSemaphoresSignalNode_v11020_params;
1368
+
1369
+ typedef struct cudaGraphExternalSemaphoresSignalNodeGetParams_v11020_params_st {
1370
+ cudaGraphNode_t hNode;
1371
+ struct cudaExternalSemaphoreSignalNodeParams *params_out;
1372
+ } cudaGraphExternalSemaphoresSignalNodeGetParams_v11020_params;
1373
+
1374
+ typedef struct cudaGraphExternalSemaphoresSignalNodeSetParams_v11020_params_st {
1375
+ cudaGraphNode_t hNode;
1376
+ const struct cudaExternalSemaphoreSignalNodeParams *nodeParams;
1377
+ } cudaGraphExternalSemaphoresSignalNodeSetParams_v11020_params;
1378
+
1379
+ typedef struct cudaGraphAddExternalSemaphoresWaitNode_v11020_params_st {
1380
+ cudaGraphNode_t *pGraphNode;
1381
+ cudaGraph_t graph;
1382
+ const cudaGraphNode_t *pDependencies;
1383
+ size_t numDependencies;
1384
+ const struct cudaExternalSemaphoreWaitNodeParams *nodeParams;
1385
+ } cudaGraphAddExternalSemaphoresWaitNode_v11020_params;
1386
+
1387
+ typedef struct cudaGraphExternalSemaphoresWaitNodeGetParams_v11020_params_st {
1388
+ cudaGraphNode_t hNode;
1389
+ struct cudaExternalSemaphoreWaitNodeParams *params_out;
1390
+ } cudaGraphExternalSemaphoresWaitNodeGetParams_v11020_params;
1391
+
1392
+ typedef struct cudaGraphExternalSemaphoresWaitNodeSetParams_v11020_params_st {
1393
+ cudaGraphNode_t hNode;
1394
+ const struct cudaExternalSemaphoreWaitNodeParams *nodeParams;
1395
+ } cudaGraphExternalSemaphoresWaitNodeSetParams_v11020_params;
1396
+
1397
+ typedef struct cudaGraphAddMemAllocNode_v11040_params_st {
1398
+ cudaGraphNode_t *pGraphNode;
1399
+ cudaGraph_t graph;
1400
+ const cudaGraphNode_t *pDependencies;
1401
+ size_t numDependencies;
1402
+ struct cudaMemAllocNodeParams *nodeParams;
1403
+ } cudaGraphAddMemAllocNode_v11040_params;
1404
+
1405
+ typedef struct cudaGraphMemAllocNodeGetParams_v11040_params_st {
1406
+ cudaGraphNode_t node;
1407
+ struct cudaMemAllocNodeParams *params_out;
1408
+ } cudaGraphMemAllocNodeGetParams_v11040_params;
1409
+
1410
+ typedef struct cudaGraphAddMemFreeNode_v11040_params_st {
1411
+ cudaGraphNode_t *pGraphNode;
1412
+ cudaGraph_t graph;
1413
+ const cudaGraphNode_t *pDependencies;
1414
+ size_t numDependencies;
1415
+ void *dptr;
1416
+ } cudaGraphAddMemFreeNode_v11040_params;
1417
+
1418
+ typedef struct cudaGraphMemFreeNodeGetParams_v11040_params_st {
1419
+ cudaGraphNode_t node;
1420
+ void *dptr_out;
1421
+ } cudaGraphMemFreeNodeGetParams_v11040_params;
1422
+
1423
+ typedef struct cudaDeviceGraphMemTrim_v11040_params_st {
1424
+ int device;
1425
+ } cudaDeviceGraphMemTrim_v11040_params;
1426
+
1427
+ typedef struct cudaDeviceGetGraphMemAttribute_v11040_params_st {
1428
+ int device;
1429
+ enum cudaGraphMemAttributeType attr;
1430
+ void *value;
1431
+ } cudaDeviceGetGraphMemAttribute_v11040_params;
1432
+
1433
+ typedef struct cudaDeviceSetGraphMemAttribute_v11040_params_st {
1434
+ int device;
1435
+ enum cudaGraphMemAttributeType attr;
1436
+ void *value;
1437
+ } cudaDeviceSetGraphMemAttribute_v11040_params;
1438
+
1439
+ typedef struct cudaGraphClone_v10000_params_st {
1440
+ cudaGraph_t *pGraphClone;
1441
+ cudaGraph_t originalGraph;
1442
+ } cudaGraphClone_v10000_params;
1443
+
1444
+ typedef struct cudaGraphNodeFindInClone_v10000_params_st {
1445
+ cudaGraphNode_t *pNode;
1446
+ cudaGraphNode_t originalNode;
1447
+ cudaGraph_t clonedGraph;
1448
+ } cudaGraphNodeFindInClone_v10000_params;
1449
+
1450
+ typedef struct cudaGraphNodeGetType_v10000_params_st {
1451
+ cudaGraphNode_t node;
1452
+ enum cudaGraphNodeType *pType;
1453
+ } cudaGraphNodeGetType_v10000_params;
1454
+
1455
+ typedef struct cudaGraphGetNodes_v10000_params_st {
1456
+ cudaGraph_t graph;
1457
+ cudaGraphNode_t *nodes;
1458
+ size_t *numNodes;
1459
+ } cudaGraphGetNodes_v10000_params;
1460
+
1461
+ typedef struct cudaGraphGetRootNodes_v10000_params_st {
1462
+ cudaGraph_t graph;
1463
+ cudaGraphNode_t *pRootNodes;
1464
+ size_t *pNumRootNodes;
1465
+ } cudaGraphGetRootNodes_v10000_params;
1466
+
1467
+ typedef struct cudaGraphGetEdges_v10000_params_st {
1468
+ cudaGraph_t graph;
1469
+ cudaGraphNode_t *from;
1470
+ cudaGraphNode_t *to;
1471
+ size_t *numEdges;
1472
+ } cudaGraphGetEdges_v10000_params;
1473
+
1474
+ typedef struct cudaGraphGetEdges_v2_v12030_params_st {
1475
+ cudaGraph_t graph;
1476
+ cudaGraphNode_t *from;
1477
+ cudaGraphNode_t *to;
1478
+ cudaGraphEdgeData *edgeData;
1479
+ size_t *numEdges;
1480
+ } cudaGraphGetEdges_v2_v12030_params;
1481
+
1482
+ typedef struct cudaGraphNodeGetDependencies_v10000_params_st {
1483
+ cudaGraphNode_t node;
1484
+ cudaGraphNode_t *pDependencies;
1485
+ size_t *pNumDependencies;
1486
+ } cudaGraphNodeGetDependencies_v10000_params;
1487
+
1488
+ typedef struct cudaGraphNodeGetDependencies_v2_v12030_params_st {
1489
+ cudaGraphNode_t node;
1490
+ cudaGraphNode_t *pDependencies;
1491
+ cudaGraphEdgeData *edgeData;
1492
+ size_t *pNumDependencies;
1493
+ } cudaGraphNodeGetDependencies_v2_v12030_params;
1494
+
1495
+ typedef struct cudaGraphNodeGetDependentNodes_v10000_params_st {
1496
+ cudaGraphNode_t node;
1497
+ cudaGraphNode_t *pDependentNodes;
1498
+ size_t *pNumDependentNodes;
1499
+ } cudaGraphNodeGetDependentNodes_v10000_params;
1500
+
1501
+ typedef struct cudaGraphNodeGetDependentNodes_v2_v12030_params_st {
1502
+ cudaGraphNode_t node;
1503
+ cudaGraphNode_t *pDependentNodes;
1504
+ cudaGraphEdgeData *edgeData;
1505
+ size_t *pNumDependentNodes;
1506
+ } cudaGraphNodeGetDependentNodes_v2_v12030_params;
1507
+
1508
+ typedef struct cudaGraphAddDependencies_v10000_params_st {
1509
+ cudaGraph_t graph;
1510
+ const cudaGraphNode_t *from;
1511
+ const cudaGraphNode_t *to;
1512
+ size_t numDependencies;
1513
+ } cudaGraphAddDependencies_v10000_params;
1514
+
1515
+ typedef struct cudaGraphAddDependencies_v2_v12030_params_st {
1516
+ cudaGraph_t graph;
1517
+ const cudaGraphNode_t *from;
1518
+ const cudaGraphNode_t *to;
1519
+ const cudaGraphEdgeData *edgeData;
1520
+ size_t numDependencies;
1521
+ } cudaGraphAddDependencies_v2_v12030_params;
1522
+
1523
+ typedef struct cudaGraphRemoveDependencies_v10000_params_st {
1524
+ cudaGraph_t graph;
1525
+ const cudaGraphNode_t *from;
1526
+ const cudaGraphNode_t *to;
1527
+ size_t numDependencies;
1528
+ } cudaGraphRemoveDependencies_v10000_params;
1529
+
1530
+ typedef struct cudaGraphRemoveDependencies_v2_v12030_params_st {
1531
+ cudaGraph_t graph;
1532
+ const cudaGraphNode_t *from;
1533
+ const cudaGraphNode_t *to;
1534
+ const cudaGraphEdgeData *edgeData;
1535
+ size_t numDependencies;
1536
+ } cudaGraphRemoveDependencies_v2_v12030_params;
1537
+
1538
+ typedef struct cudaGraphDestroyNode_v10000_params_st {
1539
+ cudaGraphNode_t node;
1540
+ } cudaGraphDestroyNode_v10000_params;
1541
+
1542
+ typedef struct cudaGraphInstantiate_v12000_params_st {
1543
+ cudaGraphExec_t *pGraphExec;
1544
+ cudaGraph_t graph;
1545
+ unsigned long long flags;
1546
+ } cudaGraphInstantiate_v12000_params;
1547
+
1548
+ typedef struct cudaGraphInstantiateWithFlags_v11040_params_st {
1549
+ cudaGraphExec_t *pGraphExec;
1550
+ cudaGraph_t graph;
1551
+ unsigned long long flags;
1552
+ } cudaGraphInstantiateWithFlags_v11040_params;
1553
+
1554
+ typedef struct cudaGraphInstantiateWithParams_ptsz_v12000_params_st {
1555
+ cudaGraphExec_t *pGraphExec;
1556
+ cudaGraph_t graph;
1557
+ cudaGraphInstantiateParams *instantiateParams;
1558
+ } cudaGraphInstantiateWithParams_ptsz_v12000_params;
1559
+
1560
+ typedef struct cudaGraphExecGetFlags_v12000_params_st {
1561
+ cudaGraphExec_t graphExec;
1562
+ unsigned long long *flags;
1563
+ } cudaGraphExecGetFlags_v12000_params;
1564
+
1565
+ typedef struct cudaGraphExecKernelNodeSetParams_v10010_params_st {
1566
+ cudaGraphExec_t hGraphExec;
1567
+ cudaGraphNode_t node;
1568
+ const struct cudaKernelNodeParams *pNodeParams;
1569
+ } cudaGraphExecKernelNodeSetParams_v10010_params;
1570
+
1571
+ typedef struct cudaGraphExecMemcpyNodeSetParams_v10020_params_st {
1572
+ cudaGraphExec_t hGraphExec;
1573
+ cudaGraphNode_t node;
1574
+ const struct cudaMemcpy3DParms *pNodeParams;
1575
+ } cudaGraphExecMemcpyNodeSetParams_v10020_params;
1576
+
1577
+ typedef struct cudaGraphExecMemcpyNodeSetParamsToSymbol_v11010_params_st {
1578
+ cudaGraphExec_t hGraphExec;
1579
+ cudaGraphNode_t node;
1580
+ const void *symbol;
1581
+ const void *src;
1582
+ size_t count;
1583
+ size_t offset;
1584
+ enum cudaMemcpyKind kind;
1585
+ } cudaGraphExecMemcpyNodeSetParamsToSymbol_v11010_params;
1586
+
1587
+ typedef struct cudaGraphExecMemcpyNodeSetParamsFromSymbol_v11010_params_st {
1588
+ cudaGraphExec_t hGraphExec;
1589
+ cudaGraphNode_t node;
1590
+ void *dst;
1591
+ const void *symbol;
1592
+ size_t count;
1593
+ size_t offset;
1594
+ enum cudaMemcpyKind kind;
1595
+ } cudaGraphExecMemcpyNodeSetParamsFromSymbol_v11010_params;
1596
+
1597
+ typedef struct cudaGraphExecMemcpyNodeSetParams1D_v11010_params_st {
1598
+ cudaGraphExec_t hGraphExec;
1599
+ cudaGraphNode_t node;
1600
+ void *dst;
1601
+ const void *src;
1602
+ size_t count;
1603
+ enum cudaMemcpyKind kind;
1604
+ } cudaGraphExecMemcpyNodeSetParams1D_v11010_params;
1605
+
1606
+ typedef struct cudaGraphExecMemsetNodeSetParams_v10020_params_st {
1607
+ cudaGraphExec_t hGraphExec;
1608
+ cudaGraphNode_t node;
1609
+ const struct cudaMemsetParams *pNodeParams;
1610
+ } cudaGraphExecMemsetNodeSetParams_v10020_params;
1611
+
1612
+ typedef struct cudaGraphExecHostNodeSetParams_v10020_params_st {
1613
+ cudaGraphExec_t hGraphExec;
1614
+ cudaGraphNode_t node;
1615
+ const struct cudaHostNodeParams *pNodeParams;
1616
+ } cudaGraphExecHostNodeSetParams_v10020_params;
1617
+
1618
+ typedef struct cudaGraphExecChildGraphNodeSetParams_v11010_params_st {
1619
+ cudaGraphExec_t hGraphExec;
1620
+ cudaGraphNode_t node;
1621
+ cudaGraph_t childGraph;
1622
+ } cudaGraphExecChildGraphNodeSetParams_v11010_params;
1623
+
1624
+ typedef struct cudaGraphExecEventRecordNodeSetEvent_v11010_params_st {
1625
+ cudaGraphExec_t hGraphExec;
1626
+ cudaGraphNode_t hNode;
1627
+ cudaEvent_t event;
1628
+ } cudaGraphExecEventRecordNodeSetEvent_v11010_params;
1629
+
1630
+ typedef struct cudaGraphExecEventWaitNodeSetEvent_v11010_params_st {
1631
+ cudaGraphExec_t hGraphExec;
1632
+ cudaGraphNode_t hNode;
1633
+ cudaEvent_t event;
1634
+ } cudaGraphExecEventWaitNodeSetEvent_v11010_params;
1635
+
1636
+ typedef struct cudaGraphExecExternalSemaphoresSignalNodeSetParams_v11020_params_st {
1637
+ cudaGraphExec_t hGraphExec;
1638
+ cudaGraphNode_t hNode;
1639
+ const struct cudaExternalSemaphoreSignalNodeParams *nodeParams;
1640
+ } cudaGraphExecExternalSemaphoresSignalNodeSetParams_v11020_params;
1641
+
1642
+ typedef struct cudaGraphExecExternalSemaphoresWaitNodeSetParams_v11020_params_st {
1643
+ cudaGraphExec_t hGraphExec;
1644
+ cudaGraphNode_t hNode;
1645
+ const struct cudaExternalSemaphoreWaitNodeParams *nodeParams;
1646
+ } cudaGraphExecExternalSemaphoresWaitNodeSetParams_v11020_params;
1647
+
1648
+ typedef struct cudaGraphNodeSetEnabled_v11060_params_st {
1649
+ cudaGraphExec_t hGraphExec;
1650
+ cudaGraphNode_t hNode;
1651
+ unsigned int isEnabled;
1652
+ } cudaGraphNodeSetEnabled_v11060_params;
1653
+
1654
+ typedef struct cudaGraphNodeGetEnabled_v11060_params_st {
1655
+ cudaGraphExec_t hGraphExec;
1656
+ cudaGraphNode_t hNode;
1657
+ unsigned int *isEnabled;
1658
+ } cudaGraphNodeGetEnabled_v11060_params;
1659
+
1660
+ typedef struct cudaGraphExecUpdate_v10020_params_st {
1661
+ cudaGraphExec_t hGraphExec;
1662
+ cudaGraph_t hGraph;
1663
+ cudaGraphExecUpdateResultInfo *resultInfo;
1664
+ } cudaGraphExecUpdate_v10020_params;
1665
+
1666
+ typedef struct cudaGraphUpload_ptsz_v10000_params_st {
1667
+ cudaGraphExec_t graphExec;
1668
+ cudaStream_t stream;
1669
+ } cudaGraphUpload_ptsz_v10000_params;
1670
+
1671
+ typedef struct cudaGraphLaunch_ptsz_v10000_params_st {
1672
+ cudaGraphExec_t graphExec;
1673
+ cudaStream_t stream;
1674
+ } cudaGraphLaunch_ptsz_v10000_params;
1675
+
1676
+ typedef struct cudaGraphExecDestroy_v10000_params_st {
1677
+ cudaGraphExec_t graphExec;
1678
+ } cudaGraphExecDestroy_v10000_params;
1679
+
1680
+ typedef struct cudaGraphDestroy_v10000_params_st {
1681
+ cudaGraph_t graph;
1682
+ } cudaGraphDestroy_v10000_params;
1683
+
1684
+ typedef struct cudaGraphDebugDotPrint_v11030_params_st {
1685
+ cudaGraph_t graph;
1686
+ const char *path;
1687
+ unsigned int flags;
1688
+ } cudaGraphDebugDotPrint_v11030_params;
1689
+
1690
+ typedef struct cudaUserObjectCreate_v11030_params_st {
1691
+ cudaUserObject_t *object_out;
1692
+ void *ptr;
1693
+ cudaHostFn_t destroy;
1694
+ unsigned int initialRefcount;
1695
+ unsigned int flags;
1696
+ } cudaUserObjectCreate_v11030_params;
1697
+
1698
+ typedef struct cudaUserObjectRetain_v11030_params_st {
1699
+ cudaUserObject_t object;
1700
+ unsigned int count;
1701
+ } cudaUserObjectRetain_v11030_params;
1702
+
1703
+ typedef struct cudaUserObjectRelease_v11030_params_st {
1704
+ cudaUserObject_t object;
1705
+ unsigned int count;
1706
+ } cudaUserObjectRelease_v11030_params;
1707
+
1708
+ typedef struct cudaGraphRetainUserObject_v11030_params_st {
1709
+ cudaGraph_t graph;
1710
+ cudaUserObject_t object;
1711
+ unsigned int count;
1712
+ unsigned int flags;
1713
+ } cudaGraphRetainUserObject_v11030_params;
1714
+
1715
+ typedef struct cudaGraphReleaseUserObject_v11030_params_st {
1716
+ cudaGraph_t graph;
1717
+ cudaUserObject_t object;
1718
+ unsigned int count;
1719
+ } cudaGraphReleaseUserObject_v11030_params;
1720
+
1721
+ typedef struct cudaGraphAddNode_v12020_params_st {
1722
+ cudaGraphNode_t *pGraphNode;
1723
+ cudaGraph_t graph;
1724
+ const cudaGraphNode_t *pDependencies;
1725
+ size_t numDependencies;
1726
+ struct cudaGraphNodeParams *nodeParams;
1727
+ } cudaGraphAddNode_v12020_params;
1728
+
1729
+ typedef struct cudaGraphAddNode_v2_v12030_params_st {
1730
+ cudaGraphNode_t *pGraphNode;
1731
+ cudaGraph_t graph;
1732
+ const cudaGraphNode_t *pDependencies;
1733
+ const cudaGraphEdgeData *dependencyData;
1734
+ size_t numDependencies;
1735
+ struct cudaGraphNodeParams *nodeParams;
1736
+ } cudaGraphAddNode_v2_v12030_params;
1737
+
1738
+ typedef struct cudaGraphNodeSetParams_v12020_params_st {
1739
+ cudaGraphNode_t node;
1740
+ struct cudaGraphNodeParams *nodeParams;
1741
+ } cudaGraphNodeSetParams_v12020_params;
1742
+
1743
+ typedef struct cudaGraphExecNodeSetParams_v12020_params_st {
1744
+ cudaGraphExec_t graphExec;
1745
+ cudaGraphNode_t node;
1746
+ struct cudaGraphNodeParams *nodeParams;
1747
+ } cudaGraphExecNodeSetParams_v12020_params;
1748
+
1749
+ typedef struct cudaGraphConditionalHandleCreate_v12030_params_st {
1750
+ cudaGraphConditionalHandle *pHandle_out;
1751
+ cudaGraph_t graph;
1752
+ unsigned int defaultLaunchValue;
1753
+ unsigned int flags;
1754
+ } cudaGraphConditionalHandleCreate_v12030_params;
1755
+
1756
+ typedef struct cudaGetDriverEntryPoint_ptsz_v11030_params_st {
1757
+ const char *symbol;
1758
+ void **funcPtr;
1759
+ unsigned long long flags;
1760
+ enum cudaDriverEntryPointQueryResult *driverStatus;
1761
+ } cudaGetDriverEntryPoint_ptsz_v11030_params;
1762
+
1763
+ typedef struct cudaGetFuncBySymbol_v11000_params_st {
1764
+ cudaFunction_t *functionPtr;
1765
+ const void *symbolPtr;
1766
+ } cudaGetFuncBySymbol_v11000_params;
1767
+
1768
+ typedef struct cudaGetKernel_v12000_params_st {
1769
+ cudaKernel_t *kernelPtr;
1770
+ const void *entryFuncAddr;
1771
+ } cudaGetKernel_v12000_params;
1772
+
1773
+ typedef struct cudaMemcpy_v3020_params_st {
1774
+ void *dst;
1775
+ const void *src;
1776
+ size_t count;
1777
+ enum cudaMemcpyKind kind;
1778
+ } cudaMemcpy_v3020_params;
1779
+
1780
+ typedef struct cudaMemcpyToSymbol_v3020_params_st {
1781
+ const void *symbol;
1782
+ const void *src;
1783
+ size_t count;
1784
+ size_t offset;
1785
+ enum cudaMemcpyKind kind;
1786
+ } cudaMemcpyToSymbol_v3020_params;
1787
+
1788
+ typedef struct cudaMemcpyFromSymbol_v3020_params_st {
1789
+ void *dst;
1790
+ const void *symbol;
1791
+ size_t count;
1792
+ size_t offset;
1793
+ enum cudaMemcpyKind kind;
1794
+ } cudaMemcpyFromSymbol_v3020_params;
1795
+
1796
+ typedef struct cudaMemcpy2D_v3020_params_st {
1797
+ void *dst;
1798
+ size_t dpitch;
1799
+ const void *src;
1800
+ size_t spitch;
1801
+ size_t width;
1802
+ size_t height;
1803
+ enum cudaMemcpyKind kind;
1804
+ } cudaMemcpy2D_v3020_params;
1805
+
1806
+ typedef struct cudaMemcpyToArray_v3020_params_st {
1807
+ cudaArray_t dst;
1808
+ size_t wOffset;
1809
+ size_t hOffset;
1810
+ const void *src;
1811
+ size_t count;
1812
+ enum cudaMemcpyKind kind;
1813
+ } cudaMemcpyToArray_v3020_params;
1814
+
1815
+ typedef struct cudaMemcpy2DToArray_v3020_params_st {
1816
+ cudaArray_t dst;
1817
+ size_t wOffset;
1818
+ size_t hOffset;
1819
+ const void *src;
1820
+ size_t spitch;
1821
+ size_t width;
1822
+ size_t height;
1823
+ enum cudaMemcpyKind kind;
1824
+ } cudaMemcpy2DToArray_v3020_params;
1825
+
1826
+ typedef struct cudaMemcpyFromArray_v3020_params_st {
1827
+ void *dst;
1828
+ cudaArray_const_t src;
1829
+ size_t wOffset;
1830
+ size_t hOffset;
1831
+ size_t count;
1832
+ enum cudaMemcpyKind kind;
1833
+ } cudaMemcpyFromArray_v3020_params;
1834
+
1835
+ typedef struct cudaMemcpy2DFromArray_v3020_params_st {
1836
+ void *dst;
1837
+ size_t dpitch;
1838
+ cudaArray_const_t src;
1839
+ size_t wOffset;
1840
+ size_t hOffset;
1841
+ size_t width;
1842
+ size_t height;
1843
+ enum cudaMemcpyKind kind;
1844
+ } cudaMemcpy2DFromArray_v3020_params;
1845
+
1846
+ typedef struct cudaMemcpyArrayToArray_v3020_params_st {
1847
+ cudaArray_t dst;
1848
+ size_t wOffsetDst;
1849
+ size_t hOffsetDst;
1850
+ cudaArray_const_t src;
1851
+ size_t wOffsetSrc;
1852
+ size_t hOffsetSrc;
1853
+ size_t count;
1854
+ enum cudaMemcpyKind kind;
1855
+ } cudaMemcpyArrayToArray_v3020_params;
1856
+
1857
+ typedef struct cudaMemcpy2DArrayToArray_v3020_params_st {
1858
+ cudaArray_t dst;
1859
+ size_t wOffsetDst;
1860
+ size_t hOffsetDst;
1861
+ cudaArray_const_t src;
1862
+ size_t wOffsetSrc;
1863
+ size_t hOffsetSrc;
1864
+ size_t width;
1865
+ size_t height;
1866
+ enum cudaMemcpyKind kind;
1867
+ } cudaMemcpy2DArrayToArray_v3020_params;
1868
+
1869
+ typedef struct cudaMemcpy3D_v3020_params_st {
1870
+ const struct cudaMemcpy3DParms *p;
1871
+ } cudaMemcpy3D_v3020_params;
1872
+
1873
+ typedef struct cudaMemcpy3DPeer_v4000_params_st {
1874
+ const struct cudaMemcpy3DPeerParms *p;
1875
+ } cudaMemcpy3DPeer_v4000_params;
1876
+
1877
+ typedef struct cudaMemset_v3020_params_st {
1878
+ void *devPtr;
1879
+ int value;
1880
+ size_t count;
1881
+ } cudaMemset_v3020_params;
1882
+
1883
+ typedef struct cudaMemset2D_v3020_params_st {
1884
+ void *devPtr;
1885
+ size_t pitch;
1886
+ int value;
1887
+ size_t width;
1888
+ size_t height;
1889
+ } cudaMemset2D_v3020_params;
1890
+
1891
+ typedef struct cudaMemset3D_v3020_params_st {
1892
+ struct cudaPitchedPtr pitchedDevPtr;
1893
+ int value;
1894
+ struct cudaExtent extent;
1895
+ } cudaMemset3D_v3020_params;
1896
+
1897
+ typedef struct cudaMemcpyAsync_v3020_params_st {
1898
+ void *dst;
1899
+ const void *src;
1900
+ size_t count;
1901
+ enum cudaMemcpyKind kind;
1902
+ cudaStream_t stream;
1903
+ } cudaMemcpyAsync_v3020_params;
1904
+
1905
+ typedef struct cudaMemcpyToSymbolAsync_v3020_params_st {
1906
+ const void *symbol;
1907
+ const void *src;
1908
+ size_t count;
1909
+ size_t offset;
1910
+ enum cudaMemcpyKind kind;
1911
+ cudaStream_t stream;
1912
+ } cudaMemcpyToSymbolAsync_v3020_params;
1913
+
1914
+ typedef struct cudaMemcpyFromSymbolAsync_v3020_params_st {
1915
+ void *dst;
1916
+ const void *symbol;
1917
+ size_t count;
1918
+ size_t offset;
1919
+ enum cudaMemcpyKind kind;
1920
+ cudaStream_t stream;
1921
+ } cudaMemcpyFromSymbolAsync_v3020_params;
1922
+
1923
+ typedef struct cudaMemcpy2DAsync_v3020_params_st {
1924
+ void *dst;
1925
+ size_t dpitch;
1926
+ const void *src;
1927
+ size_t spitch;
1928
+ size_t width;
1929
+ size_t height;
1930
+ enum cudaMemcpyKind kind;
1931
+ cudaStream_t stream;
1932
+ } cudaMemcpy2DAsync_v3020_params;
1933
+
1934
+ typedef struct cudaMemcpyToArrayAsync_v3020_params_st {
1935
+ cudaArray_t dst;
1936
+ size_t wOffset;
1937
+ size_t hOffset;
1938
+ const void *src;
1939
+ size_t count;
1940
+ enum cudaMemcpyKind kind;
1941
+ cudaStream_t stream;
1942
+ } cudaMemcpyToArrayAsync_v3020_params;
1943
+
1944
+ typedef struct cudaMemcpy2DToArrayAsync_v3020_params_st {
1945
+ cudaArray_t dst;
1946
+ size_t wOffset;
1947
+ size_t hOffset;
1948
+ const void *src;
1949
+ size_t spitch;
1950
+ size_t width;
1951
+ size_t height;
1952
+ enum cudaMemcpyKind kind;
1953
+ cudaStream_t stream;
1954
+ } cudaMemcpy2DToArrayAsync_v3020_params;
1955
+
1956
+ typedef struct cudaMemcpyFromArrayAsync_v3020_params_st {
1957
+ void *dst;
1958
+ cudaArray_const_t src;
1959
+ size_t wOffset;
1960
+ size_t hOffset;
1961
+ size_t count;
1962
+ enum cudaMemcpyKind kind;
1963
+ cudaStream_t stream;
1964
+ } cudaMemcpyFromArrayAsync_v3020_params;
1965
+
1966
+ typedef struct cudaMemcpy2DFromArrayAsync_v3020_params_st {
1967
+ void *dst;
1968
+ size_t dpitch;
1969
+ cudaArray_const_t src;
1970
+ size_t wOffset;
1971
+ size_t hOffset;
1972
+ size_t width;
1973
+ size_t height;
1974
+ enum cudaMemcpyKind kind;
1975
+ cudaStream_t stream;
1976
+ } cudaMemcpy2DFromArrayAsync_v3020_params;
1977
+
1978
+ typedef struct cudaMemcpy3DAsync_v3020_params_st {
1979
+ const struct cudaMemcpy3DParms *p;
1980
+ cudaStream_t stream;
1981
+ } cudaMemcpy3DAsync_v3020_params;
1982
+
1983
+ typedef struct cudaMemcpy3DPeerAsync_v4000_params_st {
1984
+ const struct cudaMemcpy3DPeerParms *p;
1985
+ cudaStream_t stream;
1986
+ } cudaMemcpy3DPeerAsync_v4000_params;
1987
+
1988
+ typedef struct cudaMemsetAsync_v3020_params_st {
1989
+ void *devPtr;
1990
+ int value;
1991
+ size_t count;
1992
+ cudaStream_t stream;
1993
+ } cudaMemsetAsync_v3020_params;
1994
+
1995
+ typedef struct cudaMemset2DAsync_v3020_params_st {
1996
+ void *devPtr;
1997
+ size_t pitch;
1998
+ int value;
1999
+ size_t width;
2000
+ size_t height;
2001
+ cudaStream_t stream;
2002
+ } cudaMemset2DAsync_v3020_params;
2003
+
2004
+ typedef struct cudaMemset3DAsync_v3020_params_st {
2005
+ struct cudaPitchedPtr pitchedDevPtr;
2006
+ int value;
2007
+ struct cudaExtent extent;
2008
+ cudaStream_t stream;
2009
+ } cudaMemset3DAsync_v3020_params;
2010
+
2011
+ typedef struct cudaStreamQuery_v3020_params_st {
2012
+ cudaStream_t stream;
2013
+ } cudaStreamQuery_v3020_params;
2014
+
2015
+ typedef struct cudaStreamGetFlags_v5050_params_st {
2016
+ cudaStream_t hStream;
2017
+ unsigned int *flags;
2018
+ } cudaStreamGetFlags_v5050_params;
2019
+
2020
+ typedef struct cudaStreamGetId_v12000_params_st {
2021
+ cudaStream_t hStream;
2022
+ unsigned long long *streamId;
2023
+ } cudaStreamGetId_v12000_params;
2024
+
2025
+ typedef struct cudaStreamGetPriority_v5050_params_st {
2026
+ cudaStream_t hStream;
2027
+ int *priority;
2028
+ } cudaStreamGetPriority_v5050_params;
2029
+
2030
+ typedef struct cudaEventRecord_v3020_params_st {
2031
+ cudaEvent_t event;
2032
+ cudaStream_t stream;
2033
+ } cudaEventRecord_v3020_params;
2034
+
2035
+ typedef struct cudaEventRecordWithFlags_v11010_params_st {
2036
+ cudaEvent_t event;
2037
+ cudaStream_t stream;
2038
+ unsigned int flags;
2039
+ } cudaEventRecordWithFlags_v11010_params;
2040
+
2041
+ typedef struct cudaStreamWaitEvent_v3020_params_st {
2042
+ cudaStream_t stream;
2043
+ cudaEvent_t event;
2044
+ unsigned int flags;
2045
+ } cudaStreamWaitEvent_v3020_params;
2046
+
2047
+ typedef struct cudaStreamAddCallback_v5000_params_st {
2048
+ cudaStream_t stream;
2049
+ cudaStreamCallback_t callback;
2050
+ void *userData;
2051
+ unsigned int flags;
2052
+ } cudaStreamAddCallback_v5000_params;
2053
+
2054
+ typedef struct cudaStreamAttachMemAsync_v6000_params_st {
2055
+ cudaStream_t stream;
2056
+ void *devPtr;
2057
+ size_t length;
2058
+ unsigned int flags;
2059
+ } cudaStreamAttachMemAsync_v6000_params;
2060
+
2061
+ typedef struct cudaStreamSynchronize_v3020_params_st {
2062
+ cudaStream_t stream;
2063
+ } cudaStreamSynchronize_v3020_params;
2064
+
2065
+ typedef struct cudaLaunchKernel_v7000_params_st {
2066
+ const void *func;
2067
+ dim3 gridDim;
2068
+ dim3 blockDim;
2069
+ void **args;
2070
+ size_t sharedMem;
2071
+ cudaStream_t stream;
2072
+ } cudaLaunchKernel_v7000_params;
2073
+
2074
+ typedef struct cudaLaunchKernelExC_v11060_params_st {
2075
+ const cudaLaunchConfig_t *config;
2076
+ const void *func;
2077
+ void **args;
2078
+ } cudaLaunchKernelExC_v11060_params;
2079
+
2080
+ typedef struct cudaLaunchCooperativeKernel_v9000_params_st {
2081
+ const void *func;
2082
+ dim3 gridDim;
2083
+ dim3 blockDim;
2084
+ void **args;
2085
+ size_t sharedMem;
2086
+ cudaStream_t stream;
2087
+ } cudaLaunchCooperativeKernel_v9000_params;
2088
+
2089
+ typedef struct cudaLaunchHostFunc_v10000_params_st {
2090
+ cudaStream_t stream;
2091
+ cudaHostFn_t fn;
2092
+ void *userData;
2093
+ } cudaLaunchHostFunc_v10000_params;
2094
+
2095
+ typedef struct cudaMemPrefetchAsync_v8000_params_st {
2096
+ const void *devPtr;
2097
+ size_t count;
2098
+ int dstDevice;
2099
+ cudaStream_t stream;
2100
+ } cudaMemPrefetchAsync_v8000_params;
2101
+
2102
+ typedef struct cudaMemPrefetchAsync_v2_v12020_params_st {
2103
+ const void *devPtr;
2104
+ size_t count;
2105
+ struct cudaMemLocation location;
2106
+ unsigned int flags;
2107
+ cudaStream_t stream;
2108
+ } cudaMemPrefetchAsync_v2_v12020_params;
2109
+
2110
+ typedef struct cudaSignalExternalSemaphoresAsync_v10000_params_st {
2111
+ const cudaExternalSemaphore_t *extSemArray;
2112
+ const struct cudaExternalSemaphoreSignalParams_v1 *paramsArray;
2113
+ unsigned int numExtSems;
2114
+ cudaStream_t stream;
2115
+ } cudaSignalExternalSemaphoresAsync_v10000_params;
2116
+
2117
+ typedef struct cudaSignalExternalSemaphoresAsync_ptsz_v10000_params_st {
2118
+ const cudaExternalSemaphore_t *extSemArray;
2119
+ const struct cudaExternalSemaphoreSignalParams_v1 *paramsArray;
2120
+ unsigned int numExtSems;
2121
+ cudaStream_t stream;
2122
+ } cudaSignalExternalSemaphoresAsync_ptsz_v10000_params;
2123
+
2124
+ typedef struct cudaSignalExternalSemaphoresAsync_v2_v11020_params_st {
2125
+ const cudaExternalSemaphore_t *extSemArray;
2126
+ const struct cudaExternalSemaphoreSignalParams *paramsArray;
2127
+ unsigned int numExtSems;
2128
+ cudaStream_t stream;
2129
+ } cudaSignalExternalSemaphoresAsync_v2_v11020_params;
2130
+
2131
+ typedef struct cudaWaitExternalSemaphoresAsync_v10000_params_st {
2132
+ const cudaExternalSemaphore_t *extSemArray;
2133
+ const struct cudaExternalSemaphoreWaitParams_v1 *paramsArray;
2134
+ unsigned int numExtSems;
2135
+ cudaStream_t stream;
2136
+ } cudaWaitExternalSemaphoresAsync_v10000_params;
2137
+
2138
+ typedef struct cudaWaitExternalSemaphoresAsync_ptsz_v10000_params_st {
2139
+ const cudaExternalSemaphore_t *extSemArray;
2140
+ const struct cudaExternalSemaphoreWaitParams_v1 *paramsArray;
2141
+ unsigned int numExtSems;
2142
+ cudaStream_t stream;
2143
+ } cudaWaitExternalSemaphoresAsync_ptsz_v10000_params;
2144
+
2145
+ typedef struct cudaWaitExternalSemaphoresAsync_v2_v11020_params_st {
2146
+ const cudaExternalSemaphore_t *extSemArray;
2147
+ const struct cudaExternalSemaphoreWaitParams *paramsArray;
2148
+ unsigned int numExtSems;
2149
+ cudaStream_t stream;
2150
+ } cudaWaitExternalSemaphoresAsync_v2_v11020_params;
2151
+
2152
+ typedef struct cudaGraphInstantiateWithParams_v12000_params_st {
2153
+ cudaGraphExec_t *pGraphExec;
2154
+ cudaGraph_t graph;
2155
+ cudaGraphInstantiateParams *instantiateParams;
2156
+ } cudaGraphInstantiateWithParams_v12000_params;
2157
+
2158
+ typedef struct cudaGraphUpload_v10000_params_st {
2159
+ cudaGraphExec_t graphExec;
2160
+ cudaStream_t stream;
2161
+ } cudaGraphUpload_v10000_params;
2162
+
2163
+ typedef struct cudaGraphLaunch_v10000_params_st {
2164
+ cudaGraphExec_t graphExec;
2165
+ cudaStream_t stream;
2166
+ } cudaGraphLaunch_v10000_params;
2167
+
2168
+ typedef struct cudaStreamBeginCapture_v10000_params_st {
2169
+ cudaStream_t stream;
2170
+ enum cudaStreamCaptureMode mode;
2171
+ } cudaStreamBeginCapture_v10000_params;
2172
+
2173
+ typedef struct cudaStreamBeginCaptureToGraph_v12030_params_st {
2174
+ cudaStream_t stream;
2175
+ cudaGraph_t graph;
2176
+ const cudaGraphNode_t *dependencies;
2177
+ const cudaGraphEdgeData *dependencyData;
2178
+ size_t numDependencies;
2179
+ enum cudaStreamCaptureMode mode;
2180
+ } cudaStreamBeginCaptureToGraph_v12030_params;
2181
+
2182
+ typedef struct cudaStreamEndCapture_v10000_params_st {
2183
+ cudaStream_t stream;
2184
+ cudaGraph_t *pGraph;
2185
+ } cudaStreamEndCapture_v10000_params;
2186
+
2187
+ typedef struct cudaStreamIsCapturing_v10000_params_st {
2188
+ cudaStream_t stream;
2189
+ enum cudaStreamCaptureStatus *pCaptureStatus;
2190
+ } cudaStreamIsCapturing_v10000_params;
2191
+
2192
+ typedef struct cudaStreamGetCaptureInfo_v10010_params_st {
2193
+ cudaStream_t stream;
2194
+ enum cudaStreamCaptureStatus *captureStatus_out;
2195
+ unsigned long long *id_out;
2196
+ } cudaStreamGetCaptureInfo_v10010_params;
2197
+
2198
+ typedef struct cudaStreamGetCaptureInfo_ptsz_v10010_params_st {
2199
+ cudaStream_t stream;
2200
+ enum cudaStreamCaptureStatus *captureStatus_out;
2201
+ unsigned long long *id_out;
2202
+ } cudaStreamGetCaptureInfo_ptsz_v10010_params;
2203
+
2204
+ typedef struct cudaStreamGetCaptureInfo_v2_v11030_params_st {
2205
+ cudaStream_t stream;
2206
+ enum cudaStreamCaptureStatus *captureStatus_out;
2207
+ unsigned long long *id_out;
2208
+ cudaGraph_t *graph_out;
2209
+ const cudaGraphNode_t **dependencies_out;
2210
+ size_t *numDependencies_out;
2211
+ } cudaStreamGetCaptureInfo_v2_v11030_params;
2212
+
2213
+ typedef struct cudaStreamGetCaptureInfo_v3_v12030_params_st {
2214
+ cudaStream_t stream;
2215
+ enum cudaStreamCaptureStatus *captureStatus_out;
2216
+ unsigned long long *id_out;
2217
+ cudaGraph_t *graph_out;
2218
+ const cudaGraphNode_t **dependencies_out;
2219
+ const cudaGraphEdgeData **edgeData_out;
2220
+ size_t *numDependencies_out;
2221
+ } cudaStreamGetCaptureInfo_v3_v12030_params;
2222
+
2223
+ typedef struct cudaStreamUpdateCaptureDependencies_v11030_params_st {
2224
+ cudaStream_t stream;
2225
+ cudaGraphNode_t *dependencies;
2226
+ size_t numDependencies;
2227
+ unsigned int flags;
2228
+ } cudaStreamUpdateCaptureDependencies_v11030_params;
2229
+
2230
+ typedef struct cudaStreamUpdateCaptureDependencies_v2_v12030_params_st {
2231
+ cudaStream_t stream;
2232
+ cudaGraphNode_t *dependencies;
2233
+ const cudaGraphEdgeData *dependencyData;
2234
+ size_t numDependencies;
2235
+ unsigned int flags;
2236
+ } cudaStreamUpdateCaptureDependencies_v2_v12030_params;
2237
+
2238
+ typedef struct cudaStreamCopyAttributes_v11000_params_st {
2239
+ cudaStream_t dstStream;
2240
+ cudaStream_t srcStream;
2241
+ } cudaStreamCopyAttributes_v11000_params;
2242
+
2243
+ typedef struct cudaStreamGetAttribute_v11000_params_st {
2244
+ cudaStream_t stream;
2245
+ cudaStreamAttrID attr;
2246
+ cudaStreamAttrValue *value;
2247
+ } cudaStreamGetAttribute_v11000_params;
2248
+
2249
+ typedef struct cudaStreamSetAttribute_v11000_params_st {
2250
+ cudaStream_t stream;
2251
+ cudaStreamAttrID attr;
2252
+ const cudaStreamAttrValue *param;
2253
+ } cudaStreamSetAttribute_v11000_params;
2254
+
2255
+ typedef struct cudaMallocAsync_v11020_params_st {
2256
+ void **devPtr;
2257
+ size_t size;
2258
+ cudaStream_t hStream;
2259
+ } cudaMallocAsync_v11020_params;
2260
+
2261
+ typedef struct cudaFreeAsync_v11020_params_st {
2262
+ void *devPtr;
2263
+ cudaStream_t hStream;
2264
+ } cudaFreeAsync_v11020_params;
2265
+
2266
+ typedef struct cudaMallocFromPoolAsync_v11020_params_st {
2267
+ void **ptr;
2268
+ size_t size;
2269
+ cudaMemPool_t memPool;
2270
+ cudaStream_t stream;
2271
+ } cudaMallocFromPoolAsync_v11020_params;
2272
+
2273
+ typedef struct cudaGetDriverEntryPoint_v11030_params_st {
2274
+ const char *symbol;
2275
+ void **funcPtr;
2276
+ unsigned long long flags;
2277
+ enum cudaDriverEntryPointQueryResult *driverStatus;
2278
+ } cudaGetDriverEntryPoint_v11030_params;
2279
+
2280
+ typedef struct cudaGetDeviceProperties_v3020_params_st {
2281
+ struct cudaDeviceProp *prop;
2282
+ int device;
2283
+ } cudaGetDeviceProperties_v3020_params;
2284
+
2285
+ // Parameter trace structures for removed functions
2286
+
2287
+
2288
+ // End of parameter trace structures
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/generated_cuda_vdpau_interop_meta.h ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is generated. Any changes you make will be lost during the next clean build.
2
+
3
+ // CUDA public interface, for type definitions and api function prototypes
4
+ #include "cuda_vdpau_interop.h"
5
+
6
+ // *************************************************************************
7
+ // Definitions of structs to hold parameters for each function
8
+ // *************************************************************************
9
+
10
+ // Currently used parameter trace structures
11
+ typedef struct cudaVDPAUGetDevice_v3020_params_st {
12
+ int *device;
13
+ VdpDevice vdpDevice;
14
+ VdpGetProcAddress *vdpGetProcAddress;
15
+ } cudaVDPAUGetDevice_v3020_params;
16
+
17
+ typedef struct cudaVDPAUSetVDPAUDevice_v3020_params_st {
18
+ int device;
19
+ VdpDevice vdpDevice;
20
+ VdpGetProcAddress *vdpGetProcAddress;
21
+ } cudaVDPAUSetVDPAUDevice_v3020_params;
22
+
23
+ typedef struct cudaGraphicsVDPAURegisterVideoSurface_v3020_params_st {
24
+ struct cudaGraphicsResource **resource;
25
+ VdpVideoSurface vdpSurface;
26
+ unsigned int flags;
27
+ } cudaGraphicsVDPAURegisterVideoSurface_v3020_params;
28
+
29
+ typedef struct cudaGraphicsVDPAURegisterOutputSurface_v3020_params_st {
30
+ struct cudaGraphicsResource **resource;
31
+ VdpOutputSurface vdpSurface;
32
+ unsigned int flags;
33
+ } cudaGraphicsVDPAURegisterOutputSurface_v3020_params;
34
+
35
+ // Parameter trace structures for removed functions
36
+
37
+
38
+ // End of parameter trace structures
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/generated_cudart_removed_meta.h ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is generated. Any changes you make will be lost during the next clean build.
2
+
3
+ // CUDA public interface, for type definitions and api function prototypes
4
+ #include "cudart_removed.h"
5
+
6
+ // *************************************************************************
7
+ // Definitions of structs to hold parameters for each function
8
+ // *************************************************************************
9
+
10
+ // Currently used parameter trace structures
11
+ typedef struct cudaStreamDestroy_v3020_params_st {
12
+ cudaStream_t stream;
13
+ } cudaStreamDestroy_v3020_params;
14
+
15
+ typedef struct cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6000_params_st {
16
+ int *numBlocks;
17
+ const void *func;
18
+ size_t numDynamicSmemBytes;
19
+ } cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6000_params;
20
+
21
+ typedef struct cudaConfigureCall_v3020_params_st {
22
+ dim3 gridDim;
23
+ dim3 blockDim;
24
+ size_t sharedMem __dv;
25
+ cudaStream_t stream __dv;
26
+ } cudaConfigureCall_v3020_params;
27
+
28
+ typedef struct cudaSetupArgument_v3020_params_st {
29
+ const void *arg;
30
+ size_t size;
31
+ size_t offset;
32
+ } cudaSetupArgument_v3020_params;
33
+
34
+ typedef struct cudaLaunch_v3020_params_st {
35
+ const void *func;
36
+ } cudaLaunch_v3020_params;
37
+
38
+ typedef struct cudaLaunch_ptsz_v7000_params_st {
39
+ const void *func;
40
+ } cudaLaunch_ptsz_v7000_params;
41
+
42
+ typedef struct cudaStreamSetFlags_v10200_params_st {
43
+ cudaStream_t hStream;
44
+ unsigned int flags;
45
+ } cudaStreamSetFlags_v10200_params;
46
+
47
+ typedef struct cudaStreamSetFlags_ptsz_v10200_params_st {
48
+ cudaStream_t hStream;
49
+ unsigned int flags;
50
+ } cudaStreamSetFlags_ptsz_v10200_params;
51
+
52
+ typedef struct cudaProfilerInitialize_v4000_params_st {
53
+ const char *configFile;
54
+ const char *outputFile;
55
+ cudaOutputMode_t outputMode;
56
+ } cudaProfilerInitialize_v4000_params;
57
+
58
+ typedef struct cudaThreadSetLimit_v3020_params_st {
59
+ enum cudaLimit limit;
60
+ size_t value;
61
+ } cudaThreadSetLimit_v3020_params;
62
+
63
+ typedef struct cudaThreadGetLimit_v3020_params_st {
64
+ size_t *pValue;
65
+ enum cudaLimit limit;
66
+ } cudaThreadGetLimit_v3020_params;
67
+
68
+ typedef struct cudaThreadGetCacheConfig_v3020_params_st {
69
+ enum cudaFuncCache *pCacheConfig;
70
+ } cudaThreadGetCacheConfig_v3020_params;
71
+
72
+ typedef struct cudaThreadSetCacheConfig_v3020_params_st {
73
+ enum cudaFuncCache cacheConfig;
74
+ } cudaThreadSetCacheConfig_v3020_params;
75
+
76
+ typedef struct cudaSetDoubleForDevice_v3020_params_st {
77
+ double *d;
78
+ } cudaSetDoubleForDevice_v3020_params;
79
+
80
+ typedef struct cudaSetDoubleForHost_v3020_params_st {
81
+ double *d;
82
+ } cudaSetDoubleForHost_v3020_params;
83
+
84
+ typedef struct cudaCreateTextureObject_v2_v11080_params_st {
85
+ cudaTextureObject_t *pTexObject;
86
+ const struct cudaResourceDesc *pResDesc;
87
+ const struct cudaTextureDesc *pTexDesc;
88
+ const struct cudaResourceViewDesc *pResViewDesc;
89
+ } cudaCreateTextureObject_v2_v11080_params;
90
+
91
+ typedef struct cudaGetTextureObjectTextureDesc_v2_v11080_params_st {
92
+ struct cudaTextureDesc *pTexDesc;
93
+ cudaTextureObject_t texObject;
94
+ } cudaGetTextureObjectTextureDesc_v2_v11080_params;
95
+
96
+ typedef struct cudaBindTexture_v3020_params_st {
97
+ size_t *offset;
98
+ const struct textureReference *texref;
99
+ const void *devPtr;
100
+ const struct cudaChannelFormatDesc *desc;
101
+ size_t size __dv;
102
+ } cudaBindTexture_v3020_params;
103
+
104
+ typedef struct cudaBindTexture2D_v3020_params_st {
105
+ size_t *offset;
106
+ const struct textureReference *texref;
107
+ const void *devPtr;
108
+ const struct cudaChannelFormatDesc *desc;
109
+ size_t width;
110
+ size_t height;
111
+ size_t pitch;
112
+ } cudaBindTexture2D_v3020_params;
113
+
114
+ typedef struct cudaBindTextureToArray_v3020_params_st {
115
+ const struct textureReference *texref;
116
+ cudaArray_const_t array;
117
+ const struct cudaChannelFormatDesc *desc;
118
+ } cudaBindTextureToArray_v3020_params;
119
+
120
+ typedef struct cudaBindTextureToMipmappedArray_v5000_params_st {
121
+ const struct textureReference *texref;
122
+ cudaMipmappedArray_const_t mipmappedArray;
123
+ const struct cudaChannelFormatDesc *desc;
124
+ } cudaBindTextureToMipmappedArray_v5000_params;
125
+
126
+ typedef struct cudaUnbindTexture_v3020_params_st {
127
+ const struct textureReference *texref;
128
+ } cudaUnbindTexture_v3020_params;
129
+
130
+ typedef struct cudaGetTextureAlignmentOffset_v3020_params_st {
131
+ size_t *offset;
132
+ const struct textureReference *texref;
133
+ } cudaGetTextureAlignmentOffset_v3020_params;
134
+
135
+ typedef struct cudaGetTextureReference_v3020_params_st {
136
+ const struct textureReference **texref;
137
+ const void *symbol;
138
+ } cudaGetTextureReference_v3020_params;
139
+
140
+ typedef struct cudaBindSurfaceToArray_v3020_params_st {
141
+ const struct surfaceReference *surfref;
142
+ cudaArray_const_t array;
143
+ const struct cudaChannelFormatDesc *desc;
144
+ } cudaBindSurfaceToArray_v3020_params;
145
+
146
+ typedef struct cudaGetSurfaceReference_v3020_params_st {
147
+ const struct surfaceReference **surfref;
148
+ const void *symbol;
149
+ } cudaGetSurfaceReference_v3020_params;
150
+
151
+ typedef struct cudaGraphInstantiate_v10000_params_st {
152
+ cudaGraphExec_t *pGraphExec;
153
+ cudaGraph_t graph;
154
+ cudaGraphNode_t *pErrorNode;
155
+ char *pLogBuffer;
156
+ size_t bufferSize;
157
+ } cudaGraphInstantiate_v10000_params;
158
+
159
+ // Parameter trace structures for removed functions
160
+
161
+
162
+ // End of parameter trace structures
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/sm_20_atomic_functions.hpp ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__SM_20_ATOMIC_FUNCTIONS_HPP__)
51
+ #define __SM_20_ATOMIC_FUNCTIONS_HPP__
52
+
53
+ #if defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA)
54
+ extern "C"
55
+ {
56
+ extern __device__ __device_builtin__ float __fAtomicAdd(float *address, float val);
57
+ }
58
+ #endif /* __CUDA_ARCH__ */
59
+
60
+ #if defined(__CUDACC_RTC__)
61
+ #define __SM_20_ATOMIC_FUNCTIONS_DECL__ __device__
62
+ #else /* __CUDACC_RTC__ */
63
+ #define __SM_20_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__
64
+ #endif /* __CUDACC_RTC__ */
65
+
66
+ #if defined(__cplusplus) && defined(__CUDACC__)
67
+
68
+ /*******************************************************************************
69
+ * *
70
+ * *
71
+ * *
72
+ *******************************************************************************/
73
+
74
+ #include "cuda_runtime_api.h"
75
+
76
+ /*******************************************************************************
77
+ * *
78
+ * *
79
+ * *
80
+ *******************************************************************************/
81
+
82
+ __SM_20_ATOMIC_FUNCTIONS_DECL__ float atomicAdd(float *address, float val)
83
+ {
84
+ return __fAtomicAdd(address, val);
85
+ }
86
+
87
+ #endif /* __cplusplus && __CUDACC__ */
88
+
89
+ #undef __SM_20_ATOMIC_FUNCTIONS_DECL__
90
+
91
+ #endif /* !__SM_20_ATOMIC_FUNCTIONS_HPP__ */
92
+
deepseek/lib/python3.10/site-packages/triton/backends/nvidia/include/vector_types.h ADDED
@@ -0,0 +1,449 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__VECTOR_TYPES_H__)
51
+ #define __VECTOR_TYPES_H__
52
+
53
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
54
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
55
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_VECTOR_TYPES_H__
56
+ #endif
57
+
58
+ /*******************************************************************************
59
+ * *
60
+ * *
61
+ * *
62
+ *******************************************************************************/
63
+
64
+ #ifndef __DOXYGEN_ONLY__
65
+ #include "crt/host_defines.h"
66
+ #endif
67
+
68
+ /* NVRTC compiler defines these instead of in the header (to reduce compile time)
69
+ */
70
+ #ifndef __CUDACC_RTC_BUILTIN_VECTOR_TYPES__
71
+
72
+ /*******************************************************************************
73
+ * *
74
+ * *
75
+ * *
76
+ *******************************************************************************/
77
+
78
+ #if !defined(__CUDACC__) && !defined(__CUDACC_RTC__) && \
79
+ defined(_WIN32) && !defined(_WIN64)
80
+
81
+ #pragma warning(push)
82
+ #pragma warning(disable: 4201 4408)
83
+
84
+ #define __cuda_builtin_vector_align8(tag, members) \
85
+ struct __device_builtin__ tag \
86
+ { \
87
+ union \
88
+ { \
89
+ struct { members }; \
90
+ struct { long long int :1,:0; }; \
91
+ }; \
92
+ }
93
+
94
+ #else /* !__CUDACC__ && !__CUDACC_RTC__ && _WIN32 && !_WIN64 */
95
+
96
+ #define __cuda_builtin_vector_align8(tag, members) \
97
+ struct __device_builtin__ __align__(8) tag \
98
+ { \
99
+ members \
100
+ }
101
+
102
+ #endif /* !__CUDACC__ && !__CUDACC_RTC__ && _WIN32 && !_WIN64 */
103
+
104
+ struct __device_builtin__ char1
105
+ {
106
+ signed char x;
107
+ };
108
+
109
+ struct __device_builtin__ uchar1
110
+ {
111
+ unsigned char x;
112
+ };
113
+
114
+
115
+ struct __device_builtin__ __align__(2) char2
116
+ {
117
+ signed char x, y;
118
+ };
119
+
120
+ struct __device_builtin__ __align__(2) uchar2
121
+ {
122
+ unsigned char x, y;
123
+ };
124
+
125
+ struct __device_builtin__ char3
126
+ {
127
+ signed char x, y, z;
128
+ };
129
+
130
+ struct __device_builtin__ uchar3
131
+ {
132
+ unsigned char x, y, z;
133
+ };
134
+
135
+ struct __device_builtin__ __align__(4) char4
136
+ {
137
+ signed char x, y, z, w;
138
+ };
139
+
140
+ struct __device_builtin__ __align__(4) uchar4
141
+ {
142
+ unsigned char x, y, z, w;
143
+ };
144
+
145
+ struct __device_builtin__ short1
146
+ {
147
+ short x;
148
+ };
149
+
150
+ struct __device_builtin__ ushort1
151
+ {
152
+ unsigned short x;
153
+ };
154
+
155
+ struct __device_builtin__ __align__(4) short2
156
+ {
157
+ short x, y;
158
+ };
159
+
160
+ struct __device_builtin__ __align__(4) ushort2
161
+ {
162
+ unsigned short x, y;
163
+ };
164
+
165
+ struct __device_builtin__ short3
166
+ {
167
+ short x, y, z;
168
+ };
169
+
170
+ struct __device_builtin__ ushort3
171
+ {
172
+ unsigned short x, y, z;
173
+ };
174
+
175
+ __cuda_builtin_vector_align8(short4, short x; short y; short z; short w;);
176
+ __cuda_builtin_vector_align8(ushort4, unsigned short x; unsigned short y; unsigned short z; unsigned short w;);
177
+
178
+ struct __device_builtin__ int1
179
+ {
180
+ int x;
181
+ };
182
+
183
+ struct __device_builtin__ uint1
184
+ {
185
+ unsigned int x;
186
+ };
187
+
188
+ __cuda_builtin_vector_align8(int2, int x; int y;);
189
+ __cuda_builtin_vector_align8(uint2, unsigned int x; unsigned int y;);
190
+
191
+ struct __device_builtin__ int3
192
+ {
193
+ int x, y, z;
194
+ };
195
+
196
+ struct __device_builtin__ uint3
197
+ {
198
+ unsigned int x, y, z;
199
+ };
200
+
201
+ struct __device_builtin__ __builtin_align__(16) int4
202
+ {
203
+ int x, y, z, w;
204
+ };
205
+
206
+ struct __device_builtin__ __builtin_align__(16) uint4
207
+ {
208
+ unsigned int x, y, z, w;
209
+ };
210
+
211
+ struct __device_builtin__ long1
212
+ {
213
+ long int x;
214
+ };
215
+
216
+ struct __device_builtin__ ulong1
217
+ {
218
+ unsigned long x;
219
+ };
220
+
221
+ #if defined(_WIN32)
222
+ __cuda_builtin_vector_align8(long2, long int x; long int y;);
223
+ __cuda_builtin_vector_align8(ulong2, unsigned long int x; unsigned long int y;);
224
+ #else /* !_WIN32 */
225
+
226
+ struct __device_builtin__ __align__(2*sizeof(long int)) long2
227
+ {
228
+ long int x, y;
229
+ };
230
+
231
+ struct __device_builtin__ __align__(2*sizeof(unsigned long int)) ulong2
232
+ {
233
+ unsigned long int x, y;
234
+ };
235
+
236
+ #endif /* _WIN32 */
237
+
238
+ struct __device_builtin__ long3
239
+ {
240
+ long int x, y, z;
241
+ };
242
+
243
+ struct __device_builtin__ ulong3
244
+ {
245
+ unsigned long int x, y, z;
246
+ };
247
+
248
+ struct __device_builtin__ __builtin_align__(16) long4
249
+ {
250
+ long int x, y, z, w;
251
+ };
252
+
253
+ struct __device_builtin__ __builtin_align__(16) ulong4
254
+ {
255
+ unsigned long int x, y, z, w;
256
+ };
257
+
258
+ struct __device_builtin__ float1
259
+ {
260
+ float x;
261
+ };
262
+
263
+ #if !defined(__CUDACC__) && defined(__arm__) && \
264
+ defined(__ARM_PCS_VFP) && __GNUC__ == 4 && __GNUC_MINOR__ == 6
265
+
266
+ #pragma GCC diagnostic push
267
+ #pragma GCC diagnostic ignored "-pedantic"
268
+
269
+ struct __device_builtin__ __attribute__((aligned(8))) float2
270
+ {
271
+ float x; float y; float __cuda_gnu_arm_ice_workaround[0];
272
+ };
273
+
274
+ #pragma GCC poison __cuda_gnu_arm_ice_workaround
275
+ #pragma GCC diagnostic pop
276
+
277
+ #else /* !__CUDACC__ && __arm__ && __ARM_PCS_VFP &&
278
+ __GNUC__ == 4&& __GNUC_MINOR__ == 6 */
279
+
280
+ __cuda_builtin_vector_align8(float2, float x; float y;);
281
+
282
+ #endif /* !__CUDACC__ && __arm__ && __ARM_PCS_VFP &&
283
+ __GNUC__ == 4&& __GNUC_MINOR__ == 6 */
284
+
285
+ struct __device_builtin__ float3
286
+ {
287
+ float x, y, z;
288
+ };
289
+
290
+ struct __device_builtin__ __builtin_align__(16) float4
291
+ {
292
+ float x, y, z, w;
293
+ };
294
+
295
+ struct __device_builtin__ longlong1
296
+ {
297
+ long long int x;
298
+ };
299
+
300
+ struct __device_builtin__ ulonglong1
301
+ {
302
+ unsigned long long int x;
303
+ };
304
+
305
+ struct __device_builtin__ __builtin_align__(16) longlong2
306
+ {
307
+ long long int x, y;
308
+ };
309
+
310
+ struct __device_builtin__ __builtin_align__(16) ulonglong2
311
+ {
312
+ unsigned long long int x, y;
313
+ };
314
+
315
+ struct __device_builtin__ longlong3
316
+ {
317
+ long long int x, y, z;
318
+ };
319
+
320
+ struct __device_builtin__ ulonglong3
321
+ {
322
+ unsigned long long int x, y, z;
323
+ };
324
+
325
+ struct __device_builtin__ __builtin_align__(16) longlong4
326
+ {
327
+ long long int x, y, z ,w;
328
+ };
329
+
330
+ struct __device_builtin__ __builtin_align__(16) ulonglong4
331
+ {
332
+ unsigned long long int x, y, z, w;
333
+ };
334
+
335
+ struct __device_builtin__ double1
336
+ {
337
+ double x;
338
+ };
339
+
340
+ struct __device_builtin__ __builtin_align__(16) double2
341
+ {
342
+ double x, y;
343
+ };
344
+
345
+ struct __device_builtin__ double3
346
+ {
347
+ double x, y, z;
348
+ };
349
+
350
+ struct __device_builtin__ __builtin_align__(16) double4
351
+ {
352
+ double x, y, z, w;
353
+ };
354
+
355
+ #if !defined(__CUDACC__) && defined(_WIN32) && !defined(_WIN64)
356
+
357
+ #pragma warning(pop)
358
+
359
+ #endif /* !__CUDACC__ && _WIN32 && !_WIN64 */
360
+
361
+ /*******************************************************************************
362
+ * *
363
+ * *
364
+ * *
365
+ *******************************************************************************/
366
+
367
+ typedef __device_builtin__ struct char1 char1;
368
+ typedef __device_builtin__ struct uchar1 uchar1;
369
+ typedef __device_builtin__ struct char2 char2;
370
+ typedef __device_builtin__ struct uchar2 uchar2;
371
+ typedef __device_builtin__ struct char3 char3;
372
+ typedef __device_builtin__ struct uchar3 uchar3;
373
+ typedef __device_builtin__ struct char4 char4;
374
+ typedef __device_builtin__ struct uchar4 uchar4;
375
+ typedef __device_builtin__ struct short1 short1;
376
+ typedef __device_builtin__ struct ushort1 ushort1;
377
+ typedef __device_builtin__ struct short2 short2;
378
+ typedef __device_builtin__ struct ushort2 ushort2;
379
+ typedef __device_builtin__ struct short3 short3;
380
+ typedef __device_builtin__ struct ushort3 ushort3;
381
+ typedef __device_builtin__ struct short4 short4;
382
+ typedef __device_builtin__ struct ushort4 ushort4;
383
+ typedef __device_builtin__ struct int1 int1;
384
+ typedef __device_builtin__ struct uint1 uint1;
385
+ typedef __device_builtin__ struct int2 int2;
386
+ typedef __device_builtin__ struct uint2 uint2;
387
+ typedef __device_builtin__ struct int3 int3;
388
+ typedef __device_builtin__ struct uint3 uint3;
389
+ typedef __device_builtin__ struct int4 int4;
390
+ typedef __device_builtin__ struct uint4 uint4;
391
+ typedef __device_builtin__ struct long1 long1;
392
+ typedef __device_builtin__ struct ulong1 ulong1;
393
+ typedef __device_builtin__ struct long2 long2;
394
+ typedef __device_builtin__ struct ulong2 ulong2;
395
+ typedef __device_builtin__ struct long3 long3;
396
+ typedef __device_builtin__ struct ulong3 ulong3;
397
+ typedef __device_builtin__ struct long4 long4;
398
+ typedef __device_builtin__ struct ulong4 ulong4;
399
+ typedef __device_builtin__ struct float1 float1;
400
+ typedef __device_builtin__ struct float2 float2;
401
+ typedef __device_builtin__ struct float3 float3;
402
+ typedef __device_builtin__ struct float4 float4;
403
+ typedef __device_builtin__ struct longlong1 longlong1;
404
+ typedef __device_builtin__ struct ulonglong1 ulonglong1;
405
+ typedef __device_builtin__ struct longlong2 longlong2;
406
+ typedef __device_builtin__ struct ulonglong2 ulonglong2;
407
+ typedef __device_builtin__ struct longlong3 longlong3;
408
+ typedef __device_builtin__ struct ulonglong3 ulonglong3;
409
+ typedef __device_builtin__ struct longlong4 longlong4;
410
+ typedef __device_builtin__ struct ulonglong4 ulonglong4;
411
+ typedef __device_builtin__ struct double1 double1;
412
+ typedef __device_builtin__ struct double2 double2;
413
+ typedef __device_builtin__ struct double3 double3;
414
+ typedef __device_builtin__ struct double4 double4;
415
+
416
+ #undef __cuda_builtin_vector_align8
417
+
418
+ #endif /* !defined(__CUDACC_RTC_BUILTIN_VECTOR_TYPES__) */
419
+
420
+ /*******************************************************************************
421
+ * *
422
+ * *
423
+ * *
424
+ *******************************************************************************/
425
+
426
+ struct __device_builtin__ dim3
427
+ {
428
+ unsigned int x, y, z;
429
+ #if defined(__cplusplus)
430
+ #if __cplusplus >= 201103L
431
+ __host__ __device__ constexpr dim3(unsigned int vx = 1, unsigned int vy = 1, unsigned int vz = 1) : x(vx), y(vy), z(vz) {}
432
+ __host__ __device__ constexpr dim3(uint3 v) : x(v.x), y(v.y), z(v.z) {}
433
+ __host__ __device__ constexpr operator uint3(void) const { return uint3{x, y, z}; }
434
+ #else
435
+ __host__ __device__ dim3(unsigned int vx = 1, unsigned int vy = 1, unsigned int vz = 1) : x(vx), y(vy), z(vz) {}
436
+ __host__ __device__ dim3(uint3 v) : x(v.x), y(v.y), z(v.z) {}
437
+ __host__ __device__ operator uint3(void) const { uint3 t; t.x = x; t.y = y; t.z = z; return t; }
438
+ #endif
439
+ #endif /* __cplusplus */
440
+ };
441
+
442
+ typedef __device_builtin__ struct dim3 dim3;
443
+
444
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_VECTOR_TYPES_H__)
445
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
446
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_VECTOR_TYPES_H__
447
+ #endif
448
+
449
+ #endif /* !__VECTOR_TYPES_H__ */
evalkit_tf433/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/configuration_bert_generation.cpython-310.pyc ADDED
Binary file (5.54 kB). View file
 
evalkit_tf433/lib/python3.10/site-packages/transformers/models/bert_generation/configuration_bert_generation.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ BertGeneration model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+
19
+
20
+ class BertGenerationConfig(PretrainedConfig):
21
+ r"""
22
+ This is the configuration class to store the configuration of a [`BertGenerationPreTrainedModel`]. It is used to
23
+ instantiate a BertGeneration model according to the specified arguments, defining the model architecture.
24
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the BertGeneration
25
+ [google/bert_for_seq_generation_L-24_bbc_encoder](https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder)
26
+ architecture.
27
+
28
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
29
+ documentation from [`PretrainedConfig`] for more information.
30
+
31
+ Args:
32
+ vocab_size (`int`, *optional*, defaults to 50358):
33
+ Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
34
+ `inputs_ids` passed when calling [`BertGeneration`].
35
+ hidden_size (`int`, *optional*, defaults to 1024):
36
+ Dimensionality of the encoder layers and the pooler layer.
37
+ num_hidden_layers (`int`, *optional*, defaults to 24):
38
+ Number of hidden layers in the Transformer encoder.
39
+ num_attention_heads (`int`, *optional*, defaults to 16):
40
+ Number of attention heads for each attention layer in the Transformer encoder.
41
+ intermediate_size (`int`, *optional*, defaults to 3072):
42
+ Dimensionality of the "intermediate" (often called feed-forward) layer in the Transformer encoder.
43
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
44
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
45
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
46
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
47
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
48
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
49
+ The dropout ratio for the attention probabilities.
50
+ max_position_embeddings (`int`, *optional*, defaults to 512):
51
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
52
+ just in case (e.g., 512 or 1024 or 2048).
53
+ initializer_range (`float`, *optional*, defaults to 0.02):
54
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
55
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
56
+ The epsilon used by the layer normalization layers.
57
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
58
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
59
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
60
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
61
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
62
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
63
+ is_decoder (`bool`, *optional*, defaults to `False`):
64
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
65
+ use_cache (`bool`, *optional*, defaults to `True`):
66
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
67
+ relevant if `config.is_decoder=True`.
68
+
69
+ Examples:
70
+
71
+ ```python
72
+ >>> from transformers import BertGenerationConfig, BertGenerationEncoder
73
+
74
+ >>> # Initializing a BertGeneration config
75
+ >>> configuration = BertGenerationConfig()
76
+
77
+ >>> # Initializing a model (with random weights) from the config
78
+ >>> model = BertGenerationEncoder(configuration)
79
+
80
+ >>> # Accessing the model configuration
81
+ >>> configuration = model.config
82
+ ```"""
83
+ model_type = "bert-generation"
84
+
85
+ def __init__(
86
+ self,
87
+ vocab_size=50358,
88
+ hidden_size=1024,
89
+ num_hidden_layers=24,
90
+ num_attention_heads=16,
91
+ intermediate_size=4096,
92
+ hidden_act="gelu",
93
+ hidden_dropout_prob=0.1,
94
+ attention_probs_dropout_prob=0.1,
95
+ max_position_embeddings=512,
96
+ initializer_range=0.02,
97
+ layer_norm_eps=1e-12,
98
+ pad_token_id=0,
99
+ bos_token_id=2,
100
+ eos_token_id=1,
101
+ position_embedding_type="absolute",
102
+ use_cache=True,
103
+ **kwargs,
104
+ ):
105
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
106
+
107
+ self.vocab_size = vocab_size
108
+ self.hidden_size = hidden_size
109
+ self.num_hidden_layers = num_hidden_layers
110
+ self.num_attention_heads = num_attention_heads
111
+ self.hidden_act = hidden_act
112
+ self.intermediate_size = intermediate_size
113
+ self.hidden_dropout_prob = hidden_dropout_prob
114
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
115
+ self.max_position_embeddings = max_position_embeddings
116
+ self.initializer_range = initializer_range
117
+ self.layer_norm_eps = layer_norm_eps
118
+ self.position_embedding_type = position_embedding_type
119
+ self.use_cache = use_cache
evalkit_tf433/lib/python3.10/site-packages/transformers/models/bert_generation/tokenization_bert_generation.py ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization class for model BertGeneration."""
16
+
17
+
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import Any, Dict, List, Optional, Tuple
21
+
22
+ import sentencepiece as spm
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer
25
+ from ...utils import logging
26
+
27
+
28
# Module-level logger following the library-wide logging convention.
logger = logging.get_logger(__name__)

# Canonical on-disk file name for the SentencePiece model.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

# Download locations for the pretrained vocabulary files, keyed by checkpoint short name.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

# Maximum input length (in tokens) supported by each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
41
+
42
+
43
class BertGenerationTokenizer(PreTrainedTokenizer):
    """
    Construct a BertGeneration tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The begin of sequence token.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        sep_token (`str`, *optional*, defaults to `"<::::>"`):
            The separator token placed between two sequences.
        sp_model_kwargs (`dict`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
            to set:

            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
                using forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # No prefix tokens are prepended to inputs by this tokenizer.
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Extra keyword arguments for the SentencePiece processor (sampling, nbest, ...).
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Register the special tokens with the base class; sp_model_kwargs is
        # forwarded so it round-trips through save_pretrained/from_pretrained.
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        # Load the SentencePiece model that backs all tokenize/convert operations below.
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        # Size of the underlying SentencePiece vocabulary (added tokens excluded).
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """Return the full token -> id mapping, including tokens added after loading."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePieceProcessor is a SWIG object and cannot be pickled;
        # drop it here and rebuild it from vocab_file in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility with objects pickled before sp_model_kwargs existed
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        # Recreate the processor that __getstate__ stripped out.
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words"""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # Flush the accumulated sub-tokens through SentencePiece, then
                # append the special token verbatim.
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into *save_directory*.

        Returns a 1-tuple with the written file path.
        NOTE(review): on an invalid directory this logs an error and implicitly
        returns None rather than a tuple — callers should be prepared for that.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # Original file is gone (e.g. tokenizer was unpickled elsewhere):
            # write the serialized in-memory model instead.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
evalkit_tf433/lib/python3.10/site-packages/transformers/models/byt5/__init__.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...utils import _LazyModule


# Lazy-import table: maps each submodule name to the public names it exports.
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}


if TYPE_CHECKING:
    # Static type checkers resolve the real import eagerly.
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    # At runtime, replace this module object with a lazy proxy that imports
    # submodules only on first attribute access, keeping `import transformers` cheap.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
evalkit_tf433/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (492 Bytes). View file
 
evalkit_tf433/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/tokenization_byt5.cpython-310.pyc ADDED
Binary file (9.4 kB). View file
 
evalkit_tf433/lib/python3.10/site-packages/transformers/models/byt5/convert_byt5_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The T5 authors and HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert T5 checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
21
+ from transformers.utils import logging
22
+
23
+
24
+ logging.set_verbosity_info()
25
+
26
+
27
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow T5 checkpoint to a PyTorch model directory.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        config_file: JSON file describing the pre-trained model architecture.
        pytorch_dump_path: Destination directory for the converted PyTorch model.
    """
    # Build an empty PyTorch model matching the architecture described in the config.
    t5_config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {t5_config}")
    t5_model = T5ForConditionalGeneration(t5_config)

    # Copy every weight over from the TensorFlow checkpoint.
    load_tf_weights_in_t5(t5_model, t5_config, tf_checkpoint_path)

    # Write the converted model (weights + config) to disk.
    print(f"Save PyTorch model to {pytorch_dump_path}")
    t5_model.save_pretrained(pytorch_dump_path)
39
+
40
+
41
if __name__ == "__main__":
    # Command-line entry point: all three paths are mandatory.
    arg_parser = argparse.ArgumentParser()
    # Required parameters
    arg_parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    arg_parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    arg_parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    cli_args = arg_parser.parse_args()
    convert_tf_checkpoint_to_pytorch(cli_args.tf_checkpoint_path, cli_args.config_file, cli_args.pytorch_dump_path)
evalkit_tf433/lib/python3.10/site-packages/transformers/models/byt5/tokenization_byt5.py ADDED
@@ -0,0 +1,250 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 T5 Authors and HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization class for model ByT5."""
16
+
17
+
18
+ import warnings
19
+ from typing import Dict, List, Optional, Tuple
20
+
21
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
class ByT5Tokenizer(PreTrainedTokenizer):
    """
    Construct a ByT5 tokenizer. ByT5 simply uses raw bytes utf-8 encoding.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.

            </Tip>

        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        extra_ids (`int`, *optional*, defaults to 125):
            Add a number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are
            accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. Extra tokens are
            indexed from the end of the vocabulary up to beginning ("<extra_id_0>" is the last token in the vocabulary
            like in ByT5 preprocessing see
            [here](https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117)).
        additional_special_tokens (`List[str]`, *optional*):
            Additional special tokens used by the tokenizer.
    """

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Add extra_ids sentinel tokens to the special token list when the caller
        # did not supply them explicitly.
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids

        self._utf_vocab_size = 2**8  # utf is 8 bits

        # Map special-token *strings* to their reserved low ids.
        # FIX: annotation was `Dict[int, str]` although the keys are strings
        # and the values ids; swapped to match the actual contents.
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        # FIX: `additional_special_tokens` may still be None here (extra_ids == 0
        # and none supplied), which previously crashed `len(None)`.
        extra_special_tokens = additional_special_tokens or []
        n = len(extra_special_tokens)
        for i, token in enumerate(extra_special_tokens):
            # Extra ids occupy the top slots of the vocabulary, in order.
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        # Inverse mapping id -> token string (annotation fixed accordingly).
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        # 256 raw byte tokens + the reserved special tokens + the sentinel ids.
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens — only the trailing eos is special.
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. ByT5 does not
        make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A sequence has the following format:

        - single sequence: `X </s>`
        - pair of sequences: `A </s> B </s>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words"""
        # One single-character token per utf-8 byte of the input.
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            # Multi-character strings cannot be single bytes -> unknown.
            token_id = self.unk_token_id
        else:
            # Byte tokens are offset by the number of reserved special ids.
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                # FIX: previously looked up `special_tokens_decoder` with an
                # `added_tokens_decoder` key, raising KeyError for added tokens.
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                # Plain byte token: recover the raw byte value.
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    # ByT5Tokenizer has no vocab file
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/__init__.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import table: submodule -> exported names. Dependency-free modules are
# always listed; vision/torch modules are appended below only when available.
_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

# Image processing classes require the vision extras (PIL etc.).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

# Modeling classes require PyTorch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers resolve the real imports eagerly, mirroring the
    # lazy table above.
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # NOTE(review): ChineseCLIPImageProcessor is imported from the
        # feature-extraction module here while the lazy table maps it to
        # image_processing_chinese_clip — presumably re-exported there for
        # backward compatibility; verify against that module.
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules only on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.48 kB). View file
 
evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/configuration_chinese_clip.cpython-310.pyc ADDED
Binary file (17.4 kB). View file
 
evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/convert_chinese_clip_original_pytorch_to_hf.cpython-310.pyc ADDED
Binary file (4.04 kB). View file
 
evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/feature_extraction_chinese_clip.cpython-310.pyc ADDED
Binary file (1.06 kB). View file
 
evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/image_processing_chinese_clip.cpython-310.pyc ADDED
Binary file (13 kB). View file
 
evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/modeling_chinese_clip.cpython-310.pyc ADDED
Binary file (49.4 kB). View file
 
evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/processing_chinese_clip.cpython-310.pyc ADDED
Binary file (6.09 kB). View file
 
evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/configuration_chinese_clip.py ADDED
@@ -0,0 +1,462 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Chinese-CLIP model configuration"""
16
+
17
+ import os
18
+ from collections import OrderedDict
19
+ from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
20
+
21
+
22
+ if TYPE_CHECKING:
23
+ from ...processing_utils import ProcessorMixin
24
+ from ...utils import TensorType
25
+
26
+ from ...configuration_utils import PretrainedConfig
27
+ from ...onnx import OnnxConfig
28
+ from ...utils import logging
29
+
30
+
31
# Module-level logger following the library-wide logging convention.
logger = logging.get_logger(__name__)

# Download location of config.json for each supported pretrained checkpoint.
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "OFA-Sys/chinese-clip-vit-base-patch16": (
        "https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16/resolve/main/config.json"
    ),
}
38
+
39
+
40
+ class ChineseCLIPTextConfig(PretrainedConfig):
41
+ r"""
42
+ This is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used to instantiate a
43
+ Chinese CLIP model according to the specified arguments, defining the model architecture. Instantiating a
44
+ configuration with the defaults will yield a similar configuration to that of the Chinese CLIP
45
+ [OFA-Sys/chinese-clip-vit-base-patch16](https:
46
+ //huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) architecture.
47
+
48
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
49
+ documentation from [`PretrainedConfig`] for more information.
50
+
51
+
52
+ Args:
53
+ vocab_size (`int`, *optional*, defaults to 30522):
54
+ Vocabulary size of the CHINESE_CLIP model. Defines the number of different tokens that can be represented
55
+ by the `inputs_ids` passed when calling [`ChineseCLIPModel`].
56
+ hidden_size (`int`, *optional*, defaults to 768):
57
+ Dimensionality of the encoder layers and the pooler layer.
58
+ num_hidden_layers (`int`, *optional*, defaults to 12):
59
+ Number of hidden layers in the Transformer encoder.
60
+ num_attention_heads (`int`, *optional*, defaults to 12):
61
+ Number of attention heads for each attention layer in the Transformer encoder.
62
+ intermediate_size (`int`, *optional*, defaults to 3072):
63
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
64
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
65
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
66
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
67
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
68
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
69
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
70
+ The dropout ratio for the attention probabilities.
71
+ max_position_embeddings (`int`, *optional*, defaults to 512):
72
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
73
+ just in case (e.g., 512 or 1024 or 2048).
74
+ type_vocab_size (`int`, *optional*, defaults to 2):
75
+ The vocabulary size of the `token_type_ids` passed when calling [`ChineseCLIPModel`].
76
+ initializer_range (`float`, *optional*, defaults to 0.02):
77
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
78
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
79
+ The epsilon used by the layer normalization layers.
80
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
81
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
82
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
83
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
84
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
85
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
86
+ use_cache (`bool`, *optional*, defaults to `True`):
87
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
88
+ relevant if `config.is_decoder=True`.
89
+
90
+ Example:
91
+
92
+ ```python
93
+ >>> from transformers import ChineseCLIPTextConfig, ChineseCLIPTextModel
94
+
95
+ >>> # Initializing a ChineseCLIPTextConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration
96
+ >>> configuration = ChineseCLIPTextConfig()
97
+
98
+ >>> # Initializing a ChineseCLIPTextModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration
99
+ >>> model = ChineseCLIPTextModel(configuration)
100
+
101
+ >>> # Accessing the model configuration
102
+ >>> configuration = model.config
103
+ ```"""
104
+ model_type = "chinese_clip_text_model"
105
+
106
+ def __init__(
107
+ self,
108
+ vocab_size=30522,
109
+ hidden_size=768,
110
+ num_hidden_layers=12,
111
+ num_attention_heads=12,
112
+ intermediate_size=3072,
113
+ hidden_act="gelu",
114
+ hidden_dropout_prob=0.1,
115
+ attention_probs_dropout_prob=0.1,
116
+ max_position_embeddings=512,
117
+ type_vocab_size=2,
118
+ initializer_range=0.02,
119
+ initializer_factor=1.0,
120
+ layer_norm_eps=1e-12,
121
+ pad_token_id=0,
122
+ position_embedding_type="absolute",
123
+ use_cache=True,
124
+ **kwargs,
125
+ ):
126
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
127
+
128
+ self.vocab_size = vocab_size
129
+ self.hidden_size = hidden_size
130
+ self.num_hidden_layers = num_hidden_layers
131
+ self.num_attention_heads = num_attention_heads
132
+ self.hidden_act = hidden_act
133
+ self.intermediate_size = intermediate_size
134
+ self.hidden_dropout_prob = hidden_dropout_prob
135
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
136
+ self.max_position_embeddings = max_position_embeddings
137
+ self.type_vocab_size = type_vocab_size
138
+ self.initializer_range = initializer_range
139
+ self.initializer_factor = initializer_factor
140
+ self.layer_norm_eps = layer_norm_eps
141
+ self.position_embedding_type = position_embedding_type
142
+ self.use_cache = use_cache
143
+
144
+ @classmethod
145
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
146
+ cls._set_token_in_kwargs(kwargs)
147
+
148
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
149
+
150
+ # get the vision config dict if we are loading from ChineseCLIPConfig
151
+ if config_dict.get("model_type") == "chinese_clip":
152
+ config_dict = config_dict["text_config"]
153
+
154
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
155
+ logger.warning(
156
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
157
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
158
+ )
159
+
160
+ return cls.from_dict(config_dict, **kwargs)
161
+
162
+
163
class ChineseCLIPVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used to instantiate an
    ChineseCLIP model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the ChineseCLIP
    [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of the projection head.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels of each image.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 32):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
    Example:
    ```python
    >>> from transformers import ChineseCLIPVisionConfig, ChineseCLIPVisionModel

    >>> # Initializing a ChineseCLIPVisionConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration
    >>> configuration = ChineseCLIPVisionConfig()

    >>> # Initializing a ChineseCLIPVisionModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration
    >>> model = ChineseCLIPVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    # Identifier written into saved configs; used by AutoConfig-style dispatch.
    model_type = "chinese_clip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        """Store the ViT-style vision-encoder hyperparameters as attributes."""
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load a vision config, unwrapping the nested `vision_config` section when
        the checkpoint stores a composite `chinese_clip` config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from ChineseCLIPConfig
        if config_dict.get("model_type") == "chinese_clip":
            config_dict = config_dict["vision_config"]

        # Warn (but proceed) when the saved config was produced for a different model type.
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
266
+
267
+
268
class ChineseCLIPConfig(PretrainedConfig):
    r"""
    [`ChineseCLIPConfig`] is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used
    to instantiate Chinese-CLIP model according to the specified arguments, defining the text model and vision model
    configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the
    Chinese-CLIP [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16)
    architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`ChineseCLIPTextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`ChineseCLIPVisionConfig`].
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter. Default is used as per the original ChineseCLIP
            implementation.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import ChineseCLIPConfig, ChineseCLIPModel

    >>> # Initializing a ChineseCLIPConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration
    >>> configuration = ChineseCLIPConfig()

    >>> # Initializing a ChineseCLIPModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration
    >>> model = ChineseCLIPModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a ChineseCLIPConfig from a ChineseCLIPTextConfig and a ChineseCLIPVisionConfig

    >>> # Initializing a ChineseCLIPTextConfig and ChineseCLIPVisionConfig configuration
    >>> config_text = ChineseCLIPTextConfig()
    >>> config_vision = ChineseCLIPVisionConfig()

    >>> config = ChineseCLIPConfig.from_text_vision_configs(config_text, config_vision)
    ```"""

    # Identifier written into saved configs; used by AutoConfig-style dispatch.
    model_type = "chinese_clip"

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = ChineseCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `ChineseCLIPTextConfig`. "
                            f'The value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = ChineseCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize "
                            f'`ChineseCLIPVisionConfig`. The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `ChineseCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `ChineseCLIPVisionConfig` with default values.")

        self.text_config = ChineseCLIPTextConfig(**text_config)
        self.vision_config = ChineseCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        # Fixed initialization constants, matching the original Chinese-CLIP release.
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_text_vision_configs(
        cls, text_config: ChineseCLIPTextConfig, vision_config: ChineseCLIPVisionConfig, **kwargs
    ):
        r"""
        Instantiate a [`ChineseCLIPConfig`] (or a derived class) from Chinese-CLIP text model configuration and
        Chinese-CLIP vision model configuration.

        Returns:
            [`ChineseCLIPConfig`]: An instance of a configuration object
        """

        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
417
+
418
+
419
class ChineseCLIPOnnxConfig(OnnxConfig):
    """ONNX export configuration for Chinese-CLIP: dynamic axes, dummy inputs, opset."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the exported graph's inputs.
        dynamic_axes = {
            "input_ids": {0: "batch", 1: "sequence"},
            "pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"},
            "attention_mask": {0: "batch", 1: "sequence"},
        }
        return OrderedDict(dynamic_axes)

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        # Every output is dynamic along the batch axis only.
        output_names = ("logits_per_image", "logits_per_text", "text_embeds", "image_embeds")
        return OrderedDict((name, {0: "batch"}) for name in output_names)

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating ONNX outputs against PyTorch.
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        """Build combined dummy text + image inputs via the base implementation."""
        dummy_inputs = dict(
            super().generate_dummy_inputs(
                processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
            )
        )
        dummy_inputs.update(
            super().generate_dummy_inputs(processor.image_processor, batch_size=batch_size, framework=framework)
        )
        return dummy_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 14
evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/convert_chinese_clip_original_pytorch_to_hf.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import argparse
17
+
18
+ import torch
19
+
20
+ from transformers import ChineseCLIPConfig, ChineseCLIPModel
21
+
22
+
23
def copy_attn_layer(hf_attn_layer, pt_weights, prefix):
    """Load one attention layer from the original checkpoint into the HF module.

    The original checkpoint stores query/key/value stacked in ``in_proj_weight`` /
    ``in_proj_bias``; they are split into three equal chunks along dim 0.
    """
    weight_chunks = pt_weights[f"{prefix}.in_proj_weight"].chunk(3, dim=0)
    bias_chunks = pt_weights[f"{prefix}.in_proj_bias"].chunk(3, dim=0)

    projections = (hf_attn_layer.q_proj, hf_attn_layer.k_proj, hf_attn_layer.v_proj)
    for projection, weight, bias in zip(projections, weight_chunks, bias_chunks):
        projection.weight.data = weight
        projection.bias.data = bias

    hf_attn_layer.out_proj.weight.data = pt_weights[f"{prefix}.out_proj.weight"]
    hf_attn_layer.out_proj.bias.data = pt_weights[f"{prefix}.out_proj.bias"]
41
+
42
+
43
def copy_mlp(hf_mlp, pt_weights, prefix):
    """Copy the two feed-forward linear layers (checkpoint names `c_fc` / `c_proj`)."""
    for hf_linear, pt_name in ((hf_mlp.fc1, "c_fc"), (hf_mlp.fc2, "c_proj")):
        copy_linear(hf_linear, pt_weights, f"{prefix}.{pt_name}")
46
+
47
+
48
def copy_linear(hf_linear, pt_weights, prefix):
    """Copy weight and bias of a linear/layer-norm module from the checkpoint dict."""
    for attribute in ("weight", "bias"):
        getattr(hf_linear, attribute).data = pt_weights[f"{prefix}.{attribute}"].data
51
+
52
+
53
def copy_layer(hf_layer, pt_weights, prefix):
    """Populate one HF transformer block: layer norms, MLP, then self-attention."""
    copy_linear(hf_layer.layer_norm1, pt_weights, f"{prefix}.ln_1")
    copy_linear(hf_layer.layer_norm2, pt_weights, f"{prefix}.ln_2")
    copy_mlp(hf_layer.mlp, pt_weights, f"{prefix}.mlp")
    copy_attn_layer(hf_layer.self_attn, pt_weights, f"{prefix}.attn")
63
+
64
+
65
def copy_layers(hf_layers, pt_weights, prefix):
    """Copy every transformer block; checkpoint blocks are indexed `{prefix}.{i}`."""
    for index, block in enumerate(hf_layers):
        copy_layer(block, pt_weights, f"{prefix}.{index}")
68
+
69
+
70
def copy_text_model_and_projection(hf_model, pt_weights):
    """Populate the HF text tower and text projection from the original checkpoint."""
    # The checkpoint stores the projection as (in, out); nn.Linear expects (out, in).
    hf_model.text_projection.weight.data = pt_weights["text_projection"].data.T

    # Text-encoder parameter names match 1:1 modulo the "bert." prefix.
    for parameter_name, parameter in hf_model.text_model.named_parameters():
        parameter.data = pt_weights[f"bert.{parameter_name}"].data
77
+
78
+
79
def copy_vision_model_and_projection(hf_model, pt_weights):
    """Populate the HF vision tower and visual projection from the original checkpoint."""
    vision_model = hf_model.vision_model

    # The checkpoint stores the projection as (in, out); nn.Linear expects (out, in).
    hf_model.visual_projection.weight.data = pt_weights["visual.proj"].data.T

    # Pre/post layer norms (`pre_layrnorm` is the HF attribute name, typo and all).
    copy_linear(vision_model.pre_layrnorm, pt_weights, "visual.ln_pre")
    copy_linear(vision_model.post_layernorm, pt_weights, "visual.ln_post")

    # Patch, class and position embeddings.
    embeddings = vision_model.embeddings
    embeddings.patch_embedding.weight.data = pt_weights["visual.conv1.weight"].data
    embeddings.class_embedding.data = pt_weights["visual.class_embedding"].data
    embeddings.position_embedding.weight.data = pt_weights["visual.positional_embedding"].data

    # Transformer blocks.
    copy_layers(vision_model.encoder.layers, pt_weights, "visual.transformer.resblocks")
94
+
95
+
96
@torch.no_grad()
def convert_chinese_clip_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak model's weights to transformers design.

    Args:
        checkpoint_path: path to the original Chinese-CLIP checkpoint (torch pickle).
        pytorch_dump_folder_path: output folder for the converted HF model.
        config_path: path to the HF `config.json` matching the checkpoint size.

    Raises:
        ValueError: if `config_path` is not provided.
    """
    # `assert` is stripped under `python -O`; validate with an explicit exception.
    if config_path is None:
        raise ValueError("Please specify the ChineseCLIP model config of the corresponding model size.")
    config = ChineseCLIPConfig.from_pretrained(config_path)

    hf_model = ChineseCLIPModel(config).eval()

    # NOTE(security): torch.load unpickles arbitrary objects — only run this on
    # checkpoints from a trusted source (consider weights_only=True on newer torch).
    pt_weights = torch.load(checkpoint_path, map_location="cpu")["state_dict"]
    # Strip the DataParallel/DistributedDataParallel "module." prefix if present.
    pt_weights = {(name[7:] if name.startswith("module.") else name): value for name, value in pt_weights.items()}

    copy_text_model_and_projection(hf_model, pt_weights)
    copy_vision_model_and_projection(hf_model, pt_weights)
    hf_model.logit_scale.data = pt_weights["logit_scale"].data

    hf_model.save_pretrained(pytorch_dump_folder_path)
115
+
116
+
117
+ if __name__ == "__main__":
118
+ parser = argparse.ArgumentParser()
119
+ parser.add_argument(
120
+ "--pytorch_dump_folder_path",
121
+ default=None,
122
+ type=str,
123
+ help="Path to the output folder storing converted hf PyTorch model.",
124
+ )
125
+ parser.add_argument(
126
+ "--checkpoint_path", default=None, type=str, help="Path to original github format ChineseCLIP checkpoint."
127
+ )
128
+ parser.add_argument(
129
+ "--config_path", default=None, required=True, type=str, help="Path to hf config.json of model to convert."
130
+ )
131
+ args = parser.parse_args()
132
+
133
+ convert_chinese_clip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
134
+ print("The conversion is finished!")
evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/feature_extraction_chinese_clip.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for Chinese-CLIP."""
16
+
17
+ import warnings
18
+
19
+ from ...utils import logging
20
+ from .image_processing_chinese_clip import ChineseCLIPImageProcessor
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    """Deprecated alias of `ChineseCLIPImageProcessor`, kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        # Emit the deprecation notice before delegating construction to the
        # image-processor base class.
        deprecation_message = (
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead."
        )
        warnings.warn(deprecation_message, FutureWarning)
        super().__init__(*args, **kwargs)
evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/image_processing_chinese_clip.py ADDED
@@ -0,0 +1,311 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Chinese-CLIP."""
16
+
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import (
23
+ convert_to_rgb,
24
+ get_resize_output_image_size,
25
+ resize,
26
+ to_channel_dimension_format,
27
+ )
28
+ from ...image_utils import (
29
+ OPENAI_CLIP_MEAN,
30
+ OPENAI_CLIP_STD,
31
+ ChannelDimension,
32
+ ImageInput,
33
+ PILImageResampling,
34
+ infer_channel_dimension_format,
35
+ is_scaled_image,
36
+ make_list_of_images,
37
+ to_numpy_array,
38
+ valid_images,
39
+ )
40
+ from ...utils import TensorType, is_vision_available, logging
41
+
42
+
43
+ logger = logging.get_logger(__name__)
44
+
45
+
46
+ if is_vision_available():
47
+ import PIL
48
+
49
+
50
+ class ChineseCLIPImageProcessor(BaseImageProcessor):
51
+ r"""
52
+ Constructs a Chinese-CLIP image processor.
53
+
54
+ Args:
55
+ do_resize (`bool`, *optional*, defaults to `True`):
56
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
57
+ `do_resize` in the `preprocess` method.
58
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
59
+ Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
60
+ the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
61
+ method.
62
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
63
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
64
+ do_center_crop (`bool`, *optional*, defaults to `True`):
65
+ Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
66
+ `preprocess` method.
67
+ crop_size (`Dict[str, int]` *optional*, defaults to 224):
68
+ Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
69
+ method.
70
+ do_rescale (`bool`, *optional*, defaults to `True`):
71
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
72
+ the `preprocess` method.
73
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
74
+ Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
75
+ method.
76
+ do_normalize:
77
+ Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
78
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
79
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
80
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
81
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
82
+ Image standard deviation.
83
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
84
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
85
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
86
+ """
87
+
88
+ model_input_names = ["pixel_values"]
89
+
90
+ def __init__(
91
+ self,
92
+ do_resize: bool = True,
93
+ size: Dict[str, int] = None,
94
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
95
+ do_center_crop: bool = True,
96
+ crop_size: Dict[str, int] = None,
97
+ do_rescale: bool = True,
98
+ rescale_factor: Union[int, float] = 1 / 255,
99
+ do_normalize: bool = True,
100
+ image_mean: Optional[Union[float, List[float]]] = None,
101
+ image_std: Optional[Union[float, List[float]]] = None,
102
+ do_convert_rgb: bool = True,
103
+ **kwargs,
104
+ ) -> None:
105
+ super().__init__(**kwargs)
106
+ size = size if size is not None else {"shortest_edge": 224}
107
+ size = get_size_dict(size, default_to_square=False)
108
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
109
+ crop_size = get_size_dict(crop_size)
110
+
111
+ self.do_resize = do_resize
112
+ self.size = size
113
+ self.resample = resample
114
+ self.do_center_crop = do_center_crop
115
+ self.crop_size = crop_size
116
+ self.do_rescale = do_rescale
117
+ self.rescale_factor = rescale_factor
118
+ self.do_normalize = do_normalize
119
+ self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
120
+ self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
121
+ self.do_convert_rgb = do_convert_rgb
122
+
123
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
        resized to keep the input aspect ratio.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred from the input
                image.
        """
        size = get_size_dict(size, default_to_square=False)
        # NOTE(review): despite the docstring, this indexes size["height"]/size["width"],
        # which would raise KeyError for a {"shortest_edge": ...} dict like self.size's
        # default — presumably callers pass a height/width dict here; confirm upstream.
        output_size = get_resize_output_image_size(
            image, size=(size["height"], size["width"]), default_to_square=False, input_data_format=input_data_format
        )
        # Delegates to the module-level `resize` helper (this method shadows that name).
        return resize(
            image,
            size=output_size,
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )
+ )
161
+
162
+ def preprocess(
163
+ self,
164
+ images: ImageInput,
165
+ do_resize: bool = None,
166
+ size: Dict[str, int] = None,
167
+ resample: PILImageResampling = None,
168
+ do_center_crop: bool = None,
169
+ crop_size: int = None,
170
+ do_rescale: bool = None,
171
+ rescale_factor: float = None,
172
+ do_normalize: bool = None,
173
+ image_mean: Optional[Union[float, List[float]]] = None,
174
+ image_std: Optional[Union[float, List[float]]] = None,
175
+ do_convert_rgb: bool = None,
176
+ return_tensors: Optional[Union[str, TensorType]] = None,
177
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
178
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
179
+ **kwargs,
180
+ ) -> PIL.Image.Image:
181
+ """
182
+ Preprocess an image or batch of images.
183
+
184
+ Args:
185
+ images (`ImageInput`):
186
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
187
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
188
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
189
+ Whether to resize the image.
190
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
191
+ Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
192
+ the longest edge resized to keep the input aspect ratio.
193
+ resample (`int`, *optional*, defaults to `self.resample`):
194
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
195
+ has an effect if `do_resize` is set to `True`.
196
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
197
+ Whether to center crop the image.
198
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
199
+ Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
200
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
201
+ Whether to rescale the image.
202
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
203
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
204
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
205
+ Whether to normalize the image.
206
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
207
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
208
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
209
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
210
+ `True`.
211
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
212
+ Whether to convert the image to RGB.
213
+ return_tensors (`str` or `TensorType`, *optional*):
214
+ The type of tensors to return. Can be one of:
215
+ - Unset: Return a list of `np.ndarray`.
216
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
217
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
218
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
219
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
220
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
221
+ The channel dimension format for the output image. Can be one of:
222
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
223
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
224
+ - Unset: Use the channel dimension format of the input image.
225
+ input_data_format (`ChannelDimension` or `str`, *optional*):
226
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
227
+ from the input image. Can be one of:
228
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
229
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
230
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
231
+ """
232
+ do_resize = do_resize if do_resize is not None else self.do_resize
233
+ size = size if size is not None else self.size
234
+ size = get_size_dict(size, default_to_square=False)
235
+ resample = resample if resample is not None else self.resample
236
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
237
+ crop_size = crop_size if crop_size is not None else self.crop_size
238
+ crop_size = get_size_dict(crop_size)
239
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
240
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
241
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
242
+ image_mean = image_mean if image_mean is not None else self.image_mean
243
+ image_std = image_std if image_std is not None else self.image_std
244
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
245
+
246
+ images = make_list_of_images(images)
247
+
248
+ if not valid_images(images):
249
+ raise ValueError(
250
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
251
+ "torch.Tensor, tf.Tensor or jax.ndarray."
252
+ )
253
+
254
+ if do_resize and size is None:
255
+ raise ValueError("Size must be specified if do_resize is True.")
256
+
257
+ if do_center_crop and crop_size is None:
258
+ raise ValueError("Crop size must be specified if do_center_crop is True.")
259
+
260
+ if do_rescale and rescale_factor is None:
261
+ raise ValueError("Rescale factor must be specified if do_rescale is True.")
262
+
263
+ if do_normalize and (image_mean is None or image_std is None):
264
+ raise ValueError("Image mean and std must be specified if do_normalize is True.")
265
+
266
+ # PIL RGBA images are converted to RGB
267
+ if do_convert_rgb:
268
+ images = [convert_to_rgb(image) for image in images]
269
+
270
+ # All transformations expect numpy arrays.
271
+ images = [to_numpy_array(image) for image in images]
272
+
273
+ if is_scaled_image(images[0]) and do_rescale:
274
+ logger.warning_once(
275
+ "It looks like you are trying to rescale already rescaled images. If the input"
276
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
277
+ )
278
+
279
+ if input_data_format is None:
280
+ # We assume that all images have the same channel dimension format.
281
+ input_data_format = infer_channel_dimension_format(images[0])
282
+
283
+ if do_resize:
284
+ images = [
285
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
286
+ for image in images
287
+ ]
288
+
289
+ if do_center_crop:
290
+ images = [
291
+ self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
292
+ ]
293
+
294
+ if do_rescale:
295
+ images = [
296
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
297
+ for image in images
298
+ ]
299
+
300
+ if do_normalize:
301
+ images = [
302
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
303
+ for image in images
304
+ ]
305
+
306
+ images = [
307
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
308
+ ]
309
+
310
+ data = {"pixel_values": images}
311
+ return BatchFeature(data=data, tensor_type=return_tensors)
evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/modeling_chinese_clip.py ADDED
@@ -0,0 +1,1581 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Chinese-CLIP model."""
16
+
17
+
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import Any, List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_outputs import (
28
+ BaseModelOutput,
29
+ BaseModelOutputWithPastAndCrossAttentions,
30
+ BaseModelOutputWithPooling,
31
+ BaseModelOutputWithPoolingAndCrossAttentions,
32
+ )
33
+ from ...modeling_utils import PreTrainedModel
34
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
35
+ from ...utils import (
36
+ ModelOutput,
37
+ add_code_sample_docstrings,
38
+ add_start_docstrings,
39
+ add_start_docstrings_to_model_forward,
40
+ logging,
41
+ replace_return_docstrings,
42
+ )
43
+ from .configuration_chinese_clip import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig
44
+
45
+
46
+ logger = logging.get_logger(__name__)
47
+
48
+ _CHECKPOINT_FOR_DOC = "OFA-Sys/chinese-clip-vit-base-patch16"
49
+ _CONFIG_FOR_DOC = "ChineseCLIPConfig"
50
+
51
+ CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = [
52
+ "OFA-Sys/chinese-clip-vit-base-patch16",
53
+ # See all Chinese-CLIP models at https://huggingface.co/models?filter=chinese_clip
54
+ ]
55
+
56
+
57
+ # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
58
+ # Copied from transformers.models.clip.modeling_clip.contrastive_loss
59
+ def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
60
+ return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))
61
+
62
+
63
def chinese_clip_loss(similarity: torch.Tensor) -> torch.Tensor:
    """Symmetric CLIP loss: average of the text-to-image and image-to-text contrastive losses."""
    text_loss = contrastive_loss(similarity)
    image_loss = contrastive_loss(similarity.t())
    return (text_loss + image_loss) / 2.0
67
+
68
+
69
@dataclass
class ChineseCLIPOutput(ModelOutput):
    """
    Output container for [`ChineseCLIPModel`].

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
            Contrastive loss for image-text similarity.
        logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
            The scaled dot product scores between `image_embeds` and `text_embeds` (image-text similarity).
        logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
            The scaled dot product scores between `text_embeds` and `image_embeds` (text-image similarity).
        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            Text embeddings obtained by applying the projection layer to the pooled output of
            [`ChineseCLIPTextModel`].
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            Image embeddings obtained by applying the projection layer to the pooled output of
            [`ChineseCLIPVisionModel`].
        text_model_output (`BaseModelOutputWithPoolingAndCrossAttentions`):
            The output of the [`ChineseCLIPTextModel`].
        vision_model_output (`BaseModelOutputWithPoolingAndCrossAttentions`):
            The output of the [`ChineseCLIPVisionModel`].
    """

    loss: Optional[torch.FloatTensor] = None
    logits_per_image: torch.FloatTensor = None
    logits_per_text: torch.FloatTensor = None
    text_embeds: torch.FloatTensor = None
    image_embeds: torch.FloatTensor = None
    text_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None
    vision_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None

    def to_tuple(self) -> Tuple[Any]:
        # The two nested model outputs are themselves ModelOutputs and must be
        # flattened via their own `to_tuple`; all other fields pass through as-is.
        nested = ("text_model_output", "vision_model_output")
        return tuple(getattr(self, k).to_tuple() if k in nested else self[k] for k in self.keys())
106
+
107
+
108
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->ChineseCLIPText
class ChineseCLIPTextEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized.
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )
        # All-zero token_type_ids buffer lets callers omit token_type_ids (e.g. when tracing).
        self.register_buffer(
            "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values_length: int = 0,
    ) -> torch.Tensor:
        """Sum word, token-type and (optionally) absolute position embeddings, then LayerNorm + dropout."""
        shape = input_ids.size() if input_ids is not None else inputs_embeds.size()[:-1]
        seq_length = shape[1]

        if position_ids is None:
            # Offset by past_key_values_length so cached decoding keeps positions consistent.
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        if token_type_ids is None:
            # Default to the registered all-zeros buffer (helps tracing; see issue #5664),
            # falling back to a fresh zero tensor if the buffer is absent.
            if hasattr(self, "token_type_ids"):
                token_type_ids = self.token_type_ids[:, :seq_length].expand(shape[0], seq_length)
            else:
                token_type_ids = torch.zeros(shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        embeddings = inputs_embeds + self.token_type_embeddings(token_type_ids)
        if self.position_embedding_type == "absolute":
            embeddings += self.position_embeddings(position_ids)
        embeddings = self.LayerNorm(embeddings)
        return self.dropout(embeddings)
171
+
172
+
173
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->ChineseCLIP
174
+ class ChineseCLIPVisionEmbeddings(nn.Module):
175
+ def __init__(self, config: ChineseCLIPVisionConfig):
176
+ super().__init__()
177
+ self.config = config
178
+ self.embed_dim = config.hidden_size
179
+ self.image_size = config.image_size
180
+ self.patch_size = config.patch_size
181
+
182
+ self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
183
+
184
+ self.patch_embedding = nn.Conv2d(
185
+ in_channels=config.num_channels,
186
+ out_channels=self.embed_dim,
187
+ kernel_size=self.patch_size,
188
+ stride=self.patch_size,
189
+ bias=False,
190
+ )
191
+
192
+ self.num_patches = (self.image_size // self.patch_size) ** 2
193
+ self.num_positions = self.num_patches + 1
194
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
195
+ self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
196
+
197
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
198
+ batch_size = pixel_values.shape[0]
199
+ target_dtype = self.patch_embedding.weight.dtype
200
+ patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
201
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
202
+
203
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1)
204
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
205
+ embeddings = embeddings + self.position_embedding(self.position_ids)
206
+ return embeddings
207
+
208
+
209
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->ChineseCLIPText
class ChineseCLIPTextSelfAttention(nn.Module):
    """BERT-style multi-head self-attention with optional cross-attention, KV caching and
    relative position embeddings."""

    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = position_embedding_type or getattr(
            config, "position_embedding_type", "absolute"
        )
        if self.position_embedding_type in ("relative_key", "relative_key_query"):
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)

        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        """Reshape `(batch, seq, all_head_size)` -> `(batch, heads, seq, head_size)`."""
        x = x.view(x.size()[:-1] + (self.num_attention_heads, self.attention_head_size))
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        mixed_query_layer = self.query(hidden_states)

        # Cross-attention: keys/values come from the encoder and the encoder's
        # padding mask replaces `attention_mask`.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention and past_key_value is not None:
            # Cross-attention K/V are input-independent across decoding steps: reuse the cache.
            key_layer, value_layer = past_key_value[0], past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            # Uni-directional self-attention: append current K/V to the cached ones.
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        use_cache = past_key_value is not None
        if self.is_decoder:
            # Save K/V so subsequent decoding steps (self-attn) or layers (cross-attn)
            # can reuse them; for a bi-directional encoder `past_key_value` is always None.
            past_key_value = (key_layer, value_layer)

        # Raw attention scores: dot product between "query" and "key".
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type in ("relative_key", "relative_key_query"):
            query_length, key_length = query_layer.shape[2], key_layer.shape[2]
            if use_cache:
                # With a cache only the latest query position is present.
                position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
                    -1, 1
                )
            else:
                position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r

            # Shift distances into [0, 2*max-2] to index the embedding table.
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Additive mask (precomputed in the model's forward()).
            attention_scores = attention_scores + attention_mask

        # Normalize the scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # Dropout on probabilities drops entire tokens to attend to — unusual,
        # but taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        # Back to (batch, seq, all_head_size).
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        context_layer = context_layer.view(context_layer.size()[:-2] + (self.all_head_size,))

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
342
+
343
+
344
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->ChineseCLIPText
class ChineseCLIPTextSelfOutput(nn.Module):
    """Projection + dropout + residual LayerNorm applied after self-attention."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        """Project `hidden_states`, add the residual `input_tensor`, and layer-normalize."""
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
357
+
358
+
359
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->ChineseCLIPText
class ChineseCLIPTextAttention(nn.Module):
    """Wraps self-attention plus its output projection, with head-pruning support."""

    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        self.self = ChineseCLIPTextSelfAttention(config, position_embedding_type=position_embedding_type)
        self.output = ChineseCLIPTextSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads in-place and record them in `self.pruned_heads`."""
        if not heads:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune the Q/K/V projections on the output dim and the output dense on the input dim.
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads.
        self.self.num_attention_heads -= len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        # Re-attach attentions (and decoder cache) if present.
        return (attention_output,) + self_outputs[1:]
407
+
408
+
409
class ChineseCLIPVisionAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        # 1/sqrt(head_dim) scaling, applied to the queries.
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        """Reshape `(bsz, seq, embed)` -> `(bsz, num_heads, seq, head_dim)` contiguously."""
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel

        Returns `(attn_output, attn_weights)`; `attn_weights` is `None` unless
        `output_attentions=True`.
        """

        bsz, tgt_len, embed_dim = hidden_states.size()

        # Get query proj (pre-scaled), then fold the head dim into the batch dim for bmm.
        query_states = self.q_proj(hidden_states) * self.scale
        key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
        value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if output_attentions:
            # this operation is a bit akward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            # Fix: the error message previously reported the expected size as
            # (bsz, num_heads, tgt_len, head_dim), which does not match the actual
            # check above and produced a misleading diagnostic.
            raise ValueError(
                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        # Un-fold heads and merge them back into the embedding dim.
        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped
491
+
492
+
493
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->ChineseCLIPText
494
class ChineseCLIPTextIntermediate(nn.Module):
    """Feed-forward expansion of the text tower: hidden_size -> intermediate_size, then activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # `hidden_act` is either an activation name (resolved through ACT2FN)
        # or a callable provided directly on the config.
        if isinstance(config.hidden_act, str):
            act = ACT2FN[config.hidden_act]
        else:
            act = config.hidden_act
        self.intermediate_act_fn = act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Project to the intermediate width and apply the configured activation."""
        return self.intermediate_act_fn(self.dense(hidden_states))
507
+
508
+
509
+ # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->ChineseCLIPText
510
class ChineseCLIPTextOutput(nn.Module):
    """Feed-forward contraction with dropout, residual connection and LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        """Project back to hidden_size, drop out, then add & layer-normalize with the residual."""
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
522
+
523
+
524
+ # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->ChineseCLIPVision
525
class ChineseCLIPVisionMLP(nn.Module):
    """Two-layer feed-forward block of the vision tower (fc1 -> activation -> fc2)."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Expand to the intermediate width, activate, and project back to the hidden width."""
        return self.fc2(self.activation_fn(self.fc1(hidden_states)))
538
+
539
+
540
+ # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->ChineseCLIPText
541
class ChineseCLIPTextLayer(nn.Module):
    # One transformer layer of the text tower: self-attention, optional cross-attention
    # (only when configured as a decoder), then a chunked feed-forward block.
    def __init__(self, config):
        super().__init__()
        # apply_chunking_to_forward splits the FFN over the sequence dimension
        # (dim 1, see `seq_len_dim`) in chunks of this size.
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = ChineseCLIPTextAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
            self.crossattention = ChineseCLIPTextAttention(config, position_embedding_type="absolute")
        self.intermediate = ChineseCLIPTextIntermediate(config)
        self.output = ChineseCLIPTextOutput(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        """Run one layer.

        Returns `(layer_output, [self_attn_weights, [cross_attn_weights]], [present_key_value])`,
        where the optional members appear depending on `output_attentions` and `is_decoder`.
        """
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
                    " by setting `config.add_cross_attention=True`"
                )

            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        # FFN is applied in sequence-dimension chunks to bound peak activation memory.
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        # Intermediate expansion + output projection with residual; called once per chunk.
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
625
+
626
+
627
+ class ChineseCLIPVisionLayer(nn.Module):
628
+ def __init__(self, config: ChineseCLIPConfig):
629
+ super().__init__()
630
+ self.embed_dim = config.hidden_size
631
+ self.self_attn = ChineseCLIPVisionAttention(config)
632
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
633
+ self.mlp = ChineseCLIPVisionMLP(config)
634
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
635
+
636
+ def forward(
637
+ self,
638
+ hidden_states: torch.Tensor,
639
+ output_attentions: Optional[bool] = False,
640
+ ) -> Tuple[torch.FloatTensor]:
641
+ """
642
+ Args:
643
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
644
+ output_attentions (`bool`, *optional*):
645
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
646
+ returned tensors for more detail.
647
+ """
648
+ residual = hidden_states
649
+
650
+ hidden_states = self.layer_norm1(hidden_states)
651
+ hidden_states, attn_weights = self.self_attn(
652
+ hidden_states=hidden_states,
653
+ output_attentions=output_attentions,
654
+ )
655
+ hidden_states = residual + hidden_states
656
+
657
+ residual = hidden_states
658
+ hidden_states = self.layer_norm2(hidden_states)
659
+ hidden_states = self.mlp(hidden_states)
660
+ hidden_states = residual + hidden_states
661
+
662
+ outputs = (hidden_states,)
663
+
664
+ if output_attentions:
665
+ outputs += (attn_weights,)
666
+
667
+ return outputs
668
+
669
+
670
+ # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->ChineseCLIPText
671
class ChineseCLIPTextPooler(nn.Module):
    """Pools the sequence by passing the first token's hidden state through a dense layer with tanh."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Return tanh(dense(hidden_states[:, 0])) — the transformed first-token representation."""
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
684
+
685
+
686
class ChineseCLIPPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ChineseCLIPConfig
    base_model_prefix = "chinese_clip"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        # NOTE(review): called once per sub-module (presumably via PreTrainedModel's
        # module traversal — confirm against the base class); each `isinstance` branch
        # below handles one module type.
        factor = self.config.initializer_factor
        if isinstance(module, ChineseCLIPVisionEmbeddings):
            factor = self.config.initializer_factor
            nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
            nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
            nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
        elif isinstance(module, ChineseCLIPTextEmbeddings):
            nn.init.normal_(module.word_embeddings.weight, mean=0.0, std=self.config.initializer_range)
            nn.init.normal_(module.position_embeddings.weight, mean=0.0, std=self.config.initializer_range)
            nn.init.normal_(module.token_type_embeddings.weight, mean=0.0, std=self.config.initializer_range)
            # Zero out the padding embedding rows so padding tokens contribute nothing.
            for embedding in [module.word_embeddings, module.position_embeddings, module.token_type_embeddings]:
                if embedding.padding_idx is not None:
                    embedding.weight.data[embedding.padding_idx].zero_()
        elif isinstance(module, ChineseCLIPVisionAttention):
            factor = self.config.initializer_factor
            # Std scaled down with depth (2 * num_hidden_layers) for the in-projections.
            in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            out_proj_std = (module.embed_dim**-0.5) * factor
            nn.init.normal_(module.q_proj.weight, std=in_proj_std)
            nn.init.normal_(module.k_proj.weight, std=in_proj_std)
            nn.init.normal_(module.v_proj.weight, std=in_proj_std)
            nn.init.normal_(module.out_proj.weight, std=out_proj_std)
        elif isinstance(module, ChineseCLIPVisionMLP):
            factor = self.config.initializer_factor
            in_proj_std = (
                (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            )
            fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
            nn.init.normal_(module.fc1.weight, std=fc_std)
            nn.init.normal_(module.fc2.weight, std=in_proj_std)
        elif isinstance(module, ChineseCLIPModel):
            # Projection heads mapping each tower's pooled output into the shared space.
            nn.init.normal_(
                module.text_projection.weight,
                std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
            )
            nn.init.normal_(
                module.visual_projection.weight,
                std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
            )

        # Generic fallbacks applied in addition to the branches above (a module that is
        # itself a LayerNorm or Linear gets these when passed in directly).
        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def _set_gradient_checkpointing(self, module, value=False):
        # Toggle activation checkpointing on both encoder implementations.
        if isinstance(module, ChineseCLIPVisionEncoder) or isinstance(module, ChineseCLIPTextEncoder):
            module.gradient_checkpointing = value
748
+
749
+
750
+ CHINESE_CLIP_START_DOCSTRING = r"""
751
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
752
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
753
+ behavior.
754
+
755
+ Parameters:
756
+ config ([`ChineseCLIPConfig`]): Model configuration class with all the parameters of the model.
757
+ Initializing with a config file does not load the weights associated with the model, only the
758
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
759
+ """
760
+
761
+ CHINESE_CLIP_TEXT_INPUTS_DOCSTRING = r"""
762
+ Args:
763
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
764
+ Indices of input sequence tokens in the vocabulary.
765
+
766
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
767
+ [`PreTrainedTokenizer.__call__`] for details.
768
+
769
+ [What are input IDs?](../glossary#input-ids)
770
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
771
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
772
+
773
+ - 1 for tokens that are **not masked**,
774
+ - 0 for tokens that are **masked**.
775
+
776
+ [What are attention masks?](../glossary#attention-mask)
777
+ token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
778
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
779
+ 1]`:
780
+
781
+ - 0 corresponds to a *sentence A* token,
782
+ - 1 corresponds to a *sentence B* token.
783
+
784
+ [What are token type IDs?](../glossary#token-type-ids)
785
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
786
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
787
+ config.max_position_embeddings - 1]`.
788
+
789
+ [What are position IDs?](../glossary#position-ids)
790
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
791
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
792
+
793
+ - 1 indicates the head is **not masked**,
794
+ - 0 indicates the head is **masked**.
795
+
796
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
797
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
798
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
799
+ model's internal embedding lookup matrix.
800
+ output_attentions (`bool`, *optional*):
801
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
802
+ tensors for more detail.
803
+ output_hidden_states (`bool`, *optional*):
804
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
805
+ more detail.
806
+ return_dict (`bool`, *optional*):
807
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
808
+ """
809
+
810
+ CHINESE_CLIP_VISION_INPUTS_DOCSTRING = r"""
811
+ Args:
812
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
813
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
814
+ [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details.
815
+ output_attentions (`bool`, *optional*):
816
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
817
+ tensors for more detail.
818
+ output_hidden_states (`bool`, *optional*):
819
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
820
+ more detail.
821
+ return_dict (`bool`, *optional*):
822
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
823
+ """
824
+
825
+ CHINESE_CLIP_INPUTS_DOCSTRING = r"""
826
+ Args:
827
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
828
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
829
+ it.
830
+
831
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
832
+ [`PreTrainedTokenizer.__call__`] for details.
833
+
834
+ [What are input IDs?](../glossary#input-ids)
835
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
836
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
837
+
838
+ - 1 for tokens that are **not masked**,
839
+ - 0 for tokens that are **masked**.
840
+
841
+ [What are attention masks?](../glossary#attention-mask)
842
+ token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
843
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
844
+ 1]`:
845
+
846
+ - 0 corresponds to a *sentence A* token,
847
+ - 1 corresponds to a *sentence B* token.
848
+
849
+ [What are token type IDs?](../glossary#token-type-ids)
850
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
851
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
852
+ config.max_position_embeddings - 1]`.
853
+
854
+ [What are position IDs?](../glossary#position-ids)
855
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
856
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
857
+ [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details.
858
+ return_loss (`bool`, *optional*):
859
+ Whether or not to return the contrastive loss.
860
+ output_attentions (`bool`, *optional*):
861
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
862
+ tensors for more detail.
863
+ output_hidden_states (`bool`, *optional*):
864
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
865
+ more detail.
866
+ return_dict (`bool`, *optional*):
867
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
868
+ """
869
+
870
+
871
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->ChineseCLIPText
872
class ChineseCLIPTextEncoder(nn.Module):
    # Stack of `num_hidden_layers` ChineseCLIPTextLayer blocks with optional
    # gradient checkpointing and (decoder configs) key/value caching.
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([ChineseCLIPTextLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        """Run all layers, accumulating hidden states / attentions / caches as requested."""
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        # Checkpointing recomputes activations in backward, which is incompatible
        # with returning a usable key/value cache.
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                # Record the input to each layer (so entry i is the state *before* layer i).
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                # Closure binds the non-tensor args (`past_key_value`, `output_attentions`)
                # that torch.utils.checkpoint cannot pass through positionally.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                # The layer returns its present key/value cache as the last element.
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            # Append the final layer's output as the last hidden state.
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            # Tuple form: drop the entries that were not collected.
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
968
+
969
+
970
class ChineseCLIPVisionEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`ChineseCLIPVisionEncoderLayer`].

    Args:
        config: ChineseCLIPConfig
    """

    def __init__(self, config: ChineseCLIPConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([ChineseCLIPVisionLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        # Per-call flags fall back to the config defaults when left unset.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                # Record the input to each layer (state *before* layer idx).
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:

                # Closure binds `output_attentions`, which torch.utils.checkpoint
                # cannot pass through positionally.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(encoder_layer),
                    hidden_states,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            # Append the final layer's output as the last hidden state.
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            # Tuple form: drop the entries that were not collected.
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
1051
+
1052
+
1053
class ChineseCLIPVisionTransformer(nn.Module):
    # Full vision tower: patch/position embeddings -> pre-LayerNorm -> encoder stack
    # -> post-LayerNorm on the CLS token as the pooled output.
    def __init__(self, config: ChineseCLIPVisionConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = ChineseCLIPVisionEmbeddings(config)
        # NOTE(review): "pre_layrnorm" is misspelled but presumably must stay —
        # renaming would break released checkpoint state-dict keys; confirm before touching.
        self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.encoder = ChineseCLIPVisionEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:
        """
        # Per-call flags fall back to the config defaults when left unset.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.embeddings(pixel_values)
        hidden_states = self.pre_layrnorm(hidden_states)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        # Pool by taking the first (class) token, then layer-normalize it.
        pooled_output = last_hidden_state[:, 0, :]
        pooled_output = self.post_layernorm(pooled_output)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
1108
+
1109
+
1110
+ @add_start_docstrings(
1111
+ "The text model from CHINESE_CLIP without any head or projection on top.",
1112
+ CHINESE_CLIP_START_DOCSTRING,
1113
+ )
1114
+ class ChineseCLIPTextModel(ChineseCLIPPreTrainedModel):
1115
+ """
1116
+
1117
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
1118
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
1119
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
1120
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
1121
+
1122
+ To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
1123
+ to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and
1124
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
1125
+ """
1126
+
1127
+ config_class = ChineseCLIPTextConfig
1128
+
1129
    def __init__(self, config, add_pooling_layer=True):
        """Build the text tower.

        Args:
            config: model configuration (`ChineseCLIPTextConfig`).
            add_pooling_layer: when `False`, `self.pooler` is `None` and the model
                acts as a bare encoder without a pooled output.
        """
        super().__init__(config)
        self.config = config

        self.embeddings = ChineseCLIPTextEmbeddings(config)
        self.encoder = ChineseCLIPTextEncoder(config)

        self.pooler = ChineseCLIPTextPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()
1140
+
1141
    def get_input_embeddings(self):
        """Return the word-embedding module used to embed `input_ids`."""
        return self.embeddings.word_embeddings
1143
+
1144
    def set_input_embeddings(self, value):
        """Replace the word-embedding module used to embed `input_ids`."""
        self.embeddings.word_embeddings = value
1146
+
1147
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        # Delegates the actual pruning to each layer's attention module.
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
1154
+
1155
@add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
    checkpoint=_CHECKPOINT_FOR_DOC,
    output_type=BaseModelOutputWithPoolingAndCrossAttentions,
    config_class=_CONFIG_FOR_DOC,
)
def forward(
    self,
    input_ids: Optional[torch.Tensor] = None,
    attention_mask: Optional[torch.Tensor] = None,
    token_type_ids: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.Tensor] = None,
    head_mask: Optional[torch.Tensor] = None,
    inputs_embeds: Optional[torch.Tensor] = None,
    encoder_hidden_states: Optional[torch.Tensor] = None,
    encoder_attention_mask: Optional[torch.Tensor] = None,
    past_key_values: Optional[List[torch.FloatTensor]] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
    r"""
    encoder_hidden_states  (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
        Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
        the model is configured as a decoder.
    encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
        Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
        the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

        - 1 for tokens that are **not masked**,
        - 0 for tokens that are **masked**.
    past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
        Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

        If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
        don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
        `decoder_input_ids` of shape `(batch_size, sequence_length)`.
    use_cache (`bool`, *optional*):
        If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
        `past_key_values`).
    """
    # Resolve per-call overrides against the model config defaults.
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    # Key/value caching is only meaningful in decoder mode; force it off otherwise.
    if self.config.is_decoder:
        use_cache = use_cache if use_cache is not None else self.config.use_cache
    else:
        use_cache = False

    # Exactly one of input_ids / inputs_embeds must be provided.
    if input_ids is not None and inputs_embeds is not None:
        raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
    elif input_ids is not None:
        self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
        input_shape = input_ids.size()
    elif inputs_embeds is not None:
        input_shape = inputs_embeds.size()[:-1]
    else:
        raise ValueError("You have to specify either input_ids or inputs_embeds")

    batch_size, seq_length = input_shape
    device = input_ids.device if input_ids is not None else inputs_embeds.device

    # past_key_values_length: number of positions already cached from earlier decoding steps
    past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

    # Default mask attends to every position, including the cached prefix.
    if attention_mask is None:
        attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)

    if token_type_ids is None:
        # Prefer the registered all-zeros buffer when available; otherwise build zeros on the fly.
        if hasattr(self.embeddings, "token_type_ids"):
            buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
            buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
            token_type_ids = buffered_token_type_ids_expanded
        else:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

    # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
    # ourselves in which case we just need to make it broadcastable to all heads.
    extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)

    # If a 2D or 3D attention mask is provided for the cross-attention
    # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
    if self.config.is_decoder and encoder_hidden_states is not None:
        encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
        encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
        encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
    else:
        # Not a decoder (or no encoder states): no cross-attention mask is needed.
        encoder_extended_attention_mask = None

    # Prepare head mask if needed
    # 1.0 in head_mask indicate we keep the head
    # attention_probs has shape bsz x n_heads x N x N
    # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
    # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
    head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

    # Embed tokens (word + position + token-type), then run the transformer encoder stack.
    embedding_output = self.embeddings(
        input_ids=input_ids,
        position_ids=position_ids,
        token_type_ids=token_type_ids,
        inputs_embeds=inputs_embeds,
        past_key_values_length=past_key_values_length,
    )
    encoder_outputs = self.encoder(
        embedding_output,
        attention_mask=extended_attention_mask,
        head_mask=head_mask,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_extended_attention_mask,
        past_key_values=past_key_values,
        use_cache=use_cache,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )
    sequence_output = encoder_outputs[0]
    # Pooler is optional (add_pooling_layer=False yields pooled_output=None).
    pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

    if not return_dict:
        # Tuple output mirrors the dataclass field order below.
        return (sequence_output, pooled_output) + encoder_outputs[1:]

    return BaseModelOutputWithPoolingAndCrossAttentions(
        last_hidden_state=sequence_output,
        pooler_output=pooled_output,
        past_key_values=encoder_outputs.past_key_values,
        hidden_states=encoder_outputs.hidden_states,
        attentions=encoder_outputs.attentions,
        cross_attentions=encoder_outputs.cross_attentions,
    )
1290
+
1291
+
1292
@add_start_docstrings(
    """The vision model from CHINESE_CLIP without any head or projection on top.""",
    CHINESE_CLIP_START_DOCSTRING,
)
class ChineseCLIPVisionModel(ChineseCLIPPreTrainedModel):
    config_class = ChineseCLIPVisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: ChineseCLIPVisionConfig):
        super().__init__(config)
        # The bare vision transformer; all heavy lifting happens inside it.
        self.vision_model = ChineseCLIPVisionTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        """Return the patch-embedding module that turns pixels into tokens."""
        patch_embedding = self.vision_model.embeddings.patch_embedding
        return patch_embedding

    @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import CLIPProcessor, ChineseCLIPVisionModel

        >>> model = ChineseCLIPVisionModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
        >>> processor = CLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")

        >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled CLS states
        ```"""
        if return_dict is None:
            return_dict = self.config.use_return_dict

        # Pure delegation: the inner transformer produces the final outputs.
        outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        return outputs
1348
+
1349
+
1350
# Full dual-encoder Chinese-CLIP model: text tower + vision tower + projection heads
# producing a shared embedding space and image/text similarity logits.
@add_start_docstrings(CHINESE_CLIP_START_DOCSTRING)
class ChineseCLIPModel(ChineseCLIPPreTrainedModel):
    config_class = ChineseCLIPConfig

    def __init__(self, config: ChineseCLIPConfig):
        super().__init__(config)

        # Fail fast on mis-typed sub-configs (e.g. when a plain dict slipped through).
        if not isinstance(config.text_config, ChineseCLIPTextConfig):
            raise ValueError(
                "config.text_config is expected to be of type ChineseCLIPTextConfig but is of type"
                f" {type(config.text_config)}."
            )

        if not isinstance(config.vision_config, ChineseCLIPVisionConfig):
            raise ValueError(
                "config.vision_config is expected to be of type ChineseCLIPVisionConfig but is of type"
                f" {type(config.vision_config)}."
            )

        text_config = config.text_config
        vision_config = config.vision_config

        self.projection_dim = config.projection_dim
        self.text_embed_dim = text_config.hidden_size
        self.vision_embed_dim = vision_config.hidden_size

        # Text tower is built without its pooler; the [CLS] hidden state is used instead.
        self.text_model = ChineseCLIPTextModel(text_config, add_pooling_layer=False)
        self.vision_model = ChineseCLIPVisionTransformer(vision_config)

        # Bias-free linear projections into the shared multimodal space.
        self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
        self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
        # Learnable temperature (stored in log space; exponentiated in forward()).
        self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CHINESE_CLIP_TEXT_INPUTS_DOCSTRING)
    def get_text_features(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
            applying the projection layer to the final [CLS] hidden state of Text-Transformer.

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, ChineseCLIPModel

        >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
        >>> tokenizer = AutoTokenizer.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")

        >>> inputs = tokenizer(["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], padding=True, return_tensors="pt")
        >>> text_features = model.get_text_features(**inputs)
        >>> text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True)
        ```"""
        # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # [CLS] (first token) hidden state serves as the pooled text feature,
        # since the text model was constructed with add_pooling_layer=False.
        pooled_output = text_outputs[0][:, 0, :]
        text_features = self.text_projection(pooled_output)

        return text_features

    @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING)
    def get_image_features(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by
            applying the projection layer to the final [CLS] hidden state of Vision-Transformer.

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, ChineseCLIPModel

        >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
        >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")

        >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> image_features = model.get_image_features(**inputs)
        >>> image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)
        ```"""
        # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = vision_outputs[1]  # pooled_output
        image_features = self.visual_projection(pooled_output)

        return image_features

    @add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=ChineseCLIPOutput, config_class=ChineseCLIPConfig)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        return_loss: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ChineseCLIPOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, ChineseCLIPModel

        >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
        >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")

        >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(text=["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], images=image, return_tensors="pt", padding=True)

        >>> outputs = model(**inputs)
        >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```"""
        # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Project both towers into the shared embedding space.
        image_embeds = vision_outputs[1]
        image_embeds = self.visual_projection(image_embeds)

        text_embeds = text_outputs[0][:, 0, :]
        text_embeds = self.text_projection(text_embeds)

        # normalized features
        image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
        text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)

        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
        logits_per_image = logits_per_text.t()

        loss = None
        if return_loss:
            loss = chinese_clip_loss(logits_per_text)

        if not return_dict:
            # fix the None pooled_output of text_outputs to conform with dict_output
            pooled_output = text_outputs[1]
            if pooled_output is None:
                text_outputs = (text_outputs[0],) + text_outputs[2:]
            output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
            return ((loss,) + output) if loss is not None else output

        return ChineseCLIPOutput(
            loss=loss,
            logits_per_image=logits_per_image,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            image_embeds=image_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )
evalkit_tf433/lib/python3.10/site-packages/transformers/models/chinese_clip/processing_chinese_clip.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Image/Text processor class for Chinese-CLIP
17
+ """
18
+
19
+ import warnings
20
+
21
+ from ...processing_utils import ProcessorMixin
22
+ from ...tokenization_utils_base import BatchEncoding
23
+
24
+
25
class ChineseCLIPProcessor(ProcessorMixin):
    r"""
    Constructs a Chinese-CLIP processor which wraps a Chinese-CLIP image processor and a Chinese-CLIP tokenizer into a
    single processor.

    [`ChineseCLIPProcessor`] offers all the functionalities of [`ChineseCLIPImageProcessor`] and [`BertTokenizerFast`].
    See the [`~ChineseCLIPProcessor.__call__`] and [`~ChineseCLIPProcessor.decode`] for more information.

    Args:
        image_processor ([`ChineseCLIPImageProcessor`]):
            The image processor is a required input.
        tokenizer ([`BertTokenizerFast`]):
            The tokenizer is a required input.
    """
    # ProcessorMixin wires these class attributes into __init__/save/load machinery.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Accept the legacy `feature_extractor` kwarg as an alias for `image_processor`.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        # NOTE(review): mirrors other CLIP-style processors; presumably kept for
        # backward compatibility with code that reads `current_processor` — confirm.
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """
        Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to BertTokenizerFast's [`~BertTokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
        of the above two methods for more information.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
                number of channels, H and W are image height and width.

            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
        """

        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        # Merge image features into the text encoding when both modalities are given.
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both sub-processors' input names, de-duplicated while preserving order.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        # Deprecated alias kept for backward compatibility with the pre-v5 API.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
evalkit_tf433/lib/python3.10/site-packages/transformers/models/cvt/__init__.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Base import structure: the configuration is always importable; the
# framework-specific modeling modules are appended below only when the
# corresponding backend (PyTorch / TensorFlow) is installed.
_import_structure = {"configuration_cvt": ["CVT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CvtConfig"]}


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # PyTorch not installed: skip the torch CvT models
else:
    _import_structure["modeling_cvt"] = [
        "CVT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CvtForImageClassification",
        "CvtModel",
        "CvtPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # TensorFlow not installed: skip the TF CvT models
else:
    _import_structure["modeling_tf_cvt"] = [
        "TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCvtForImageClassification",
        "TFCvtModel",
        "TFCvtPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers follow the real imports so names resolve at analysis time.
    from .configuration_cvt import CVT_PRETRAINED_CONFIG_ARCHIVE_MAP, CvtConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cvt import (
            CVT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CvtForImageClassification,
            CvtModel,
            CvtPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_cvt import (
            TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCvtForImageClassification,
            TFCvtModel,
            TFCvtPreTrainedModel,
        )


else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy
    # framework-specific submodules are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
evalkit_tf433/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/convert_cvt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (9.44 kB). View file
 
evalkit_tf433/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/modeling_tf_cvt.cpython-310.pyc ADDED
Binary file (28.7 kB). View file