ZTWHHH committed
Commit 127836c · verified · Parent: 6f19e01

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +2 -0
  2. llava_next/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda117.so +3 -0
  3. videollama2/lib/python3.10/site-packages/nvidia/cuda_cupti/__init__.py +0 -0
  4. videollama2/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/__init__.py +0 -0
  5. videollama2/lib/python3.10/site-packages/nvidia/cudnn/include/__pycache__/__init__.cpython-310.pyc +0 -0
  6. videollama2/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_infer.h +658 -0
  7. videollama2/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_infer_v8.h +658 -0
  8. videollama2/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_infer_v8.h +571 -0
  9. videollama2/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_train.h +219 -0
  10. videollama2/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_train.h +501 -0
  11. videollama2/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_version_v8.h +109 -0
  12. videollama2/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn.so.8 +3 -0
  13. vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_all_methods.cpython-310.pyc +0 -0
  14. vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_apply.cpython-310.pyc +0 -0
  15. vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_apply_mutate.cpython-310.pyc +0 -0
  16. vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_bin_groupby.cpython-310.pyc +0 -0
  17. vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_categorical.cpython-310.pyc +0 -0
  18. vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_counting.cpython-310.pyc +0 -0
  19. vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_cumulative.cpython-310.pyc +0 -0
  20. vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_filters.cpython-310.pyc +0 -0
  21. vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_groupby.cpython-310.pyc +0 -0
  22. vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_groupby_dropna.cpython-310.pyc +0 -0
  23. vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_groupby_subclass.cpython-310.pyc +0 -0
  24. vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_grouping.cpython-310.pyc +0 -0
  25. vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_indexing.cpython-310.pyc +0 -0
  26. vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_libgroupby.cpython-310.pyc +0 -0
  27. vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_missing.cpython-310.pyc +0 -0
  28. vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_numba.cpython-310.pyc +0 -0
  29. vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_raises.cpython-310.pyc +0 -0
  30. vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_reductions.cpython-310.pyc +0 -0
  31. vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_timegrouper.cpython-310.pyc +0 -0
  32. vllm/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__init__.py +0 -0
  33. vllm/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/__init__.cpython-310.pyc +0 -0
  34. vllm/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_aggregate.cpython-310.pyc +0 -0
  35. vllm/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_cython.cpython-310.pyc +0 -0
  36. vllm/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_numba.cpython-310.pyc +0 -0
  37. vllm/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_other.cpython-310.pyc +0 -0
  38. vllm/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_aggregate.py +1672 -0
  39. vllm/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_cython.py +435 -0
  40. vllm/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_numba.py +392 -0
  41. vllm/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_other.py +675 -0
  42. vllm/lib/python3.10/site-packages/pandas/tests/groupby/methods/__init__.py +0 -0
  43. vllm/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/__init__.cpython-310.pyc +0 -0
  44. vllm/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_corrwith.cpython-310.pyc +0 -0
  45. vllm/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_describe.cpython-310.pyc +0 -0
  46. vllm/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_groupby_shift_diff.cpython-310.pyc +0 -0
  47. vllm/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_is_monotonic.cpython-310.pyc +0 -0
  48. vllm/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_nlargest_nsmallest.cpython-310.pyc +0 -0
  49. vllm/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_nth.cpython-310.pyc +0 -0
  50. vllm/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_quantile.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -938,3 +938,5 @@ vllm/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style.cpython-31
  parrot/lib/python3.10/lib-dynload/zlib.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  parrot/lib/python3.10/site-packages/numpy/random/mtrand.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  parrot/lib/python3.10/lib-dynload/math.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ llava_next/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda117.so filter=lfs diff=lfs merge=lfs -text
+ videollama2/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn.so.8 filter=lfs diff=lfs merge=lfs -text
llava_next/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda117.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e594e06f801e1988357e026761e27d3d6938a0838cf9fcf524b1a6121955295b
+ size 20814400
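The three `+` lines above are not the shared library itself but a Git LFS pointer stub: the commit stores this small text file, while the ~20 MB binary lives in LFS storage keyed by the sha256 oid (this is what the `filter=lfs` lines added to .gitattributes enable). As a minimal sketch of the pointer format, a C program can recover the oid and size; the local file name is hypothetical, and the exact three-line layout shown above is assumed.

#include <stdio.h>

/* Minimal sketch: parse a Git LFS pointer file (assumed 3-line layout as above).
 * The path is hypothetical; point it at a real pointer stub on disk. */
int main(void) {
    FILE *fp = fopen("libbitsandbytes_cuda117.so", "r"); /* pointer stub, not the real .so */
    if (!fp) { perror("fopen"); return 1; }

    char line[256], oid[128] = {0};
    unsigned long long size = 0;
    while (fgets(line, sizeof line, fp)) {
        /* "oid sha256:<64 hex chars>" */
        if (sscanf(line, "oid sha256:%127s", oid) == 1) continue;
        /* "size <bytes>" */
        if (sscanf(line, "size %llu", &size) == 1) continue;
    }
    fclose(fp);

    printf("oid  = %s\n", oid);           /* expected: e594e0... */
    printf("size = %llu bytes\n", size);  /* expected: 20814400 */
    return 0;
}

After `git lfs pull` materializes the real file, running `sha256sum` on it should reproduce the oid recorded in the pointer.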
videollama2/lib/python3.10/site-packages/nvidia/cuda_cupti/__init__.py ADDED
File without changes
videollama2/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/__init__.py ADDED
File without changes
videollama2/lib/python3.10/site-packages/nvidia/cudnn/include/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (177 Bytes)
videollama2/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_infer.h ADDED
@@ -0,0 +1,658 @@
+ /*
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ /* cudnn_adv_infer : cuDNN's advanced and experimental features.
+
+ */
+
+ #if !defined(CUDNN_ADV_INFER_H_)
+ #define CUDNN_ADV_INFER_H_
+
+ #include <cuda_runtime.h>
+ #include <stdint.h>
+
+ #include "cudnn_version.h"
+ #include "cudnn_ops_infer.h"
+
+ /* These version numbers are autogenerated, do not edit manually. */
+ #define CUDNN_ADV_INFER_MAJOR 8
+ #define CUDNN_ADV_INFER_MINOR 9
+ #define CUDNN_ADV_INFER_PATCH 2
+
+ #if (CUDNN_ADV_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_ADV_INFER_MINOR != CUDNN_MINOR) || \
+ (CUDNN_ADV_INFER_PATCH != CUDNN_PATCHLEVEL)
+ #error Version mismatch in cuDNN ADV INFER!!!
+ #endif
+
+ #if defined(__cplusplus)
+ extern "C" {
+ #endif
+
+ /* BASIC RNN API */
+
+ typedef enum {
+ CUDNN_FWD_MODE_INFERENCE = 0,
+ CUDNN_FWD_MODE_TRAINING = 1,
+ } cudnnForwardMode_t;
+
+ typedef enum {
+ CUDNN_RNN_RELU = 0, /* basic RNN cell type with ReLu activation */
+ CUDNN_RNN_TANH = 1, /* basic RNN cell type with tanh activation */
+ CUDNN_LSTM = 2, /* LSTM with optional recurrent projection and clipping */
+ CUDNN_GRU = 3, /* Using h' = tanh(r * Uh(t-1) + Wx) and h = (1 - z) * h' + z * h(t-1); */
+ } cudnnRNNMode_t;
+
+ typedef enum {
+ CUDNN_RNN_NO_BIAS = 0, /* rnn cell formulas do not use biases */
+ CUDNN_RNN_SINGLE_INP_BIAS = 1, /* rnn cell formulas use one input bias in input GEMM */
+ CUDNN_RNN_DOUBLE_BIAS = 2, /* default, rnn cell formulas use two bias vectors */
+ CUDNN_RNN_SINGLE_REC_BIAS = 3 /* rnn cell formulas use one recurrent bias in recurrent GEMM */
+ } cudnnRNNBiasMode_t;
+
+ typedef enum {
+ CUDNN_UNIDIRECTIONAL = 0, /* single direction network */
+ CUDNN_BIDIRECTIONAL = 1, /* output concatination at each layer */
+ } cudnnDirectionMode_t;
+
+ typedef enum {
+ CUDNN_LINEAR_INPUT = 0, /* adjustable weight matrix in first layer input GEMM */
+ CUDNN_SKIP_INPUT = 1, /* fixed identity matrix in the first layer input GEMM */
+ } cudnnRNNInputMode_t;
+
+ typedef enum {
+ CUDNN_RNN_CLIP_NONE = 0, /* disables LSTM cell clipping */
+ CUDNN_RNN_CLIP_MINMAX = 1, /* enables LSTM cell clipping */
+ } cudnnRNNClipMode_t;
+
+ typedef enum {
+ CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_UNPACKED = 0, /* padded, outer stride from one time-step to the next */
+ CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_PACKED = 1, /* sequence length sorted and packed as in basic RNN api */
+ CUDNN_RNN_DATA_LAYOUT_BATCH_MAJOR_UNPACKED = 2, /* padded, outer stride from one batch to the next */
+ } cudnnRNNDataLayout_t;
+
+ /* Legacy type for backward compatibility */
+ typedef unsigned cudnnRNNPaddingMode_t;
+
+ /* For auxFlags in cudnnSetRNNDescriptor_v8() and cudnnSetRNNPaddingMode() */
+ #define CUDNN_RNN_PADDED_IO_DISABLED 0
+ #define CUDNN_RNN_PADDED_IO_ENABLED (1U << 0)
+
+ struct cudnnRNNStruct;
+ typedef struct cudnnRNNStruct *cudnnRNNDescriptor_t;
+
+ struct cudnnPersistentRNNPlan;
+ typedef struct cudnnPersistentRNNPlan *cudnnPersistentRNNPlan_t;
+
+ struct cudnnRNNDataStruct;
+ typedef struct cudnnRNNDataStruct *cudnnRNNDataDescriptor_t;
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnCreateRNNDescriptor(cudnnRNNDescriptor_t *rnnDesc);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnDestroyRNNDescriptor(cudnnRNNDescriptor_t rnnDesc);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnSetRNNDescriptor_v8(cudnnRNNDescriptor_t rnnDesc,
+ cudnnRNNAlgo_t algo,
+ cudnnRNNMode_t cellMode,
+ cudnnRNNBiasMode_t biasMode,
+ cudnnDirectionMode_t dirMode,
+ cudnnRNNInputMode_t inputMode,
+ cudnnDataType_t dataType,
+ cudnnDataType_t mathPrec,
+ cudnnMathType_t mathType,
+ int32_t inputSize,
+ int32_t hiddenSize,
+ int32_t projSize,
+ int32_t numLayers,
+ cudnnDropoutDescriptor_t dropoutDesc,
+ uint32_t auxFlags);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetRNNDescriptor_v8(cudnnRNNDescriptor_t rnnDesc,
+ cudnnRNNAlgo_t *algo,
+ cudnnRNNMode_t *cellMode,
+ cudnnRNNBiasMode_t *biasMode,
+ cudnnDirectionMode_t *dirMode,
+ cudnnRNNInputMode_t *inputMode,
+ cudnnDataType_t *dataType,
+ cudnnDataType_t *mathPrec,
+ cudnnMathType_t *mathType,
+ int32_t *inputSize,
+ int32_t *hiddenSize,
+ int32_t *projSize,
+ int32_t *numLayers,
+ cudnnDropoutDescriptor_t *dropoutDesc,
+ uint32_t *auxFlags);
+
+ /*
+ * mathPrec in cudnnSetRNNDescriptor_v6() specifies compute precision
+ * compute precision is further modified by cudnnSetRNNMatrixMathType()
+ * dataType in cudnnGetRNNParamsSize() and wDesc specify weight storage
+ * dropout is between RNN layers, not between recurrent steps
+ */
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnSetRNNDescriptor_v6(cudnnHandle_t handle,
+ cudnnRNNDescriptor_t rnnDesc,
+ const int hiddenSize,
+ const int numLayers,
+ cudnnDropoutDescriptor_t dropoutDesc,
+ cudnnRNNInputMode_t inputMode,
+ cudnnDirectionMode_t direction,
+ cudnnRNNMode_t cellMode,
+ cudnnRNNAlgo_t algo,
+ cudnnDataType_t mathPrec);
+
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnGetRNNDescriptor_v6(cudnnHandle_t handle,
+ cudnnRNNDescriptor_t rnnDesc,
+ int *hiddenSize,
+ int *numLayers,
+ cudnnDropoutDescriptor_t *dropoutDesc,
+ cudnnRNNInputMode_t *inputMode,
+ cudnnDirectionMode_t *direction,
+ cudnnRNNMode_t *cellMode,
+ cudnnRNNAlgo_t *algo,
+ cudnnDataType_t *mathPrec);
+
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnSetRNNMatrixMathType(cudnnRNNDescriptor_t rnnDesc, cudnnMathType_t mType);
+
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnGetRNNMatrixMathType(cudnnRNNDescriptor_t rnnDesc, cudnnMathType_t *mType);
+
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnSetRNNBiasMode(cudnnRNNDescriptor_t rnnDesc, cudnnRNNBiasMode_t biasMode);
+
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnGetRNNBiasMode(cudnnRNNDescriptor_t rnnDesc, cudnnRNNBiasMode_t *biasMode);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnRNNSetClip_v8(cudnnRNNDescriptor_t rnnDesc,
+ cudnnRNNClipMode_t clipMode,
+ cudnnNanPropagation_t clipNanOpt,
+ double lclip,
+ double rclip);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnRNNGetClip_v8(cudnnRNNDescriptor_t rnnDesc,
+ cudnnRNNClipMode_t *clipMode,
+ cudnnNanPropagation_t *clipNanOpt,
+ double *lclip,
+ double *rclip);
+
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnRNNSetClip(cudnnHandle_t handle,
+ cudnnRNNDescriptor_t rnnDesc,
+ cudnnRNNClipMode_t clipMode,
+ cudnnNanPropagation_t clipNanOpt,
+ double lclip,
+ double rclip);
+
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnRNNGetClip(cudnnHandle_t handle,
+ cudnnRNNDescriptor_t rnnDesc,
+ cudnnRNNClipMode_t *clipMode,
+ cudnnNanPropagation_t *clipNanOpt,
+ double *lclip,
+ double *rclip);
+
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnSetRNNProjectionLayers(cudnnHandle_t handle,
+ cudnnRNNDescriptor_t rnnDesc,
+ const int recProjSize,
+ const int outProjSize);
+
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnGetRNNProjectionLayers(cudnnHandle_t handle,
+ const cudnnRNNDescriptor_t rnnDesc,
+ int *recProjSize,
+ int *outProjSize);
+
+ /* Expensive. Creates the plan for the specific settings. */
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnCreatePersistentRNNPlan(cudnnRNNDescriptor_t rnnDesc,
+ const int minibatch,
+ const cudnnDataType_t dataType,
+ cudnnPersistentRNNPlan_t *plan);
+
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnDestroyPersistentRNNPlan(cudnnPersistentRNNPlan_t plan);
+
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnSetPersistentRNNPlan(cudnnRNNDescriptor_t rnnDesc, cudnnPersistentRNNPlan_t plan);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnBuildRNNDynamic(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, int miniBatch);
+
+ /* dataType in weight descriptors and input descriptors is used to describe storage */
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnGetRNNWorkspaceSize(cudnnHandle_t handle,
+ const cudnnRNNDescriptor_t rnnDesc,
+ const int seqLength,
+ const cudnnTensorDescriptor_t *xDesc,
+ size_t *sizeInBytes);
+
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnGetRNNTrainingReserveSize(cudnnHandle_t handle,
+ const cudnnRNNDescriptor_t rnnDesc,
+ const int seqLength,
+ const cudnnTensorDescriptor_t *xDesc,
+ size_t *sizeInBytes);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetRNNTempSpaceSizes(cudnnHandle_t handle,
+ cudnnRNNDescriptor_t rnnDesc,
+ cudnnForwardMode_t fwdMode,
+ cudnnRNNDataDescriptor_t xDesc,
+ size_t *workSpaceSize,
+ size_t *reserveSpaceSize);
+
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnGetRNNParamsSize(cudnnHandle_t handle,
+ const cudnnRNNDescriptor_t rnnDesc,
+ const cudnnTensorDescriptor_t xDesc,
+ size_t *sizeInBytes,
+ cudnnDataType_t dataType);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetRNNWeightSpaceSize(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, size_t *weightSpaceSize);
+
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnGetRNNLinLayerMatrixParams(cudnnHandle_t handle,
+ const cudnnRNNDescriptor_t rnnDesc,
+ const int pseudoLayer,
+ const cudnnTensorDescriptor_t xDesc,
+ const cudnnFilterDescriptor_t wDesc,
+ const void *w,
+ const int linLayerID,
+ cudnnFilterDescriptor_t linLayerMatDesc,
+ void **linLayerMat);
+
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnGetRNNLinLayerBiasParams(cudnnHandle_t handle,
+ const cudnnRNNDescriptor_t rnnDesc,
+ const int pseudoLayer,
+ const cudnnTensorDescriptor_t xDesc,
+ const cudnnFilterDescriptor_t wDesc,
+ const void *w,
+ const int linLayerID,
+ cudnnFilterDescriptor_t linLayerBiasDesc,
+ void **linLayerBias);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetRNNWeightParams(cudnnHandle_t handle,
+ cudnnRNNDescriptor_t rnnDesc,
+ int32_t pseudoLayer,
+ size_t weightSpaceSize,
+ const void *weightSpace,
+ int32_t linLayerID,
+ cudnnTensorDescriptor_t mDesc,
+ void **mAddr,
+ cudnnTensorDescriptor_t bDesc,
+ void **bAddr);
+
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnRNNForwardInference(cudnnHandle_t handle,
+ const cudnnRNNDescriptor_t rnnDesc,
+ const int seqLength,
+ const cudnnTensorDescriptor_t *xDesc,
+ const void *x,
+ const cudnnTensorDescriptor_t hxDesc,
+ const void *hx,
+ const cudnnTensorDescriptor_t cxDesc,
+ const void *cx,
+ const cudnnFilterDescriptor_t wDesc,
+ const void *w,
+ const cudnnTensorDescriptor_t *yDesc,
+ void *y,
+ const cudnnTensorDescriptor_t hyDesc,
+ void *hy,
+ const cudnnTensorDescriptor_t cyDesc,
+ void *cy,
+ void *workSpace,
+ size_t workSpaceSizeInBytes);
+
+ /* RNN EX API */
+
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnSetRNNPaddingMode(cudnnRNNDescriptor_t rnnDesc, unsigned paddingMode);
+
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnGetRNNPaddingMode(cudnnRNNDescriptor_t rnnDesc, unsigned *paddingMode);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnCreateRNNDataDescriptor(cudnnRNNDataDescriptor_t *rnnDataDesc);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnDestroyRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnSetRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc,
+ cudnnDataType_t dataType,
+ cudnnRNNDataLayout_t layout,
+ int maxSeqLength,
+ int batchSize,
+ int vectorSize,
+ const int seqLengthArray[], /* length of each sequence in the batch */
+ void *paddingFill); /* symbol for filling padding position in output */
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc,
+ cudnnDataType_t *dataType,
+ cudnnRNNDataLayout_t *layout,
+ int *maxSeqLength,
+ int *batchSize,
+ int *vectorSize,
+ int arrayLengthRequested,
+ int seqLengthArray[],
+ void *paddingFill);
+
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnRNNForwardInferenceEx(cudnnHandle_t handle,
+ const cudnnRNNDescriptor_t rnnDesc,
+ const cudnnRNNDataDescriptor_t xDesc,
+ const void *x,
+ const cudnnTensorDescriptor_t hxDesc,
+ const void *hx,
+ const cudnnTensorDescriptor_t cxDesc,
+ const void *cx,
+ const cudnnFilterDescriptor_t wDesc,
+ const void *w,
+ const cudnnRNNDataDescriptor_t yDesc,
+ void *y,
+ const cudnnTensorDescriptor_t hyDesc,
+ void *hy,
+ const cudnnTensorDescriptor_t cyDesc,
+ void *cy,
+ const cudnnRNNDataDescriptor_t kDesc, /* reserved, should pass NULL */
+ const void *keys, /* reserved, should pass NULL */
+ const cudnnRNNDataDescriptor_t cDesc, /* reserved, should pass NULL */
+ void *cAttn, /* reserved, should pass NULL */
+ const cudnnRNNDataDescriptor_t iDesc, /* reserved, should pass NULL */
+ void *iAttn, /* reserved, should pass NULL */
+ const cudnnRNNDataDescriptor_t qDesc, /* reserved, should pass NULL */
+ void *queries, /* reserved, should pass NULL */
+ void *workSpace,
+ size_t workSpaceSizeInBytes);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnRNNForward(cudnnHandle_t handle,
+ cudnnRNNDescriptor_t rnnDesc,
+ cudnnForwardMode_t fwdMode,
+ const int32_t devSeqLengths[],
+ cudnnRNNDataDescriptor_t xDesc,
+ const void *x,
+ cudnnRNNDataDescriptor_t yDesc,
+ void *y,
+ cudnnTensorDescriptor_t hDesc,
+ const void *hx,
+ void *hy,
+ cudnnTensorDescriptor_t cDesc,
+ const void *cx,
+ void *cy,
+ size_t weightSpaceSize,
+ const void *weightSpace,
+ size_t workSpaceSize,
+ void *workSpace,
+ size_t reserveSpaceSize,
+ void *reserveSpace);
+
+ /* RNN FIND API */
+
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnSetRNNAlgorithmDescriptor(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, cudnnAlgorithmDescriptor_t algoDesc);
+
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnGetRNNForwardInferenceAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count);
+
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+ cudnnFindRNNForwardInferenceAlgorithmEx(cudnnHandle_t handle,
+ const cudnnRNNDescriptor_t rnnDesc,
+ const int seqLength,
+ const cudnnTensorDescriptor_t *xDesc,
+ const void *x,
+ const cudnnTensorDescriptor_t hxDesc,
+ const void *hx,
+ const cudnnTensorDescriptor_t cxDesc,
+ const void *cx,
+ const cudnnFilterDescriptor_t wDesc,
+ const void *w,
+ const cudnnTensorDescriptor_t *yDesc,
+ void *y,
+ const cudnnTensorDescriptor_t hyDesc,
+ void *hy,
+ const cudnnTensorDescriptor_t cyDesc,
+ void *cy,
+ const float findIntensity,
+ const int requestedAlgoCount,
+ int *returnedAlgoCount,
+ cudnnAlgorithmPerformance_t *perfResults,
+ void *workspace,
+ size_t workSpaceSizeInBytes);
+
+ /* Sequence data descriptor */
+
+ typedef enum {
+ CUDNN_SEQDATA_TIME_DIM = 0, /* index in time */
+ CUDNN_SEQDATA_BATCH_DIM = 1, /* index in batch */
+ CUDNN_SEQDATA_BEAM_DIM = 2, /* index in beam */
+ CUDNN_SEQDATA_VECT_DIM = 3 /* index in vector */
+ } cudnnSeqDataAxis_t;
+
+ struct cudnnSeqDataStruct;
+ typedef struct cudnnSeqDataStruct *cudnnSeqDataDescriptor_t;
+
+ #define CUDNN_SEQDATA_DIM_COUNT 4 /* dimension count */
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnCreateSeqDataDescriptor(cudnnSeqDataDescriptor_t *seqDataDesc);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnDestroySeqDataDescriptor(cudnnSeqDataDescriptor_t seqDataDesc);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnSetSeqDataDescriptor(cudnnSeqDataDescriptor_t seqDataDesc,
+ cudnnDataType_t dataType,
+ int nbDims,
+ const int dimA[],
+ const cudnnSeqDataAxis_t axes[],
+ size_t seqLengthArraySize,
+ const int seqLengthArray[],
+ void *paddingFill);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetSeqDataDescriptor(const cudnnSeqDataDescriptor_t seqDataDesc,
+ cudnnDataType_t *dataType,
+ int *nbDims,
+ int nbDimsRequested,
+ int dimA[],
+ cudnnSeqDataAxis_t axes[],
+ size_t *seqLengthArraySize,
+ size_t seqLengthSizeRequested,
+ int seqLengthArray[],
+ void *paddingFill);
+
+ /* Multihead Attention */
+
+ /* Legacy type for backward compatibility */
+ typedef unsigned cudnnAttnQueryMap_t;
+
+ /*
+ * Multi-head attention options passed via 'attnMode' in cudnnSetAttnDescriptor().
+ * Use the bitwise OR operator to combine several settings listed below. Additional
+ * minor options can be added here w/o changing or introducing new API functions.
+ */
+ #define CUDNN_ATTN_QUERYMAP_ALL_TO_ONE 0 /* multiple Q-s map to a single (K,V) set when beam size > 1 */
+ #define CUDNN_ATTN_QUERYMAP_ONE_TO_ONE (1U << 0) /* multiple Q-s map to multiple (K,V) sets when beam size > 1 */
+ #define CUDNN_ATTN_DISABLE_PROJ_BIASES 0 /* no biases in attention input and output projections */
+ #define CUDNN_ATTN_ENABLE_PROJ_BIASES (1U << 1) /* use biases in attention input and output projections */
+
+ struct cudnnAttnStruct;
+ typedef struct cudnnAttnStruct *cudnnAttnDescriptor_t;
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnCreateAttnDescriptor(cudnnAttnDescriptor_t *attnDesc);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnDestroyAttnDescriptor(cudnnAttnDescriptor_t attnDesc);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnSetAttnDescriptor(cudnnAttnDescriptor_t attnDesc,
+ unsigned attnMode,
+ int nHeads,
+ double smScaler,
+ cudnnDataType_t dataType,
+ cudnnDataType_t computePrec,
+ cudnnMathType_t mathType,
+ cudnnDropoutDescriptor_t attnDropoutDesc,
+ cudnnDropoutDescriptor_t postDropoutDesc,
+ int qSize,
+ int kSize,
+ int vSize,
+ int qProjSize,
+ int kProjSize,
+ int vProjSize,
+ int oProjSize,
+ int qoMaxSeqLength,
+ int kvMaxSeqLength,
+ int maxBatchSize,
+ int maxBeamSize);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetAttnDescriptor(cudnnAttnDescriptor_t attnDesc,
+ unsigned *attnMode,
+ int *nHeads,
+ double *smScaler,
+ cudnnDataType_t *dataType,
+ cudnnDataType_t *computePrec,
+ cudnnMathType_t *mathType,
+ cudnnDropoutDescriptor_t *attnDropoutDesc,
+ cudnnDropoutDescriptor_t *postDropoutDesc,
+ int *qSize,
+ int *kSize,
+ int *vSize,
+ int *qProjSize,
+ int *kProjSize,
+ int *vProjSize,
+ int *oProjSize,
+ int *qoMaxSeqLength,
+ int *kvMaxSeqLength,
+ int *maxBatchSize,
+ int *maxBeamSize);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetMultiHeadAttnBuffers(cudnnHandle_t handle,
+ const cudnnAttnDescriptor_t attnDesc,
+ size_t *weightSizeInBytes,
+ size_t *workSpaceSizeInBytes,
+ size_t *reserveSpaceSizeInBytes);
+
+ typedef enum {
+ CUDNN_MH_ATTN_Q_WEIGHTS = 0, /* input projection weights for 'queries' */
+ CUDNN_MH_ATTN_K_WEIGHTS = 1, /* input projection weights for 'keys' */
+ CUDNN_MH_ATTN_V_WEIGHTS = 2, /* input projection weights for 'values' */
+ CUDNN_MH_ATTN_O_WEIGHTS = 3, /* output projection weights */
+ CUDNN_MH_ATTN_Q_BIASES = 4, /* input projection bias tensor for 'queries' */
+ CUDNN_MH_ATTN_K_BIASES = 5, /* input projection bias for 'keys' */
+ CUDNN_MH_ATTN_V_BIASES = 6, /* input projection bias for 'values' */
+ CUDNN_MH_ATTN_O_BIASES = 7, /* output projection biases */
+ } cudnnMultiHeadAttnWeightKind_t;
+
+ #define CUDNN_ATTN_WKIND_COUNT 8 /* Number of attention weight/bias tensors */
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetMultiHeadAttnWeights(cudnnHandle_t handle,
+ const cudnnAttnDescriptor_t attnDesc,
+ cudnnMultiHeadAttnWeightKind_t wKind,
+ size_t weightSizeInBytes,
+ const void *weights,
+ cudnnTensorDescriptor_t wDesc,
+ void **wAddr);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnMultiHeadAttnForward(cudnnHandle_t handle,
+ const cudnnAttnDescriptor_t attnDesc,
+ int currIdx,
+ const int loWinIdx[],
+ const int hiWinIdx[],
+ const int devSeqLengthsQO[],
+ const int devSeqLengthsKV[],
+ const cudnnSeqDataDescriptor_t qDesc,
+ const void *queries,
+ const void *residuals,
+ const cudnnSeqDataDescriptor_t kDesc,
+ const void *keys,
+ const cudnnSeqDataDescriptor_t vDesc,
+ const void *values,
+ const cudnnSeqDataDescriptor_t oDesc,
+ void *out,
+ size_t weightSizeInBytes,
+ const void *weights,
+ size_t workSpaceSizeInBytes,
+ void *workSpace,
+ size_t reserveSpaceSizeInBytes,
+ void *reserveSpace);
+
+ /*
+ * \brief Cross-library version checker.
+ * This function is implemented differently in each sub-library. Each sublib
+ * checks whether its own version matches that of its dependencies.
+ * \returns CUDNN_STATUS_SUCCESS if the version check passes,
+ * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent.
+ */
+ cudnnStatus_t CUDNNWINAPI
+ cudnnAdvInferVersionCheck(void);
+
+ #if defined(__cplusplus)
+ }
+ #endif
+
+ #endif /* CUDNN_ADV_INFER_H_ */
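Since this header only declares the API, here is a minimal, hedged sketch of the v8 RNN workflow it defines: create a descriptor, configure it with cudnnSetRNNDescriptor_v8(), and query the flat weight-space size with cudnnGetRNNWeightSpaceSize(). The LSTM sizes and the zero-rate dropout setup are illustrative assumptions, not values taken from this commit; cudnnCreate() and the dropout calls come from cudnn_ops_infer.h, which this header includes.

#include <stdio.h>
#include <stdlib.h>
#include <cudnn.h>

/* Abort on any non-success status; sketch-level error handling only. */
#define CHECK(call)                                                 \
    do {                                                            \
        cudnnStatus_t s = (call);                                   \
        if (s != CUDNN_STATUS_SUCCESS) {                            \
            fprintf(stderr, "%s failed: %s\n", #call,               \
                    cudnnGetErrorString(s));                        \
            exit(1);                                                \
        }                                                           \
    } while (0)

int main(void) {
    cudnnHandle_t handle;
    CHECK(cudnnCreate(&handle));

    /* Zero-rate dropout between stacked layers (assumption: a NULL state
     * buffer is acceptable when the dropout probability is 0). */
    cudnnDropoutDescriptor_t dropout;
    CHECK(cudnnCreateDropoutDescriptor(&dropout));
    CHECK(cudnnSetDropoutDescriptor(dropout, handle, 0.0f, NULL, 0, 0));

    /* Configure a 2-layer unidirectional LSTM; sizes are illustrative. */
    cudnnRNNDescriptor_t rnn;
    CHECK(cudnnCreateRNNDescriptor(&rnn));
    CHECK(cudnnSetRNNDescriptor_v8(rnn,
                                   CUDNN_RNN_ALGO_STANDARD,
                                   CUDNN_LSTM,
                                   CUDNN_RNN_DOUBLE_BIAS,
                                   CUDNN_UNIDIRECTIONAL,
                                   CUDNN_LINEAR_INPUT,
                                   CUDNN_DATA_FLOAT,  /* dataType */
                                   CUDNN_DATA_FLOAT,  /* mathPrec */
                                   CUDNN_DEFAULT_MATH,
                                   128,               /* inputSize  */
                                   256,               /* hiddenSize */
                                   256,               /* projSize   */
                                   2,                 /* numLayers  */
                                   dropout,
                                   CUDNN_RNN_PADDED_IO_ENABLED));

    /* One flat buffer of this many bytes holds all weights and biases;
     * individual matrices are then located with cudnnGetRNNWeightParams(). */
    size_t weightSpaceSize = 0;
    CHECK(cudnnGetRNNWeightSpaceSize(handle, rnn, &weightSpaceSize));
    printf("weight space: %zu bytes\n", weightSpaceSize);

    CHECK(cudnnDestroyRNNDescriptor(rnn));
    CHECK(cudnnDestroyDropoutDescriptor(dropout));
    CHECK(cudnnDestroy(handle));
    return 0;
}

The same descriptor would then feed cudnnGetRNNTempSpaceSizes() and cudnnRNNForward() for an actual inference pass; setting projSize equal to hiddenSize leaves the optional LSTM recurrent projection disabled.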
videollama2/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_infer_v8.h ADDED
@@ -0,0 +1,658 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /* cudnn_adv_infer : cuDNN's advanced and experimental features.
51
+
52
+ */
53
+
54
+ #if !defined(CUDNN_ADV_INFER_H_)
55
+ #define CUDNN_ADV_INFER_H_
56
+
57
+ #include <cuda_runtime.h>
58
+ #include <stdint.h>
59
+
60
+ #include "cudnn_version.h"
61
+ #include "cudnn_ops_infer.h"
62
+
63
+ /* These version numbers are autogenerated, do not edit manually. */
64
+ #define CUDNN_ADV_INFER_MAJOR 8
65
+ #define CUDNN_ADV_INFER_MINOR 9
66
+ #define CUDNN_ADV_INFER_PATCH 2
67
+
68
+ #if (CUDNN_ADV_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_ADV_INFER_MINOR != CUDNN_MINOR) || \
69
+ (CUDNN_ADV_INFER_PATCH != CUDNN_PATCHLEVEL)
70
+ #error Version mismatch in cuDNN ADV INFER!!!
71
+ #endif
72
+
73
+ #if defined(__cplusplus)
74
+ extern "C" {
75
+ #endif
76
+
77
+ /* BASIC RNN API */
78
+
79
+ typedef enum {
80
+ CUDNN_FWD_MODE_INFERENCE = 0,
81
+ CUDNN_FWD_MODE_TRAINING = 1,
82
+ } cudnnForwardMode_t;
83
+
84
+ typedef enum {
85
+ CUDNN_RNN_RELU = 0, /* basic RNN cell type with ReLu activation */
86
+ CUDNN_RNN_TANH = 1, /* basic RNN cell type with tanh activation */
87
+ CUDNN_LSTM = 2, /* LSTM with optional recurrent projection and clipping */
88
+ CUDNN_GRU = 3, /* Using h' = tanh(r * Uh(t-1) + Wx) and h = (1 - z) * h' + z * h(t-1); */
89
+ } cudnnRNNMode_t;
90
+
91
+ typedef enum {
92
+ CUDNN_RNN_NO_BIAS = 0, /* rnn cell formulas do not use biases */
93
+ CUDNN_RNN_SINGLE_INP_BIAS = 1, /* rnn cell formulas use one input bias in input GEMM */
94
+ CUDNN_RNN_DOUBLE_BIAS = 2, /* default, rnn cell formulas use two bias vectors */
95
+ CUDNN_RNN_SINGLE_REC_BIAS = 3 /* rnn cell formulas use one recurrent bias in recurrent GEMM */
96
+ } cudnnRNNBiasMode_t;
97
+
98
+ typedef enum {
99
+ CUDNN_UNIDIRECTIONAL = 0, /* single direction network */
100
+ CUDNN_BIDIRECTIONAL = 1, /* output concatination at each layer */
101
+ } cudnnDirectionMode_t;
102
+
103
+ typedef enum {
104
+ CUDNN_LINEAR_INPUT = 0, /* adjustable weight matrix in first layer input GEMM */
105
+ CUDNN_SKIP_INPUT = 1, /* fixed identity matrix in the first layer input GEMM */
106
+ } cudnnRNNInputMode_t;
107
+
108
+ typedef enum {
109
+ CUDNN_RNN_CLIP_NONE = 0, /* disables LSTM cell clipping */
110
+ CUDNN_RNN_CLIP_MINMAX = 1, /* enables LSTM cell clipping */
111
+ } cudnnRNNClipMode_t;
112
+
113
+ typedef enum {
114
+ CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_UNPACKED = 0, /* padded, outer stride from one time-step to the next */
115
+ CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_PACKED = 1, /* sequence length sorted and packed as in basic RNN api */
116
+ CUDNN_RNN_DATA_LAYOUT_BATCH_MAJOR_UNPACKED = 2, /* padded, outer stride from one batch to the next */
117
+ } cudnnRNNDataLayout_t;
118
+
119
+ /* Legacy type for backward compatibility */
120
+ typedef unsigned cudnnRNNPaddingMode_t;
121
+
122
+ /* For auxFlags in cudnnSetRNNDescriptor_v8() and cudnnSetRNNPaddingMode() */
123
+ #define CUDNN_RNN_PADDED_IO_DISABLED 0
124
+ #define CUDNN_RNN_PADDED_IO_ENABLED (1U << 0)
125
+
126
+ struct cudnnRNNStruct;
127
+ typedef struct cudnnRNNStruct *cudnnRNNDescriptor_t;
128
+
129
+ struct cudnnPersistentRNNPlan;
130
+ typedef struct cudnnPersistentRNNPlan *cudnnPersistentRNNPlan_t;
131
+
132
+ struct cudnnRNNDataStruct;
133
+ typedef struct cudnnRNNDataStruct *cudnnRNNDataDescriptor_t;
134
+
135
+ cudnnStatus_t CUDNNWINAPI
136
+ cudnnCreateRNNDescriptor(cudnnRNNDescriptor_t *rnnDesc);
137
+
138
+ cudnnStatus_t CUDNNWINAPI
139
+ cudnnDestroyRNNDescriptor(cudnnRNNDescriptor_t rnnDesc);
140
+
141
+ cudnnStatus_t CUDNNWINAPI
142
+ cudnnSetRNNDescriptor_v8(cudnnRNNDescriptor_t rnnDesc,
143
+ cudnnRNNAlgo_t algo,
144
+ cudnnRNNMode_t cellMode,
145
+ cudnnRNNBiasMode_t biasMode,
146
+ cudnnDirectionMode_t dirMode,
147
+ cudnnRNNInputMode_t inputMode,
148
+ cudnnDataType_t dataType,
149
+ cudnnDataType_t mathPrec,
150
+ cudnnMathType_t mathType,
151
+ int32_t inputSize,
152
+ int32_t hiddenSize,
153
+ int32_t projSize,
154
+ int32_t numLayers,
155
+ cudnnDropoutDescriptor_t dropoutDesc,
156
+ uint32_t auxFlags);
157
+
158
+ cudnnStatus_t CUDNNWINAPI
159
+ cudnnGetRNNDescriptor_v8(cudnnRNNDescriptor_t rnnDesc,
160
+ cudnnRNNAlgo_t *algo,
161
+ cudnnRNNMode_t *cellMode,
162
+ cudnnRNNBiasMode_t *biasMode,
163
+ cudnnDirectionMode_t *dirMode,
164
+ cudnnRNNInputMode_t *inputMode,
165
+ cudnnDataType_t *dataType,
166
+ cudnnDataType_t *mathPrec,
167
+ cudnnMathType_t *mathType,
168
+ int32_t *inputSize,
169
+ int32_t *hiddenSize,
170
+ int32_t *projSize,
171
+ int32_t *numLayers,
172
+ cudnnDropoutDescriptor_t *dropoutDesc,
173
+ uint32_t *auxFlags);
174
+
175
+ /*
176
+ * mathPrec in cudnnSetRNNDescriptor_v6() specifies compute precision
177
+ * compute precision is further modified by cudnnSetRNNMatrixMathType()
178
+ * dataType in cudnnGetRNNParamsSize() and wDesc specify weight storage
179
+ * dropout is between RNN layers, not between recurrent steps
180
+ */
181
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
182
+ cudnnSetRNNDescriptor_v6(cudnnHandle_t handle,
183
+ cudnnRNNDescriptor_t rnnDesc,
184
+ const int hiddenSize,
185
+ const int numLayers,
186
+ cudnnDropoutDescriptor_t dropoutDesc,
187
+ cudnnRNNInputMode_t inputMode,
188
+ cudnnDirectionMode_t direction,
189
+ cudnnRNNMode_t cellMode,
190
+ cudnnRNNAlgo_t algo,
191
+ cudnnDataType_t mathPrec);
192
+
193
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
194
+ cudnnGetRNNDescriptor_v6(cudnnHandle_t handle,
195
+ cudnnRNNDescriptor_t rnnDesc,
196
+ int *hiddenSize,
197
+ int *numLayers,
198
+ cudnnDropoutDescriptor_t *dropoutDesc,
199
+ cudnnRNNInputMode_t *inputMode,
200
+ cudnnDirectionMode_t *direction,
201
+ cudnnRNNMode_t *cellMode,
202
+ cudnnRNNAlgo_t *algo,
203
+ cudnnDataType_t *mathPrec);
204
+
205
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
206
+ cudnnSetRNNMatrixMathType(cudnnRNNDescriptor_t rnnDesc, cudnnMathType_t mType);
207
+
208
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
209
+ cudnnGetRNNMatrixMathType(cudnnRNNDescriptor_t rnnDesc, cudnnMathType_t *mType);
210
+
211
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
212
+ cudnnSetRNNBiasMode(cudnnRNNDescriptor_t rnnDesc, cudnnRNNBiasMode_t biasMode);
213
+
214
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
215
+ cudnnGetRNNBiasMode(cudnnRNNDescriptor_t rnnDesc, cudnnRNNBiasMode_t *biasMode);
216
+
217
+ cudnnStatus_t CUDNNWINAPI
218
+ cudnnRNNSetClip_v8(cudnnRNNDescriptor_t rnnDesc,
219
+ cudnnRNNClipMode_t clipMode,
220
+ cudnnNanPropagation_t clipNanOpt,
221
+ double lclip,
222
+ double rclip);
223
+
224
+ cudnnStatus_t CUDNNWINAPI
225
+ cudnnRNNGetClip_v8(cudnnRNNDescriptor_t rnnDesc,
226
+ cudnnRNNClipMode_t *clipMode,
227
+ cudnnNanPropagation_t *clipNanOpt,
228
+ double *lclip,
229
+ double *rclip);
230
+
231
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
232
+ cudnnRNNSetClip(cudnnHandle_t handle,
233
+ cudnnRNNDescriptor_t rnnDesc,
234
+ cudnnRNNClipMode_t clipMode,
235
+ cudnnNanPropagation_t clipNanOpt,
236
+ double lclip,
237
+ double rclip);
238
+
239
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
240
+ cudnnRNNGetClip(cudnnHandle_t handle,
241
+ cudnnRNNDescriptor_t rnnDesc,
242
+ cudnnRNNClipMode_t *clipMode,
243
+ cudnnNanPropagation_t *clipNanOpt,
244
+ double *lclip,
245
+ double *rclip);
246
+
247
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
248
+ cudnnSetRNNProjectionLayers(cudnnHandle_t handle,
249
+ cudnnRNNDescriptor_t rnnDesc,
250
+ const int recProjSize,
251
+ const int outProjSize);
252
+
253
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
254
+ cudnnGetRNNProjectionLayers(cudnnHandle_t handle,
255
+ const cudnnRNNDescriptor_t rnnDesc,
256
+ int *recProjSize,
257
+ int *outProjSize);
258
+
259
+ /* Expensive. Creates the plan for the specific settings. */
260
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
261
+ cudnnCreatePersistentRNNPlan(cudnnRNNDescriptor_t rnnDesc,
262
+ const int minibatch,
263
+ const cudnnDataType_t dataType,
264
+ cudnnPersistentRNNPlan_t *plan);
265
+
266
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
267
+ cudnnDestroyPersistentRNNPlan(cudnnPersistentRNNPlan_t plan);
268
+
269
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
270
+ cudnnSetPersistentRNNPlan(cudnnRNNDescriptor_t rnnDesc, cudnnPersistentRNNPlan_t plan);
271
+
272
+ cudnnStatus_t CUDNNWINAPI
273
+ cudnnBuildRNNDynamic(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, int miniBatch);
274
+
275
+ /* dataType in weight descriptors and input descriptors is used to describe storage */
276
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
277
+ cudnnGetRNNWorkspaceSize(cudnnHandle_t handle,
278
+ const cudnnRNNDescriptor_t rnnDesc,
279
+ const int seqLength,
280
+ const cudnnTensorDescriptor_t *xDesc,
281
+ size_t *sizeInBytes);
282
+
283
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
284
+ cudnnGetRNNTrainingReserveSize(cudnnHandle_t handle,
285
+ const cudnnRNNDescriptor_t rnnDesc,
286
+ const int seqLength,
287
+ const cudnnTensorDescriptor_t *xDesc,
288
+ size_t *sizeInBytes);
289
+
290
+ cudnnStatus_t CUDNNWINAPI
291
+ cudnnGetRNNTempSpaceSizes(cudnnHandle_t handle,
292
+ cudnnRNNDescriptor_t rnnDesc,
293
+ cudnnForwardMode_t fwdMode,
294
+ cudnnRNNDataDescriptor_t xDesc,
295
+ size_t *workSpaceSize,
296
+ size_t *reserveSpaceSize);
297
+
298
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
299
+ cudnnGetRNNParamsSize(cudnnHandle_t handle,
300
+ const cudnnRNNDescriptor_t rnnDesc,
301
+ const cudnnTensorDescriptor_t xDesc,
302
+ size_t *sizeInBytes,
303
+ cudnnDataType_t dataType);
304
+
305
+ cudnnStatus_t CUDNNWINAPI
306
+ cudnnGetRNNWeightSpaceSize(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, size_t *weightSpaceSize);
307
+
308
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
309
+ cudnnGetRNNLinLayerMatrixParams(cudnnHandle_t handle,
310
+ const cudnnRNNDescriptor_t rnnDesc,
311
+ const int pseudoLayer,
312
+ const cudnnTensorDescriptor_t xDesc,
313
+ const cudnnFilterDescriptor_t wDesc,
314
+ const void *w,
315
+ const int linLayerID,
316
+ cudnnFilterDescriptor_t linLayerMatDesc,
317
+ void **linLayerMat);
318
+
319
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
320
+ cudnnGetRNNLinLayerBiasParams(cudnnHandle_t handle,
321
+ const cudnnRNNDescriptor_t rnnDesc,
322
+ const int pseudoLayer,
323
+ const cudnnTensorDescriptor_t xDesc,
324
+ const cudnnFilterDescriptor_t wDesc,
325
+ const void *w,
326
+ const int linLayerID,
327
+ cudnnFilterDescriptor_t linLayerBiasDesc,
328
+ void **linLayerBias);
329
+
330
+ cudnnStatus_t CUDNNWINAPI
331
+ cudnnGetRNNWeightParams(cudnnHandle_t handle,
332
+ cudnnRNNDescriptor_t rnnDesc,
333
+ int32_t pseudoLayer,
334
+ size_t weightSpaceSize,
335
+ const void *weightSpace,
336
+ int32_t linLayerID,
337
+ cudnnTensorDescriptor_t mDesc,
338
+ void **mAddr,
339
+ cudnnTensorDescriptor_t bDesc,
340
+ void **bAddr);
341
+
342
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
343
+ cudnnRNNForwardInference(cudnnHandle_t handle,
344
+ const cudnnRNNDescriptor_t rnnDesc,
345
+ const int seqLength,
346
+ const cudnnTensorDescriptor_t *xDesc,
347
+ const void *x,
348
+ const cudnnTensorDescriptor_t hxDesc,
349
+ const void *hx,
350
+ const cudnnTensorDescriptor_t cxDesc,
351
+ const void *cx,
352
+ const cudnnFilterDescriptor_t wDesc,
353
+ const void *w,
354
+ const cudnnTensorDescriptor_t *yDesc,
355
+ void *y,
356
+ const cudnnTensorDescriptor_t hyDesc,
357
+ void *hy,
358
+ const cudnnTensorDescriptor_t cyDesc,
359
+ void *cy,
360
+ void *workSpace,
361
+ size_t workSpaceSizeInBytes);
362
+
363
+ /* RNN EX API */
364
+
365
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
366
+ cudnnSetRNNPaddingMode(cudnnRNNDescriptor_t rnnDesc, unsigned paddingMode);
367
+
368
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
369
+ cudnnGetRNNPaddingMode(cudnnRNNDescriptor_t rnnDesc, unsigned *paddingMode);
370
+
371
+ cudnnStatus_t CUDNNWINAPI
372
+ cudnnCreateRNNDataDescriptor(cudnnRNNDataDescriptor_t *rnnDataDesc);
373
+
374
+ cudnnStatus_t CUDNNWINAPI
375
+ cudnnDestroyRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc);
376
+
377
+ cudnnStatus_t CUDNNWINAPI
378
+ cudnnSetRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc,
379
+ cudnnDataType_t dataType,
380
+ cudnnRNNDataLayout_t layout,
381
+ int maxSeqLength,
382
+ int batchSize,
383
+ int vectorSize,
384
+ const int seqLengthArray[], /* length of each sequence in the batch */
385
+ void *paddingFill); /* symbol for filling padding position in output */
386
+
387
+ cudnnStatus_t CUDNNWINAPI
388
+ cudnnGetRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc,
389
+ cudnnDataType_t *dataType,
390
+ cudnnRNNDataLayout_t *layout,
391
+ int *maxSeqLength,
392
+ int *batchSize,
393
+ int *vectorSize,
394
+ int arrayLengthRequested,
395
+ int seqLengthArray[],
396
+ void *paddingFill);
397
+
398
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
399
+ cudnnRNNForwardInferenceEx(cudnnHandle_t handle,
400
+ const cudnnRNNDescriptor_t rnnDesc,
401
+ const cudnnRNNDataDescriptor_t xDesc,
402
+ const void *x,
403
+ const cudnnTensorDescriptor_t hxDesc,
404
+ const void *hx,
405
+ const cudnnTensorDescriptor_t cxDesc,
406
+ const void *cx,
407
+ const cudnnFilterDescriptor_t wDesc,
408
+ const void *w,
409
+ const cudnnRNNDataDescriptor_t yDesc,
410
+ void *y,
411
+ const cudnnTensorDescriptor_t hyDesc,
412
+ void *hy,
413
+ const cudnnTensorDescriptor_t cyDesc,
414
+ void *cy,
415
+ const cudnnRNNDataDescriptor_t kDesc, /* reserved, should pass NULL */
416
+ const void *keys, /* reserved, should pass NULL */
417
+ const cudnnRNNDataDescriptor_t cDesc, /* reserved, should pass NULL */
418
+ void *cAttn, /* reserved, should pass NULL */
419
+ const cudnnRNNDataDescriptor_t iDesc, /* reserved, should pass NULL */
420
+ void *iAttn, /* reserved, should pass NULL */
421
+ const cudnnRNNDataDescriptor_t qDesc, /* reserved, should pass NULL */
422
+ void *queries, /* reserved, should pass NULL */
423
+ void *workSpace,
424
+ size_t workSpaceSizeInBytes);
425
+
426
+ cudnnStatus_t CUDNNWINAPI
427
+ cudnnRNNForward(cudnnHandle_t handle,
428
+ cudnnRNNDescriptor_t rnnDesc,
429
+ cudnnForwardMode_t fwdMode,
430
+ const int32_t devSeqLengths[],
431
+ cudnnRNNDataDescriptor_t xDesc,
432
+ const void *x,
433
+ cudnnRNNDataDescriptor_t yDesc,
434
+ void *y,
435
+ cudnnTensorDescriptor_t hDesc,
436
+ const void *hx,
437
+ void *hy,
438
+ cudnnTensorDescriptor_t cDesc,
439
+ const void *cx,
440
+ void *cy,
441
+ size_t weightSpaceSize,
442
+ const void *weightSpace,
443
+ size_t workSpaceSize,
444
+ void *workSpace,
445
+ size_t reserveSpaceSize,
446
+ void *reserveSpace);
447
+
448
+ /* RNN FIND API */
449
+
450
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
451
+ cudnnSetRNNAlgorithmDescriptor(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, cudnnAlgorithmDescriptor_t algoDesc);
452
+
453
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
454
+ cudnnGetRNNForwardInferenceAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count);
455
+
456
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
457
+ cudnnFindRNNForwardInferenceAlgorithmEx(cudnnHandle_t handle,
458
+ const cudnnRNNDescriptor_t rnnDesc,
459
+ const int seqLength,
460
+ const cudnnTensorDescriptor_t *xDesc,
461
+ const void *x,
462
+ const cudnnTensorDescriptor_t hxDesc,
463
+ const void *hx,
464
+ const cudnnTensorDescriptor_t cxDesc,
465
+ const void *cx,
466
+ const cudnnFilterDescriptor_t wDesc,
467
+ const void *w,
468
+ const cudnnTensorDescriptor_t *yDesc,
469
+ void *y,
470
+ const cudnnTensorDescriptor_t hyDesc,
471
+ void *hy,
472
+ const cudnnTensorDescriptor_t cyDesc,
473
+ void *cy,
474
+ const float findIntensity,
475
+ const int requestedAlgoCount,
476
+ int *returnedAlgoCount,
477
+ cudnnAlgorithmPerformance_t *perfResults,
478
+ void *workspace,
479
+ size_t workSpaceSizeInBytes);
480
+
481
+ /* Sequence data descriptor */
482
+
483
+ typedef enum {
484
+ CUDNN_SEQDATA_TIME_DIM = 0, /* index in time */
485
+ CUDNN_SEQDATA_BATCH_DIM = 1, /* index in batch */
486
+ CUDNN_SEQDATA_BEAM_DIM = 2, /* index in beam */
487
+ CUDNN_SEQDATA_VECT_DIM = 3 /* index in vector */
488
+ } cudnnSeqDataAxis_t;
489
+
490
+ struct cudnnSeqDataStruct;
491
+ typedef struct cudnnSeqDataStruct *cudnnSeqDataDescriptor_t;
492
+
493
+ #define CUDNN_SEQDATA_DIM_COUNT 4 /* dimension count */
494
+
495
+ cudnnStatus_t CUDNNWINAPI
496
+ cudnnCreateSeqDataDescriptor(cudnnSeqDataDescriptor_t *seqDataDesc);
497
+
498
+ cudnnStatus_t CUDNNWINAPI
499
+ cudnnDestroySeqDataDescriptor(cudnnSeqDataDescriptor_t seqDataDesc);
500
+
501
+ cudnnStatus_t CUDNNWINAPI
502
+ cudnnSetSeqDataDescriptor(cudnnSeqDataDescriptor_t seqDataDesc,
503
+ cudnnDataType_t dataType,
504
+ int nbDims,
505
+ const int dimA[],
506
+ const cudnnSeqDataAxis_t axes[],
507
+ size_t seqLengthArraySize,
508
+ const int seqLengthArray[],
509
+ void *paddingFill);
510
+
511
+ cudnnStatus_t CUDNNWINAPI
512
+ cudnnGetSeqDataDescriptor(const cudnnSeqDataDescriptor_t seqDataDesc,
513
+ cudnnDataType_t *dataType,
514
+ int *nbDims,
515
+ int nbDimsRequested,
516
+ int dimA[],
517
+ cudnnSeqDataAxis_t axes[],
518
+ size_t *seqLengthArraySize,
519
+ size_t seqLengthSizeRequested,
520
+ int seqLengthArray[],
521
+ void *paddingFill);
522
+
+ /* Multihead Attention */
+
+ /* Legacy type for backward compatibility */
+ typedef unsigned cudnnAttnQueryMap_t;
+
+ /*
+ * Multi-head attention options passed via 'attnMode' in cudnnSetAttnDescriptor().
+ * Use the bitwise OR operator to combine several settings listed below. Additional
+ * minor options can be added here w/o changing or introducing new API functions.
+ */
+ #define CUDNN_ATTN_QUERYMAP_ALL_TO_ONE 0 /* multiple Q-s map to a single (K,V) set when beam size > 1 */
+ #define CUDNN_ATTN_QUERYMAP_ONE_TO_ONE (1U << 0) /* multiple Q-s map to multiple (K,V) sets when beam size > 1 */
+ #define CUDNN_ATTN_DISABLE_PROJ_BIASES 0 /* no biases in attention input and output projections */
+ #define CUDNN_ATTN_ENABLE_PROJ_BIASES (1U << 1) /* use biases in attention input and output projections */
+
+ struct cudnnAttnStruct;
+ typedef struct cudnnAttnStruct *cudnnAttnDescriptor_t;
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnCreateAttnDescriptor(cudnnAttnDescriptor_t *attnDesc);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnDestroyAttnDescriptor(cudnnAttnDescriptor_t attnDesc);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnSetAttnDescriptor(cudnnAttnDescriptor_t attnDesc,
+ unsigned attnMode,
+ int nHeads,
+ double smScaler,
+ cudnnDataType_t dataType,
+ cudnnDataType_t computePrec,
+ cudnnMathType_t mathType,
+ cudnnDropoutDescriptor_t attnDropoutDesc,
+ cudnnDropoutDescriptor_t postDropoutDesc,
+ int qSize,
+ int kSize,
+ int vSize,
+ int qProjSize,
+ int kProjSize,
+ int vProjSize,
+ int oProjSize,
+ int qoMaxSeqLength,
+ int kvMaxSeqLength,
+ int maxBatchSize,
+ int maxBeamSize);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetAttnDescriptor(cudnnAttnDescriptor_t attnDesc,
+ unsigned *attnMode,
+ int *nHeads,
+ double *smScaler,
+ cudnnDataType_t *dataType,
+ cudnnDataType_t *computePrec,
+ cudnnMathType_t *mathType,
+ cudnnDropoutDescriptor_t *attnDropoutDesc,
+ cudnnDropoutDescriptor_t *postDropoutDesc,
+ int *qSize,
+ int *kSize,
+ int *vSize,
+ int *qProjSize,
+ int *kProjSize,
+ int *vProjSize,
+ int *oProjSize,
+ int *qoMaxSeqLength,
+ int *kvMaxSeqLength,
+ int *maxBatchSize,
+ int *maxBeamSize);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetMultiHeadAttnBuffers(cudnnHandle_t handle,
+ const cudnnAttnDescriptor_t attnDesc,
+ size_t *weightSizeInBytes,
+ size_t *workSpaceSizeInBytes,
+ size_t *reserveSpaceSizeInBytes);
+
+ typedef enum {
+ CUDNN_MH_ATTN_Q_WEIGHTS = 0, /* input projection weights for 'queries' */
+ CUDNN_MH_ATTN_K_WEIGHTS = 1, /* input projection weights for 'keys' */
+ CUDNN_MH_ATTN_V_WEIGHTS = 2, /* input projection weights for 'values' */
+ CUDNN_MH_ATTN_O_WEIGHTS = 3, /* output projection weights */
+ CUDNN_MH_ATTN_Q_BIASES = 4, /* input projection bias tensor for 'queries' */
+ CUDNN_MH_ATTN_K_BIASES = 5, /* input projection bias for 'keys' */
+ CUDNN_MH_ATTN_V_BIASES = 6, /* input projection bias for 'values' */
+ CUDNN_MH_ATTN_O_BIASES = 7, /* output projection biases */
+ } cudnnMultiHeadAttnWeightKind_t;
+
+ #define CUDNN_ATTN_WKIND_COUNT 8 /* Number of attention weight/bias tensors */
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetMultiHeadAttnWeights(cudnnHandle_t handle,
+ const cudnnAttnDescriptor_t attnDesc,
+ cudnnMultiHeadAttnWeightKind_t wKind,
+ size_t weightSizeInBytes,
+ const void *weights,
+ cudnnTensorDescriptor_t wDesc,
+ void **wAddr);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnMultiHeadAttnForward(cudnnHandle_t handle,
+ const cudnnAttnDescriptor_t attnDesc,
+ int currIdx,
+ const int loWinIdx[],
+ const int hiWinIdx[],
+ const int devSeqLengthsQO[],
+ const int devSeqLengthsKV[],
+ const cudnnSeqDataDescriptor_t qDesc,
+ const void *queries,
+ const void *residuals,
+ const cudnnSeqDataDescriptor_t kDesc,
+ const void *keys,
+ const cudnnSeqDataDescriptor_t vDesc,
+ const void *values,
+ const cudnnSeqDataDescriptor_t oDesc,
+ void *out,
+ size_t weightSizeInBytes,
+ const void *weights,
+ size_t workSpaceSizeInBytes,
+ void *workSpace,
+ size_t reserveSpaceSizeInBytes,
+ void *reserveSpace);
+
+ /*
+ * \brief Cross-library version checker.
+ * This function is implemented differently in each sub-library. Each sublib
+ * checks whether its own version matches that of its dependencies.
+ * \returns CUDNN_STATUS_SUCCESS if the version check passes,
+ * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent.
+ */
+ cudnnStatus_t CUDNNWINAPI
+ cudnnAdvInferVersionCheck(void);
+
+ #if defined(__cplusplus)
+ }
+ #endif
+
+ #endif /* CUDNN_ADV_INFER_H_ */
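A hedged sketch of the attention setup flow these declarations imply: configure the descriptor, then size the weight, workspace, and reserve buffers before calling cudnnMultiHeadAttnForward. All sizes are illustrative assumptions, and passing NULL dropout descriptors is assumed acceptable when dropout is disabled; production code typically creates real dropout descriptors and checks every status.

#include <cudnn.h>

void attn_setup_sketch(cudnnHandle_t handle) {
    cudnnAttnDescriptor_t attnDesc;
    cudnnCreateAttnDescriptor(&attnDesc);

    /* 4 heads over 64-wide q/k/v embeddings, no projection biases,
       one (K,V) set per query stream (assumed sizes). */
    cudnnSetAttnDescriptor(attnDesc,
                           CUDNN_ATTN_QUERYMAP_ONE_TO_ONE | CUDNN_ATTN_DISABLE_PROJ_BIASES,
                           4,                /* nHeads */
                           1.0,              /* smScaler: softmax sharpening/smoothing */
                           CUDNN_DATA_FLOAT, /* dataType */
                           CUDNN_DATA_FLOAT, /* computePrec */
                           CUDNN_DEFAULT_MATH,
                           NULL,             /* attnDropoutDesc (dropout disabled) */
                           NULL,             /* postDropoutDesc (dropout disabled) */
                           64, 64, 64,       /* qSize, kSize, vSize */
                           64, 64, 64, 64,   /* q/k/v/oProjSize */
                           32, 32,           /* qoMaxSeqLength, kvMaxSeqLength */
                           8, 1);            /* maxBatchSize, maxBeamSize */

    /* Size the three buffers consumed by cudnnMultiHeadAttnForward(). */
    size_t weightBytes = 0, workBytes = 0, reserveBytes = 0;
    cudnnGetMultiHeadAttnBuffers(handle, attnDesc, &weightBytes, &workBytes, &reserveBytes);
    /* ... cudaMalloc the buffers, then run the forward call ... */

    cudnnDestroyAttnDescriptor(attnDesc);
}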
videollama2/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_infer_v8.h ADDED
@@ -0,0 +1,571 @@
+ /*
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ /*
+ * cudnn_cnn_infer : cuDNN's basic definitions and inference CNN functions.
+ */
+
+ #if !defined(CUDNN_CNN_INFER_H_)
+ #define CUDNN_CNN_INFER_H_
+
+ #pragma once
+ #include <cuda_runtime.h>
+ #include <stdint.h>
+
+ #include "cudnn_version.h"
+ #include "cudnn_ops_infer.h"
+
+ /* These version numbers are autogenerated, do not edit manually. */
+ #define CUDNN_CNN_INFER_MAJOR 8
+ #define CUDNN_CNN_INFER_MINOR 9
+ #define CUDNN_CNN_INFER_PATCH 2
+
+ #if (CUDNN_CNN_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_CNN_INFER_MINOR != CUDNN_MINOR) || \
+ (CUDNN_CNN_INFER_PATCH != CUDNN_PATCHLEVEL)
+ #error Version mismatch in cuDNN CNN INFER!!!
+ #endif
+
+ #if defined(__cplusplus)
+ extern "C" {
+ #endif
+
+ typedef struct cudnnConvolutionStruct *cudnnConvolutionDescriptor_t;
+
+ /*
+ * convolution mode
+ */
+ typedef enum { CUDNN_CONVOLUTION = 0, CUDNN_CROSS_CORRELATION = 1 } cudnnConvolutionMode_t;
+
+ /*
+ * CUDNN Reorder
+ */
+ typedef enum {
+ CUDNN_DEFAULT_REORDER = 0,
+ CUDNN_NO_REORDER = 1,
+ } cudnnReorderType_t;
+
+ typedef struct cudnnConvolutionFwdAlgoPerfStruct {
+ cudnnConvolutionFwdAlgo_t algo;
+ cudnnStatus_t status;
+ float time;
+ size_t memory;
+ cudnnDeterminism_t determinism;
+ cudnnMathType_t mathType;
+ int reserved[3];
+ } cudnnConvolutionFwdAlgoPerf_t;
+
+ /* Create an instance of convolution descriptor */
+ cudnnStatus_t CUDNNWINAPI
+ cudnnCreateConvolutionDescriptor(cudnnConvolutionDescriptor_t *convDesc);
+
+ /* Destroy an instance of convolution descriptor */
+ cudnnStatus_t CUDNNWINAPI
+ cudnnDestroyConvolutionDescriptor(cudnnConvolutionDescriptor_t convDesc);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnSetConvolutionMathType(cudnnConvolutionDescriptor_t convDesc, cudnnMathType_t mathType);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetConvolutionMathType(cudnnConvolutionDescriptor_t convDesc, cudnnMathType_t *mathType);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnSetConvolutionGroupCount(cudnnConvolutionDescriptor_t convDesc, int groupCount);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetConvolutionGroupCount(cudnnConvolutionDescriptor_t convDesc, int *groupCount);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnSetConvolutionReorderType(cudnnConvolutionDescriptor_t convDesc, cudnnReorderType_t reorderType);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetConvolutionReorderType(cudnnConvolutionDescriptor_t convDesc, cudnnReorderType_t *reorderType);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnSetConvolution2dDescriptor(cudnnConvolutionDescriptor_t convDesc,
+ int pad_h, /* zero-padding height */
+ int pad_w, /* zero-padding width */
+ int u, /* vertical filter stride */
+ int v, /* horizontal filter stride */
+ int dilation_h, /* filter dilation in the vertical dimension */
+ int dilation_w, /* filter dilation in the horizontal dimension */
+ cudnnConvolutionMode_t mode,
+ cudnnDataType_t computeType);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetConvolution2dDescriptor(const cudnnConvolutionDescriptor_t convDesc,
+ int *pad_h, /* zero-padding height */
+ int *pad_w, /* zero-padding width */
+ int *u, /* vertical filter stride */
+ int *v, /* horizontal filter stride */
+ int *dilation_h, /* filter dilation in the vertical dimension */
+ int *dilation_w, /* filter dilation in the horizontal dimension */
+ cudnnConvolutionMode_t *mode,
+ cudnnDataType_t *computeType);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnSetConvolutionNdDescriptor(cudnnConvolutionDescriptor_t convDesc,
+ int arrayLength, /* nbDims-2 size */
+ const int padA[],
+ const int filterStrideA[],
+ const int dilationA[],
+ cudnnConvolutionMode_t mode,
+ cudnnDataType_t computeType); /* convolution data type */
+
+ /* Helper function to retrieve the parameters of a previously initialized Nd convolution descriptor */
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetConvolutionNdDescriptor(const cudnnConvolutionDescriptor_t convDesc,
+ int arrayLengthRequested,
+ int *arrayLength,
+ int padA[],
+ int strideA[],
+ int dilationA[],
+ cudnnConvolutionMode_t *mode,
+ cudnnDataType_t *computeType); /* convolution data type */
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetConvolution2dForwardOutputDim(const cudnnConvolutionDescriptor_t convDesc,
+ const cudnnTensorDescriptor_t inputTensorDesc,
+ const cudnnFilterDescriptor_t filterDesc,
+ int *n,
+ int *c,
+ int *h,
+ int *w);
+
+ /* Helper function to return the dimensions of the output tensor given a convolution descriptor */
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetConvolutionNdForwardOutputDim(const cudnnConvolutionDescriptor_t convDesc,
+ const cudnnTensorDescriptor_t inputTensorDesc,
+ const cudnnFilterDescriptor_t filterDesc,
+ int nbDims,
+ int tensorOuputDimA[]);
+
+ /* Helper function to provide the convolution forward algo that best fits the requirements */
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetConvolutionForwardAlgorithmMaxCount(cudnnHandle_t handle, int *count);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetConvolutionForwardAlgorithm_v7(cudnnHandle_t handle,
+ const cudnnTensorDescriptor_t srcDesc,
+ const cudnnFilterDescriptor_t filterDesc,
+ const cudnnConvolutionDescriptor_t convDesc,
+ const cudnnTensorDescriptor_t destDesc,
+ const int requestedAlgoCount,
+ int *returnedAlgoCount,
+ cudnnConvolutionFwdAlgoPerf_t *perfResults);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnFindConvolutionForwardAlgorithm(cudnnHandle_t handle,
+ const cudnnTensorDescriptor_t xDesc,
+ const cudnnFilterDescriptor_t wDesc,
+ const cudnnConvolutionDescriptor_t convDesc,
+ const cudnnTensorDescriptor_t yDesc,
+ const int requestedAlgoCount,
+ int *returnedAlgoCount,
+ cudnnConvolutionFwdAlgoPerf_t *perfResults);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnFindConvolutionForwardAlgorithmEx(cudnnHandle_t handle,
+ const cudnnTensorDescriptor_t xDesc,
+ const void *x,
+ const cudnnFilterDescriptor_t wDesc,
+ const void *w,
+ const cudnnConvolutionDescriptor_t convDesc,
+ const cudnnTensorDescriptor_t yDesc,
+ void *y,
+ const int requestedAlgoCount,
+ int *returnedAlgoCount,
+ cudnnConvolutionFwdAlgoPerf_t *perfResults,
+ void *workSpace,
+ size_t workSpaceSizeInBytes);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnIm2Col(cudnnHandle_t handle,
+ const cudnnTensorDescriptor_t xDesc,
+ const void *x,
+ const cudnnFilterDescriptor_t wDesc,
+ const cudnnConvolutionDescriptor_t convDesc,
+ void *colBuffer);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnReorderFilterAndBias(cudnnHandle_t handle,
+ const cudnnFilterDescriptor_t filterDesc,
+ cudnnReorderType_t reorderType,
+ const void *filterData,
+ void *reorderedFilterData,
+ int reorderBias,
+ const void *biasData,
+ void *reorderedBiasData);
+
+ /* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle_t handle,
+ const cudnnTensorDescriptor_t xDesc,
+ const cudnnFilterDescriptor_t wDesc,
+ const cudnnConvolutionDescriptor_t convDesc,
+ const cudnnTensorDescriptor_t yDesc,
+ cudnnConvolutionFwdAlgo_t algo,
+ size_t *sizeInBytes);
+
+ /* Convolution functions: All of the form "output = alpha * Op(inputs) + beta * output" */
+
+ /* Function to perform the forward pass for batch convolution */
+ cudnnStatus_t CUDNNWINAPI
+ cudnnConvolutionForward(cudnnHandle_t handle,
+ const void *alpha,
+ const cudnnTensorDescriptor_t xDesc,
+ const void *x,
+ const cudnnFilterDescriptor_t wDesc,
+ const void *w,
+ const cudnnConvolutionDescriptor_t convDesc,
+ cudnnConvolutionFwdAlgo_t algo,
+ void *workSpace,
+ size_t workSpaceSizeInBytes,
+ const void *beta,
+ const cudnnTensorDescriptor_t yDesc,
+ void *y);
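Taken together, the declarations above give the usual inference flow: ask the heuristic for an algorithm, size the workspace, then launch. A minimal sketch follows; the descriptors and device pointers are assumed to be created and populated elsewhere, and statuses go unchecked for brevity.

#include <cudnn.h>

void conv_forward_sketch(cudnnHandle_t handle,
                         cudnnTensorDescriptor_t xDesc, const void *x,
                         cudnnFilterDescriptor_t wDesc, const void *w,
                         cudnnConvolutionDescriptor_t convDesc,
                         cudnnTensorDescriptor_t yDesc, void *y,
                         void *workSpace, size_t workSpaceSize) {
    /* Ask the heuristic for its best candidate algorithm. */
    int returned = 0;
    cudnnConvolutionFwdAlgoPerf_t perf;
    cudnnGetConvolutionForwardAlgorithm_v7(handle, xDesc, wDesc, convDesc, yDesc,
                                           1, &returned, &perf);
    if (returned < 1 || perf.status != CUDNN_STATUS_SUCCESS) return;

    /* Verify the preallocated workspace suffices for that algorithm. */
    size_t needed = 0;
    cudnnGetConvolutionForwardWorkspaceSize(handle, xDesc, wDesc, convDesc, yDesc,
                                            perf.algo, &needed);
    if (needed > workSpaceSize) return; /* real code would reallocate */

    /* y = alpha * conv(x, w) + beta * y */
    const float alpha = 1.0f, beta = 0.0f;
    cudnnConvolutionForward(handle, &alpha, xDesc, x, wDesc, w, convDesc,
                            perf.algo, workSpace, workSpaceSize, &beta, yDesc, y);
}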
+
+ /* Fused conv/bias/activation operation : y = Act( alpha1 * conv(x) + alpha2 * z + bias ) */
+ cudnnStatus_t CUDNNWINAPI
+ cudnnConvolutionBiasActivationForward(cudnnHandle_t handle,
+ const void *alpha1,
+ const cudnnTensorDescriptor_t xDesc,
+ const void *x,
+ const cudnnFilterDescriptor_t wDesc,
+ const void *w,
+ const cudnnConvolutionDescriptor_t convDesc,
+ cudnnConvolutionFwdAlgo_t algo,
+ void *workSpace,
+ size_t workSpaceSizeInBytes,
+ const void *alpha2,
+ const cudnnTensorDescriptor_t zDesc,
+ const void *z,
+ const cudnnTensorDescriptor_t biasDesc,
+ const void *bias,
+ const cudnnActivationDescriptor_t activationDesc,
+ const cudnnTensorDescriptor_t yDesc,
+ void *y);
+
+ /* Helper function to provide the convolution backward data algo that best fits the requirements */
+
+ typedef struct cudnnConvolutionBwdDataAlgoPerfStruct {
+ cudnnConvolutionBwdDataAlgo_t algo;
+ cudnnStatus_t status;
+ float time;
+ size_t memory;
+ cudnnDeterminism_t determinism;
+ cudnnMathType_t mathType;
+ int reserved[3];
+ } cudnnConvolutionBwdDataAlgoPerf_t;
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetConvolutionBackwardDataAlgorithmMaxCount(cudnnHandle_t handle, int *count);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnFindConvolutionBackwardDataAlgorithm(cudnnHandle_t handle,
+ const cudnnFilterDescriptor_t wDesc,
+ const cudnnTensorDescriptor_t dyDesc,
+ const cudnnConvolutionDescriptor_t convDesc,
+ const cudnnTensorDescriptor_t dxDesc,
+ const int requestedAlgoCount,
+ int *returnedAlgoCount,
+ cudnnConvolutionBwdDataAlgoPerf_t *perfResults);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnFindConvolutionBackwardDataAlgorithmEx(cudnnHandle_t handle,
+ const cudnnFilterDescriptor_t wDesc,
+ const void *w,
+ const cudnnTensorDescriptor_t dyDesc,
+ const void *dy,
+ const cudnnConvolutionDescriptor_t convDesc,
+ const cudnnTensorDescriptor_t dxDesc,
+ void *dx,
+ const int requestedAlgoCount,
+ int *returnedAlgoCount,
+ cudnnConvolutionBwdDataAlgoPerf_t *perfResults,
+ void *workSpace,
+ size_t workSpaceSizeInBytes);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetConvolutionBackwardDataAlgorithm_v7(cudnnHandle_t handle,
+ const cudnnFilterDescriptor_t filterDesc,
+ const cudnnTensorDescriptor_t diffDesc,
+ const cudnnConvolutionDescriptor_t convDesc,
+ const cudnnTensorDescriptor_t gradDesc,
+ const int requestedAlgoCount,
+ int *returnedAlgoCount,
+ cudnnConvolutionBwdDataAlgoPerf_t *perfResults);
+
+ /*
+ * convolution algorithm (which potentially requires some workspace)
+ */
+
+ /* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetConvolutionBackwardDataWorkspaceSize(cudnnHandle_t handle,
+ const cudnnFilterDescriptor_t wDesc,
+ const cudnnTensorDescriptor_t dyDesc,
+ const cudnnConvolutionDescriptor_t convDesc,
+ const cudnnTensorDescriptor_t dxDesc,
+ cudnnConvolutionBwdDataAlgo_t algo,
+ size_t *sizeInBytes);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnConvolutionBackwardData(cudnnHandle_t handle,
+ const void *alpha,
+ const cudnnFilterDescriptor_t wDesc,
+ const void *w,
+ const cudnnTensorDescriptor_t dyDesc,
+ const void *dy,
+ const cudnnConvolutionDescriptor_t convDesc,
+ cudnnConvolutionBwdDataAlgo_t algo,
+ void *workSpace,
+ size_t workSpaceSizeInBytes,
+ const void *beta,
+ const cudnnTensorDescriptor_t dxDesc,
+ void *dx);
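The data-gradient (dgrad) path mirrors the forward flow. A sketch under the same assumptions, with a fixed algorithm instead of a heuristic pick:

#include <cudnn.h>
#include <cuda_runtime.h>

void conv_dgrad_sketch(cudnnHandle_t handle,
                       cudnnFilterDescriptor_t wDesc, const void *w,
                       cudnnTensorDescriptor_t dyDesc, const void *dy,
                       cudnnConvolutionDescriptor_t convDesc,
                       cudnnTensorDescriptor_t dxDesc, void *dx) {
    cudnnConvolutionBwdDataAlgo_t algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;

    /* Size and allocate the workspace this algorithm needs. */
    size_t wsSize = 0;
    cudnnGetConvolutionBackwardDataWorkspaceSize(handle, wDesc, dyDesc, convDesc,
                                                 dxDesc, algo, &wsSize);
    void *workSpace = NULL;
    cudaMalloc(&workSpace, wsSize);

    /* dx = alpha * dgrad(w, dy) + beta * dx */
    const float alpha = 1.0f, beta = 0.0f;
    cudnnConvolutionBackwardData(handle, &alpha, wDesc, w, dyDesc, dy, convDesc,
                                 algo, workSpace, wsSize, &beta, dxDesc, dx);
    cudaFree(workSpace);
}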
+
+ /* Helper function to calculate folding descriptors for dgrad */
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetFoldedConvBackwardDataDescriptors(const cudnnHandle_t handle,
+ const cudnnFilterDescriptor_t filterDesc,
+ const cudnnTensorDescriptor_t diffDesc,
+ const cudnnConvolutionDescriptor_t convDesc,
+ const cudnnTensorDescriptor_t gradDesc,
+ const cudnnTensorFormat_t transformFormat,
+ cudnnFilterDescriptor_t foldedFilterDesc,
+ cudnnTensorDescriptor_t paddedDiffDesc,
+ cudnnConvolutionDescriptor_t foldedConvDesc,
+ cudnnTensorDescriptor_t foldedGradDesc,
+ cudnnTensorTransformDescriptor_t filterFoldTransDesc,
+ cudnnTensorTransformDescriptor_t diffPadTransDesc,
+ cudnnTensorTransformDescriptor_t gradFoldTransDesc,
+ cudnnTensorTransformDescriptor_t gradUnfoldTransDesc);
+
+ /* cudnnFusedOps... */
+ struct cudnnFusedOpsConstParamStruct;
+ typedef struct cudnnFusedOpsConstParamStruct *cudnnFusedOpsConstParamPack_t;
+
+ struct cudnnFusedOpsVariantParamStruct;
+ typedef struct cudnnFusedOpsVariantParamStruct *cudnnFusedOpsVariantParamPack_t;
+
+ struct cudnnFusedOpsPlanStruct;
+ typedef struct cudnnFusedOpsPlanStruct *cudnnFusedOpsPlan_t;
+
+ typedef enum {
+ /* each op in [ ] can be disabled by passing NULL ptr */
+ /* [per channel scale], [per channel bias], [activation], convolution, [generate BN stats] */
+ CUDNN_FUSED_SCALE_BIAS_ACTIVATION_CONV_BNSTATS = 0,
+ /* [per channel scale], [per channel bias], [activation], convolutionBackwardWeights */
+ CUDNN_FUSED_SCALE_BIAS_ACTIVATION_WGRAD = 1,
+ /* utility for BN training in BN-conv fusion */
+ /* computes the equivalent scale and bias from ySum ySqSum and learned scale, bias */
+ /* optionally update running stats and generate saved stats */
+ CUDNN_FUSED_BN_FINALIZE_STATISTICS_TRAINING = 2,
+ /* utility for BN inference in BN-conv fusion */
+ /* computes the equivalent scale and bias from learned running stats and learned scale, bias */
+ CUDNN_FUSED_BN_FINALIZE_STATISTICS_INFERENCE = 3,
+ /* reserved for future use: convolution, [per channel scale], [per channel bias], [residual add], [activation] */
+ CUDNN_FUSED_CONV_SCALE_BIAS_ADD_ACTIVATION = 4,
+ /* reserved for future use: [per channel scale], [per channel bias], [residual add], activation, bitmask */
+ CUDNN_FUSED_SCALE_BIAS_ADD_ACTIVATION_GEN_BITMASK = 5,
+ /* reserved for future use */
+ CUDNN_FUSED_DACTIVATION_FORK_DBATCHNORM = 6,
+ } cudnnFusedOps_t;
+
+ typedef enum {
+ /* set XDESC: pass previously initialized cudnnTensorDescriptor_t */
+ /* get XDESC: pass previously created cudnnTensorDescriptor_t */
+ CUDNN_PARAM_XDESC = 0,
+ /* set/get XDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
+ CUDNN_PARAM_XDATA_PLACEHOLDER = 1,
+ /* set/get BN_MODE: pass cudnnBatchNormMode_t* */
+ CUDNN_PARAM_BN_MODE = 2,
+ /* set CUDNN_PARAM_BN_EQSCALEBIAS_DESC: pass previously initialized cudnnTensorDescriptor_t */
+ /* get CUDNN_PARAM_BN_EQSCALEBIAS_DESC: pass previously created cudnnTensorDescriptor_t */
+ CUDNN_PARAM_BN_EQSCALEBIAS_DESC = 3,
+ /* set/get BN_EQSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
+ CUDNN_PARAM_BN_EQSCALE_PLACEHOLDER = 4,
+ /* set/get BN_EQBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
+ CUDNN_PARAM_BN_EQBIAS_PLACEHOLDER = 5,
+ /* set ACTIVATION_DESC: pass previously initialized cudnnActivationDescriptor_t */
+ /* get ACTIVATION_DESC: pass previously created cudnnActivationDescriptor_t */
+ CUDNN_PARAM_ACTIVATION_DESC = 6,
+ /* set CONV_DESC: pass previously initialized cudnnConvolutionDescriptor_t */
+ /* get CONV_DESC: pass previously created cudnnConvolutionDescriptor_t */
+ CUDNN_PARAM_CONV_DESC = 7,
+ /* set WDESC: pass previously initialized cudnnFilterDescriptor_t */
+ /* get WDESC: pass previously created cudnnFilterDescriptor_t */
+ CUDNN_PARAM_WDESC = 8,
+ /* set/get WDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
+ CUDNN_PARAM_WDATA_PLACEHOLDER = 9,
+ /* set DWDESC: pass previously initialized cudnnFilterDescriptor_t */
+ /* get DWDESC: pass previously created cudnnFilterDescriptor_t */
+ CUDNN_PARAM_DWDESC = 10,
+ /* set/get DWDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
+ CUDNN_PARAM_DWDATA_PLACEHOLDER = 11,
+ /* set YDESC: pass previously initialized cudnnTensorDescriptor_t */
+ /* get YDESC: pass previously created cudnnTensorDescriptor_t */
+ CUDNN_PARAM_YDESC = 12,
+ /* set/get YDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
+ CUDNN_PARAM_YDATA_PLACEHOLDER = 13,
+ /* set DYDESC: pass previously initialized cudnnTensorDescriptor_t */
+ /* get DYDESC: pass previously created cudnnTensorDescriptor_t */
+ CUDNN_PARAM_DYDESC = 14,
+ /* set/get DYDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
+ CUDNN_PARAM_DYDATA_PLACEHOLDER = 15,
+ /* set YSTATS_DESC: pass previously initialized cudnnTensorDescriptor_t */
+ /* get YSTATS_DESC: pass previously created cudnnTensorDescriptor_t */
+ CUDNN_PARAM_YSTATS_DESC = 16,
+ /* set/get YSUM_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
+ CUDNN_PARAM_YSUM_PLACEHOLDER = 17,
+ /* set/get YSQSUM_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
+ CUDNN_PARAM_YSQSUM_PLACEHOLDER = 18,
+ /* set CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC: pass previously initialized cudnnTensorDescriptor_t */
+ /* get CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC: pass previously created cudnnTensorDescriptor_t */
+ CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC = 19,
+ /* set/get CUDNN_PARAM_BN_SCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
+ CUDNN_PARAM_BN_SCALE_PLACEHOLDER = 20,
+ /* set/get CUDNN_PARAM_BN_BIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
+ CUDNN_PARAM_BN_BIAS_PLACEHOLDER = 21,
+ /* set/get CUDNN_PARAM_BN_SAVED_MEAN_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
+ CUDNN_PARAM_BN_SAVED_MEAN_PLACEHOLDER = 22,
+ /* set/get CUDNN_PARAM_BN_SAVED_INVSTD_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
+ CUDNN_PARAM_BN_SAVED_INVSTD_PLACEHOLDER = 23,
+ /* set/get CUDNN_PARAM_BN_RUNNING_MEAN_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
+ CUDNN_PARAM_BN_RUNNING_MEAN_PLACEHOLDER = 24,
+ /* set/get CUDNN_PARAM_BN_RUNNING_VAR_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
+ CUDNN_PARAM_BN_RUNNING_VAR_PLACEHOLDER = 25,
+
+ /* set ZDESC: pass previously initialized cudnnTensorDescriptor_t */
+ /* get ZDESC: pass previously created cudnnTensorDescriptor_t */
+ CUDNN_PARAM_ZDESC = 26,
+ /* set/get ZDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
+ CUDNN_PARAM_ZDATA_PLACEHOLDER = 27,
+ /* set BN_Z_EQSCALEBIAS_DESC: pass previously initialized cudnnTensorDescriptor_t */
+ /* get BN_Z_EQSCALEBIAS_DESC: pass previously created cudnnTensorDescriptor_t */
+ CUDNN_PARAM_BN_Z_EQSCALEBIAS_DESC = 28,
+ /* set/get BN_Z_EQSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
+ CUDNN_PARAM_BN_Z_EQSCALE_PLACEHOLDER = 29,
+ /* set/get BN_Z_EQBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
+ CUDNN_PARAM_BN_Z_EQBIAS_PLACEHOLDER = 30,
+
+ /* set ACTIVATION_BITMASK_DESC: pass previously initialized cudnnTensorDescriptor_t */
+ /* get ACTIVATION_BITMASK_DESC: pass previously created cudnnTensorDescriptor_t */
+ CUDNN_PARAM_ACTIVATION_BITMASK_DESC = 31,
+ /* set/get ACTIVATION_BITMASK_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
+ CUDNN_PARAM_ACTIVATION_BITMASK_PLACEHOLDER = 32,
+
+ /* set DXDESC: pass previously initialized cudnnTensorDescriptor_t */
+ /* get DXDESC: pass previously created cudnnTensorDescriptor_t */
+ CUDNN_PARAM_DXDESC = 33,
+ /* set/get DXDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
+ CUDNN_PARAM_DXDATA_PLACEHOLDER = 34,
+ /* set DZDESC: pass previously initialized cudnnTensorDescriptor_t */
+ /* get DZDESC: pass previously created cudnnTensorDescriptor_t */
+ CUDNN_PARAM_DZDESC = 35,
+ /* set/get DZDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
+ CUDNN_PARAM_DZDATA_PLACEHOLDER = 36,
+ /* set/get CUDNN_PARAM_BN_DSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
+ CUDNN_PARAM_BN_DSCALE_PLACEHOLDER = 37,
+ /* set/get CUDNN_PARAM_BN_DBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
+ CUDNN_PARAM_BN_DBIAS_PLACEHOLDER = 38,
+ } cudnnFusedOpsConstParamLabel_t;
+
+ typedef enum {
+ CUDNN_PTR_NULL = 0,
+ CUDNN_PTR_ELEM_ALIGNED = 1,
+ CUDNN_PTR_16B_ALIGNED = 2,
+ } cudnnFusedOpsPointerPlaceHolder_t;
+
+ typedef enum {
+ /* set: pass void* pointing to dev memory */
+ /* get: pass void** pointing to host memory */
+ CUDNN_PTR_XDATA = 0,
+ CUDNN_PTR_BN_EQSCALE = 1,
+ CUDNN_PTR_BN_EQBIAS = 2,
+ CUDNN_PTR_WDATA = 3,
+ CUDNN_PTR_DWDATA = 4,
+ CUDNN_PTR_YDATA = 5,
+ CUDNN_PTR_DYDATA = 6,
+ CUDNN_PTR_YSUM = 7,
+ CUDNN_PTR_YSQSUM = 8,
+ CUDNN_PTR_WORKSPACE = 9,
+ CUDNN_PTR_BN_SCALE = 10,
+ CUDNN_PTR_BN_BIAS = 11,
+ CUDNN_PTR_BN_SAVED_MEAN = 12,
+ CUDNN_PTR_BN_SAVED_INVSTD = 13,
+ CUDNN_PTR_BN_RUNNING_MEAN = 14,
+ CUDNN_PTR_BN_RUNNING_VAR = 15,
+ CUDNN_PTR_ZDATA = 16,
+ CUDNN_PTR_BN_Z_EQSCALE = 17,
+ CUDNN_PTR_BN_Z_EQBIAS = 18,
+ CUDNN_PTR_ACTIVATION_BITMASK = 19,
+ CUDNN_PTR_DXDATA = 20,
+ CUDNN_PTR_DZDATA = 21,
+ CUDNN_PTR_BN_DSCALE = 22,
+ CUDNN_PTR_BN_DBIAS = 23,
+
+ /* set/get: pass size_t* pointing to host memory */
+ CUDNN_SCALAR_SIZE_T_WORKSPACE_SIZE_IN_BYTES = 100,
+ /* set/get: pass int64_t* pointing to host memory */
+ CUDNN_SCALAR_INT64_T_BN_ACCUMULATION_COUNT = 101,
+ /* set/get: pass double* pointing to host memory */
+ CUDNN_SCALAR_DOUBLE_BN_EXP_AVG_FACTOR = 102,
+ /* set/get: pass double* pointing to host memory */
+ CUDNN_SCALAR_DOUBLE_BN_EPSILON = 103,
+ } cudnnFusedOpsVariantParamLabel_t;
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnCnnInferVersionCheck(void);
+
+ #if defined(__cplusplus)
+ }
+ #endif
+
+ #endif /* CUDNN_CNN_INFER_H_ */
videollama2/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_train.h ADDED
@@ -0,0 +1,219 @@
+ /*
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ /*
+ * cudnn_cnn_train : cuDNN's basic definitions and training CNN functions.
+ */
+
+ #pragma once
+ #include <cuda_runtime.h>
+ #include <stdint.h>
+
+ #include "cudnn_version.h"
+ #include "cudnn_ops_infer.h"
+ #include "cudnn_ops_train.h"
+ #include "cudnn_cnn_infer.h"
+
+ /* These version numbers are autogenerated, do not edit manually. */
+ #define CUDNN_CNN_TRAIN_MAJOR 8
+ #define CUDNN_CNN_TRAIN_MINOR 9
+ #define CUDNN_CNN_TRAIN_PATCH 2
+
+ #if (CUDNN_CNN_TRAIN_MAJOR != CUDNN_MAJOR) || (CUDNN_CNN_TRAIN_MINOR != CUDNN_MINOR) || \
+ (CUDNN_CNN_TRAIN_PATCH != CUDNN_PATCHLEVEL)
+ #error Version mismatch in cuDNN CNN TRAIN!!!
+ #endif
+
+ #if defined(__cplusplus)
+ extern "C" {
+ #endif
+
+ /* Helper function to provide the convolution backward filter algo that best fits the requirements */
+
+ typedef struct cudnnConvolutionBwdFilterAlgoPerfStruct {
+ cudnnConvolutionBwdFilterAlgo_t algo;
+ cudnnStatus_t status;
+ float time;
+ size_t memory;
+ cudnnDeterminism_t determinism;
+ cudnnMathType_t mathType;
+ int reserved[3];
+ } cudnnConvolutionBwdFilterAlgoPerf_t;
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetConvolutionBackwardFilterAlgorithmMaxCount(cudnnHandle_t handle, int *count);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnFindConvolutionBackwardFilterAlgorithm(cudnnHandle_t handle,
+ const cudnnTensorDescriptor_t xDesc,
+ const cudnnTensorDescriptor_t dyDesc,
+ const cudnnConvolutionDescriptor_t convDesc,
+ const cudnnFilterDescriptor_t dwDesc,
+ const int requestedAlgoCount,
+ int *returnedAlgoCount,
+ cudnnConvolutionBwdFilterAlgoPerf_t *perfResults);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnFindConvolutionBackwardFilterAlgorithmEx(cudnnHandle_t handle,
+ const cudnnTensorDescriptor_t xDesc,
+ const void *x,
+ const cudnnTensorDescriptor_t dyDesc,
+ const void *y,
+ const cudnnConvolutionDescriptor_t convDesc,
+ const cudnnFilterDescriptor_t dwDesc,
+ void *dw,
+ const int requestedAlgoCount,
+ int *returnedAlgoCount,
+ cudnnConvolutionBwdFilterAlgoPerf_t *perfResults,
+ void *workSpace,
+ size_t workSpaceSizeInBytes);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetConvolutionBackwardFilterAlgorithm_v7(cudnnHandle_t handle,
+ const cudnnTensorDescriptor_t srcDesc,
+ const cudnnTensorDescriptor_t diffDesc,
+ const cudnnConvolutionDescriptor_t convDesc,
+ const cudnnFilterDescriptor_t gradDesc,
+ const int requestedAlgoCount,
+ int *returnedAlgoCount,
+ cudnnConvolutionBwdFilterAlgoPerf_t *perfResults);
+
+ /*
+ * convolution algorithm (which potentially requires some workspace)
+ */
+
+ /* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnnHandle_t handle,
+ const cudnnTensorDescriptor_t xDesc,
+ const cudnnTensorDescriptor_t dyDesc,
+ const cudnnConvolutionDescriptor_t convDesc,
+ const cudnnFilterDescriptor_t gradDesc,
+ cudnnConvolutionBwdFilterAlgo_t algo,
+ size_t *sizeInBytes);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnConvolutionBackwardFilter(cudnnHandle_t handle,
+ const void *alpha,
+ const cudnnTensorDescriptor_t xDesc,
+ const void *x,
+ const cudnnTensorDescriptor_t dyDesc,
+ const void *dy,
+ const cudnnConvolutionDescriptor_t convDesc,
+ cudnnConvolutionBwdFilterAlgo_t algo,
+ void *workSpace,
+ size_t workSpaceSizeInBytes,
+ const void *beta,
+ const cudnnFilterDescriptor_t dwDesc,
+ void *dw);
+
+ /* Function to compute the bias gradient for batch convolution */
+ cudnnStatus_t CUDNNWINAPI
+ cudnnConvolutionBackwardBias(cudnnHandle_t handle,
+ const void *alpha,
+ const cudnnTensorDescriptor_t dyDesc,
+ const void *dy,
+ const void *beta,
+ const cudnnTensorDescriptor_t dbDesc,
+ void *db);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnCreateFusedOpsConstParamPack(cudnnFusedOpsConstParamPack_t *constPack, cudnnFusedOps_t ops);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnDestroyFusedOpsConstParamPack(cudnnFusedOpsConstParamPack_t constPack);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnSetFusedOpsConstParamPackAttribute(cudnnFusedOpsConstParamPack_t constPack,
+ cudnnFusedOpsConstParamLabel_t paramLabel,
+ const void *param);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetFusedOpsConstParamPackAttribute(const cudnnFusedOpsConstParamPack_t constPack,
+ cudnnFusedOpsConstParamLabel_t paramLabel,
+ void *param,
+ int *isNULL);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnCreateFusedOpsVariantParamPack(cudnnFusedOpsVariantParamPack_t *varPack, cudnnFusedOps_t ops);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnDestroyFusedOpsVariantParamPack(cudnnFusedOpsVariantParamPack_t varPack);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnSetFusedOpsVariantParamPackAttribute(cudnnFusedOpsVariantParamPack_t varPack,
+ cudnnFusedOpsVariantParamLabel_t paramLabel,
+ void *ptr);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetFusedOpsVariantParamPackAttribute(const cudnnFusedOpsVariantParamPack_t varPack,
+ cudnnFusedOpsVariantParamLabel_t paramLabel,
+ void *ptr);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnCreateFusedOpsPlan(cudnnFusedOpsPlan_t *plan, cudnnFusedOps_t ops);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnDestroyFusedOpsPlan(cudnnFusedOpsPlan_t plan);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnMakeFusedOpsPlan(cudnnHandle_t handle,
+ cudnnFusedOpsPlan_t plan,
+ const cudnnFusedOpsConstParamPack_t constPack,
+ size_t *workspaceSizeInBytes);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnFusedOpsExecute(cudnnHandle_t handle, const cudnnFusedOpsPlan_t plan, cudnnFusedOpsVariantParamPack_t varPack);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnCnnTrainVersionCheck(void);
+
+ #if defined(__cplusplus)
+ }
+ #endif
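A hedged sketch of the weight-gradient (wgrad) and bias-gradient calls declared above, again with pre-built descriptors, a fixed algorithm, and no status checking:

#include <cudnn.h>
#include <cuda_runtime.h>

void conv_wgrad_sketch(cudnnHandle_t handle,
                       cudnnTensorDescriptor_t xDesc, const void *x,
                       cudnnTensorDescriptor_t dyDesc, const void *dy,
                       cudnnConvolutionDescriptor_t convDesc,
                       cudnnFilterDescriptor_t dwDesc, void *dw,
                       cudnnTensorDescriptor_t dbDesc, void *db) {
    cudnnConvolutionBwdFilterAlgo_t algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;

    /* Size and allocate the workspace this algorithm needs. */
    size_t wsSize = 0;
    cudnnGetConvolutionBackwardFilterWorkspaceSize(handle, xDesc, dyDesc, convDesc,
                                                   dwDesc, algo, &wsSize);
    void *workSpace = NULL;
    cudaMalloc(&workSpace, wsSize);

    const float alpha = 1.0f, beta = 0.0f;
    /* dw = alpha * wgrad(x, dy) + beta * dw */
    cudnnConvolutionBackwardFilter(handle, &alpha, xDesc, x, dyDesc, dy, convDesc,
                                   algo, workSpace, wsSize, &beta, dwDesc, dw);
    /* db = alpha * sum of dy over the non-channel dimensions + beta * db */
    cudnnConvolutionBackwardBias(handle, &alpha, dyDesc, dy, &beta, dbDesc, db);

    cudaFree(workSpace);
}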
videollama2/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_train.h ADDED
@@ -0,0 +1,501 @@
+ /*
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ /*
+ * cudnn_ops_train : cuDNN's basic training operations and algorithms.
+ */
+
+ #if !defined(CUDNN_OPS_TRAIN_H_)
+ #define CUDNN_OPS_TRAIN_H_
+
+ #include <cuda_runtime.h>
+ #include <stdint.h>
+
+ #include "cudnn_version.h"
+ #include "cudnn_ops_infer.h"
+
+ /* These version numbers are autogenerated, do not edit manually. */
+ #define CUDNN_OPS_TRAIN_MAJOR 8
+ #define CUDNN_OPS_TRAIN_MINOR 9
+ #define CUDNN_OPS_TRAIN_PATCH 2
+
+ #if (CUDNN_OPS_TRAIN_MAJOR != CUDNN_MAJOR) || (CUDNN_OPS_TRAIN_MINOR != CUDNN_MINOR) || \
+ (CUDNN_OPS_TRAIN_PATCH != CUDNN_PATCHLEVEL)
+ #error Version mismatch in cuDNN OPS TRAIN!!!
+ #endif
+
+ #if defined(__cplusplus)
+ extern "C" {
+ #endif
+
+ /* Function to perform backward softmax */
+ cudnnStatus_t CUDNNWINAPI
+ cudnnSoftmaxBackward(cudnnHandle_t handle,
+ cudnnSoftmaxAlgorithm_t algo,
+ cudnnSoftmaxMode_t mode,
+ const void *alpha,
+ const cudnnTensorDescriptor_t yDesc,
+ const void *y,
+ const cudnnTensorDescriptor_t dyDesc,
+ const void *dy,
+ const void *beta,
+ const cudnnTensorDescriptor_t dxDesc,
+ void *dx);
+
+ /* Function to perform backward pooling */
+ cudnnStatus_t CUDNNWINAPI
+ cudnnPoolingBackward(cudnnHandle_t handle,
+ const cudnnPoolingDescriptor_t poolingDesc,
+ const void *alpha,
+ const cudnnTensorDescriptor_t yDesc,
+ const void *y,
+ const cudnnTensorDescriptor_t dyDesc,
+ const void *dy,
+ const cudnnTensorDescriptor_t xDesc,
+ const void *x,
+ const void *beta,
+ const cudnnTensorDescriptor_t dxDesc,
+ void *dx);
+
+ /* Function to perform backward activation */
+ cudnnStatus_t CUDNNWINAPI
+ cudnnActivationBackward(cudnnHandle_t handle,
+ cudnnActivationDescriptor_t activationDesc,
+ const void *alpha,
+ const cudnnTensorDescriptor_t yDesc,
+ const void *y,
+ const cudnnTensorDescriptor_t dyDesc,
+ const void *dy,
+ const cudnnTensorDescriptor_t xDesc,
+ const void *x,
+ const void *beta,
+ const cudnnTensorDescriptor_t dxDesc,
+ void *dx);
+
+ /* LRN cross-channel backward computation. Double parameters cast to tensor data type */
+ cudnnStatus_t CUDNNWINAPI
+ cudnnLRNCrossChannelBackward(cudnnHandle_t handle,
+ cudnnLRNDescriptor_t normDesc,
+ cudnnLRNMode_t lrnMode,
+ const void *alpha,
+ const cudnnTensorDescriptor_t yDesc,
+ const void *y,
+ const cudnnTensorDescriptor_t dyDesc,
+ const void *dy,
+ const cudnnTensorDescriptor_t xDesc,
+ const void *x,
+ const void *beta,
+ const cudnnTensorDescriptor_t dxDesc,
+ void *dx);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnDivisiveNormalizationBackward(cudnnHandle_t handle,
+ cudnnLRNDescriptor_t normDesc,
+ cudnnDivNormMode_t mode,
+ const void *alpha,
+ const cudnnTensorDescriptor_t xDesc, /* same desc for x, means, dy, temp, temp2 */
+ const void *x,
+ const void *means, /* if NULL, means are assumed to be zero */
+ const void *dy,
+ void *temp,
+ void *temp2,
+ const void *beta,
+ const cudnnTensorDescriptor_t dXdMeansDesc, /* same desc for dx, dMeans */
+ void *dx, /* output x differential */
+ void *dMeans); /* output means differential, can be NULL */
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize(cudnnHandle_t handle,
+ cudnnBatchNormMode_t mode,
+ cudnnBatchNormOps_t bnOps,
+ const cudnnTensorDescriptor_t xDesc,
+ const cudnnTensorDescriptor_t zDesc,
+ const cudnnTensorDescriptor_t yDesc,
+ const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc,
+ const cudnnActivationDescriptor_t activationDesc,
+ size_t *sizeInBytes);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetBatchNormalizationBackwardExWorkspaceSize(cudnnHandle_t handle,
+ cudnnBatchNormMode_t mode,
+ cudnnBatchNormOps_t bnOps,
+ const cudnnTensorDescriptor_t xDesc,
+ const cudnnTensorDescriptor_t yDesc,
+ const cudnnTensorDescriptor_t dyDesc,
+ const cudnnTensorDescriptor_t dzDesc,
+ const cudnnTensorDescriptor_t dxDesc,
+ const cudnnTensorDescriptor_t dBnScaleBiasDesc,
+ const cudnnActivationDescriptor_t activationDesc,
+ size_t *sizeInBytes);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetBatchNormalizationTrainingExReserveSpaceSize(cudnnHandle_t handle,
+ cudnnBatchNormMode_t mode,
+ cudnnBatchNormOps_t bnOps,
+ const cudnnActivationDescriptor_t activationDesc,
+ const cudnnTensorDescriptor_t xDesc,
+ size_t *sizeInBytes);
+
+ /* Computes y = BN(x). Also accumulates moving averages of mean and inverse variances */
+ cudnnStatus_t CUDNNWINAPI
+ cudnnBatchNormalizationForwardTraining(
+ cudnnHandle_t handle,
+ cudnnBatchNormMode_t mode,
+
+ const void *alpha, /* alpha[0] = result blend factor */
+ const void *beta, /* beta[0] = dest layer blend factor */
+
+ const cudnnTensorDescriptor_t xDesc,
+ const void *x, /* NxCxHxW */
+ const cudnnTensorDescriptor_t yDesc,
+ void *y, /* NxCxHxW */
+
+ /* Shared desc for the next 6 tensors in the argument list.
+ Data type to be set as follows:
+ type = (typeOf(x) == double) ? double : float
+ Dimensions for this descriptor depend on normalization mode
+ - Spatial Normalization : tensors are expected to have dims 1xCx1x1
+ (normalization is performed across NxHxW)
+ - Per-Activation Normalization : tensors are expected to have dims of 1xCxHxW
+ (normalization is performed across N) */
+ const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc,
+
+ /* 'Gamma' and 'Beta' respectively in Ioffe and Szegedy's paper's notation */
+ const void *bnScale,
+ const void *bnBias,
+
+ /* MUST use factor=1 in the very first call of a complete training cycle.
+ Use a factor=1/(1+n) at N-th call to the function to get
+ Cumulative Moving Average (CMA) behavior
+ CMA[n] = (x[1]+...+x[n])/n
+ Since CMA[n+1] = (n*CMA[n]+x[n+1])/(n+1) =
+ ((n+1)*CMA[n]-CMA[n])/(n+1) + x[n+1]/(n+1) =
+ CMA[n]*(1-1/(n+1)) + x[n+1]*1/(n+1) */
+ double exponentialAverageFactor,
+
+ /* Used in Training phase only.
+ runningMean = newMean*factor + runningMean*(1-factor) */
+ void *resultRunningMean,
+ /* Output in training mode, input in inference. Is the moving average
+ of variance[x] (factor is applied in the same way as for runningMean) */
+ void *resultRunningVariance,
+
+ /* Has to be >= CUDNN_BN_MIN_EPSILON. Should be the same in forward and backward functions. */
+ double epsilon,
+
+ /* Optionally save intermediate results from the forward pass here
+ - can be reused to speed up backward pass. NULL if unused */
+ void *resultSaveMean,
+ void *resultSaveInvVariance);
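To make the exponentialAverageFactor comment concrete, a sketch of one training-step call in spatial mode follows; descriptors and device buffers are assumed to exist, iteration is zero-based, and the epsilon of 1e-5 is an illustrative choice (any value >= CUDNN_BN_MIN_EPSILON works).

#include <cudnn.h>

void bn_train_step_sketch(cudnnHandle_t handle, int iteration,
                          cudnnTensorDescriptor_t xDesc, const void *x,
                          cudnnTensorDescriptor_t yDesc, void *y,
                          cudnnTensorDescriptor_t bnDesc,
                          const void *bnScale, const void *bnBias,
                          void *runningMean, void *runningVar,
                          void *saveMean, void *saveInvVar) {
    const float alpha = 1.0f, beta = 0.0f;

    /* factor = 1/(1+n) yields the cumulative moving average described above;
       the very first call (iteration == 0) therefore uses factor = 1. */
    double factor = 1.0 / (1.0 + (double)iteration);

    cudnnBatchNormalizationForwardTraining(handle, CUDNN_BATCHNORM_SPATIAL,
                                           &alpha, &beta,
                                           xDesc, x, yDesc, y,
                                           bnDesc, bnScale, bnBias,
                                           factor, runningMean, runningVar,
                                           1e-5 /* epsilon */,
                                           saveMean, saveInvVar);
}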
236
+
237
+ /* Computes y = relu(BN(x) + z). Also accumulates moving averages of mean and inverse variances */
238
+ cudnnStatus_t CUDNNWINAPI
239
+ cudnnBatchNormalizationForwardTrainingEx(
240
+ cudnnHandle_t handle,
241
+ cudnnBatchNormMode_t mode,
242
+ cudnnBatchNormOps_t bnOps,
243
+
244
+ const void *alpha, /* alpha[0] = result blend factor */
245
+ const void *beta, /* beta[0] = dest layer blend factor */
246
+
247
+ const cudnnTensorDescriptor_t xDesc,
248
+ const void *xData,
249
+ const cudnnTensorDescriptor_t zDesc,
250
+ const void *zData,
251
+ const cudnnTensorDescriptor_t yDesc,
252
+ void *yData,
253
+
254
+ const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc,
255
+ const void *bnScale,
256
+ const void *bnBias,
257
+
258
+ double exponentialAverageFactor,
259
+ void *resultRunningMean,
260
+ void *resultRunningVariance,
261
+
262
+ /* Has to be >= CUDNN_BN_MIN_EPSILON. Should be the same in forward and backward functions. */
263
+ double epsilon,
264
+
265
+ /* Optionally save intermediate results from the forward pass here
266
+ - can be reused to speed up backward pass. NULL if unused */
267
+ void *resultSaveMean,
268
+ void *resultSaveInvVariance,
269
+
270
+ cudnnActivationDescriptor_t activationDesc,
271
+ void *workspace,
272
+ size_t workSpaceSizeInBytes,
273
+ void *reserveSpace,
274
+ size_t reserveSpaceSizeInBytes);
275
+
276
+ /* Performs backward pass of Batch Normalization layer. Returns x gradient,
277
+ * bnScale gradient and bnBias gradient */
278
+ cudnnStatus_t CUDNNWINAPI
279
+ cudnnBatchNormalizationBackward(cudnnHandle_t handle,
280
+ cudnnBatchNormMode_t mode,
281
+ const void *alphaDataDiff,
282
+ const void *betaDataDiff,
283
+ const void *alphaParamDiff,
284
+ const void *betaParamDiff,
285
+ const cudnnTensorDescriptor_t xDesc, /* same desc for x, dx, dy */
286
+ const void *x,
287
+ const cudnnTensorDescriptor_t dyDesc,
288
+ const void *dy,
289
+ const cudnnTensorDescriptor_t dxDesc,
290
+ void *dx,
291
+ /* Shared tensor desc for the 4 tensors below */
292
+ const cudnnTensorDescriptor_t dBnScaleBiasDesc,
293
+ const void *bnScale, /* bnBias doesn't affect backpropagation */
294
+ /* scale and bias diff are not backpropagated below this layer */
295
+ void *dBnScaleResult,
296
+ void *dBnBiasResult,
297
+ /* Same epsilon as forward pass */
298
+ double epsilon,
299
+
300
+ /* Optionally cached intermediate results from
301
+ forward pass */
302
+ const void *savedMean,
303
+ const void *savedInvVariance);
304
+
305
+ cudnnStatus_t CUDNNWINAPI
306
+ cudnnBatchNormalizationBackwardEx(cudnnHandle_t handle,
307
+ cudnnBatchNormMode_t mode,
308
+ cudnnBatchNormOps_t bnOps,
309
+
310
+ const void *alphaDataDiff,
311
+ const void *betaDataDiff,
312
+ const void *alphaParamDiff,
313
+ const void *betaParamDiff,
314
+ const cudnnTensorDescriptor_t xDesc,
315
+ const void *xData,
316
+ const cudnnTensorDescriptor_t yDesc,
317
+ const void *yData,
318
+ const cudnnTensorDescriptor_t dyDesc,
319
+ const void *dyData,
320
+ const cudnnTensorDescriptor_t dzDesc,
321
+ void *dzData,
322
+ const cudnnTensorDescriptor_t dxDesc,
323
+ void *dxData,
324
+
325
+ /* Shared tensor desc for the 4 tensors below */
326
+ const cudnnTensorDescriptor_t dBnScaleBiasDesc,
327
+ const void *bnScaleData,
328
+ const void *bnBiasData, /* needed if there is activation */
329
+ void *dBnScaleData,
330
+ void *dBnBiasData,
331
+ double epsilon, /* Same epsilon as forward pass */
332
+
333
+ /* Optionally cached intermediate results from
334
+ forward pass */
335
+ const void *savedMean,
336
+ const void *savedInvVariance,
337
+ cudnnActivationDescriptor_t activationDesc,
338
+ void *workSpace,
339
+ size_t workSpaceSizeInBytes,
340
+ void *reserveSpace,
341
+ size_t reserveSpaceSizeInBytes);
342
+
343
+ cudnnStatus_t CUDNNWINAPI
344
+ cudnnGetNormalizationForwardTrainingWorkspaceSize(cudnnHandle_t handle,
345
+ cudnnNormMode_t mode,
346
+ cudnnNormOps_t normOps,
347
+ cudnnNormAlgo_t algo,
348
+ const cudnnTensorDescriptor_t xDesc,
349
+ const cudnnTensorDescriptor_t zDesc,
350
+ const cudnnTensorDescriptor_t yDesc,
351
+ const cudnnTensorDescriptor_t normScaleBiasDesc,
352
+ const cudnnActivationDescriptor_t activationDesc,
353
+ const cudnnTensorDescriptor_t normMeanVarDesc,
354
+ size_t *sizeInBytes,
355
+ int groupCnt); /* Place hold for future work, should be set to 1 now*/
356
+
357
+ cudnnStatus_t CUDNNWINAPI
358
+ cudnnGetNormalizationBackwardWorkspaceSize(cudnnHandle_t handle,
359
+ cudnnNormMode_t mode,
360
+ cudnnNormOps_t normOps,
361
+ cudnnNormAlgo_t algo,
362
+ const cudnnTensorDescriptor_t xDesc,
363
+ const cudnnTensorDescriptor_t yDesc,
364
+ const cudnnTensorDescriptor_t dyDesc,
365
+ const cudnnTensorDescriptor_t dzDesc,
366
+ const cudnnTensorDescriptor_t dxDesc,
367
+ const cudnnTensorDescriptor_t dNormScaleBiasDesc,
368
+ const cudnnActivationDescriptor_t activationDesc,
369
+ const cudnnTensorDescriptor_t normMeanVarDesc,
370
+ size_t *sizeInBytes,
371
+ int groupCnt); /* Place hold for future work, should be set to 1 now*/
372
+
373
+ cudnnStatus_t CUDNNWINAPI
374
+ cudnnGetNormalizationTrainingReserveSpaceSize(cudnnHandle_t handle,
375
+ cudnnNormMode_t mode,
376
+ cudnnNormOps_t normOps,
377
+ cudnnNormAlgo_t algo,
378
+ const cudnnActivationDescriptor_t activationDesc,
379
+ const cudnnTensorDescriptor_t xDesc,
380
+ size_t *sizeInBytes,
381
+ int groupCnt); /* Place hold for future work, should be set to 1 now*/
382
+
383
+ /* Computes y = relu(Norm(x) + z). Also accumulates moving averages of mean and inverse variances */
384
+ cudnnStatus_t CUDNNWINAPI
385
+ cudnnNormalizationForwardTraining(cudnnHandle_t handle,
386
+ cudnnNormMode_t mode,
387
+ cudnnNormOps_t normOps,
388
+ cudnnNormAlgo_t algo,
389
+ const void *alpha, /* alpha[0] = result blend factor */
390
+ const void *beta, /* beta[0] = dest layer blend factor */
391
+ const cudnnTensorDescriptor_t xDesc,
392
+ const void *xData,
393
+ const cudnnTensorDescriptor_t normScaleBiasDesc,
394
+ const void *normScale,
395
+ const void *normBias,
396
+ double exponentialAverageFactor,
397
+ const cudnnTensorDescriptor_t normMeanVarDesc,
398
+ void *resultRunningMean,
399
+ void *resultRunningVariance,
400
+ /* Has to be >= 0. Should be the same in forward and backward functions. */
401
+ double epsilon,
402
+ /* Optionally save intermediate results from the forward pass here
403
+ - can be reused to speed up backward pass. NULL if unused */
404
+ void *resultSaveMean,
405
+ void *resultSaveInvVariance,
406
+ cudnnActivationDescriptor_t activationDesc,
407
+ const cudnnTensorDescriptor_t zDesc,
408
+ const void *zData,
409
+ const cudnnTensorDescriptor_t yDesc,
410
+ void *yData,
411
+ void *workspace,
412
+ size_t workSpaceSizeInBytes,
413
+ void *reserveSpace,
414
+ size_t reserveSpaceSizeInBytes,
415
+ int groupCnt); /* Place hold for future work, should be set to 1 now*/
416
+
417
+ cudnnStatus_t CUDNNWINAPI
418
+ cudnnNormalizationBackward(cudnnHandle_t handle,
419
+ cudnnNormMode_t mode,
420
+ cudnnNormOps_t normOps,
421
+ cudnnNormAlgo_t algo,
422
+ const void *alphaDataDiff,
423
+ const void *betaDataDiff,
424
+ const void *alphaParamDiff,
425
+ const void *betaParamDiff,
426
+ const cudnnTensorDescriptor_t xDesc,
427
+ const void *xData,
428
+ const cudnnTensorDescriptor_t yDesc,
429
+ const void *yData,
430
+ const cudnnTensorDescriptor_t dyDesc,
431
+ const void *dyData,
432
+ const cudnnTensorDescriptor_t dzDesc,
433
+ void *dzData,
434
+ const cudnnTensorDescriptor_t dxDesc,
435
+ void *dxData,
436
+ /* Shared tensor desc for the 4 tensors below */
437
+ const cudnnTensorDescriptor_t dNormScaleBiasDesc,
438
+ const void *normScaleData,
439
+ const void *normBiasData, /* needed if there is activation */
440
+ void *dNormScaleData,
441
+ void *dNormBiasData,
442
+ double epsilon, /* Same epsilon as forward pass */
443
+ const cudnnTensorDescriptor_t normMeanVarDesc,
444
+ /* Optionally cached intermediate results from
445
+ forward pass */
446
+ const void *savedMean,
447
+ const void *savedInvVariance,
448
+ cudnnActivationDescriptor_t activationDesc,
449
+ void *workSpace,
450
+ size_t workSpaceSizeInBytes,
451
+ void *reserveSpace,
452
+ size_t reserveSpaceSizeInBytes,
453
+ int groupCnt); /* Placeholder for future work; should be set to 1 for now */
454
+
455
+ cudnnStatus_t CUDNNWINAPI
456
+ cudnnSpatialTfGridGeneratorBackward(cudnnHandle_t handle,
457
+ const cudnnSpatialTransformerDescriptor_t stDesc,
458
+ const void *dgrid,
459
+ void *dtheta);
460
+
461
+ cudnnStatus_t CUDNNWINAPI
462
+ cudnnSpatialTfSamplerBackward(cudnnHandle_t handle,
463
+ cudnnSpatialTransformerDescriptor_t stDesc,
464
+ const void *alpha,
465
+ const cudnnTensorDescriptor_t xDesc,
466
+ const void *x,
467
+ const void *beta,
468
+ const cudnnTensorDescriptor_t dxDesc,
469
+ void *dx,
470
+ const void *alphaDgrid,
471
+ const cudnnTensorDescriptor_t dyDesc,
472
+ const void *dy,
473
+ const void *grid,
474
+ const void *betaDgrid,
475
+ void *dgrid);
476
+
477
+ cudnnStatus_t CUDNNWINAPI
478
+ cudnnDropoutBackward(cudnnHandle_t handle,
479
+ const cudnnDropoutDescriptor_t dropoutDesc,
480
+ const cudnnTensorDescriptor_t dydesc,
481
+ const void *dy,
482
+ const cudnnTensorDescriptor_t dxdesc,
483
+ void *dx,
484
+ void *reserveSpace,
485
+ size_t reserveSpaceSizeInBytes);
486
+
487
+ /*
488
+ * \brief Cross-library version checker.
489
+ * This function is implemented differently in each sub-library. Each sub-library
490
+ * checks whether its own version matches that of its dependencies.
491
+ * \returns CUDNN_STATUS_SUCCESS if the version check passes,
492
+ * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent.
493
+ */
494
+ cudnnStatus_t CUDNNWINAPI
495
+ cudnnOpsTrainVersionCheck(void);
496
+
497
+ #if defined(__cplusplus)
498
+ }
499
+ #endif
500
+
501
+ #endif /* CUDNN_OPS_TRAIN_H_ */
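
The declarations above follow the usual cuDNN sizing pattern: query the workspace and reserve-space requirements first, allocate, then issue the fused training call, keeping the same epsilon and the same reserve buffer for the later cudnnNormalizationBackward call. The sketch below walks through that sequence for plain per-channel normalization; the tensor shapes, the CUDNN_NORM_OPS_NORM / CUDNN_NORM_ALGO_STANDARD choices, the NULL zDesc/activationDesc arguments, and the CHECK_CUDNN macro are illustrative assumptions, not part of the header.

#include <cudnn.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define CHECK_CUDNN(call)                                               \
    do {                                                                \
        cudnnStatus_t s_ = (call);                                      \
        if (s_ != CUDNN_STATUS_SUCCESS) {                               \
            fprintf(stderr, "cuDNN error at line %d: %s\n", __LINE__,   \
                    cudnnGetErrorString(s_));                           \
            exit(EXIT_FAILURE);                                         \
        }                                                               \
    } while (0)

int main(void) {
    const int N = 8, C = 32, H = 16, W = 16;

    cudnnHandle_t handle;
    CHECK_CUDNN(cudnnCreate(&handle));

    /* x and y share one shape; per-channel scale/bias and the running
       statistics use a 1 x C x 1 x 1 descriptor. */
    cudnnTensorDescriptor_t xDesc, yDesc, sbDesc, mvDesc;
    CHECK_CUDNN(cudnnCreateTensorDescriptor(&xDesc));
    CHECK_CUDNN(cudnnCreateTensorDescriptor(&yDesc));
    CHECK_CUDNN(cudnnCreateTensorDescriptor(&sbDesc));
    CHECK_CUDNN(cudnnCreateTensorDescriptor(&mvDesc));
    CHECK_CUDNN(cudnnSetTensor4dDescriptor(xDesc, CUDNN_TENSOR_NCHW,
                                           CUDNN_DATA_FLOAT, N, C, H, W));
    CHECK_CUDNN(cudnnSetTensor4dDescriptor(yDesc, CUDNN_TENSOR_NCHW,
                                           CUDNN_DATA_FLOAT, N, C, H, W));
    CHECK_CUDNN(cudnnSetTensor4dDescriptor(sbDesc, CUDNN_TENSOR_NCHW,
                                           CUDNN_DATA_FLOAT, 1, C, 1, 1));
    CHECK_CUDNN(cudnnSetTensor4dDescriptor(mvDesc, CUDNN_TENSOR_NCHW,
                                           CUDNN_DATA_FLOAT, 1, C, 1, 1));

    /* Step 1: size queries.  zDesc/activationDesc are passed as NULL on
       the assumption that plain CUDNN_NORM_OPS_NORM does not use them. */
    size_t wsBytes = 0, rsBytes = 0;
    CHECK_CUDNN(cudnnGetNormalizationForwardTrainingWorkspaceSize(
        handle, CUDNN_NORM_PER_CHANNEL, CUDNN_NORM_OPS_NORM,
        CUDNN_NORM_ALGO_STANDARD, xDesc, /*zDesc=*/NULL, yDesc, sbDesc,
        /*activationDesc=*/NULL, mvDesc, &wsBytes, /*groupCnt=*/1));
    CHECK_CUDNN(cudnnGetNormalizationTrainingReserveSpaceSize(
        handle, CUDNN_NORM_PER_CHANNEL, CUDNN_NORM_OPS_NORM,
        CUDNN_NORM_ALGO_STANDARD, /*activationDesc=*/NULL, xDesc,
        &rsBytes, /*groupCnt=*/1));

    /* Step 2: allocations (left uninitialized; a real program would fill
       x, scale, and bias before the call). */
    float *x, *y, *scale, *bias, *runMean, *runVar;
    void *workspace = NULL, *reserve = NULL;
    cudaMalloc((void **)&x, (size_t)N * C * H * W * sizeof(float));
    cudaMalloc((void **)&y, (size_t)N * C * H * W * sizeof(float));
    cudaMalloc((void **)&scale, C * sizeof(float));
    cudaMalloc((void **)&bias, C * sizeof(float));
    cudaMalloc((void **)&runMean, C * sizeof(float));
    cudaMalloc((void **)&runVar, C * sizeof(float));
    if (wsBytes) cudaMalloc(&workspace, wsBytes);
    if (rsBytes) cudaMalloc(&reserve, rsBytes);

    /* Step 3: the fused forward-training call.  The same epsilon and the
       reserve buffer filled here must be passed to
       cudnnNormalizationBackward later. */
    const float one = 1.0f, zero = 0.0f;
    CHECK_CUDNN(cudnnNormalizationForwardTraining(
        handle, CUDNN_NORM_PER_CHANNEL, CUDNN_NORM_OPS_NORM,
        CUDNN_NORM_ALGO_STANDARD, &one, &zero, xDesc, x, sbDesc, scale, bias,
        /*exponentialAverageFactor=*/0.1, mvDesc, runMean, runVar,
        /*epsilon=*/1e-5, /*resultSaveMean=*/NULL,
        /*resultSaveInvVariance=*/NULL, /*activationDesc=*/NULL,
        /*zDesc=*/NULL, /*zData=*/NULL, yDesc, y,
        workspace, wsBytes, reserve, rsBytes, /*groupCnt=*/1));

    printf("workspace=%zu bytes, reserve=%zu bytes\n", wsBytes, rsBytes);
    /* Device buffers and descriptors are not freed here for brevity. */
    CHECK_CUDNN(cudnnDestroy(handle));
    return 0;
}

Because normOps is CUDNN_NORM_OPS_NORM in this sketch, no residual z tensor or activation descriptor is involved; switching to the fused relu(Norm(x) + z) path described in the header comment would require supplying both.
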
videollama2/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_version_v8.h ADDED
@@ -0,0 +1,109 @@
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein are PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /**
51
+ * \file: The master cuDNN version file.
52
+ */
53
+
54
+ #ifndef CUDNN_VERSION_H_
55
+ #define CUDNN_VERSION_H_
56
+
57
+ #define CUDNN_MAJOR 8
58
+ #define CUDNN_MINOR 9
59
+ #define CUDNN_PATCHLEVEL 2
60
+
61
+ #define CUDNN_VERSION (CUDNN_MAJOR * 1000 + CUDNN_MINOR * 100 + CUDNN_PATCHLEVEL)
62
+
63
+ /* cannot use constexpr here since this is a C-only file */
64
+ /* Below is the max SM version this cuDNN library is aware of and supports natively */
65
+
66
+ #define CUDNN_MAX_SM_MAJOR_NUMBER 9
67
+ #define CUDNN_MAX_SM_MINOR_NUMBER 0
68
+ #define CUDNN_MAX_DEVICE_VERSION (CUDNN_MAX_SM_MAJOR_NUMBER * 100 + CUDNN_MAX_SM_MINOR_NUMBER * 10)
69
+
70
+ /* Constants for each of the SM architectures we support, for use in code where device version checks must be
71
+ * made */
72
+
73
+ /* MAXWELL SM 50 52 53 */
74
+ #define CUDNN_SM_50 500
75
+ #define CUDNN_SM_52 520
76
+ #define CUDNN_SM_53 530
77
+
78
+ /* PASCAL SM 60 61 62 */
79
+ #define CUDNN_SM_60 600
80
+ #define CUDNN_SM_61 610
81
+ #define CUDNN_SM_62 620
82
+
83
+ /* VOLTA SM 70 72 */
84
+ #define CUDNN_SM_70 700
85
+ #define CUDNN_SM_72 720
86
+
87
+ /* TURING SM 75 */
88
+ #define CUDNN_SM_75 750
89
+
90
+ /* AMPERE SM 80 86 87 */
91
+ #define CUDNN_SM_80 800
92
+ #define CUDNN_SM_86 860
93
+ #define CUDNN_SM_87 870
94
+
95
+ /* ADA LOVELACE SM 89 */
96
+ #define CUDNN_SM_89 890
97
+
98
+ /* HOPPER SM 90 */
99
+ #define CUDNN_SM_90 900
100
+
101
+ /* END MARKER for last known version.
102
+ * This can be replaced after support for 1000 is added
103
+ */
104
+ #define CUDNN_SM_9X_END 999
105
+
106
+ /* This is the minimum version we support; devices below this will return CUDNN_STATUS_ARCH_MISMATCH */
107
+ #define CUDNN_MIN_DEVICE_VERSION CUDNN_SM_50
108
+
109
+ #endif /* CUDNN_VERSION_H_ */
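
With the macros above, CUDNN_VERSION evaluates to 8 * 1000 + 9 * 100 + 2 = 8902, so preprocessor guards such as #if CUDNN_VERSION >= 8902 work directly on the 8.x encoding (later major releases may encode the version differently). A small illustrative sketch, not part of the header, compares this compile-time value against cudnnGetVersion(), which reports the version of the libcudnn shared library actually loaded at run time:

#include <cudnn.h>
#include <stdio.h>

int main(void) {
    /* Compile-time version baked in from cudnn_version.h. */
    printf("compiled against cuDNN %d (%d.%d.%d)\n",
           CUDNN_VERSION, CUDNN_MAJOR, CUDNN_MINOR, CUDNN_PATCHLEVEL);

    /* Run-time version reported by the loaded libcudnn library. */
    size_t runtime = cudnnGetVersion();
    printf("loaded libcudnn reports %zu\n", runtime);

    /* In the 8.x encoding the major number is the thousands digit. */
    if (runtime / 1000 != (size_t)CUDNN_MAJOR)
        fprintf(stderr, "warning: cuDNN major version mismatch\n");
    return 0;
}

A disagreement between the two values usually means a stale libcudnn.so on the loader path, which is also what the per-sublibrary entry points such as cudnnOpsTrainVersionCheck() above guard against.
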
videollama2/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn.so.8 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce8aa8f749e69cff5e83058495fc5595d1e6aa63ff26f27f745521b6e334ecce
3
+ size 150200
vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_all_methods.cpython-310.pyc ADDED
Binary file (2.86 kB).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_apply.cpython-310.pyc ADDED
Binary file (52.3 kB).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_apply_mutate.cpython-310.pyc ADDED
Binary file (5.08 kB).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_bin_groupby.cpython-310.pyc ADDED
Binary file (1.94 kB).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_categorical.cpython-310.pyc ADDED
Binary file (50.3 kB).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_counting.cpython-310.pyc ADDED
Binary file (14.9 kB).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_cumulative.cpython-310.pyc ADDED
Binary file (9.19 kB).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_filters.cpython-310.pyc ADDED
Binary file (22.8 kB).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_groupby.cpython-310.pyc ADDED
Binary file (95.3 kB).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_groupby_dropna.cpython-310.pyc ADDED
Binary file (16.7 kB).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_groupby_subclass.cpython-310.pyc ADDED
Binary file (3.67 kB).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_grouping.cpython-310.pyc ADDED
Binary file (38 kB).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_indexing.cpython-310.pyc ADDED
Binary file (9.74 kB).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_libgroupby.cpython-310.pyc ADDED
Binary file (9.94 kB).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_missing.cpython-310.pyc ADDED
Binary file (4.99 kB).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_numba.cpython-310.pyc ADDED
Binary file (3.23 kB).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_raises.cpython-310.pyc ADDED
Binary file (14.5 kB).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_reductions.cpython-310.pyc ADDED
Binary file (29.4 kB).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_timegrouper.cpython-310.pyc ADDED
Binary file (23.6 kB).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__init__.py ADDED
File without changes
vllm/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (180 Bytes).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_aggregate.cpython-310.pyc ADDED
Binary file (52.1 kB).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_cython.cpython-310.pyc ADDED
Binary file (11 kB).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_numba.cpython-310.pyc ADDED
Binary file (11.2 kB).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_other.cpython-310.pyc ADDED
Binary file (20 kB).
 
vllm/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_aggregate.py ADDED
@@ -0,0 +1,1672 @@
1
+ """
2
+ test .agg behavior / note that .apply is tested generally in test_groupby.py
3
+ """
4
+ import datetime
5
+ import functools
6
+ from functools import partial
7
+ import re
8
+
9
+ import numpy as np
10
+ import pytest
11
+
12
+ from pandas.errors import SpecificationError
13
+
14
+ from pandas.core.dtypes.common import is_integer_dtype
15
+
16
+ import pandas as pd
17
+ from pandas import (
18
+ DataFrame,
19
+ Index,
20
+ MultiIndex,
21
+ Series,
22
+ concat,
23
+ to_datetime,
24
+ )
25
+ import pandas._testing as tm
26
+ from pandas.core.groupby.grouper import Grouping
27
+
28
+
29
+ def test_groupby_agg_no_extra_calls():
30
+ # GH#31760
31
+ df = DataFrame({"key": ["a", "b", "c", "c"], "value": [1, 2, 3, 4]})
32
+ gb = df.groupby("key")["value"]
33
+
34
+ def dummy_func(x):
35
+ assert len(x) != 0
36
+ return x.sum()
37
+
38
+ gb.agg(dummy_func)
39
+
40
+
41
+ def test_agg_regression1(tsframe):
42
+ grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
43
+ result = grouped.agg("mean")
44
+ expected = grouped.mean()
45
+ tm.assert_frame_equal(result, expected)
46
+
47
+
48
+ def test_agg_must_agg(df):
49
+ grouped = df.groupby("A")["C"]
50
+
51
+ msg = "Must produce aggregated value"
52
+ with pytest.raises(Exception, match=msg):
53
+ grouped.agg(lambda x: x.describe())
54
+ with pytest.raises(Exception, match=msg):
55
+ grouped.agg(lambda x: x.index[:2])
56
+
57
+
58
+ def test_agg_ser_multi_key(df):
59
+ f = lambda x: x.sum()
60
+ results = df.C.groupby([df.A, df.B]).aggregate(f)
61
+ expected = df.groupby(["A", "B"]).sum()["C"]
62
+ tm.assert_series_equal(results, expected)
63
+
64
+
65
+ def test_groupby_aggregation_mixed_dtype():
66
+ # GH 6212
67
+ expected = DataFrame(
68
+ {
69
+ "v1": [5, 5, 7, np.nan, 3, 3, 4, 1],
70
+ "v2": [55, 55, 77, np.nan, 33, 33, 44, 11],
71
+ },
72
+ index=MultiIndex.from_tuples(
73
+ [
74
+ (1, 95),
75
+ (1, 99),
76
+ (2, 95),
77
+ (2, 99),
78
+ ("big", "damp"),
79
+ ("blue", "dry"),
80
+ ("red", "red"),
81
+ ("red", "wet"),
82
+ ],
83
+ names=["by1", "by2"],
84
+ ),
85
+ )
86
+
87
+ df = DataFrame(
88
+ {
89
+ "v1": [1, 3, 5, 7, 8, 3, 5, np.nan, 4, 5, 7, 9],
90
+ "v2": [11, 33, 55, 77, 88, 33, 55, np.nan, 44, 55, 77, 99],
91
+ "by1": ["red", "blue", 1, 2, np.nan, "big", 1, 2, "red", 1, np.nan, 12],
92
+ "by2": [
93
+ "wet",
94
+ "dry",
95
+ 99,
96
+ 95,
97
+ np.nan,
98
+ "damp",
99
+ 95,
100
+ 99,
101
+ "red",
102
+ 99,
103
+ np.nan,
104
+ np.nan,
105
+ ],
106
+ }
107
+ )
108
+
109
+ g = df.groupby(["by1", "by2"])
110
+ result = g[["v1", "v2"]].mean()
111
+ tm.assert_frame_equal(result, expected)
112
+
113
+
114
+ def test_groupby_aggregation_multi_level_column():
115
+ # GH 29772
116
+ lst = [
117
+ [True, True, True, False],
118
+ [True, False, np.nan, False],
119
+ [True, True, np.nan, False],
120
+ [True, True, np.nan, False],
121
+ ]
122
+ df = DataFrame(
123
+ data=lst,
124
+ columns=MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 0), ("B", 1)]),
125
+ )
126
+
127
+ msg = "DataFrame.groupby with axis=1 is deprecated"
128
+ with tm.assert_produces_warning(FutureWarning, match=msg):
129
+ gb = df.groupby(level=1, axis=1)
130
+ result = gb.sum(numeric_only=False)
131
+ expected = DataFrame({0: [2.0, True, True, True], 1: [1, 0, 1, 1]})
132
+
133
+ tm.assert_frame_equal(result, expected)
134
+
135
+
136
+ def test_agg_apply_corner(ts, tsframe):
137
+ # nothing to group, all NA
138
+ grouped = ts.groupby(ts * np.nan, group_keys=False)
139
+ assert ts.dtype == np.float64
140
+
141
+ # groupby float64 values results in a float64 Index
142
+ exp = Series([], dtype=np.float64, index=Index([], dtype=np.float64))
143
+ tm.assert_series_equal(grouped.sum(), exp)
144
+ tm.assert_series_equal(grouped.agg("sum"), exp)
145
+ tm.assert_series_equal(grouped.apply("sum"), exp, check_index_type=False)
146
+
147
+ # DataFrame
148
+ grouped = tsframe.groupby(tsframe["A"] * np.nan, group_keys=False)
149
+ exp_df = DataFrame(
150
+ columns=tsframe.columns,
151
+ dtype=float,
152
+ index=Index([], name="A", dtype=np.float64),
153
+ )
154
+ tm.assert_frame_equal(grouped.sum(), exp_df)
155
+ tm.assert_frame_equal(grouped.agg("sum"), exp_df)
156
+
157
+ msg = "The behavior of DataFrame.sum with axis=None is deprecated"
158
+ with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False):
159
+ res = grouped.apply(np.sum)
160
+ tm.assert_frame_equal(res, exp_df)
161
+
162
+
163
+ def test_agg_grouping_is_list_tuple(ts):
164
+ df = DataFrame(
165
+ np.random.default_rng(2).standard_normal((30, 4)),
166
+ columns=Index(list("ABCD"), dtype=object),
167
+ index=pd.date_range("2000-01-01", periods=30, freq="B"),
168
+ )
169
+
170
+ grouped = df.groupby(lambda x: x.year)
171
+ grouper = grouped._grouper.groupings[0].grouping_vector
172
+ grouped._grouper.groupings[0] = Grouping(ts.index, list(grouper))
173
+
174
+ result = grouped.agg("mean")
175
+ expected = grouped.mean()
176
+ tm.assert_frame_equal(result, expected)
177
+
178
+ grouped._grouper.groupings[0] = Grouping(ts.index, tuple(grouper))
179
+
180
+ result = grouped.agg("mean")
181
+ expected = grouped.mean()
182
+ tm.assert_frame_equal(result, expected)
183
+
184
+
185
+ def test_agg_python_multiindex(multiindex_dataframe_random_data):
186
+ grouped = multiindex_dataframe_random_data.groupby(["A", "B"])
187
+
188
+ result = grouped.agg("mean")
189
+ expected = grouped.mean()
190
+ tm.assert_frame_equal(result, expected)
191
+
192
+
193
+ @pytest.mark.parametrize(
194
+ "groupbyfunc", [lambda x: x.weekday(), [lambda x: x.month, lambda x: x.weekday()]]
195
+ )
196
+ def test_aggregate_str_func(tsframe, groupbyfunc):
197
+ grouped = tsframe.groupby(groupbyfunc)
198
+
199
+ # single series
200
+ result = grouped["A"].agg("std")
201
+ expected = grouped["A"].std()
202
+ tm.assert_series_equal(result, expected)
203
+
204
+ # group frame by function name
205
+ result = grouped.aggregate("var")
206
+ expected = grouped.var()
207
+ tm.assert_frame_equal(result, expected)
208
+
209
+ # group frame by function dict
210
+ result = grouped.agg({"A": "var", "B": "std", "C": "mean", "D": "sem"})
211
+ expected = DataFrame(
212
+ {
213
+ "A": grouped["A"].var(),
214
+ "B": grouped["B"].std(),
215
+ "C": grouped["C"].mean(),
216
+ "D": grouped["D"].sem(),
217
+ }
218
+ )
219
+ tm.assert_frame_equal(result, expected)
220
+
221
+
222
+ def test_std_masked_dtype(any_numeric_ea_dtype):
223
+ # GH#35516
224
+ df = DataFrame(
225
+ {
226
+ "a": [2, 1, 1, 1, 2, 2, 1],
227
+ "b": Series([pd.NA, 1, 2, 1, 1, 1, 2], dtype="Float64"),
228
+ }
229
+ )
230
+ result = df.groupby("a").std()
231
+ expected = DataFrame(
232
+ {"b": [0.57735, 0]}, index=Index([1, 2], name="a"), dtype="Float64"
233
+ )
234
+ tm.assert_frame_equal(result, expected)
235
+
236
+
237
+ def test_agg_str_with_kwarg_axis_1_raises(df, reduction_func):
238
+ gb = df.groupby(level=0)
239
+ warn_msg = f"DataFrameGroupBy.{reduction_func} with axis=1 is deprecated"
240
+ if reduction_func in ("idxmax", "idxmin"):
241
+ error = TypeError
242
+ msg = "'[<>]' not supported between instances of 'float' and 'str'"
243
+ warn = FutureWarning
244
+ else:
245
+ error = ValueError
246
+ msg = f"Operation {reduction_func} does not support axis=1"
247
+ warn = None
248
+ with pytest.raises(error, match=msg):
249
+ with tm.assert_produces_warning(warn, match=warn_msg):
250
+ gb.agg(reduction_func, axis=1)
251
+
252
+
253
+ @pytest.mark.parametrize(
254
+ "func, expected, dtype, result_dtype_dict",
255
+ [
256
+ ("sum", [5, 7, 9], "int64", {}),
257
+ ("std", [4.5**0.5] * 3, int, {"i": float, "j": float, "k": float}),
258
+ ("var", [4.5] * 3, int, {"i": float, "j": float, "k": float}),
259
+ ("sum", [5, 7, 9], "Int64", {"j": "int64"}),
260
+ ("std", [4.5**0.5] * 3, "Int64", {"i": float, "j": float, "k": float}),
261
+ ("var", [4.5] * 3, "Int64", {"i": "float64", "j": "float64", "k": "float64"}),
262
+ ],
263
+ )
264
+ def test_multiindex_groupby_mixed_cols_axis1(func, expected, dtype, result_dtype_dict):
265
+ # GH#43209
266
+ df = DataFrame(
267
+ [[1, 2, 3, 4, 5, 6]] * 3,
268
+ columns=MultiIndex.from_product([["a", "b"], ["i", "j", "k"]]),
269
+ ).astype({("a", "j"): dtype, ("b", "j"): dtype})
270
+
271
+ msg = "DataFrame.groupby with axis=1 is deprecated"
272
+ with tm.assert_produces_warning(FutureWarning, match=msg):
273
+ gb = df.groupby(level=1, axis=1)
274
+ result = gb.agg(func)
275
+ expected = DataFrame([expected] * 3, columns=["i", "j", "k"]).astype(
276
+ result_dtype_dict
277
+ )
278
+
279
+ tm.assert_frame_equal(result, expected)
280
+
281
+
282
+ @pytest.mark.parametrize(
283
+ "func, expected_data, result_dtype_dict",
284
+ [
285
+ ("sum", [[2, 4], [10, 12], [18, 20]], {10: "int64", 20: "int64"}),
286
+ # std should ideally return Int64 / Float64 #43330
287
+ ("std", [[2**0.5] * 2] * 3, "float64"),
288
+ ("var", [[2] * 2] * 3, {10: "float64", 20: "float64"}),
289
+ ],
290
+ )
291
+ def test_groupby_mixed_cols_axis1(func, expected_data, result_dtype_dict):
292
+ # GH#43209
293
+ df = DataFrame(
294
+ np.arange(12).reshape(3, 4),
295
+ index=Index([0, 1, 0], name="y"),
296
+ columns=Index([10, 20, 10, 20], name="x"),
297
+ dtype="int64",
298
+ ).astype({10: "Int64"})
299
+
300
+ msg = "DataFrame.groupby with axis=1 is deprecated"
301
+ with tm.assert_produces_warning(FutureWarning, match=msg):
302
+ gb = df.groupby("x", axis=1)
303
+ result = gb.agg(func)
304
+ expected = DataFrame(
305
+ data=expected_data,
306
+ index=Index([0, 1, 0], name="y"),
307
+ columns=Index([10, 20], name="x"),
308
+ ).astype(result_dtype_dict)
309
+ tm.assert_frame_equal(result, expected)
310
+
311
+
312
+ def test_aggregate_item_by_item(df):
313
+ grouped = df.groupby("A")
314
+
315
+ aggfun_0 = lambda ser: ser.size
316
+ result = grouped.agg(aggfun_0)
317
+ foosum = (df.A == "foo").sum()
318
+ barsum = (df.A == "bar").sum()
319
+ K = len(result.columns)
320
+
321
+ # GH5782
322
+ exp = Series(np.array([foosum] * K), index=list("BCD"), name="foo")
323
+ tm.assert_series_equal(result.xs("foo"), exp)
324
+
325
+ exp = Series(np.array([barsum] * K), index=list("BCD"), name="bar")
326
+ tm.assert_almost_equal(result.xs("bar"), exp)
327
+
328
+ def aggfun_1(ser):
329
+ return ser.size
330
+
331
+ result = DataFrame().groupby(df.A).agg(aggfun_1)
332
+ assert isinstance(result, DataFrame)
333
+ assert len(result) == 0
334
+
335
+
336
+ def test_wrap_agg_out(three_group):
337
+ grouped = three_group.groupby(["A", "B"])
338
+
339
+ def func(ser):
340
+ if ser.dtype == object:
341
+ raise TypeError("Test error message")
342
+ return ser.sum()
343
+
344
+ with pytest.raises(TypeError, match="Test error message"):
345
+ grouped.aggregate(func)
346
+ result = grouped[["D", "E", "F"]].aggregate(func)
347
+ exp_grouped = three_group.loc[:, ["A", "B", "D", "E", "F"]]
348
+ expected = exp_grouped.groupby(["A", "B"]).aggregate(func)
349
+ tm.assert_frame_equal(result, expected)
350
+
351
+
352
+ def test_agg_multiple_functions_maintain_order(df):
353
+ # GH #610
354
+ funcs = [("mean", np.mean), ("max", np.max), ("min", np.min)]
355
+ msg = "is currently using SeriesGroupBy.mean"
356
+ with tm.assert_produces_warning(FutureWarning, match=msg):
357
+ result = df.groupby("A")["C"].agg(funcs)
358
+ exp_cols = Index(["mean", "max", "min"])
359
+
360
+ tm.assert_index_equal(result.columns, exp_cols)
361
+
362
+
363
+ def test_series_index_name(df):
364
+ grouped = df.loc[:, ["C"]].groupby(df["A"])
365
+ result = grouped.agg(lambda x: x.mean())
366
+ assert result.index.name == "A"
367
+
368
+
369
+ def test_agg_multiple_functions_same_name():
370
+ # GH 30880
371
+ df = DataFrame(
372
+ np.random.default_rng(2).standard_normal((1000, 3)),
373
+ index=pd.date_range("1/1/2012", freq="s", periods=1000),
374
+ columns=["A", "B", "C"],
375
+ )
376
+ result = df.resample("3min").agg(
377
+ {"A": [partial(np.quantile, q=0.9999), partial(np.quantile, q=0.1111)]}
378
+ )
379
+ expected_index = pd.date_range("1/1/2012", freq="3min", periods=6)
380
+ expected_columns = MultiIndex.from_tuples([("A", "quantile"), ("A", "quantile")])
381
+ expected_values = np.array(
382
+ [df.resample("3min").A.quantile(q=q).values for q in [0.9999, 0.1111]]
383
+ ).T
384
+ expected = DataFrame(
385
+ expected_values, columns=expected_columns, index=expected_index
386
+ )
387
+ tm.assert_frame_equal(result, expected)
388
+
389
+
390
+ def test_agg_multiple_functions_same_name_with_ohlc_present():
391
+ # GH 30880
392
+ # ohlc expands dimensions, so different test to the above is required.
393
+ df = DataFrame(
394
+ np.random.default_rng(2).standard_normal((1000, 3)),
395
+ index=pd.date_range("1/1/2012", freq="s", periods=1000, name="dti"),
396
+ columns=Index(["A", "B", "C"], name="alpha"),
397
+ )
398
+ result = df.resample("3min").agg(
399
+ {"A": ["ohlc", partial(np.quantile, q=0.9999), partial(np.quantile, q=0.1111)]}
400
+ )
401
+ expected_index = pd.date_range("1/1/2012", freq="3min", periods=6, name="dti")
402
+ expected_columns = MultiIndex.from_tuples(
403
+ [
404
+ ("A", "ohlc", "open"),
405
+ ("A", "ohlc", "high"),
406
+ ("A", "ohlc", "low"),
407
+ ("A", "ohlc", "close"),
408
+ ("A", "quantile", "A"),
409
+ ("A", "quantile", "A"),
410
+ ],
411
+ names=["alpha", None, None],
412
+ )
413
+ non_ohlc_expected_values = np.array(
414
+ [df.resample("3min").A.quantile(q=q).values for q in [0.9999, 0.1111]]
415
+ ).T
416
+ expected_values = np.hstack(
417
+ [df.resample("3min").A.ohlc(), non_ohlc_expected_values]
418
+ )
419
+ expected = DataFrame(
420
+ expected_values, columns=expected_columns, index=expected_index
421
+ )
422
+ tm.assert_frame_equal(result, expected)
423
+
424
+
425
+ def test_multiple_functions_tuples_and_non_tuples(df):
426
+ # #1359
427
+ # Columns B and C would cause partial failure
428
+ df = df.drop(columns=["B", "C"])
429
+
430
+ funcs = [("foo", "mean"), "std"]
431
+ ex_funcs = [("foo", "mean"), ("std", "std")]
432
+
433
+ result = df.groupby("A")["D"].agg(funcs)
434
+ expected = df.groupby("A")["D"].agg(ex_funcs)
435
+ tm.assert_frame_equal(result, expected)
436
+
437
+ result = df.groupby("A").agg(funcs)
438
+ expected = df.groupby("A").agg(ex_funcs)
439
+ tm.assert_frame_equal(result, expected)
440
+
441
+
442
+ def test_more_flexible_frame_multi_function(df):
443
+ grouped = df.groupby("A")
444
+
445
+ exmean = grouped.agg({"C": "mean", "D": "mean"})
446
+ exstd = grouped.agg({"C": "std", "D": "std"})
447
+
448
+ expected = concat([exmean, exstd], keys=["mean", "std"], axis=1)
449
+ expected = expected.swaplevel(0, 1, axis=1).sort_index(level=0, axis=1)
450
+
451
+ d = {"C": ["mean", "std"], "D": ["mean", "std"]}
452
+ result = grouped.aggregate(d)
453
+
454
+ tm.assert_frame_equal(result, expected)
455
+
456
+ # be careful
457
+ result = grouped.aggregate({"C": "mean", "D": ["mean", "std"]})
458
+ expected = grouped.aggregate({"C": "mean", "D": ["mean", "std"]})
459
+ tm.assert_frame_equal(result, expected)
460
+
461
+ def numpymean(x):
462
+ return np.mean(x)
463
+
464
+ def numpystd(x):
465
+ return np.std(x, ddof=1)
466
+
467
+ # this uses column selection & renaming
468
+ msg = r"nested renamer is not supported"
469
+ with pytest.raises(SpecificationError, match=msg):
470
+ d = {"C": "mean", "D": {"foo": "mean", "bar": "std"}}
471
+ grouped.aggregate(d)
472
+
473
+ # But without renaming, these functions are OK
474
+ d = {"C": ["mean"], "D": [numpymean, numpystd]}
475
+ grouped.aggregate(d)
476
+
477
+
478
+ def test_multi_function_flexible_mix(df):
479
+ # GH #1268
480
+ grouped = df.groupby("A")
481
+
482
+ # Expected
483
+ d = {"C": {"foo": "mean", "bar": "std"}, "D": {"sum": "sum"}}
484
+ # this uses column selection & renaming
485
+ msg = r"nested renamer is not supported"
486
+ with pytest.raises(SpecificationError, match=msg):
487
+ grouped.aggregate(d)
488
+
489
+ # Test 1
490
+ d = {"C": {"foo": "mean", "bar": "std"}, "D": "sum"}
491
+ # this uses column selection & renaming
492
+ with pytest.raises(SpecificationError, match=msg):
493
+ grouped.aggregate(d)
494
+
495
+ # Test 2
496
+ d = {"C": {"foo": "mean", "bar": "std"}, "D": "sum"}
497
+ # this uses column selection & renaming
498
+ with pytest.raises(SpecificationError, match=msg):
499
+ grouped.aggregate(d)
500
+
501
+
502
+ def test_groupby_agg_coercing_bools():
503
+ # issue 14873
504
+ dat = DataFrame({"a": [1, 1, 2, 2], "b": [0, 1, 2, 3], "c": [None, None, 1, 1]})
505
+ gp = dat.groupby("a")
506
+
507
+ index = Index([1, 2], name="a")
508
+
509
+ result = gp["b"].aggregate(lambda x: (x != 0).all())
510
+ expected = Series([False, True], index=index, name="b")
511
+ tm.assert_series_equal(result, expected)
512
+
513
+ result = gp["c"].aggregate(lambda x: x.isnull().all())
514
+ expected = Series([True, False], index=index, name="c")
515
+ tm.assert_series_equal(result, expected)
516
+
517
+
518
+ def test_groupby_agg_dict_with_getitem():
519
+ # issue 25471
520
+ dat = DataFrame({"A": ["A", "A", "B", "B", "B"], "B": [1, 2, 1, 1, 2]})
521
+ result = dat.groupby("A")[["B"]].agg({"B": "sum"})
522
+
523
+ expected = DataFrame({"B": [3, 4]}, index=["A", "B"]).rename_axis("A", axis=0)
524
+
525
+ tm.assert_frame_equal(result, expected)
526
+
527
+
528
+ def test_groupby_agg_dict_dup_columns():
529
+ # GH#55006
530
+ df = DataFrame(
531
+ [[1, 2, 3, 4], [1, 3, 4, 5], [2, 4, 5, 6]],
532
+ columns=["a", "b", "c", "c"],
533
+ )
534
+ gb = df.groupby("a")
535
+ result = gb.agg({"b": "sum"})
536
+ expected = DataFrame({"b": [5, 4]}, index=Index([1, 2], name="a"))
537
+ tm.assert_frame_equal(result, expected)
538
+
539
+
540
+ @pytest.mark.parametrize(
541
+ "op",
542
+ [
543
+ lambda x: x.sum(),
544
+ lambda x: x.cumsum(),
545
+ lambda x: x.transform("sum"),
546
+ lambda x: x.transform("cumsum"),
547
+ lambda x: x.agg("sum"),
548
+ lambda x: x.agg("cumsum"),
549
+ ],
550
+ )
551
+ def test_bool_agg_dtype(op):
552
+ # GH 7001
553
+ # Bool sum aggregations result in int
554
+ df = DataFrame({"a": [1, 1], "b": [False, True]})
555
+ s = df.set_index("a")["b"]
556
+
557
+ result = op(df.groupby("a"))["b"].dtype
558
+ assert is_integer_dtype(result)
559
+
560
+ result = op(s.groupby("a")).dtype
561
+ assert is_integer_dtype(result)
562
+
563
+
564
+ @pytest.mark.parametrize(
565
+ "keys, agg_index",
566
+ [
567
+ (["a"], Index([1], name="a")),
568
+ (["a", "b"], MultiIndex([[1], [2]], [[0], [0]], names=["a", "b"])),
569
+ ],
570
+ )
571
+ @pytest.mark.parametrize(
572
+ "input_dtype", ["bool", "int32", "int64", "float32", "float64"]
573
+ )
574
+ @pytest.mark.parametrize(
575
+ "result_dtype", ["bool", "int32", "int64", "float32", "float64"]
576
+ )
577
+ @pytest.mark.parametrize("method", ["apply", "aggregate", "transform"])
578
+ def test_callable_result_dtype_frame(
579
+ keys, agg_index, input_dtype, result_dtype, method
580
+ ):
581
+ # GH 21240
582
+ df = DataFrame({"a": [1], "b": [2], "c": [True]})
583
+ df["c"] = df["c"].astype(input_dtype)
584
+ op = getattr(df.groupby(keys)[["c"]], method)
585
+ result = op(lambda x: x.astype(result_dtype).iloc[0])
586
+ expected_index = pd.RangeIndex(0, 1) if method == "transform" else agg_index
587
+ expected = DataFrame({"c": [df["c"].iloc[0]]}, index=expected_index).astype(
588
+ result_dtype
589
+ )
590
+ if method == "apply":
591
+ expected.columns.names = [0]
592
+ tm.assert_frame_equal(result, expected)
593
+
594
+
595
+ @pytest.mark.parametrize(
596
+ "keys, agg_index",
597
+ [
598
+ (["a"], Index([1], name="a")),
599
+ (["a", "b"], MultiIndex([[1], [2]], [[0], [0]], names=["a", "b"])),
600
+ ],
601
+ )
602
+ @pytest.mark.parametrize("input", [True, 1, 1.0])
603
+ @pytest.mark.parametrize("dtype", [bool, int, float])
604
+ @pytest.mark.parametrize("method", ["apply", "aggregate", "transform"])
605
+ def test_callable_result_dtype_series(keys, agg_index, input, dtype, method):
606
+ # GH 21240
607
+ df = DataFrame({"a": [1], "b": [2], "c": [input]})
608
+ op = getattr(df.groupby(keys)["c"], method)
609
+ result = op(lambda x: x.astype(dtype).iloc[0])
610
+ expected_index = pd.RangeIndex(0, 1) if method == "transform" else agg_index
611
+ expected = Series([df["c"].iloc[0]], index=expected_index, name="c").astype(dtype)
612
+ tm.assert_series_equal(result, expected)
613
+
614
+
615
+ def test_order_aggregate_multiple_funcs():
616
+ # GH 25692
617
+ df = DataFrame({"A": [1, 1, 2, 2], "B": [1, 2, 3, 4]})
618
+
619
+ res = df.groupby("A").agg(["sum", "max", "mean", "ohlc", "min"])
620
+ result = res.columns.levels[1]
621
+
622
+ expected = Index(["sum", "max", "mean", "ohlc", "min"])
623
+
624
+ tm.assert_index_equal(result, expected)
625
+
626
+
627
+ def test_ohlc_ea_dtypes(any_numeric_ea_dtype):
628
+ # GH#37493
629
+ df = DataFrame(
630
+ {"a": [1, 1, 2, 3, 4, 4], "b": [22, 11, pd.NA, 10, 20, pd.NA]},
631
+ dtype=any_numeric_ea_dtype,
632
+ )
633
+ gb = df.groupby("a")
634
+ result = gb.ohlc()
635
+ expected = DataFrame(
636
+ [[22, 22, 11, 11], [pd.NA] * 4, [10] * 4, [20] * 4],
637
+ columns=MultiIndex.from_product([["b"], ["open", "high", "low", "close"]]),
638
+ index=Index([1, 2, 3, 4], dtype=any_numeric_ea_dtype, name="a"),
639
+ dtype=any_numeric_ea_dtype,
640
+ )
641
+ tm.assert_frame_equal(result, expected)
642
+
643
+ gb2 = df.groupby("a", as_index=False)
644
+ result2 = gb2.ohlc()
645
+ expected2 = expected.reset_index()
646
+ tm.assert_frame_equal(result2, expected2)
647
+
648
+
649
+ @pytest.mark.parametrize("dtype", [np.int64, np.uint64])
650
+ @pytest.mark.parametrize("how", ["first", "last", "min", "max", "mean", "median"])
651
+ def test_uint64_type_handling(dtype, how):
652
+ # GH 26310
653
+ df = DataFrame({"x": 6903052872240755750, "y": [1, 2]})
654
+ expected = df.groupby("y").agg({"x": how})
655
+ df.x = df.x.astype(dtype)
656
+ result = df.groupby("y").agg({"x": how})
657
+ if how not in ("mean", "median"):
658
+ # mean and median always result in floats
659
+ result.x = result.x.astype(np.int64)
660
+ tm.assert_frame_equal(result, expected, check_exact=True)
661
+
662
+
663
+ def test_func_duplicates_raises():
664
+ # GH28426
665
+ msg = "Function names"
666
+ df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
667
+ with pytest.raises(SpecificationError, match=msg):
668
+ df.groupby("A").agg(["min", "min"])
669
+
670
+
671
+ @pytest.mark.parametrize(
672
+ "index",
673
+ [
674
+ pd.CategoricalIndex(list("abc")),
675
+ pd.interval_range(0, 3),
676
+ pd.period_range("2020", periods=3, freq="D"),
677
+ MultiIndex.from_tuples([("a", 0), ("a", 1), ("b", 0)]),
678
+ ],
679
+ )
680
+ def test_agg_index_has_complex_internals(index):
681
+ # GH 31223
682
+ df = DataFrame({"group": [1, 1, 2], "value": [0, 1, 0]}, index=index)
683
+ result = df.groupby("group").agg({"value": Series.nunique})
684
+ expected = DataFrame({"group": [1, 2], "value": [2, 1]}).set_index("group")
685
+ tm.assert_frame_equal(result, expected)
686
+
687
+
688
+ def test_agg_split_block():
689
+ # https://github.com/pandas-dev/pandas/issues/31522
690
+ df = DataFrame(
691
+ {
692
+ "key1": ["a", "a", "b", "b", "a"],
693
+ "key2": ["one", "two", "one", "two", "one"],
694
+ "key3": ["three", "three", "three", "six", "six"],
695
+ }
696
+ )
697
+ result = df.groupby("key1").min()
698
+ expected = DataFrame(
699
+ {"key2": ["one", "one"], "key3": ["six", "six"]},
700
+ index=Index(["a", "b"], name="key1"),
701
+ )
702
+ tm.assert_frame_equal(result, expected)
703
+
704
+
705
+ def test_agg_split_object_part_datetime():
706
+ # https://github.com/pandas-dev/pandas/pull/31616
707
+ df = DataFrame(
708
+ {
709
+ "A": pd.date_range("2000", periods=4),
710
+ "B": ["a", "b", "c", "d"],
711
+ "C": [1, 2, 3, 4],
712
+ "D": ["b", "c", "d", "e"],
713
+ "E": pd.date_range("2000", periods=4),
714
+ "F": [1, 2, 3, 4],
715
+ }
716
+ ).astype(object)
717
+ result = df.groupby([0, 0, 0, 0]).min()
718
+ expected = DataFrame(
719
+ {
720
+ "A": [pd.Timestamp("2000")],
721
+ "B": ["a"],
722
+ "C": [1],
723
+ "D": ["b"],
724
+ "E": [pd.Timestamp("2000")],
725
+ "F": [1],
726
+ },
727
+ index=np.array([0]),
728
+ dtype=object,
729
+ )
730
+ tm.assert_frame_equal(result, expected)
731
+
732
+
733
+ class TestNamedAggregationSeries:
734
+ def test_series_named_agg(self):
735
+ df = Series([1, 2, 3, 4])
736
+ gr = df.groupby([0, 0, 1, 1])
737
+ result = gr.agg(a="sum", b="min")
738
+ expected = DataFrame(
739
+ {"a": [3, 7], "b": [1, 3]}, columns=["a", "b"], index=np.array([0, 1])
740
+ )
741
+ tm.assert_frame_equal(result, expected)
742
+
743
+ result = gr.agg(b="min", a="sum")
744
+ expected = expected[["b", "a"]]
745
+ tm.assert_frame_equal(result, expected)
746
+
747
+ def test_no_args_raises(self):
748
+ gr = Series([1, 2]).groupby([0, 1])
749
+ with pytest.raises(TypeError, match="Must provide"):
750
+ gr.agg()
751
+
752
+ # but we do allow this
753
+ result = gr.agg([])
754
+ expected = DataFrame(columns=[])
755
+ tm.assert_frame_equal(result, expected)
756
+
757
+ def test_series_named_agg_duplicates_no_raises(self):
758
+ # GH28426
759
+ gr = Series([1, 2, 3]).groupby([0, 0, 1])
760
+ grouped = gr.agg(a="sum", b="sum")
761
+ expected = DataFrame({"a": [3, 3], "b": [3, 3]}, index=np.array([0, 1]))
762
+ tm.assert_frame_equal(expected, grouped)
763
+
764
+ def test_mangled(self):
765
+ gr = Series([1, 2, 3]).groupby([0, 0, 1])
766
+ result = gr.agg(a=lambda x: 0, b=lambda x: 1)
767
+ expected = DataFrame({"a": [0, 0], "b": [1, 1]}, index=np.array([0, 1]))
768
+ tm.assert_frame_equal(result, expected)
769
+
770
+ @pytest.mark.parametrize(
771
+ "inp",
772
+ [
773
+ pd.NamedAgg(column="anything", aggfunc="min"),
774
+ ("anything", "min"),
775
+ ["anything", "min"],
776
+ ],
777
+ )
778
+ def test_named_agg_nametuple(self, inp):
779
+ # GH34422
780
+ s = Series([1, 1, 2, 2, 3, 3, 4, 5])
781
+ msg = f"func is expected but received {type(inp).__name__}"
782
+ with pytest.raises(TypeError, match=msg):
783
+ s.groupby(s.values).agg(a=inp)
784
+
785
+
786
+ class TestNamedAggregationDataFrame:
787
+ def test_agg_relabel(self):
788
+ df = DataFrame(
789
+ {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
790
+ )
791
+ result = df.groupby("group").agg(a_max=("A", "max"), b_max=("B", "max"))
792
+ expected = DataFrame(
793
+ {"a_max": [1, 3], "b_max": [6, 8]},
794
+ index=Index(["a", "b"], name="group"),
795
+ columns=["a_max", "b_max"],
796
+ )
797
+ tm.assert_frame_equal(result, expected)
798
+
799
+ # order invariance
800
+ p98 = functools.partial(np.percentile, q=98)
801
+ result = df.groupby("group").agg(
802
+ b_min=("B", "min"),
803
+ a_min=("A", "min"),
804
+ a_mean=("A", "mean"),
805
+ a_max=("A", "max"),
806
+ b_max=("B", "max"),
807
+ a_98=("A", p98),
808
+ )
809
+ expected = DataFrame(
810
+ {
811
+ "b_min": [5, 7],
812
+ "a_min": [0, 2],
813
+ "a_mean": [0.5, 2.5],
814
+ "a_max": [1, 3],
815
+ "b_max": [6, 8],
816
+ "a_98": [0.98, 2.98],
817
+ },
818
+ index=Index(["a", "b"], name="group"),
819
+ columns=["b_min", "a_min", "a_mean", "a_max", "b_max", "a_98"],
820
+ )
821
+ tm.assert_frame_equal(result, expected)
822
+
823
+ def test_agg_relabel_non_identifier(self):
824
+ df = DataFrame(
825
+ {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
826
+ )
827
+
828
+ result = df.groupby("group").agg(**{"my col": ("A", "max")})
829
+ expected = DataFrame({"my col": [1, 3]}, index=Index(["a", "b"], name="group"))
830
+ tm.assert_frame_equal(result, expected)
831
+
832
+ def test_duplicate_no_raises(self):
833
+ # GH 28426: if the same input function is used on the same column,
834
+ # no error should be raised
835
+ df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
836
+
837
+ grouped = df.groupby("A").agg(a=("B", "min"), b=("B", "min"))
838
+ expected = DataFrame({"a": [1, 3], "b": [1, 3]}, index=Index([0, 1], name="A"))
839
+ tm.assert_frame_equal(grouped, expected)
840
+
841
+ quant50 = functools.partial(np.percentile, q=50)
842
+ quant70 = functools.partial(np.percentile, q=70)
843
+ quant50.__name__ = "quant50"
844
+ quant70.__name__ = "quant70"
845
+
846
+ test = DataFrame({"col1": ["a", "a", "b", "b", "b"], "col2": [1, 2, 3, 4, 5]})
847
+
848
+ grouped = test.groupby("col1").agg(
849
+ quantile_50=("col2", quant50), quantile_70=("col2", quant70)
850
+ )
851
+ expected = DataFrame(
852
+ {"quantile_50": [1.5, 4.0], "quantile_70": [1.7, 4.4]},
853
+ index=Index(["a", "b"], name="col1"),
854
+ )
855
+ tm.assert_frame_equal(grouped, expected)
856
+
857
+ def test_agg_relabel_with_level(self):
858
+ df = DataFrame(
859
+ {"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]},
860
+ index=MultiIndex.from_product([["A", "B"], ["a", "b"]]),
861
+ )
862
+ result = df.groupby(level=0).agg(
863
+ aa=("A", "max"), bb=("A", "min"), cc=("B", "mean")
864
+ )
865
+ expected = DataFrame(
866
+ {"aa": [0, 1], "bb": [0, 1], "cc": [1.5, 3.5]}, index=["A", "B"]
867
+ )
868
+ tm.assert_frame_equal(result, expected)
869
+
870
+ def test_agg_relabel_other_raises(self):
871
+ df = DataFrame({"A": [0, 0, 1], "B": [1, 2, 3]})
872
+ grouped = df.groupby("A")
873
+ match = "Must provide"
874
+ with pytest.raises(TypeError, match=match):
875
+ grouped.agg(foo=1)
876
+
877
+ with pytest.raises(TypeError, match=match):
878
+ grouped.agg()
879
+
880
+ with pytest.raises(TypeError, match=match):
881
+ grouped.agg(a=("B", "max"), b=(1, 2, 3))
882
+
883
+ def test_missing_raises(self):
884
+ df = DataFrame({"A": [0, 1], "B": [1, 2]})
885
+ match = re.escape("Column(s) ['C'] do not exist")
886
+ with pytest.raises(KeyError, match=match):
887
+ df.groupby("A").agg(c=("C", "sum"))
888
+
889
+ def test_agg_namedtuple(self):
890
+ df = DataFrame({"A": [0, 1], "B": [1, 2]})
891
+ result = df.groupby("A").agg(
892
+ b=pd.NamedAgg("B", "sum"), c=pd.NamedAgg(column="B", aggfunc="count")
893
+ )
894
+ expected = df.groupby("A").agg(b=("B", "sum"), c=("B", "count"))
895
+ tm.assert_frame_equal(result, expected)
896
+
897
+ def test_mangled(self):
898
+ df = DataFrame({"A": [0, 1], "B": [1, 2], "C": [3, 4]})
899
+ result = df.groupby("A").agg(b=("B", lambda x: 0), c=("C", lambda x: 1))
900
+ expected = DataFrame({"b": [0, 0], "c": [1, 1]}, index=Index([0, 1], name="A"))
901
+ tm.assert_frame_equal(result, expected)
902
+
903
+
904
+ @pytest.mark.parametrize(
905
+ "agg_col1, agg_col2, agg_col3, agg_result1, agg_result2, agg_result3",
906
+ [
907
+ (
908
+ (("y", "A"), "max"),
909
+ (("y", "A"), np.mean),
910
+ (("y", "B"), "mean"),
911
+ [1, 3],
912
+ [0.5, 2.5],
913
+ [5.5, 7.5],
914
+ ),
915
+ (
916
+ (("y", "A"), lambda x: max(x)),
917
+ (("y", "A"), lambda x: 1),
918
+ (("y", "B"), np.mean),
919
+ [1, 3],
920
+ [1, 1],
921
+ [5.5, 7.5],
922
+ ),
923
+ (
924
+ pd.NamedAgg(("y", "A"), "max"),
925
+ pd.NamedAgg(("y", "B"), np.mean),
926
+ pd.NamedAgg(("y", "A"), lambda x: 1),
927
+ [1, 3],
928
+ [5.5, 7.5],
929
+ [1, 1],
930
+ ),
931
+ ],
932
+ )
933
+ def test_agg_relabel_multiindex_column(
934
+ agg_col1, agg_col2, agg_col3, agg_result1, agg_result2, agg_result3
935
+ ):
936
+ # GH 29422, add tests for multiindex column cases
937
+ df = DataFrame(
938
+ {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
939
+ )
940
+ df.columns = MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])
941
+ idx = Index(["a", "b"], name=("x", "group"))
942
+
943
+ result = df.groupby(("x", "group")).agg(a_max=(("y", "A"), "max"))
944
+ expected = DataFrame({"a_max": [1, 3]}, index=idx)
945
+ tm.assert_frame_equal(result, expected)
946
+
947
+ msg = "is currently using SeriesGroupBy.mean"
948
+ with tm.assert_produces_warning(FutureWarning, match=msg):
949
+ result = df.groupby(("x", "group")).agg(
950
+ col_1=agg_col1, col_2=agg_col2, col_3=agg_col3
951
+ )
952
+ expected = DataFrame(
953
+ {"col_1": agg_result1, "col_2": agg_result2, "col_3": agg_result3}, index=idx
954
+ )
955
+ tm.assert_frame_equal(result, expected)
956
+
957
+
958
+ def test_agg_relabel_multiindex_raises_not_exist():
959
+ # GH 29422, add a test for the raises scenario when the aggregate column does not exist
960
+ df = DataFrame(
961
+ {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
962
+ )
963
+ df.columns = MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])
964
+
965
+ with pytest.raises(KeyError, match="do not exist"):
966
+ df.groupby(("x", "group")).agg(a=(("Y", "a"), "max"))
967
+
968
+
969
+ def test_agg_relabel_multiindex_duplicates():
970
+ # GH29422, add a test for the raises scenario when there are duplicates
971
+ # GH28426, after this change, duplicates should also work if the relabelling is
972
+ # different
973
+ df = DataFrame(
974
+ {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
975
+ )
976
+ df.columns = MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])
977
+
978
+ result = df.groupby(("x", "group")).agg(
979
+ a=(("y", "A"), "min"), b=(("y", "A"), "min")
980
+ )
981
+ idx = Index(["a", "b"], name=("x", "group"))
982
+ expected = DataFrame({"a": [0, 2], "b": [0, 2]}, index=idx)
983
+ tm.assert_frame_equal(result, expected)
984
+
985
+
986
+ @pytest.mark.parametrize("kwargs", [{"c": ["min"]}, {"b": [], "c": ["min"]}])
987
+ def test_groupby_aggregate_empty_key(kwargs):
988
+ # GH: 32580
989
+ df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 3], "c": [1, 2, 4]})
990
+ result = df.groupby("a").agg(kwargs)
991
+ expected = DataFrame(
992
+ [1, 4],
993
+ index=Index([1, 2], dtype="int64", name="a"),
994
+ columns=MultiIndex.from_tuples([["c", "min"]]),
995
+ )
996
+ tm.assert_frame_equal(result, expected)
997
+
998
+
999
+ def test_groupby_aggregate_empty_key_empty_return():
1000
+ # GH: 32580 Check that everything works when the return is empty
1001
+ df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 3], "c": [1, 2, 4]})
1002
+ result = df.groupby("a").agg({"b": []})
1003
+ expected = DataFrame(columns=MultiIndex(levels=[["b"], []], codes=[[], []]))
1004
+ tm.assert_frame_equal(result, expected)
1005
+
1006
+
1007
+ def test_groupby_aggregate_empty_with_multiindex_frame():
1008
+ # GH 39178
1009
+ df = DataFrame(columns=["a", "b", "c"])
1010
+ result = df.groupby(["a", "b"], group_keys=False).agg(d=("c", list))
1011
+ expected = DataFrame(
1012
+ columns=["d"], index=MultiIndex([[], []], [[], []], names=["a", "b"])
1013
+ )
1014
+ tm.assert_frame_equal(result, expected)
1015
+
1016
+
1017
+ def test_groupby_agg_loses_results_with_as_index_false_relabel():
1018
+ # GH 32240: When the aggregate function relabels column names and
1019
+ # as_index=False is specified, the results are dropped.
1020
+
1021
+ df = DataFrame(
1022
+ {"key": ["x", "y", "z", "x", "y", "z"], "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75]}
1023
+ )
1024
+
1025
+ grouped = df.groupby("key", as_index=False)
1026
+ result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min"))
1027
+ expected = DataFrame({"key": ["x", "y", "z"], "min_val": [1.0, 0.8, 0.75]})
1028
+ tm.assert_frame_equal(result, expected)
1029
+
1030
+
1031
+ def test_groupby_agg_loses_results_with_as_index_false_relabel_multiindex():
1032
+ # GH 32240: When the aggregate function relabels column names and
1033
+ # as_index=False is specified, the results are dropped. Check if
1034
+ # multiindex is returned in the right order
1035
+
1036
+ df = DataFrame(
1037
+ {
1038
+ "key": ["x", "y", "x", "y", "x", "x"],
1039
+ "key1": ["a", "b", "c", "b", "a", "c"],
1040
+ "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75],
1041
+ }
1042
+ )
1043
+
1044
+ grouped = df.groupby(["key", "key1"], as_index=False)
1045
+ result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min"))
1046
+ expected = DataFrame(
1047
+ {"key": ["x", "x", "y"], "key1": ["a", "c", "b"], "min_val": [1.0, 0.75, 0.8]}
1048
+ )
1049
+ tm.assert_frame_equal(result, expected)
1050
+
1051
+
1052
+ @pytest.mark.parametrize(
1053
+ "func", [lambda s: s.mean(), lambda s: np.mean(s), lambda s: np.nanmean(s)]
1054
+ )
1055
+ def test_multiindex_custom_func(func):
1056
+ # GH 31777
1057
+ data = [[1, 4, 2], [5, 7, 1]]
1058
+ df = DataFrame(
1059
+ data,
1060
+ columns=MultiIndex.from_arrays(
1061
+ [[1, 1, 2], [3, 4, 3]], names=["Sisko", "Janeway"]
1062
+ ),
1063
+ )
1064
+ result = df.groupby(np.array([0, 1])).agg(func)
1065
+ expected_dict = {
1066
+ (1, 3): {0: 1.0, 1: 5.0},
1067
+ (1, 4): {0: 4.0, 1: 7.0},
1068
+ (2, 3): {0: 2.0, 1: 1.0},
1069
+ }
1070
+ expected = DataFrame(expected_dict, index=np.array([0, 1]), columns=df.columns)
1071
+ tm.assert_frame_equal(result, expected)
1072
+
1073
+
1074
+ def myfunc(s):
1075
+ return np.percentile(s, q=0.90)
1076
+
1077
+
1078
+ @pytest.mark.parametrize("func", [lambda s: np.percentile(s, q=0.90), myfunc])
1079
+ def test_lambda_named_agg(func):
1080
+ # see gh-28467
1081
+ animals = DataFrame(
1082
+ {
1083
+ "kind": ["cat", "dog", "cat", "dog"],
1084
+ "height": [9.1, 6.0, 9.5, 34.0],
1085
+ "weight": [7.9, 7.5, 9.9, 198.0],
1086
+ }
1087
+ )
1088
+
1089
+ result = animals.groupby("kind").agg(
1090
+ mean_height=("height", "mean"), perc90=("height", func)
1091
+ )
1092
+ expected = DataFrame(
1093
+ [[9.3, 9.1036], [20.0, 6.252]],
1094
+ columns=["mean_height", "perc90"],
1095
+ index=Index(["cat", "dog"], name="kind"),
1096
+ )
1097
+
1098
+ tm.assert_frame_equal(result, expected)
1099
+
1100
+
1101
+ def test_aggregate_mixed_types():
1102
+ # GH 16916
1103
+ df = DataFrame(
1104
+ data=np.array([0] * 9).reshape(3, 3), columns=list("XYZ"), index=list("abc")
1105
+ )
1106
+ df["grouping"] = ["group 1", "group 1", 2]
1107
+ result = df.groupby("grouping").aggregate(lambda x: x.tolist())
1108
+ expected_data = [[[0], [0], [0]], [[0, 0], [0, 0], [0, 0]]]
1109
+ expected = DataFrame(
1110
+ expected_data,
1111
+ index=Index([2, "group 1"], dtype="object", name="grouping"),
1112
+ columns=Index(["X", "Y", "Z"], dtype="object"),
1113
+ )
1114
+ tm.assert_frame_equal(result, expected)
1115
+
1116
+
1117
+ @pytest.mark.xfail(reason="Not implemented; see GH 31256")
1118
+ def test_aggregate_udf_na_extension_type():
1119
+ # https://github.com/pandas-dev/pandas/pull/31359
1120
+ # This is currently failing to cast back to Int64Dtype.
1121
+ # The presence of the NA causes two problems
1122
+ # 1. NA is not an instance of Int64Dtype.type (numpy.int64)
1123
+ # 2. The presence of an NA forces object type, so the non-NA values is
1124
+ # a Python int rather than a NumPy int64. Python ints aren't
1125
+ # instances of numpy.int64.
1126
+ def aggfunc(x):
1127
+ if all(x > 2):
1128
+ return 1
1129
+ else:
1130
+ return pd.NA
1131
+
1132
+ df = DataFrame({"A": pd.array([1, 2, 3])})
1133
+ result = df.groupby([1, 1, 2]).agg(aggfunc)
1134
+ expected = DataFrame({"A": pd.array([1, pd.NA], dtype="Int64")}, index=[1, 2])
1135
+ tm.assert_frame_equal(result, expected)
1136
+
1137
+
1138
+ class TestLambdaMangling:
1139
+ def test_basic(self):
1140
+ df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
1141
+ result = df.groupby("A").agg({"B": [lambda x: 0, lambda x: 1]})
1142
+
1143
+ expected = DataFrame(
1144
+ {("B", "<lambda_0>"): [0, 0], ("B", "<lambda_1>"): [1, 1]},
1145
+ index=Index([0, 1], name="A"),
1146
+ )
1147
+ tm.assert_frame_equal(result, expected)
1148
+
1149
+ def test_mangle_series_groupby(self):
1150
+ gr = Series([1, 2, 3, 4]).groupby([0, 0, 1, 1])
1151
+ result = gr.agg([lambda x: 0, lambda x: 1])
1152
+ exp_data = {"<lambda_0>": [0, 0], "<lambda_1>": [1, 1]}
1153
+ expected = DataFrame(exp_data, index=np.array([0, 1]))
1154
+ tm.assert_frame_equal(result, expected)
1155
+
1156
+ @pytest.mark.xfail(reason="GH-26611. kwargs for multi-agg.")
1157
+ def test_with_kwargs(self):
1158
+ f1 = lambda x, y, b=1: x.sum() + y + b
1159
+ f2 = lambda x, y, b=2: x.sum() + y * b
1160
+ result = Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0)
1161
+ expected = DataFrame({"<lambda_0>": [4], "<lambda_1>": [6]})
1162
+ tm.assert_frame_equal(result, expected)
1163
+
1164
+ result = Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0, b=10)
1165
+ expected = DataFrame({"<lambda_0>": [13], "<lambda_1>": [30]})
1166
+ tm.assert_frame_equal(result, expected)
1167
+
1168
+ def test_agg_with_one_lambda(self):
1169
+ # GH 25719, write tests for DataFrameGroupby.agg with only one lambda
1170
+ df = DataFrame(
1171
+ {
1172
+ "kind": ["cat", "dog", "cat", "dog"],
1173
+ "height": [9.1, 6.0, 9.5, 34.0],
1174
+ "weight": [7.9, 7.5, 9.9, 198.0],
1175
+ }
1176
+ )
1177
+
1178
+ columns = ["height_sqr_min", "height_max", "weight_max"]
1179
+ expected = DataFrame(
1180
+ {
1181
+ "height_sqr_min": [82.81, 36.00],
1182
+ "height_max": [9.5, 34.0],
1183
+ "weight_max": [9.9, 198.0],
1184
+ },
1185
+ index=Index(["cat", "dog"], name="kind"),
1186
+ columns=columns,
1187
+ )
1188
+
1189
+ # check pd.NamedAgg case
+        result1 = df.groupby(by="kind").agg(
+            height_sqr_min=pd.NamedAgg(
+                column="height", aggfunc=lambda x: np.min(x**2)
+            ),
+            height_max=pd.NamedAgg(column="height", aggfunc="max"),
+            weight_max=pd.NamedAgg(column="weight", aggfunc="max"),
+        )
+        tm.assert_frame_equal(result1, expected)
+
+        # check agg(key=(col, aggfunc)) case
+        result2 = df.groupby(by="kind").agg(
+            height_sqr_min=("height", lambda x: np.min(x**2)),
+            height_max=("height", "max"),
+            weight_max=("weight", "max"),
+        )
+        tm.assert_frame_equal(result2, expected)
+
+    def test_agg_multiple_lambda(self):
+        # GH25719, test for DataFrameGroupby.agg with multiple lambdas
+        # with mixed aggfunc
+        df = DataFrame(
+            {
+                "kind": ["cat", "dog", "cat", "dog"],
+                "height": [9.1, 6.0, 9.5, 34.0],
+                "weight": [7.9, 7.5, 9.9, 198.0],
+            }
+        )
+        columns = [
+            "height_sqr_min",
+            "height_max",
+            "weight_max",
+            "height_max_2",
+            "weight_min",
+        ]
+        expected = DataFrame(
+            {
+                "height_sqr_min": [82.81, 36.00],
+                "height_max": [9.5, 34.0],
+                "weight_max": [9.9, 198.0],
+                "height_max_2": [9.5, 34.0],
+                "weight_min": [7.9, 7.5],
+            },
+            index=Index(["cat", "dog"], name="kind"),
+            columns=columns,
+        )
+
+        # check agg(key=(col, aggfunc)) case
+        result1 = df.groupby(by="kind").agg(
+            height_sqr_min=("height", lambda x: np.min(x**2)),
+            height_max=("height", "max"),
+            weight_max=("weight", "max"),
+            height_max_2=("height", lambda x: np.max(x)),
+            weight_min=("weight", lambda x: np.min(x)),
+        )
+        tm.assert_frame_equal(result1, expected)
+
+        # check pd.NamedAgg case
+        result2 = df.groupby(by="kind").agg(
+            height_sqr_min=pd.NamedAgg(
+                column="height", aggfunc=lambda x: np.min(x**2)
+            ),
+            height_max=pd.NamedAgg(column="height", aggfunc="max"),
+            weight_max=pd.NamedAgg(column="weight", aggfunc="max"),
+            height_max_2=pd.NamedAgg(column="height", aggfunc=lambda x: np.max(x)),
+            weight_min=pd.NamedAgg(column="weight", aggfunc=lambda x: np.min(x)),
+        )
+        tm.assert_frame_equal(result2, expected)
+
+
+def test_groupby_get_by_index():
+    # GH 33439
+    df = DataFrame({"A": ["S", "W", "W"], "B": [1.0, 1.0, 2.0]})
+    res = df.groupby("A").agg({"B": lambda x: x.get(x.index[-1])})
+    expected = DataFrame({"A": ["S", "W"], "B": [1.0, 2.0]}).set_index("A")
+    tm.assert_frame_equal(res, expected)
+
+
+@pytest.mark.parametrize(
+    "grp_col_dict, exp_data",
+    [
+        ({"nr": "min", "cat_ord": "min"}, {"nr": [1, 5], "cat_ord": ["a", "c"]}),
+        ({"cat_ord": "min"}, {"cat_ord": ["a", "c"]}),
+        ({"nr": "min"}, {"nr": [1, 5]}),
+    ],
+)
+def test_groupby_single_agg_cat_cols(grp_col_dict, exp_data):
+    # test single aggregations on ordered categorical cols GH27800
+
+    # create the result dataframe
+    input_df = DataFrame(
+        {
+            "nr": [1, 2, 3, 4, 5, 6, 7, 8],
+            "cat_ord": list("aabbccdd"),
+            "cat": list("aaaabbbb"),
+        }
+    )
+
+    input_df = input_df.astype({"cat": "category", "cat_ord": "category"})
+    input_df["cat_ord"] = input_df["cat_ord"].cat.as_ordered()
+    result_df = input_df.groupby("cat", observed=False).agg(grp_col_dict)
+
+    # create expected dataframe
+    cat_index = pd.CategoricalIndex(
+        ["a", "b"], categories=["a", "b"], ordered=False, name="cat", dtype="category"
+    )
+
+    expected_df = DataFrame(data=exp_data, index=cat_index)
+
+    if "cat_ord" in expected_df:
+        # ordered categorical columns should be preserved
+        dtype = input_df["cat_ord"].dtype
+        expected_df["cat_ord"] = expected_df["cat_ord"].astype(dtype)
+
+    tm.assert_frame_equal(result_df, expected_df)
+
+
+@pytest.mark.parametrize(
+    "grp_col_dict, exp_data",
+    [
+        ({"nr": ["min", "max"], "cat_ord": "min"}, [(1, 4, "a"), (5, 8, "c")]),
+        ({"nr": "min", "cat_ord": ["min", "max"]}, [(1, "a", "b"), (5, "c", "d")]),
+        ({"cat_ord": ["min", "max"]}, [("a", "b"), ("c", "d")]),
+    ],
+)
+def test_groupby_combined_aggs_cat_cols(grp_col_dict, exp_data):
+    # test combined aggregations on ordered categorical cols GH27800
+
+    # create the result dataframe
+    input_df = DataFrame(
+        {
+            "nr": [1, 2, 3, 4, 5, 6, 7, 8],
+            "cat_ord": list("aabbccdd"),
+            "cat": list("aaaabbbb"),
+        }
+    )
+
+    input_df = input_df.astype({"cat": "category", "cat_ord": "category"})
+    input_df["cat_ord"] = input_df["cat_ord"].cat.as_ordered()
+    result_df = input_df.groupby("cat", observed=False).agg(grp_col_dict)
+
+    # create expected dataframe
+    cat_index = pd.CategoricalIndex(
+        ["a", "b"], categories=["a", "b"], ordered=False, name="cat", dtype="category"
+    )
+
+    # unpack the grp_col_dict to create the multi-index tuple
+    # this tuple will be used to create the expected dataframe index
+    multi_index_list = []
+    for k, v in grp_col_dict.items():
+        if isinstance(v, list):
+            multi_index_list.extend([k, value] for value in v)
+        else:
+            multi_index_list.append([k, v])
+    multi_index = MultiIndex.from_tuples(tuple(multi_index_list))
+
+    expected_df = DataFrame(data=exp_data, columns=multi_index, index=cat_index)
+    for col in expected_df.columns:
+        if isinstance(col, tuple) and "cat_ord" in col:
+            # ordered categorical should be preserved
+            expected_df[col] = expected_df[col].astype(input_df["cat_ord"].dtype)
+
+    tm.assert_frame_equal(result_df, expected_df)
+
+
+def test_nonagg_agg():
+    # GH 35490 - Single/Multiple agg of non-agg function give same results
+    # TODO: agg should raise for functions that don't aggregate
+    df = DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 2, 1]})
+    g = df.groupby("a")
+
+    result = g.agg(["cumsum"])
+    result.columns = result.columns.droplevel(-1)
+    expected = g.agg("cumsum")
+
+    tm.assert_frame_equal(result, expected)
+
+
+def test_aggregate_datetime_objects():
+    # https://github.com/pandas-dev/pandas/issues/36003
+    # ensure we don't raise an error but keep object dtype for out-of-bounds
+    # datetimes
+    df = DataFrame(
+        {
+            "A": ["X", "Y"],
+            "B": [
+                datetime.datetime(2005, 1, 1, 10, 30, 23, 540000),
+                datetime.datetime(3005, 1, 1, 10, 30, 23, 540000),
+            ],
+        }
+    )
+    result = df.groupby("A").B.max()
+    expected = df.set_index("A")["B"]
+    tm.assert_series_equal(result, expected)
+
+
+def test_groupby_index_object_dtype():
+    # GH 40014
+    df = DataFrame({"c0": ["x", "x", "x"], "c1": ["x", "x", "y"], "p": [0, 1, 2]})
+    df.index = df.index.astype("O")
+    grouped = df.groupby(["c0", "c1"])
+    res = grouped.p.agg(lambda x: all(x > 0))
+    # Check that providing a user-defined function in agg()
+    # produces the correct index shape when using an object-typed index.
+    expected_index = MultiIndex.from_tuples(
+        [("x", "x"), ("x", "y")], names=("c0", "c1")
+    )
+    expected = Series([False, True], index=expected_index, name="p")
+    tm.assert_series_equal(res, expected)
+
+
+def test_timeseries_groupby_agg():
+    # GH#43290
+
+    def func(ser):
+        if ser.isna().all():
+            return None
+        return np.sum(ser)
+
+    df = DataFrame([1.0], index=[pd.Timestamp("2018-01-16 00:00:00+00:00")])
+    res = df.groupby(lambda x: 1).agg(func)
+
+    expected = DataFrame([[1.0]], index=[1])
+    tm.assert_frame_equal(res, expected)
+
+
+def test_groupby_agg_precision(any_real_numeric_dtype):
+    if any_real_numeric_dtype in tm.ALL_INT_NUMPY_DTYPES:
+        max_value = np.iinfo(any_real_numeric_dtype).max
+    if any_real_numeric_dtype in tm.FLOAT_NUMPY_DTYPES:
+        max_value = np.finfo(any_real_numeric_dtype).max
+    if any_real_numeric_dtype in tm.FLOAT_EA_DTYPES:
+        max_value = np.finfo(any_real_numeric_dtype.lower()).max
+    if any_real_numeric_dtype in tm.ALL_INT_EA_DTYPES:
+        max_value = np.iinfo(any_real_numeric_dtype.lower()).max
+
+    df = DataFrame(
+        {
+            "key1": ["a"],
+            "key2": ["b"],
+            "key3": pd.array([max_value], dtype=any_real_numeric_dtype),
+        }
+    )
+    arrays = [["a"], ["b"]]
+    index = MultiIndex.from_arrays(arrays, names=("key1", "key2"))
+
+    expected = DataFrame(
+        {"key3": pd.array([max_value], dtype=any_real_numeric_dtype)}, index=index
+    )
+    result = df.groupby(["key1", "key2"]).agg(lambda x: x)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_groupby_aggregate_directory(reduction_func):
+    # GH#32793
+    if reduction_func in ["corrwith", "nth"]:
+        return None
+
+    obj = DataFrame([[0, 1], [0, np.nan]])
+
+    result_reduced_series = obj.groupby(0).agg(reduction_func)
+    result_reduced_frame = obj.groupby(0).agg({1: reduction_func})
+
+    if reduction_func in ["size", "ngroup"]:
+        # names are different: None / 1
+        tm.assert_series_equal(
+            result_reduced_series, result_reduced_frame[1], check_names=False
+        )
+    else:
+        tm.assert_frame_equal(result_reduced_series, result_reduced_frame)
+        tm.assert_series_equal(
+            result_reduced_series.dtypes, result_reduced_frame.dtypes
+        )
+
+
+def test_group_mean_timedelta_nat():
+    # GH43132
+    data = Series(["1 day", "3 days", "NaT"], dtype="timedelta64[ns]")
+    expected = Series(["2 days"], dtype="timedelta64[ns]", index=np.array([0]))
+
+    result = data.groupby([0, 0, 0]).mean()
+
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "input_data, expected_output",
+    [
+        (  # no timezone
+            ["2021-01-01T00:00", "NaT", "2021-01-01T02:00"],
+            ["2021-01-01T01:00"],
+        ),
+        (  # timezone
+            ["2021-01-01T00:00-0100", "NaT", "2021-01-01T02:00-0100"],
+            ["2021-01-01T01:00-0100"],
+        ),
+    ],
+)
+def test_group_mean_datetime64_nat(input_data, expected_output):
+    # GH43132
+    data = to_datetime(Series(input_data))
+    expected = to_datetime(Series(expected_output, index=np.array([0])))
+
+    result = data.groupby([0, 0, 0]).mean()
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "func, output", [("mean", [8 + 18j, 10 + 22j]), ("sum", [40 + 90j, 50 + 110j])]
+)
+def test_groupby_complex(func, output):
+    # GH#43701
+    data = Series(np.arange(20).reshape(10, 2).dot([1, 2j]))
+    result = data.groupby(data.index % 2).agg(func)
+    expected = Series(output)
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("func", ["min", "max", "var"])
+def test_groupby_complex_raises(func):
+    # GH#43701
+    data = Series(np.arange(20).reshape(10, 2).dot([1, 2j]))
+    msg = "No matching signature found"
+    with pytest.raises(TypeError, match=msg):
+        data.groupby(data.index % 2).agg(func)
+
+
+@pytest.mark.parametrize(
+    "func", [["min"], ["mean", "max"], {"b": "sum"}, {"b": "prod", "c": "median"}]
+)
+def test_multi_axis_1_raises(func):
+    # GH#46995
+    df = DataFrame({"a": [1, 1, 2], "b": [3, 4, 5], "c": [6, 7, 8]})
+    msg = "DataFrame.groupby with axis=1 is deprecated"
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        gb = df.groupby("a", axis=1)
+    with pytest.raises(NotImplementedError, match="axis other than 0 is not supported"):
+        gb.agg(func)
+
+
+@pytest.mark.parametrize(
+    "test, constant",
+    [
+        ([[20, "A"], [20, "B"], [10, "C"]], {0: [10, 20], 1: ["C", ["A", "B"]]}),
+        ([[20, "A"], [20, "B"], [30, "C"]], {0: [20, 30], 1: [["A", "B"], "C"]}),
+        ([["a", 1], ["a", 1], ["b", 2], ["b", 3]], {0: ["a", "b"], 1: [1, [2, 3]]}),
+        pytest.param(
+            [["a", 1], ["a", 2], ["b", 3], ["b", 3]],
+            {0: ["a", "b"], 1: [[1, 2], 3]},
+            marks=pytest.mark.xfail,
+        ),
+    ],
+)
+def test_agg_of_mode_list(test, constant):
+    # GH#25581
+    df1 = DataFrame(test)
+    result = df1.groupby(0).agg(Series.mode)
+    # Mode usually only returns 1 value, but can return a list in the case of a tie.
+
+    expected = DataFrame(constant)
+    expected = expected.set_index(0)
+
+    tm.assert_frame_equal(result, expected)
+
+
+def test_dataframe_groupy_agg_list_like_func_with_args():
+    # GH#50624
+    df = DataFrame({"x": [1, 2, 3], "y": ["a", "b", "c"]})
+    gb = df.groupby("y")
+
+    def foo1(x, a=1, c=0):
+        return x.sum() + a + c
+
+    def foo2(x, b=2, c=0):
+        return x.sum() + b + c
+
+    msg = r"foo1\(\) got an unexpected keyword argument 'b'"
+    with pytest.raises(TypeError, match=msg):
+        gb.agg([foo1, foo2], 3, b=3, c=4)
+
+    result = gb.agg([foo1, foo2], 3, c=4)
+    expected = DataFrame(
+        [[8, 8], [9, 9], [10, 10]],
+        index=Index(["a", "b", "c"], name="y"),
+        columns=MultiIndex.from_tuples([("x", "foo1"), ("x", "foo2")]),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_series_groupy_agg_list_like_func_with_args():
+    # GH#50624
+    s = Series([1, 2, 3])
+    sgb = s.groupby(s)
+
+    def foo1(x, a=1, c=0):
+        return x.sum() + a + c
+
+    def foo2(x, b=2, c=0):
+        return x.sum() + b + c
+
+    msg = r"foo1\(\) got an unexpected keyword argument 'b'"
+    with pytest.raises(TypeError, match=msg):
+        sgb.agg([foo1, foo2], 3, b=3, c=4)
+
+    result = sgb.agg([foo1, foo2], 3, c=4)
+    expected = DataFrame(
+        [[8, 8], [9, 9], [10, 10]], index=Index([1, 2, 3]), columns=["foo1", "foo2"]
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_agg_groupings_selection():
+    # GH#51186 - a selected grouping should be in the output of agg
+    df = DataFrame({"a": [1, 1, 2], "b": [3, 3, 4], "c": [5, 6, 7]})
+    gb = df.groupby(["a", "b"])
+    selected_gb = gb[["b", "c"]]
+    result = selected_gb.agg(lambda x: x.sum())
+    index = MultiIndex(
+        levels=[[1, 2], [3, 4]], codes=[[0, 1], [0, 1]], names=["a", "b"]
+    )
+    expected = DataFrame({"b": [6, 4], "c": [11, 7]}, index=index)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_agg_multiple_with_as_index_false_subset_to_a_single_column():
+    # GH#50724
+    df = DataFrame({"a": [1, 1, 2], "b": [3, 4, 5]})
+    gb = df.groupby("a", as_index=False)["b"]
+    result = gb.agg(["sum", "mean"])
+    expected = DataFrame({"a": [1, 2], "sum": [7, 5], "mean": [3.5, 5.0]})
+    tm.assert_frame_equal(result, expected)
+
+
+def test_agg_with_as_index_false_with_list():
+    # GH#52849
+    df = DataFrame({"a1": [0, 0, 1], "a2": [2, 3, 3], "b": [4, 5, 6]})
+    gb = df.groupby(by=["a1", "a2"], as_index=False)
+    result = gb.agg(["sum"])
+
+    expected = DataFrame(
+        data=[[0, 2, 4], [0, 3, 5], [1, 3, 6]],
+        columns=MultiIndex.from_tuples([("a1", ""), ("a2", ""), ("b", "sum")]),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_groupby_agg_extension_timedelta_cumsum_with_named_aggregation():
+    # GH#41720
+    expected = DataFrame(
+        {
+            "td": {
+                0: pd.Timedelta("0 days 01:00:00"),
+                1: pd.Timedelta("0 days 01:15:00"),
+                2: pd.Timedelta("0 days 01:15:00"),
+            }
+        }
+    )
+    df = DataFrame(
+        {
+            "td": Series(
+                ["0 days 01:00:00", "0 days 00:15:00", "0 days 01:15:00"],
+                dtype="timedelta64[ns]",
+            ),
+            "grps": ["a", "a", "b"],
+        }
+    )
+    gb = df.groupby("grps")
+    result = gb.agg(td=("td", "cumsum"))
+    tm.assert_frame_equal(result, expected)
+
+
+def test_groupby_aggregation_empty_group():
+    # https://github.com/pandas-dev/pandas/issues/18869
+    def func(x):
+        if len(x) == 0:
+            raise ValueError("length must not be 0")
+        return len(x)
+
+    df = DataFrame(
+        {"A": pd.Categorical(["a", "a"], categories=["a", "b", "c"]), "B": [1, 1]}
+    )
+    msg = "length must not be 0"
+    with pytest.raises(ValueError, match=msg):
+        df.groupby("A", observed=False).agg(func)
vllm/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_cython.py ADDED
@@ -0,0 +1,435 @@
+"""
+test cython .agg behavior
+"""
+
+import numpy as np
+import pytest
+
+from pandas.core.dtypes.common import (
+    is_float_dtype,
+    is_integer_dtype,
+)
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    Index,
+    NaT,
+    Series,
+    Timedelta,
+    Timestamp,
+    bdate_range,
+)
+import pandas._testing as tm
+import pandas.core.common as com
+
+
+@pytest.mark.parametrize(
+    "op_name",
+    [
+        "count",
+        "sum",
+        "std",
+        "var",
+        "sem",
+        "mean",
+        pytest.param(
+            "median",
+            # ignore mean of empty slice
+            # and all-NaN
+            marks=[pytest.mark.filterwarnings("ignore::RuntimeWarning")],
+        ),
+        "prod",
+        "min",
+        "max",
+    ],
+)
+def test_cythonized_aggers(op_name):
+    data = {
+        "A": [0, 0, 0, 0, 1, 1, 1, 1, 1, 1.0, np.nan, np.nan],
+        "B": ["A", "B"] * 6,
+        "C": np.random.default_rng(2).standard_normal(12),
+    }
+    df = DataFrame(data)
+    df.loc[2:10:2, "C"] = np.nan
+
+    op = lambda x: getattr(x, op_name)()
+
+    # single column
+    grouped = df.drop(["B"], axis=1).groupby("A")
+    exp = {cat: op(group["C"]) for cat, group in grouped}
+    exp = DataFrame({"C": exp})
+    exp.index.name = "A"
+    result = op(grouped)
+    tm.assert_frame_equal(result, exp)
+
+    # multiple columns
+    grouped = df.groupby(["A", "B"])
+    expd = {}
+    for (cat1, cat2), group in grouped:
+        expd.setdefault(cat1, {})[cat2] = op(group["C"])
+    exp = DataFrame(expd).T.stack(future_stack=True)
+    exp.index.names = ["A", "B"]
+    exp.name = "C"
+
+    result = op(grouped)["C"]
+    if op_name in ["sum", "prod"]:
+        tm.assert_series_equal(result, exp)
+
+
+def test_cython_agg_boolean():
+    frame = DataFrame(
+        {
+            "a": np.random.default_rng(2).integers(0, 5, 50),
+            "b": np.random.default_rng(2).integers(0, 2, 50).astype("bool"),
+        }
+    )
+    result = frame.groupby("a")["b"].mean()
+    msg = "using SeriesGroupBy.mean"
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        # GH#53425
+        expected = frame.groupby("a")["b"].agg(np.mean)
+
+    tm.assert_series_equal(result, expected)
+
+
+def test_cython_agg_nothing_to_agg():
+    frame = DataFrame(
+        {"a": np.random.default_rng(2).integers(0, 5, 50), "b": ["foo", "bar"] * 25}
+    )
+
+    msg = "Cannot use numeric_only=True with SeriesGroupBy.mean and non-numeric dtypes"
+    with pytest.raises(TypeError, match=msg):
+        frame.groupby("a")["b"].mean(numeric_only=True)
+
+    frame = DataFrame(
+        {"a": np.random.default_rng(2).integers(0, 5, 50), "b": ["foo", "bar"] * 25}
+    )
+
+    result = frame[["b"]].groupby(frame["a"]).mean(numeric_only=True)
+    expected = DataFrame(
+        [], index=frame["a"].sort_values().drop_duplicates(), columns=[]
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_cython_agg_nothing_to_agg_with_dates():
+    frame = DataFrame(
+        {
+            "a": np.random.default_rng(2).integers(0, 5, 50),
+            "b": ["foo", "bar"] * 25,
+            "dates": pd.date_range("now", periods=50, freq="min"),
+        }
+    )
+    msg = "Cannot use numeric_only=True with SeriesGroupBy.mean and non-numeric dtypes"
+    with pytest.raises(TypeError, match=msg):
+        frame.groupby("b").dates.mean(numeric_only=True)
+
+
+def test_cython_agg_frame_columns():
+    # #2113
+    df = DataFrame({"x": [1, 2, 3], "y": [3, 4, 5]})
+
+    msg = "DataFrame.groupby with axis=1 is deprecated"
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        df.groupby(level=0, axis="columns").mean()
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        df.groupby(level=0, axis="columns").mean()
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        df.groupby(level=0, axis="columns").mean()
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        df.groupby(level=0, axis="columns").mean()
+
+
+def test_cython_agg_return_dict():
+    # GH 16741
+    df = DataFrame(
+        {
+            "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
+            "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
+            "C": np.random.default_rng(2).standard_normal(8),
+            "D": np.random.default_rng(2).standard_normal(8),
+        }
+    )
+
+    ts = df.groupby("A")["B"].agg(lambda x: x.value_counts().to_dict())
+    expected = Series(
+        [{"two": 1, "one": 1, "three": 1}, {"two": 2, "one": 2, "three": 1}],
+        index=Index(["bar", "foo"], name="A"),
+        name="B",
+    )
+    tm.assert_series_equal(ts, expected)
+
+
+def test_cython_fail_agg():
+    dr = bdate_range("1/1/2000", periods=50)
+    ts = Series(["A", "B", "C", "D", "E"] * 10, index=dr)
+
+    grouped = ts.groupby(lambda x: x.month)
+    summed = grouped.sum()
+    msg = "using SeriesGroupBy.sum"
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        # GH#53425
+        expected = grouped.agg(np.sum)
+    tm.assert_series_equal(summed, expected)
+
+
+@pytest.mark.parametrize(
+    "op, targop",
+    [
+        ("mean", np.mean),
+        ("median", np.median),
+        ("var", np.var),
+        ("sum", np.sum),
+        ("prod", np.prod),
+        ("min", np.min),
+        ("max", np.max),
+        ("first", lambda x: x.iloc[0]),
+        ("last", lambda x: x.iloc[-1]),
+    ],
+)
+def test__cython_agg_general(op, targop):
+    df = DataFrame(np.random.default_rng(2).standard_normal(1000))
+    labels = np.random.default_rng(2).integers(0, 50, size=1000).astype(float)
+
+    result = df.groupby(labels)._cython_agg_general(op, alt=None, numeric_only=True)
+    warn = FutureWarning if targop in com._cython_table else None
+    msg = f"using DataFrameGroupBy.{op}"
+    with tm.assert_produces_warning(warn, match=msg):
+        # GH#53425
+        expected = df.groupby(labels).agg(targop)
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "op, targop",
+    [
+        ("mean", np.mean),
+        ("median", lambda x: np.median(x) if len(x) > 0 else np.nan),
+        ("var", lambda x: np.var(x, ddof=1)),
+        ("min", np.min),
+        ("max", np.max),
+    ],
+)
+def test_cython_agg_empty_buckets(op, targop, observed):
+    df = DataFrame([11, 12, 13])
+    grps = range(0, 55, 5)
+
+    # calling _cython_agg_general directly, instead of via the user API
+    # which sets different values for min_count, so do that here.
+    g = df.groupby(pd.cut(df[0], grps), observed=observed)
+    result = g._cython_agg_general(op, alt=None, numeric_only=True)
+
+    g = df.groupby(pd.cut(df[0], grps), observed=observed)
+    expected = g.agg(lambda x: targop(x))
+    tm.assert_frame_equal(result, expected)
+
+
+def test_cython_agg_empty_buckets_nanops(observed):
+    # GH-18869 can't call nanops on empty groups, so hardcode expected
+    # for these
+    df = DataFrame([11, 12, 13], columns=["a"])
+    grps = np.arange(0, 25, 5, dtype=int)
+    # add / sum
+    result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general(
+        "sum", alt=None, numeric_only=True
+    )
+    intervals = pd.interval_range(0, 20, freq=5)
+    expected = DataFrame(
+        {"a": [0, 0, 36, 0]},
+        index=pd.CategoricalIndex(intervals, name="a", ordered=True),
+    )
+    if observed:
+        expected = expected[expected.a != 0]
+
+    tm.assert_frame_equal(result, expected)
+
+    # prod
+    result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general(
+        "prod", alt=None, numeric_only=True
+    )
+    expected = DataFrame(
+        {"a": [1, 1, 1716, 1]},
+        index=pd.CategoricalIndex(intervals, name="a", ordered=True),
+    )
+    if observed:
+        expected = expected[expected.a != 1]
+
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("op", ["first", "last", "max", "min"])
+@pytest.mark.parametrize(
+    "data", [Timestamp("2016-10-14 21:00:44.557"), Timedelta("17088 days 21:00:44.557")]
+)
+def test_cython_with_timestamp_and_nat(op, data):
+    # https://github.com/pandas-dev/pandas/issues/19526
+    df = DataFrame({"a": [0, 1], "b": [data, NaT]})
+    index = Index([0, 1], name="a")
+
+    # We will group by a and test the cython aggregations
+    expected = DataFrame({"b": [data, NaT]}, index=index)
+
+    result = df.groupby("a").aggregate(op)
+    tm.assert_frame_equal(expected, result)
+
+
+@pytest.mark.parametrize(
+    "agg",
+    [
+        "min",
+        "max",
+        "count",
+        "sum",
+        "prod",
+        "var",
+        "mean",
+        "median",
+        "ohlc",
+        "cumprod",
+        "cumsum",
+        "shift",
+        "any",
+        "all",
+        "quantile",
+        "first",
+        "last",
+        "rank",
+        "cummin",
+        "cummax",
+    ],
+)
+def test_read_only_buffer_source_agg(agg):
+    # https://github.com/pandas-dev/pandas/issues/36014
+    df = DataFrame(
+        {
+            "sepal_length": [5.1, 4.9, 4.7, 4.6, 5.0],
+            "species": ["setosa", "setosa", "setosa", "setosa", "setosa"],
+        }
+    )
+    df._mgr.arrays[0].flags.writeable = False
+
+    result = df.groupby(["species"]).agg({"sepal_length": agg})
+    expected = df.copy().groupby(["species"]).agg({"sepal_length": agg})
+
+    tm.assert_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "op_name",
+    [
+        "count",
+        "sum",
+        "std",
+        "var",
+        "sem",
+        "mean",
+        "median",
+        "prod",
+        "min",
+        "max",
+    ],
+)
+def test_cython_agg_nullable_int(op_name):
+    # ensure that the cython-based aggregations don't fail for nullable dtype
+    # (eg https://github.com/pandas-dev/pandas/issues/37415)
+    df = DataFrame(
+        {
+            "A": ["A", "B"] * 5,
+            "B": pd.array([1, 2, 3, 4, 5, 6, 7, 8, 9, pd.NA], dtype="Int64"),
+        }
+    )
+    result = getattr(df.groupby("A")["B"], op_name)()
+    df2 = df.assign(B=df["B"].astype("float64"))
+    expected = getattr(df2.groupby("A")["B"], op_name)()
+    if op_name in ("mean", "median"):
+        convert_integer = False
+    else:
+        convert_integer = True
+    expected = expected.convert_dtypes(convert_integer=convert_integer)
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("dtype", ["Int64", "Float64", "boolean"])
+def test_count_masked_returns_masked_dtype(dtype):
+    df = DataFrame(
+        {
+            "A": [1, 1],
+            "B": pd.array([1, pd.NA], dtype=dtype),
+            "C": pd.array([1, 1], dtype=dtype),
+        }
+    )
+    result = df.groupby("A").count()
+    expected = DataFrame(
+        [[1, 2]], index=Index([1], name="A"), columns=["B", "C"], dtype="Int64"
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("with_na", [True, False])
+@pytest.mark.parametrize(
+    "op_name, action",
+    [
+        # ("count", "always_int"),
+        ("sum", "large_int"),
+        # ("std", "always_float"),
+        ("var", "always_float"),
+        # ("sem", "always_float"),
+        ("mean", "always_float"),
+        ("median", "always_float"),
+        ("prod", "large_int"),
+        ("min", "preserve"),
+        ("max", "preserve"),
+        ("first", "preserve"),
+        ("last", "preserve"),
+    ],
+)
+@pytest.mark.parametrize(
+    "data",
+    [
+        pd.array([1, 2, 3, 4], dtype="Int64"),
+        pd.array([1, 2, 3, 4], dtype="Int8"),
+        pd.array([0.1, 0.2, 0.3, 0.4], dtype="Float32"),
+        pd.array([0.1, 0.2, 0.3, 0.4], dtype="Float64"),
+        pd.array([True, True, False, False], dtype="boolean"),
+    ],
+)
+def test_cython_agg_EA_known_dtypes(data, op_name, action, with_na):
+    if with_na:
+        data[3] = pd.NA
+
+    df = DataFrame({"key": ["a", "a", "b", "b"], "col": data})
+    grouped = df.groupby("key")
+
+    if action == "always_int":
+        # always Int64
+        expected_dtype = pd.Int64Dtype()
+    elif action == "large_int":
+        # for any int/bool use Int64, for float preserve dtype
+        if is_float_dtype(data.dtype):
+            expected_dtype = data.dtype
+        elif is_integer_dtype(data.dtype):
+            # match the numpy dtype we'd get with the non-nullable analogue
+            expected_dtype = data.dtype
+        else:
+            expected_dtype = pd.Int64Dtype()
+    elif action == "always_float":
+        # for any int/bool use Float64, for float preserve dtype
+        if is_float_dtype(data.dtype):
+            expected_dtype = data.dtype
+        else:
+            expected_dtype = pd.Float64Dtype()
+    elif action == "preserve":
+        expected_dtype = data.dtype
+
+    result = getattr(grouped, op_name)()
+    assert result["col"].dtype == expected_dtype
+
+    result = grouped.aggregate(op_name)
+    assert result["col"].dtype == expected_dtype
+
+    result = getattr(grouped["col"], op_name)()
+    assert result.dtype == expected_dtype
+
+    result = grouped["col"].aggregate(op_name)
+    assert result.dtype == expected_dtype
vllm/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_numba.py ADDED
@@ -0,0 +1,392 @@
+import numpy as np
+import pytest
+
+from pandas.errors import NumbaUtilError
+
+from pandas import (
+    DataFrame,
+    Index,
+    NamedAgg,
+    Series,
+    option_context,
+)
+import pandas._testing as tm
+
+pytestmark = pytest.mark.single_cpu
+
+
+def test_correct_function_signature():
+    pytest.importorskip("numba")
+
+    def incorrect_function(x):
+        return sum(x) * 2.7
+
+    data = DataFrame(
+        {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]},
+        columns=["key", "data"],
+    )
+    with pytest.raises(NumbaUtilError, match="The first 2"):
+        data.groupby("key").agg(incorrect_function, engine="numba")
+
+    with pytest.raises(NumbaUtilError, match="The first 2"):
+        data.groupby("key")["data"].agg(incorrect_function, engine="numba")
+
+
+def test_check_nopython_kwargs():
+    pytest.importorskip("numba")
+
+    def incorrect_function(values, index):
+        return sum(values) * 2.7
+
+    data = DataFrame(
+        {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]},
+        columns=["key", "data"],
+    )
+    with pytest.raises(NumbaUtilError, match="numba does not support"):
+        data.groupby("key").agg(incorrect_function, engine="numba", a=1)
+
+    with pytest.raises(NumbaUtilError, match="numba does not support"):
+        data.groupby("key")["data"].agg(incorrect_function, engine="numba", a=1)
+
+
+@pytest.mark.filterwarnings("ignore")
+# Filter warnings when parallel=True and the function can't be parallelized by Numba
+@pytest.mark.parametrize("jit", [True, False])
+@pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
+@pytest.mark.parametrize("as_index", [True, False])
+def test_numba_vs_cython(jit, pandas_obj, nogil, parallel, nopython, as_index):
+    pytest.importorskip("numba")
+
+    def func_numba(values, index):
+        return np.mean(values) * 2.7
+
+    if jit:
+        # Test accepted jitted functions
+        import numba
+
+        func_numba = numba.jit(func_numba)
+
+    data = DataFrame(
+        {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]
+    )
+    engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
+    grouped = data.groupby(0, as_index=as_index)
+    if pandas_obj == "Series":
+        grouped = grouped[1]
+
+    result = grouped.agg(func_numba, engine="numba", engine_kwargs=engine_kwargs)
+    expected = grouped.agg(lambda x: np.mean(x) * 2.7, engine="cython")
+
+    tm.assert_equal(result, expected)
+
+
+@pytest.mark.filterwarnings("ignore")
+# Filter warnings when parallel=True and the function can't be parallelized by Numba
+@pytest.mark.parametrize("jit", [True, False])
+@pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
+def test_cache(jit, pandas_obj, nogil, parallel, nopython):
+    # Test that the functions are cached correctly if we switch functions
+    pytest.importorskip("numba")
+
+    def func_1(values, index):
+        return np.mean(values) - 3.4
+
+    def func_2(values, index):
+        return np.mean(values) * 2.7
+
+    if jit:
+        import numba
+
+        func_1 = numba.jit(func_1)
+        func_2 = numba.jit(func_2)
+
+    data = DataFrame(
+        {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]
+    )
+    engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
+    grouped = data.groupby(0)
+    if pandas_obj == "Series":
+        grouped = grouped[1]
+
+    result = grouped.agg(func_1, engine="numba", engine_kwargs=engine_kwargs)
+    expected = grouped.agg(lambda x: np.mean(x) - 3.4, engine="cython")
+    tm.assert_equal(result, expected)
+
+    # Add func_2 to the cache
+    result = grouped.agg(func_2, engine="numba", engine_kwargs=engine_kwargs)
+    expected = grouped.agg(lambda x: np.mean(x) * 2.7, engine="cython")
+    tm.assert_equal(result, expected)
+
+    # Retest func_1 which should use the cache
+    result = grouped.agg(func_1, engine="numba", engine_kwargs=engine_kwargs)
+    expected = grouped.agg(lambda x: np.mean(x) - 3.4, engine="cython")
+    tm.assert_equal(result, expected)
+
+
+def test_use_global_config():
+    pytest.importorskip("numba")
+
+    def func_1(values, index):
+        return np.mean(values) - 3.4
+
+    data = DataFrame(
+        {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]
+    )
+    grouped = data.groupby(0)
+    expected = grouped.agg(func_1, engine="numba")
+    with option_context("compute.use_numba", True):
+        result = grouped.agg(func_1, engine=None)
+    tm.assert_frame_equal(expected, result)
+
+
+@pytest.mark.parametrize(
+    "agg_kwargs",
+    [
+        {"func": ["min", "max"]},
+        {"func": "min"},
+        {"func": {1: ["min", "max"], 2: "sum"}},
+        {"bmin": NamedAgg(column=1, aggfunc="min")},
+    ],
+)
+def test_multifunc_numba_vs_cython_frame(agg_kwargs):
+    pytest.importorskip("numba")
+    data = DataFrame(
+        {
+            0: ["a", "a", "b", "b", "a"],
+            1: [1.0, 2.0, 3.0, 4.0, 5.0],
+            2: [1, 2, 3, 4, 5],
+        },
+        columns=[0, 1, 2],
+    )
+    grouped = data.groupby(0)
+    result = grouped.agg(**agg_kwargs, engine="numba")
+    expected = grouped.agg(**agg_kwargs, engine="cython")
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "agg_kwargs,expected_func",
+    [
+        ({"func": lambda values, index: values.sum()}, "sum"),
+        # FIXME
+        pytest.param(
+            {
+                "func": [
+                    lambda values, index: values.sum(),
+                    lambda values, index: values.min(),
+                ]
+            },
+            ["sum", "min"],
+            marks=pytest.mark.xfail(
+                reason="This doesn't work yet! Fails in nopython pipeline!"
+            ),
+        ),
+    ],
+)
+def test_multifunc_numba_udf_frame(agg_kwargs, expected_func):
+    pytest.importorskip("numba")
+    data = DataFrame(
+        {
+            0: ["a", "a", "b", "b", "a"],
+            1: [1.0, 2.0, 3.0, 4.0, 5.0],
+            2: [1, 2, 3, 4, 5],
+        },
+        columns=[0, 1, 2],
+    )
+    grouped = data.groupby(0)
+    result = grouped.agg(**agg_kwargs, engine="numba")
+    expected = grouped.agg(expected_func, engine="cython")
+    # check_dtype can be removed if GH 44952 is addressed
+    # Currently, UDFs still always return float64 while reductions can preserve dtype
+    tm.assert_frame_equal(result, expected, check_dtype=False)
+
+
+@pytest.mark.parametrize(
+    "agg_kwargs",
+    [{"func": ["min", "max"]}, {"func": "min"}, {"min_val": "min", "max_val": "max"}],
+)
+def test_multifunc_numba_vs_cython_series(agg_kwargs):
+    pytest.importorskip("numba")
+    labels = ["a", "a", "b", "b", "a"]
+    data = Series([1.0, 2.0, 3.0, 4.0, 5.0])
+    grouped = data.groupby(labels)
+    agg_kwargs["engine"] = "numba"
+    result = grouped.agg(**agg_kwargs)
+    agg_kwargs["engine"] = "cython"
+    expected = grouped.agg(**agg_kwargs)
+    if isinstance(expected, DataFrame):
+        tm.assert_frame_equal(result, expected)
+    else:
+        tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.single_cpu
+@pytest.mark.parametrize(
+    "data,agg_kwargs",
+    [
+        (Series([1.0, 2.0, 3.0, 4.0, 5.0]), {"func": ["min", "max"]}),
+        (Series([1.0, 2.0, 3.0, 4.0, 5.0]), {"func": "min"}),
+        (
+            DataFrame(
+                {1: [1.0, 2.0, 3.0, 4.0, 5.0], 2: [1, 2, 3, 4, 5]}, columns=[1, 2]
+            ),
+            {"func": ["min", "max"]},
+        ),
+        (
+            DataFrame(
+                {1: [1.0, 2.0, 3.0, 4.0, 5.0], 2: [1, 2, 3, 4, 5]}, columns=[1, 2]
+            ),
+            {"func": "min"},
+        ),
+        (
+            DataFrame(
+                {1: [1.0, 2.0, 3.0, 4.0, 5.0], 2: [1, 2, 3, 4, 5]}, columns=[1, 2]
+            ),
+            {"func": {1: ["min", "max"], 2: "sum"}},
+        ),
+        (
+            DataFrame(
+                {1: [1.0, 2.0, 3.0, 4.0, 5.0], 2: [1, 2, 3, 4, 5]}, columns=[1, 2]
+            ),
+            {"min_col": NamedAgg(column=1, aggfunc="min")},
+        ),
+    ],
+)
+def test_multifunc_numba_kwarg_propagation(data, agg_kwargs):
+    pytest.importorskip("numba")
+    labels = ["a", "a", "b", "b", "a"]
+    grouped = data.groupby(labels)
+    result = grouped.agg(**agg_kwargs, engine="numba", engine_kwargs={"parallel": True})
+    expected = grouped.agg(**agg_kwargs, engine="numba")
+    if isinstance(expected, DataFrame):
+        tm.assert_frame_equal(result, expected)
+    else:
+        tm.assert_series_equal(result, expected)
+
+
+def test_args_not_cached():
+    # GH 41647
+    pytest.importorskip("numba")
+
+    def sum_last(values, index, n):
+        return values[-n:].sum()
+
+    df = DataFrame({"id": [0, 0, 1, 1], "x": [1, 1, 1, 1]})
+    grouped_x = df.groupby("id")["x"]
+    result = grouped_x.agg(sum_last, 1, engine="numba")
+    expected = Series([1.0] * 2, name="x", index=Index([0, 1], name="id"))
+    tm.assert_series_equal(result, expected)
+
+    result = grouped_x.agg(sum_last, 2, engine="numba")
+    expected = Series([2.0] * 2, name="x", index=Index([0, 1], name="id"))
+    tm.assert_series_equal(result, expected)
+
+
+def test_index_data_correctly_passed():
+    # GH 43133
+    pytest.importorskip("numba")
+
+    def f(values, index):
+        return np.mean(index)
+
+    df = DataFrame({"group": ["A", "A", "B"], "v": [4, 5, 6]}, index=[-1, -2, -3])
+    result = df.groupby("group").aggregate(f, engine="numba")
+    expected = DataFrame(
+        [-1.5, -3.0], columns=["v"], index=Index(["A", "B"], name="group")
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_engine_kwargs_not_cached():
+    # If the user passes a different set of engine_kwargs don't return the same
+    # jitted function
+    pytest.importorskip("numba")
+    nogil = True
+    parallel = False
+    nopython = True
+
+    def func_kwargs(values, index):
+        return nogil + parallel + nopython
+
+    engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
+    df = DataFrame({"value": [0, 0, 0]})
+    result = df.groupby(level=0).aggregate(
+        func_kwargs, engine="numba", engine_kwargs=engine_kwargs
+    )
+    expected = DataFrame({"value": [2.0, 2.0, 2.0]})
+    tm.assert_frame_equal(result, expected)
+
+    nogil = False
+    engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
+    result = df.groupby(level=0).aggregate(
+        func_kwargs, engine="numba", engine_kwargs=engine_kwargs
+    )
+    expected = DataFrame({"value": [1.0, 1.0, 1.0]})
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.filterwarnings("ignore")
+def test_multiindex_one_key(nogil, parallel, nopython):
+    pytest.importorskip("numba")
+
+    def numba_func(values, index):
+        return 1
+
+    df = DataFrame([{"A": 1, "B": 2, "C": 3}]).set_index(["A", "B"])
+    engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
+    result = df.groupby("A").agg(
+        numba_func, engine="numba", engine_kwargs=engine_kwargs
+    )
+    expected = DataFrame([1.0], index=Index([1], name="A"), columns=["C"])
+    tm.assert_frame_equal(result, expected)
+
+
+def test_multiindex_multi_key_not_supported(nogil, parallel, nopython):
+    pytest.importorskip("numba")
+
+    def numba_func(values, index):
+        return 1
+
+    df = DataFrame([{"A": 1, "B": 2, "C": 3}]).set_index(["A", "B"])
+    engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
+    with pytest.raises(NotImplementedError, match="more than 1 grouping labels"):
+        df.groupby(["A", "B"]).agg(
+            numba_func, engine="numba", engine_kwargs=engine_kwargs
+        )
+
+
+def test_multilabel_numba_vs_cython(numba_supported_reductions):
+    pytest.importorskip("numba")
+    reduction, kwargs = numba_supported_reductions
+    df = DataFrame(
+        {
+            "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
+            "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
+            "C": np.random.default_rng(2).standard_normal(8),
+            "D": np.random.default_rng(2).standard_normal(8),
+        }
+    )
+    gb = df.groupby(["A", "B"])
+    res_agg = gb.agg(reduction, engine="numba", **kwargs)
+    expected_agg = gb.agg(reduction, engine="cython", **kwargs)
+    tm.assert_frame_equal(res_agg, expected_agg)
+    # Test that calling the aggregation directly also works
+    direct_res = getattr(gb, reduction)(engine="numba", **kwargs)
+    direct_expected = getattr(gb, reduction)(engine="cython", **kwargs)
+    tm.assert_frame_equal(direct_res, direct_expected)
+
+
+def test_multilabel_udf_numba_vs_cython():
+    pytest.importorskip("numba")
+    df = DataFrame(
+        {
+            "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
+            "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
+            "C": np.random.default_rng(2).standard_normal(8),
+            "D": np.random.default_rng(2).standard_normal(8),
+        }
+    )
+    gb = df.groupby(["A", "B"])
+    result = gb.agg(lambda values, index: values.min(), engine="numba")
+    expected = gb.agg(lambda x: x.min(), engine="cython")
+    tm.assert_frame_equal(result, expected)
vllm/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_other.py ADDED
@@ -0,0 +1,675 @@
1
+ """
2
+ test all other .agg behavior
3
+ """
4
+
5
+ import datetime as dt
6
+ from functools import partial
7
+
8
+ import numpy as np
9
+ import pytest
10
+
11
+ from pandas.errors import SpecificationError
12
+
13
+ import pandas as pd
14
+ from pandas import (
15
+ DataFrame,
16
+ Index,
17
+ MultiIndex,
18
+ PeriodIndex,
19
+ Series,
20
+ date_range,
21
+ period_range,
22
+ )
23
+ import pandas._testing as tm
24
+
25
+ from pandas.io.formats.printing import pprint_thing
26
+
27
+
28
+ def test_agg_partial_failure_raises():
29
+ # GH#43741
30
+
31
+ df = DataFrame(
32
+ {
33
+ "data1": np.random.default_rng(2).standard_normal(5),
34
+ "data2": np.random.default_rng(2).standard_normal(5),
35
+ "key1": ["a", "a", "b", "b", "a"],
36
+ "key2": ["one", "two", "one", "two", "one"],
37
+ }
38
+ )
39
+ grouped = df.groupby("key1")
40
+
41
+ def peak_to_peak(arr):
42
+ return arr.max() - arr.min()
43
+
44
+ with pytest.raises(TypeError, match="unsupported operand type"):
45
+ grouped.agg([peak_to_peak])
46
+
47
+ with pytest.raises(TypeError, match="unsupported operand type"):
48
+ grouped.agg(peak_to_peak)
49
+
50
+
51
+ def test_agg_datetimes_mixed():
52
+ data = [[1, "2012-01-01", 1.0], [2, "2012-01-02", 2.0], [3, None, 3.0]]
53
+
54
+ df1 = DataFrame(
55
+ {
56
+ "key": [x[0] for x in data],
57
+ "date": [x[1] for x in data],
58
+ "value": [x[2] for x in data],
59
+ }
60
+ )
61
+
62
+ data = [
63
+ [
64
+ row[0],
65
+ (dt.datetime.strptime(row[1], "%Y-%m-%d").date() if row[1] else None),
66
+ row[2],
67
+ ]
68
+ for row in data
69
+ ]
70
+
71
+ df2 = DataFrame(
72
+ {
73
+ "key": [x[0] for x in data],
74
+ "date": [x[1] for x in data],
75
+ "value": [x[2] for x in data],
76
+ }
77
+ )
78
+
79
+ df1["weights"] = df1["value"] / df1["value"].sum()
80
+ gb1 = df1.groupby("date").aggregate("sum")
81
+
82
+ df2["weights"] = df1["value"] / df1["value"].sum()
83
+ gb2 = df2.groupby("date").aggregate("sum")
84
+
85
+ assert len(gb1) == len(gb2)
86
+
87
+
88
+ def test_agg_period_index():
89
+ prng = period_range("2012-1-1", freq="M", periods=3)
90
+ df = DataFrame(np.random.default_rng(2).standard_normal((3, 2)), index=prng)
91
+ rs = df.groupby(level=0).sum()
92
+ assert isinstance(rs.index, PeriodIndex)
93
+
94
+ # GH 3579
95
+ index = period_range(start="1999-01", periods=5, freq="M")
96
+ s1 = Series(np.random.default_rng(2).random(len(index)), index=index)
97
+ s2 = Series(np.random.default_rng(2).random(len(index)), index=index)
98
+ df = DataFrame.from_dict({"s1": s1, "s2": s2})
99
+ grouped = df.groupby(df.index.month)
100
+ list(grouped)
101
+
102
+
103
+ def test_agg_dict_parameter_cast_result_dtypes():
104
+ # GH 12821
105
+
106
+ df = DataFrame(
107
+ {
108
+ "class": ["A", "A", "B", "B", "C", "C", "D", "D"],
109
+ "time": date_range("1/1/2011", periods=8, freq="h"),
110
+ }
111
+ )
112
+ df.loc[[0, 1, 2, 5], "time"] = None
113
+
114
+ # test for `first` function
115
+ exp = df.loc[[0, 3, 4, 6]].set_index("class")
116
+ grouped = df.groupby("class")
117
+ tm.assert_frame_equal(grouped.first(), exp)
118
+ tm.assert_frame_equal(grouped.agg("first"), exp)
119
+ tm.assert_frame_equal(grouped.agg({"time": "first"}), exp)
120
+ tm.assert_series_equal(grouped.time.first(), exp["time"])
121
+ tm.assert_series_equal(grouped.time.agg("first"), exp["time"])
122
+
123
+ # test for `last` function
124
+ exp = df.loc[[0, 3, 4, 7]].set_index("class")
125
+ grouped = df.groupby("class")
126
+ tm.assert_frame_equal(grouped.last(), exp)
127
+ tm.assert_frame_equal(grouped.agg("last"), exp)
128
+ tm.assert_frame_equal(grouped.agg({"time": "last"}), exp)
129
+ tm.assert_series_equal(grouped.time.last(), exp["time"])
130
+ tm.assert_series_equal(grouped.time.agg("last"), exp["time"])
131
+
132
+ # count
133
+ exp = Series([2, 2, 2, 2], index=Index(list("ABCD"), name="class"), name="time")
134
+ tm.assert_series_equal(grouped.time.agg(len), exp)
135
+ tm.assert_series_equal(grouped.time.size(), exp)
136
+
137
+ exp = Series([0, 1, 1, 2], index=Index(list("ABCD"), name="class"), name="time")
138
+ tm.assert_series_equal(grouped.time.count(), exp)
139
+
140
+
141
+ def test_agg_cast_results_dtypes():
142
+ # similar to GH12821
143
+ # xref #11444
144
+ u = [dt.datetime(2015, x + 1, 1) for x in range(12)]
145
+ v = list("aaabbbbbbccd")
146
+ df = DataFrame({"X": v, "Y": u})
147
+
148
+ result = df.groupby("X")["Y"].agg(len)
149
+ expected = df.groupby("X")["Y"].count()
150
+ tm.assert_series_equal(result, expected)
151
+
152
+
153
+ def test_aggregate_float64_no_int64():
154
+ # see gh-11199
155
+ df = DataFrame({"a": [1, 2, 3, 4, 5], "b": [1, 2, 2, 4, 5], "c": [1, 2, 3, 4, 5]})
156
+
157
+ expected = DataFrame({"a": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])
158
+ expected.index.name = "b"
159
+
160
+ result = df.groupby("b")[["a"]].mean()
161
+ tm.assert_frame_equal(result, expected)
162
+
163
+ expected = DataFrame({"a": [1, 2.5, 4, 5], "c": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])
164
+ expected.index.name = "b"
165
+
166
+ result = df.groupby("b")[["a", "c"]].mean()
167
+ tm.assert_frame_equal(result, expected)
168
+
169
+
170
+ def test_aggregate_api_consistency():
171
+ # GH 9052
172
+ # make sure that the aggregates via dict
173
+ # are consistent
174
+ df = DataFrame(
175
+ {
176
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
177
+ "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
178
+ "C": np.random.default_rng(2).standard_normal(8) + 1.0,
179
+ "D": np.arange(8),
180
+ }
181
+ )
182
+
183
+ grouped = df.groupby(["A", "B"])
184
+ c_mean = grouped["C"].mean()
185
+ c_sum = grouped["C"].sum()
186
+ d_mean = grouped["D"].mean()
187
+ d_sum = grouped["D"].sum()
188
+
189
+ result = grouped["D"].agg(["sum", "mean"])
190
+ expected = pd.concat([d_sum, d_mean], axis=1)
191
+ expected.columns = ["sum", "mean"]
192
+ tm.assert_frame_equal(result, expected, check_like=True)
193
+
194
+ result = grouped.agg(["sum", "mean"])
195
+ expected = pd.concat([c_sum, c_mean, d_sum, d_mean], axis=1)
196
+ expected.columns = MultiIndex.from_product([["C", "D"], ["sum", "mean"]])
197
+ tm.assert_frame_equal(result, expected, check_like=True)
198
+
199
+ result = grouped[["D", "C"]].agg(["sum", "mean"])
200
+ expected = pd.concat([d_sum, d_mean, c_sum, c_mean], axis=1)
201
+ expected.columns = MultiIndex.from_product([["D", "C"], ["sum", "mean"]])
202
+ tm.assert_frame_equal(result, expected, check_like=True)
203
+
204
+ result = grouped.agg({"C": "mean", "D": "sum"})
205
+ expected = pd.concat([d_sum, c_mean], axis=1)
206
+ tm.assert_frame_equal(result, expected, check_like=True)
207
+
208
+ result = grouped.agg({"C": ["mean", "sum"], "D": ["mean", "sum"]})
209
+ expected = pd.concat([c_mean, c_sum, d_mean, d_sum], axis=1)
210
+ expected.columns = MultiIndex.from_product([["C", "D"], ["mean", "sum"]])
211
+
212
+ msg = r"Column\(s\) \['r', 'r2'\] do not exist"
213
+ with pytest.raises(KeyError, match=msg):
214
+ grouped[["D", "C"]].agg({"r": "sum", "r2": "mean"})
215
+
216
+
217
+ def test_agg_dict_renaming_deprecation():
218
+ # 15931
219
+ df = DataFrame({"A": [1, 1, 1, 2, 2], "B": range(5), "C": range(5)})
220
+
221
+ msg = r"nested renamer is not supported"
222
+ with pytest.raises(SpecificationError, match=msg):
223
+ df.groupby("A").agg(
224
+ {"B": {"foo": ["sum", "max"]}, "C": {"bar": ["count", "min"]}}
225
+ )
226
+
227
+ msg = r"Column\(s\) \['ma'\] do not exist"
228
+ with pytest.raises(KeyError, match=msg):
229
+ df.groupby("A")[["B", "C"]].agg({"ma": "max"})
230
+
231
+ msg = r"nested renamer is not supported"
232
+ with pytest.raises(SpecificationError, match=msg):
233
+ df.groupby("A").B.agg({"foo": "count"})
234
+
235
+
236
+ def test_agg_compat():
237
+ # GH 12334
238
+ df = DataFrame(
239
+ {
240
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
241
+ "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
242
+ "C": np.random.default_rng(2).standard_normal(8) + 1.0,
243
+ "D": np.arange(8),
244
+ }
245
+ )
246
+
247
+ g = df.groupby(["A", "B"])
248
+
249
+ msg = r"nested renamer is not supported"
250
+ with pytest.raises(SpecificationError, match=msg):
251
+ g["D"].agg({"C": ["sum", "std"]})
252
+
253
+ with pytest.raises(SpecificationError, match=msg):
254
+ g["D"].agg({"C": "sum", "D": "std"})
255
+
256
+
257
+ def test_agg_nested_dicts():
258
+ # API change for disallowing these types of nested dicts
259
+ df = DataFrame(
260
+ {
261
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
262
+ "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
263
+ "C": np.random.default_rng(2).standard_normal(8) + 1.0,
264
+ "D": np.arange(8),
265
+ }
266
+ )
267
+
268
+ g = df.groupby(["A", "B"])
269
+
270
+ msg = r"nested renamer is not supported"
271
+ with pytest.raises(SpecificationError, match=msg):
272
+ g.aggregate({"r1": {"C": ["mean", "sum"]}, "r2": {"D": ["mean", "sum"]}})
273
+
274
+ with pytest.raises(SpecificationError, match=msg):
275
+ g.agg({"C": {"ra": ["mean", "std"]}, "D": {"rb": ["mean", "std"]}})
276
+
277
+ # same name as the original column
278
+ # GH9052
279
+ with pytest.raises(SpecificationError, match=msg):
280
+ g["D"].agg({"result1": np.sum, "result2": np.mean})
281
+
282
+ with pytest.raises(SpecificationError, match=msg):
283
+ g["D"].agg({"D": np.sum, "result2": np.mean})
284
+
285
+
286
+ def test_agg_item_by_item_raise_typeerror():
287
+ df = DataFrame(np.random.default_rng(2).integers(10, size=(20, 10)))
288
+
289
+ def raiseException(df):
290
+ pprint_thing("----------------------------------------")
291
+ pprint_thing(df.to_string())
292
+ raise TypeError("test")
293
+
294
+ with pytest.raises(TypeError, match="test"):
295
+ df.groupby(0).agg(raiseException)
296
+
297
+
298
+ def test_series_agg_multikey():
299
+ ts = Series(
300
+ np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
301
+ )
302
+ grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
303
+
304
+ result = grouped.agg("sum")
305
+ expected = grouped.sum()
306
+ tm.assert_series_equal(result, expected)
307
+
308
+
309
+ def test_series_agg_multi_pure_python():
310
+ data = DataFrame(
311
+ {
312
+ "A": [
313
+ "foo",
314
+ "foo",
315
+ "foo",
316
+ "foo",
317
+ "bar",
318
+ "bar",
319
+ "bar",
320
+ "bar",
321
+ "foo",
322
+ "foo",
323
+ "foo",
324
+ ],
325
+ "B": [
326
+ "one",
327
+ "one",
328
+ "one",
329
+ "two",
330
+ "one",
331
+ "one",
332
+ "one",
333
+ "two",
334
+ "two",
335
+ "two",
336
+ "one",
337
+ ],
338
+ "C": [
339
+ "dull",
340
+ "dull",
341
+ "shiny",
342
+ "dull",
343
+ "dull",
344
+ "shiny",
345
+ "shiny",
346
+ "dull",
347
+ "shiny",
348
+ "shiny",
349
+ "shiny",
350
+ ],
351
+ "D": np.random.default_rng(2).standard_normal(11),
352
+ "E": np.random.default_rng(2).standard_normal(11),
353
+ "F": np.random.default_rng(2).standard_normal(11),
354
+ }
355
+ )
356
+
357
+ def bad(x):
358
+ assert len(x.values.base) > 0
359
+ return "foo"
360
+
361
+ result = data.groupby(["A", "B"]).agg(bad)
362
+ expected = data.groupby(["A", "B"]).agg(lambda x: "foo")
363
+ tm.assert_frame_equal(result, expected)
364
+
365
+
366
+ def test_agg_consistency():
367
+ # agg with ([]) and () not consistent
368
+ # GH 6715
369
+ def P1(a):
370
+ return np.percentile(a.dropna(), q=1)
371
+
372
+ df = DataFrame(
373
+ {
374
+ "col1": [1, 2, 3, 4],
375
+ "col2": [10, 25, 26, 31],
376
+ "date": [
377
+ dt.date(2013, 2, 10),
378
+ dt.date(2013, 2, 10),
379
+ dt.date(2013, 2, 11),
380
+ dt.date(2013, 2, 11),
381
+ ],
382
+ }
383
+ )
384
+
385
+ g = df.groupby("date")
386
+
387
+ expected = g.agg([P1])
388
+ expected.columns = expected.columns.levels[0]
389
+
390
+ result = g.agg(P1)
391
+ tm.assert_frame_equal(result, expected)
392
+
393
+
394
+ def test_agg_callables():
+     # GH 7929
+     df = DataFrame({"foo": [1, 2], "bar": [3, 4]}).astype(np.int64)
+
+     class fn_class:
+         def __call__(self, x):
+             return sum(x)
+
+     equiv_callables = [
+         sum,
+         np.sum,
+         lambda x: sum(x),
+         lambda x: x.sum(),
+         partial(sum),
+         fn_class(),
+     ]
+
+     expected = df.groupby("foo").agg("sum")
+     for ecall in equiv_callables:
+         warn = FutureWarning if ecall is sum or ecall is np.sum else None
+         msg = "using DataFrameGroupBy.sum"
+         with tm.assert_produces_warning(warn, match=msg):
+             result = df.groupby("foo").agg(ecall)
+         tm.assert_frame_equal(result, expected)
+
+
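As the FutureWarning in the loop above indicates, passing the builtin `sum` or `np.sum` is internally redirected to the groupby's own sum. Passing the string alias avoids the warning and is the idiomatic spelling; a minimal sketch:

import pandas as pd

df = pd.DataFrame({"foo": [1, 2], "bar": [3, 4]})
# Equivalent results; the string alias is the warning-free spelling.
via_string = df.groupby("foo").agg("sum")
via_method = df.groupby("foo").sum()
assert via_string.equals(via_method)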
+ def test_agg_over_numpy_arrays():
+     # GH 3788
+     df = DataFrame(
+         [
+             [1, np.array([10, 20, 30])],
+             [1, np.array([40, 50, 60])],
+             [2, np.array([20, 30, 40])],
+         ],
+         columns=["category", "arraydata"],
+     )
+     gb = df.groupby("category")
+
+     expected_data = [[np.array([50, 70, 90])], [np.array([20, 30, 40])]]
+     expected_index = Index([1, 2], name="category")
+     expected_column = ["arraydata"]
+     expected = DataFrame(expected_data, index=expected_index, columns=expected_column)
+
+     alt = gb.sum(numeric_only=False)
+     tm.assert_frame_equal(alt, expected)
+
+     result = gb.agg("sum", numeric_only=False)
+     tm.assert_frame_equal(result, expected)
+
+     # FIXME: the original version of this test called `gb.agg(sum)`
+     #  and that raises TypeError if `numeric_only=False` is passed
+
+
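The behavior exercised above: summing an object column whose cells are numpy arrays combines the cells with Python-level `+`, so same-length arrays add elementwise. A sketch mirroring the test with smaller, made-up arrays:

import numpy as np
import pandas as pd

df = pd.DataFrame(
    {"category": [1, 1], "arraydata": [np.array([10, 20]), np.array([40, 50])]}
)
out = df.groupby("category").sum(numeric_only=False)
# out.loc[1, "arraydata"] is the elementwise sum array([50, 70]).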
+ @pytest.mark.parametrize("as_period", [True, False])
+ def test_agg_tzaware_non_datetime_result(as_period):
+     # discussed in GH#29589, fixed in GH#29641, operating on tzaware values
+     #  with a function that is not dtype-preserving
+     dti = date_range("2012-01-01", periods=4, tz="UTC")
+     if as_period:
+         dti = dti.tz_localize(None).to_period("D")
+
+     df = DataFrame({"a": [0, 0, 1, 1], "b": dti})
+     gb = df.groupby("a")
+
+     # Case that _does_ preserve the dtype
+     result = gb["b"].agg(lambda x: x.iloc[0])
+     expected = Series(dti[::2], name="b")
+     expected.index.name = "a"
+     tm.assert_series_equal(result, expected)
+
+     # Cases that do _not_ preserve the dtype
+     result = gb["b"].agg(lambda x: x.iloc[0].year)
+     expected = Series([2012, 2012], name="b")
+     expected.index.name = "a"
+     tm.assert_series_equal(result, expected)
+
+     result = gb["b"].agg(lambda x: x.iloc[-1] - x.iloc[0])
+     expected = Series([pd.Timedelta(days=1), pd.Timedelta(days=1)], name="b")
+     expected.index.name = "a"
+     if as_period:
+         expected = Series([pd.offsets.Day(1), pd.offsets.Day(1)], name="b")
+         expected.index.name = "a"
+     tm.assert_series_equal(result, expected)
+
+
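The pattern under test, stated plainly: when the UDF returns a value of a different type (an int year, a Timedelta), the result column takes that dtype rather than the tz-aware one. A minimal sketch of the three dtypes:

import pandas as pd

dti = pd.date_range("2012-01-01", periods=4, tz="UTC")
df = pd.DataFrame({"a": [0, 0, 1, 1], "b": dti})
gb = df.groupby("a")

first = gb["b"].agg(lambda x: x.iloc[0])                # stays datetime64[ns, UTC]
years = gb["b"].agg(lambda x: x.iloc[0].year)           # plain int64
spans = gb["b"].agg(lambda x: x.iloc[-1] - x.iloc[0])   # timedelta64[ns]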
+ def test_agg_timezone_round_trip():
+     # GH 15426
+     ts = pd.Timestamp("2016-01-01 12:00:00", tz="US/Pacific")
+     df = DataFrame({"a": 1, "b": [ts + dt.timedelta(minutes=nn) for nn in range(10)]})
+
+     result1 = df.groupby("a")["b"].agg("min").iloc[0]
+     result2 = df.groupby("a")["b"].agg(lambda x: np.min(x)).iloc[0]
+     result3 = df.groupby("a")["b"].min().iloc[0]
+
+     assert result1 == ts
+     assert result2 == ts
+     assert result3 == ts
+
+     dates = [
+         pd.Timestamp(f"2016-01-0{i:d} 12:00:00", tz="US/Pacific") for i in range(1, 5)
+     ]
+     df = DataFrame({"A": ["a", "b"] * 2, "B": dates})
+     grouped = df.groupby("A")
+
+     ts = df["B"].iloc[0]
+     assert ts == grouped.nth(0)["B"].iloc[0]
+     assert ts == grouped.head(1)["B"].iloc[0]
+     assert ts == grouped.first()["B"].iloc[0]
+
+     # GH#27110 applying iloc should return a DataFrame
+     msg = "DataFrameGroupBy.apply operated on the grouping columns"
+     with tm.assert_produces_warning(DeprecationWarning, match=msg):
+         assert ts == grouped.apply(lambda x: x.iloc[0]).iloc[0, 1]
+
+     ts = df["B"].iloc[2]
+     assert ts == grouped.last()["B"].iloc[0]
+
+     # GH#27110 applying iloc should return a DataFrame
+     msg = "DataFrameGroupBy.apply operated on the grouping columns"
+     with tm.assert_produces_warning(DeprecationWarning, match=msg):
+         assert ts == grouped.apply(lambda x: x.iloc[-1]).iloc[0, 1]
+
+
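The round trip being asserted: tz-aware Timestamps survive aggregation unchanged, whatever the path taken. A condensed sketch of the same idea with a smaller frame:

import datetime as dt
import pandas as pd

ts = pd.Timestamp("2016-01-01 12:00:00", tz="US/Pacific")
df = pd.DataFrame({"a": 1, "b": [ts + dt.timedelta(minutes=n) for n in range(3)]})
# min and first both hand back the original tz-aware Timestamp unchanged.
assert df.groupby("a")["b"].min().iloc[0] == ts
assert df.groupby("a")["b"].first().iloc[0] == ts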
+ def test_sum_uint64_overflow():
+     # see gh-14758
+     # Convert to uint64 and don't overflow
+     df = DataFrame([[1, 2], [3, 4], [5, 6]], dtype=object)
+     df = df + 9223372036854775807
+
+     index = Index(
+         [9223372036854775808, 9223372036854775810, 9223372036854775812], dtype=np.uint64
+     )
+     expected = DataFrame(
+         {1: [9223372036854775809, 9223372036854775811, 9223372036854775813]},
+         index=index,
+         dtype=object,
+     )
+
+     expected.index.name = 0
+     result = df.groupby(0).sum(numeric_only=False)
+     tm.assert_frame_equal(result, expected)
+
+     # the output column is non-numeric, so with numeric_only=True it is dropped
+     result2 = df.groupby(0).sum(numeric_only=True)
+     expected2 = expected[[]]
+     tm.assert_frame_equal(result2, expected2)
+
+
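The overflow case above works, presumably because the object-dtype values are summed as arbitrary-precision Python ints and only then placed in the result, so values past the int64 maximum stay exact. A sketch of the boundary with the same offset:

import pandas as pd

big = 9223372036854775807  # int64 maximum
df = pd.DataFrame([[1, 2], [3, 4]], dtype=object) + big
result = df.groupby(0).sum(numeric_only=False)
# The group sums exceed the int64 range but are exact under object math.
assert result.iloc[0, 0] == big + 2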
+ @pytest.mark.parametrize(
+     "structure, expected",
+     [
+         (tuple, DataFrame({"C": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}})),
+         (list, DataFrame({"C": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}})),
+         (
+             lambda x: tuple(x),
+             DataFrame({"C": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}}),
+         ),
+         (
+             lambda x: list(x),
+             DataFrame({"C": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}}),
+         ),
+     ],
+ )
+ def test_agg_structs_dataframe(structure, expected):
+     df = DataFrame(
+         {"A": [1, 1, 1, 3, 3, 3], "B": [1, 1, 1, 4, 4, 4], "C": [1, 1, 1, 3, 4, 4]}
+     )
+
+     result = df.groupby(["A", "B"]).aggregate(structure)
+     expected.index.names = ["A", "B"]
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize(
+     "structure, expected",
+     [
+         (tuple, Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name="C")),
+         (list, Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name="C")),
+         (lambda x: tuple(x), Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name="C")),
+         (lambda x: list(x), Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name="C")),
+     ],
+ )
+ def test_agg_structs_series(structure, expected):
+     # Issue #18079
+     df = DataFrame(
+         {"A": [1, 1, 1, 3, 3, 3], "B": [1, 1, 1, 4, 4, 4], "C": [1, 1, 1, 3, 4, 4]}
+     )
+
+     result = df.groupby("A")["C"].aggregate(structure)
+     expected.index.name = "A"
+     tm.assert_series_equal(result, expected)
+
+
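Aggregating each group into a container, as the two parametrized tests above do, is just a matter of handing `agg` the constructor. A minimal sketch with a made-up frame:

import pandas as pd

df = pd.DataFrame({"A": [1, 1, 3], "C": [1, 1, 3]})
as_tuples = df.groupby("A")["C"].agg(tuple)  # group A=1 -> (1, 1), A=3 -> (3,)
as_lists = df.groupby("A")["C"].agg(list)    # group A=1 -> [1, 1], A=3 -> [3]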
+ def test_agg_category_nansum(observed):
+     categories = ["a", "b", "c"]
+     df = DataFrame(
+         {"A": pd.Categorical(["a", "a", "b"], categories=categories), "B": [1, 2, 3]}
+     )
+     msg = "using SeriesGroupBy.sum"
+     with tm.assert_produces_warning(FutureWarning, match=msg):
+         result = df.groupby("A", observed=observed).B.agg(np.nansum)
+     expected = Series(
+         [3, 3, 0],
+         index=pd.CategoricalIndex(["a", "b", "c"], categories=categories, name="A"),
+         name="B",
+     )
+     if observed:
+         expected = expected[expected != 0]
+     tm.assert_series_equal(result, expected)
+
+
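The `observed` fixture used above toggles whether categories that never occur in the data still appear as groups; with `observed=False` the unused category "c" shows up with a zero sum. A short sketch:

import pandas as pd

cats = pd.Categorical(["a", "a", "b"], categories=["a", "b", "c"])
df = pd.DataFrame({"A": cats, "B": [1, 2, 3]})

with_unused = df.groupby("A", observed=False)["B"].sum()  # index a, b, c; c -> 0
only_seen = df.groupby("A", observed=True)["B"].sum()     # index a, b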
+ def test_agg_list_like_func():
+     # GH 18473
+     df = DataFrame({"A": [str(x) for x in range(3)], "B": [str(x) for x in range(3)]})
+     grouped = df.groupby("A", as_index=False, sort=False)
+     result = grouped.agg({"B": lambda x: list(x)})
+     expected = DataFrame(
+         {"A": [str(x) for x in range(3)], "B": [[str(x)] for x in range(3)]}
+     )
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_agg_lambda_with_timezone():
+     # GH 23683
+     df = DataFrame(
+         {
+             "tag": [1, 1],
+             "date": [
+                 pd.Timestamp("2018-01-01", tz="UTC"),
+                 pd.Timestamp("2018-01-02", tz="UTC"),
+             ],
+         }
+     )
+     result = df.groupby("tag").agg({"date": lambda e: e.head(1)})
+     expected = DataFrame(
+         [pd.Timestamp("2018-01-01", tz="UTC")],
+         index=Index([1], name="tag"),
+         columns=["date"],
+     )
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize(
+     "err_cls",
+     [
+         NotImplementedError,
+         RuntimeError,
+         KeyError,
+         IndexError,
+         OSError,
+         ValueError,
+         ArithmeticError,
+         AttributeError,
+     ],
+ )
+ def test_groupby_agg_err_catching(err_cls):
+     # make sure we suppress anything other than TypeError or AssertionError
+     #  in _python_agg_general
+
+     # Use a non-standard EA to make sure we don't go down ndarray paths
+     from pandas.tests.extension.decimal.array import (
+         DecimalArray,
+         make_data,
+         to_decimal,
+     )
+
+     data = make_data()[:5]
+     df = DataFrame(
+         {"id1": [0, 0, 0, 1, 1], "id2": [0, 1, 0, 1, 1], "decimals": DecimalArray(data)}
+     )
+
+     expected = Series(to_decimal([data[0], data[3]]))
+
+     def weird_func(x):
+         # weird function that raises something other than TypeError or
+         #  AssertionError in _python_agg_general
+         if len(x) == 0:
+             raise err_cls
+         return x.iloc[0]
+
+     result = df["decimals"].groupby(df["id1"]).agg(weird_func)
+     tm.assert_series_equal(result, expected, check_names=False)
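Stripped of the extension-array machinery, the aggregation itself is just "first value per group"; the raise-on-empty branch only matters inside pandas' internal fallback. A plain-pandas sketch of the same selection (names `ids` and `first_value` are illustrative):

import pandas as pd

ser = pd.Series([10, 11, 12, 13, 14])
ids = pd.Series([0, 0, 0, 1, 1])

def first_value(x):
    # Like weird_func above, minus the raise on empty input.
    return x.iloc[0]

result = ser.groupby(ids).agg(first_value)
assert result.tolist() == [10, 13]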
vllm/lib/python3.10/site-packages/pandas/tests/groupby/methods/__init__.py ADDED
File without changes
vllm/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (178 Bytes)
vllm/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_corrwith.cpython-310.pyc ADDED
Binary file (980 Bytes)
vllm/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_describe.cpython-310.pyc ADDED
Binary file (8.9 kB)
vllm/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_groupby_shift_diff.cpython-310.pyc ADDED
Binary file (7.63 kB)
vllm/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_is_monotonic.cpython-310.pyc ADDED
Binary file (1.86 kB)
vllm/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_nlargest_nsmallest.cpython-310.pyc ADDED
Binary file (2.91 kB)
vllm/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_nth.cpython-310.pyc ADDED
Binary file (21.8 kB)
vllm/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_quantile.cpython-310.pyc ADDED
Binary file (14.7 kB)