ZTWHHH committed on
Commit
8762e9b
·
verified ·
1 Parent(s): 3c2ef3e

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +7 -0
  2. mgm/lib/python3.10/site-packages/altair/__init__.py +696 -0
  3. mgm/lib/python3.10/site-packages/altair/_magics.py +109 -0
  4. mgm/lib/python3.10/site-packages/altair/jupyter/__init__.py +21 -0
  5. mgm/lib/python3.10/site-packages/altair/jupyter/__pycache__/__init__.cpython-310.pyc +0 -0
  6. mgm/lib/python3.10/site-packages/altair/jupyter/__pycache__/jupyter_chart.cpython-310.pyc +0 -0
  7. mgm/lib/python3.10/site-packages/altair/jupyter/js/README.md +2 -0
  8. mgm/lib/python3.10/site-packages/altair/jupyter/js/index.js +230 -0
  9. mgm/lib/python3.10/site-packages/altair/jupyter/jupyter_chart.py +404 -0
  10. mgm/lib/python3.10/site-packages/altair/py.typed +0 -0
  11. mgm/lib/python3.10/site-packages/altair/theme.py +321 -0
  12. mgm/lib/python3.10/site-packages/altair/typing/__init__.py +96 -0
  13. mgm/lib/python3.10/site-packages/altair/typing/__pycache__/__init__.cpython-310.pyc +0 -0
  14. mgm/lib/python3.10/site-packages/altair/utils/execeval.py +98 -0
  15. mgm/lib/python3.10/site-packages/altair/utils/server.py +151 -0
  16. mgm/lib/python3.10/site-packages/nvidia/__pycache__/__init__.cpython-310.pyc +0 -0
  17. mgm/lib/python3.10/site-packages/nvidia/cublas/include/__pycache__/__init__.cpython-310.pyc +0 -0
  18. mgm/lib/python3.10/site-packages/nvidia/cublas/include/cublas.h +891 -0
  19. mgm/lib/python3.10/site-packages/nvidia/cublas/include/cublasLt.h +1626 -0
  20. mgm/lib/python3.10/site-packages/nvidia/cublas/include/cublasXt.h +693 -0
  21. mgm/lib/python3.10/site-packages/nvidia/cublas/include/cublas_api.h +0 -0
  22. mgm/lib/python3.10/site-packages/nvidia/cublas/include/cublas_v2.h +273 -0
  23. mgm/lib/python3.10/site-packages/nvidia/cublas/include/nvblas.h +824 -0
  24. mgm/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.11 +3 -0
  25. mgm/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.12 +3 -0
  26. mgm/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so.11.2 +3 -0
  27. mgm/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn.so.9 +3 -0
  28. mgm/lib/python3.10/site-packages/nvidia/cufft/__init__.py +0 -0
  29. mgm/lib/python3.10/site-packages/nvidia/cufft/__pycache__/__init__.cpython-310.pyc +0 -0
  30. mgm/lib/python3.10/site-packages/nvidia/cufft/include/__init__.py +0 -0
  31. mgm/lib/python3.10/site-packages/nvidia/cufft/include/__pycache__/__init__.cpython-310.pyc +0 -0
  32. mgm/lib/python3.10/site-packages/nvidia/cufft/include/cudalibxt.h +97 -0
  33. mgm/lib/python3.10/site-packages/nvidia/cufft/include/cufft.h +322 -0
  34. mgm/lib/python3.10/site-packages/nvidia/cufft/include/cufftXt.h +269 -0
  35. mgm/lib/python3.10/site-packages/nvidia/cufft/include/cufftw.h +454 -0
  36. mgm/lib/python3.10/site-packages/nvidia/cufft/lib/__init__.py +0 -0
  37. mgm/lib/python3.10/site-packages/nvidia/cufft/lib/__pycache__/__init__.cpython-310.pyc +0 -0
  38. mgm/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.10 +3 -0
  39. mgm/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11 +3 -0
  40. mgm/lib/python3.10/site-packages/nvidia/curand/__init__.py +0 -0
  41. mgm/lib/python3.10/site-packages/nvidia/curand/__pycache__/__init__.cpython-310.pyc +0 -0
  42. mgm/lib/python3.10/site-packages/nvidia/curand/include/__init__.py +0 -0
  43. mgm/lib/python3.10/site-packages/nvidia/curand/include/__pycache__/__init__.cpython-310.pyc +0 -0
  44. mgm/lib/python3.10/site-packages/nvidia/curand/include/curand.h +1077 -0
  45. mgm/lib/python3.10/site-packages/nvidia/curand/include/curand_discrete.h +87 -0
  46. mgm/lib/python3.10/site-packages/nvidia/curand/include/curand_discrete2.h +253 -0
  47. mgm/lib/python3.10/site-packages/nvidia/curand/include/curand_globals.h +93 -0
  48. mgm/lib/python3.10/site-packages/nvidia/curand/include/curand_kernel.h +1665 -0
  49. mgm/lib/python3.10/site-packages/nvidia/curand/include/curand_lognormal.h +697 -0
  50. mgm/lib/python3.10/site-packages/nvidia/curand/include/curand_mrg32k3a.h +0 -0
.gitattributes CHANGED
@@ -1089,3 +1089,10 @@ mgm/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/perm_groups.cpy
1089
  videollama2/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so filter=lfs diff=lfs merge=lfs -text
1090
  mgm/lib/python3.10/site-packages/sklearn/metrics/cluster/_expected_mutual_info_fast.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1091
  mgm/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/otData.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
1089
  videollama2/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so filter=lfs diff=lfs merge=lfs -text
1090
  mgm/lib/python3.10/site-packages/sklearn/metrics/cluster/_expected_mutual_info_fast.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1091
  mgm/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/otData.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1092
+ mgm/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.11 filter=lfs diff=lfs merge=lfs -text
1093
+ mgm/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.12 filter=lfs diff=lfs merge=lfs -text
1094
+ mgm/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11 filter=lfs diff=lfs merge=lfs -text
1095
+ mgm/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.10 filter=lfs diff=lfs merge=lfs -text
1096
+ mgm/lib/python3.10/site-packages/nvidia/cusolver/lib/libcusolverMg.so.11 filter=lfs diff=lfs merge=lfs -text
1097
+ mgm/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so.11.2 filter=lfs diff=lfs merge=lfs -text
1098
+ mgm/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn.so.9 filter=lfs diff=lfs merge=lfs -text
mgm/lib/python3.10/site-packages/altair/__init__.py ADDED
@@ -0,0 +1,696 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ruff: noqa
2
+ __version__ = "5.5.0"
3
+
4
+ # The content of __all__ is automatically written by
5
+ # tools/update_init_file.py. Do not modify directly.
6
+ __all__ = [
7
+ "Aggregate",
8
+ "AggregateOp",
9
+ "AggregateTransform",
10
+ "AggregatedFieldDef",
11
+ "Align",
12
+ "AllSortString",
13
+ "AltairDeprecationWarning",
14
+ "Angle",
15
+ "AngleDatum",
16
+ "AngleValue",
17
+ "AnyMark",
18
+ "AnyMarkConfig",
19
+ "AreaConfig",
20
+ "ArgmaxDef",
21
+ "ArgminDef",
22
+ "AutoSizeParams",
23
+ "AutosizeType",
24
+ "Axis",
25
+ "AxisConfig",
26
+ "AxisOrient",
27
+ "AxisResolveMap",
28
+ "BBox",
29
+ "BarConfig",
30
+ "BaseTitleNoValueRefs",
31
+ "Baseline",
32
+ "Bin",
33
+ "BinExtent",
34
+ "BinParams",
35
+ "BinTransform",
36
+ "BindCheckbox",
37
+ "BindDirect",
38
+ "BindInput",
39
+ "BindRadioSelect",
40
+ "BindRange",
41
+ "Binding",
42
+ "BinnedTimeUnit",
43
+ "Blend",
44
+ "BoxPlot",
45
+ "BoxPlotConfig",
46
+ "BoxPlotDef",
47
+ "BrushConfig",
48
+ "CalculateTransform",
49
+ "Categorical",
50
+ "ChainedWhen",
51
+ "Chart",
52
+ "ChartDataType",
53
+ "Color",
54
+ "ColorDatum",
55
+ "ColorDef",
56
+ "ColorName",
57
+ "ColorScheme",
58
+ "ColorValue",
59
+ "Column",
60
+ "CompositeMark",
61
+ "CompositeMarkDef",
62
+ "CompositionConfig",
63
+ "ConcatChart",
64
+ "ConcatSpecGenericSpec",
65
+ "ConditionalAxisColor",
66
+ "ConditionalAxisLabelAlign",
67
+ "ConditionalAxisLabelBaseline",
68
+ "ConditionalAxisLabelFontStyle",
69
+ "ConditionalAxisLabelFontWeight",
70
+ "ConditionalAxisNumber",
71
+ "ConditionalAxisNumberArray",
72
+ "ConditionalAxisPropertyAlignnull",
73
+ "ConditionalAxisPropertyColornull",
74
+ "ConditionalAxisPropertyFontStylenull",
75
+ "ConditionalAxisPropertyFontWeightnull",
76
+ "ConditionalAxisPropertyTextBaselinenull",
77
+ "ConditionalAxisPropertynumberArraynull",
78
+ "ConditionalAxisPropertynumbernull",
79
+ "ConditionalAxisPropertystringnull",
80
+ "ConditionalAxisString",
81
+ "ConditionalMarkPropFieldOrDatumDef",
82
+ "ConditionalMarkPropFieldOrDatumDefTypeForShape",
83
+ "ConditionalParameterMarkPropFieldOrDatumDef",
84
+ "ConditionalParameterMarkPropFieldOrDatumDefTypeForShape",
85
+ "ConditionalParameterStringFieldDef",
86
+ "ConditionalParameterValueDefGradientstringnullExprRef",
87
+ "ConditionalParameterValueDefTextExprRef",
88
+ "ConditionalParameterValueDefnumber",
89
+ "ConditionalParameterValueDefnumberArrayExprRef",
90
+ "ConditionalParameterValueDefnumberExprRef",
91
+ "ConditionalParameterValueDefstringExprRef",
92
+ "ConditionalParameterValueDefstringnullExprRef",
93
+ "ConditionalPredicateMarkPropFieldOrDatumDef",
94
+ "ConditionalPredicateMarkPropFieldOrDatumDefTypeForShape",
95
+ "ConditionalPredicateStringFieldDef",
96
+ "ConditionalPredicateValueDefAlignnullExprRef",
97
+ "ConditionalPredicateValueDefColornullExprRef",
98
+ "ConditionalPredicateValueDefFontStylenullExprRef",
99
+ "ConditionalPredicateValueDefFontWeightnullExprRef",
100
+ "ConditionalPredicateValueDefGradientstringnullExprRef",
101
+ "ConditionalPredicateValueDefTextBaselinenullExprRef",
102
+ "ConditionalPredicateValueDefTextExprRef",
103
+ "ConditionalPredicateValueDefnumber",
104
+ "ConditionalPredicateValueDefnumberArrayExprRef",
105
+ "ConditionalPredicateValueDefnumberArraynullExprRef",
106
+ "ConditionalPredicateValueDefnumberExprRef",
107
+ "ConditionalPredicateValueDefnumbernullExprRef",
108
+ "ConditionalPredicateValueDefstringExprRef",
109
+ "ConditionalPredicateValueDefstringnullExprRef",
110
+ "ConditionalStringFieldDef",
111
+ "ConditionalValueDefGradientstringnullExprRef",
112
+ "ConditionalValueDefTextExprRef",
113
+ "ConditionalValueDefnumber",
114
+ "ConditionalValueDefnumberArrayExprRef",
115
+ "ConditionalValueDefnumberExprRef",
116
+ "ConditionalValueDefstringExprRef",
117
+ "ConditionalValueDefstringnullExprRef",
118
+ "Config",
119
+ "CsvDataFormat",
120
+ "Cursor",
121
+ "Cyclical",
122
+ "Data",
123
+ "DataFormat",
124
+ "DataSource",
125
+ "DataType",
126
+ "Datasets",
127
+ "DateTime",
128
+ "DatumChannelMixin",
129
+ "DatumDef",
130
+ "Day",
131
+ "DensityTransform",
132
+ "DerivedStream",
133
+ "Description",
134
+ "DescriptionValue",
135
+ "Detail",
136
+ "Dict",
137
+ "DictInlineDataset",
138
+ "DictSelectionInit",
139
+ "DictSelectionInitInterval",
140
+ "Diverging",
141
+ "DomainUnionWith",
142
+ "DsvDataFormat",
143
+ "Element",
144
+ "Encoding",
145
+ "EncodingSortField",
146
+ "ErrorBand",
147
+ "ErrorBandConfig",
148
+ "ErrorBandDef",
149
+ "ErrorBar",
150
+ "ErrorBarConfig",
151
+ "ErrorBarDef",
152
+ "ErrorBarExtent",
153
+ "EventStream",
154
+ "EventType",
155
+ "Expr",
156
+ "ExprRef",
157
+ "ExtentTransform",
158
+ "Facet",
159
+ "FacetChart",
160
+ "FacetEncodingFieldDef",
161
+ "FacetFieldDef",
162
+ "FacetMapping",
163
+ "FacetSpec",
164
+ "FacetedEncoding",
165
+ "FacetedUnitSpec",
166
+ "Feature",
167
+ "FeatureCollection",
168
+ "FeatureGeometryGeoJsonProperties",
169
+ "Field",
170
+ "FieldChannelMixin",
171
+ "FieldDefWithoutScale",
172
+ "FieldEqualPredicate",
173
+ "FieldGTEPredicate",
174
+ "FieldGTPredicate",
175
+ "FieldLTEPredicate",
176
+ "FieldLTPredicate",
177
+ "FieldName",
178
+ "FieldOneOfPredicate",
179
+ "FieldOrDatumDefWithConditionDatumDefGradientstringnull",
180
+ "FieldOrDatumDefWithConditionDatumDefnumber",
181
+ "FieldOrDatumDefWithConditionDatumDefnumberArray",
182
+ "FieldOrDatumDefWithConditionDatumDefstringnull",
183
+ "FieldOrDatumDefWithConditionMarkPropFieldDefGradientstringnull",
184
+ "FieldOrDatumDefWithConditionMarkPropFieldDefTypeForShapestringnull",
185
+ "FieldOrDatumDefWithConditionMarkPropFieldDefnumber",
186
+ "FieldOrDatumDefWithConditionMarkPropFieldDefnumberArray",
187
+ "FieldOrDatumDefWithConditionStringDatumDefText",
188
+ "FieldOrDatumDefWithConditionStringFieldDefText",
189
+ "FieldOrDatumDefWithConditionStringFieldDefstring",
190
+ "FieldRange",
191
+ "FieldRangePredicate",
192
+ "FieldValidPredicate",
193
+ "Fill",
194
+ "FillDatum",
195
+ "FillOpacity",
196
+ "FillOpacityDatum",
197
+ "FillOpacityValue",
198
+ "FillValue",
199
+ "FilterTransform",
200
+ "Fit",
201
+ "FlattenTransform",
202
+ "FoldTransform",
203
+ "FontStyle",
204
+ "FontWeight",
205
+ "FormatConfig",
206
+ "Generator",
207
+ "GenericUnitSpecEncodingAnyMark",
208
+ "GeoJsonFeature",
209
+ "GeoJsonFeatureCollection",
210
+ "GeoJsonProperties",
211
+ "Geometry",
212
+ "GeometryCollection",
213
+ "Gradient",
214
+ "GradientStop",
215
+ "GraticuleGenerator",
216
+ "GraticuleParams",
217
+ "HConcatChart",
218
+ "HConcatSpecGenericSpec",
219
+ "Header",
220
+ "HeaderConfig",
221
+ "HexColor",
222
+ "Href",
223
+ "HrefValue",
224
+ "Impute",
225
+ "ImputeMethod",
226
+ "ImputeParams",
227
+ "ImputeSequence",
228
+ "ImputeTransform",
229
+ "InlineData",
230
+ "InlineDataset",
231
+ "Interpolate",
232
+ "IntervalSelectionConfig",
233
+ "IntervalSelectionConfigWithoutType",
234
+ "JoinAggregateFieldDef",
235
+ "JoinAggregateTransform",
236
+ "JsonDataFormat",
237
+ "JupyterChart",
238
+ "Key",
239
+ "LabelOverlap",
240
+ "LatLongDef",
241
+ "LatLongFieldDef",
242
+ "Latitude",
243
+ "Latitude2",
244
+ "Latitude2Datum",
245
+ "Latitude2Value",
246
+ "LatitudeDatum",
247
+ "LayerChart",
248
+ "LayerRepeatMapping",
249
+ "LayerRepeatSpec",
250
+ "LayerSpec",
251
+ "LayoutAlign",
252
+ "Legend",
253
+ "LegendBinding",
254
+ "LegendConfig",
255
+ "LegendOrient",
256
+ "LegendResolveMap",
257
+ "LegendStreamBinding",
258
+ "LineConfig",
259
+ "LineString",
260
+ "LinearGradient",
261
+ "LocalMultiTimeUnit",
262
+ "LocalSingleTimeUnit",
263
+ "Locale",
264
+ "LoessTransform",
265
+ "LogicalAndPredicate",
266
+ "LogicalNotPredicate",
267
+ "LogicalOrPredicate",
268
+ "Longitude",
269
+ "Longitude2",
270
+ "Longitude2Datum",
271
+ "Longitude2Value",
272
+ "LongitudeDatum",
273
+ "LookupData",
274
+ "LookupSelection",
275
+ "LookupTransform",
276
+ "Mark",
277
+ "MarkConfig",
278
+ "MarkDef",
279
+ "MarkInvalidDataMode",
280
+ "MarkPropDefGradientstringnull",
281
+ "MarkPropDefnumber",
282
+ "MarkPropDefnumberArray",
283
+ "MarkPropDefstringnullTypeForShape",
284
+ "MarkType",
285
+ "MaxRowsError",
286
+ "MergedStream",
287
+ "Month",
288
+ "MultiLineString",
289
+ "MultiPoint",
290
+ "MultiPolygon",
291
+ "MultiTimeUnit",
292
+ "NamedData",
293
+ "NonArgAggregateOp",
294
+ "NonLayerRepeatSpec",
295
+ "NonNormalizedSpec",
296
+ "NumberLocale",
297
+ "NumericArrayMarkPropDef",
298
+ "NumericMarkPropDef",
299
+ "OffsetDef",
300
+ "Opacity",
301
+ "OpacityDatum",
302
+ "OpacityValue",
303
+ "Order",
304
+ "OrderFieldDef",
305
+ "OrderOnlyDef",
306
+ "OrderValue",
307
+ "OrderValueDef",
308
+ "Orient",
309
+ "Orientation",
310
+ "OverlayMarkDef",
311
+ "Padding",
312
+ "Parameter",
313
+ "ParameterExpression",
314
+ "ParameterExtent",
315
+ "ParameterName",
316
+ "ParameterPredicate",
317
+ "Parse",
318
+ "ParseValue",
319
+ "PivotTransform",
320
+ "Point",
321
+ "PointSelectionConfig",
322
+ "PointSelectionConfigWithoutType",
323
+ "PolarDef",
324
+ "Polygon",
325
+ "Position",
326
+ "Position2Def",
327
+ "PositionDatumDef",
328
+ "PositionDatumDefBase",
329
+ "PositionDef",
330
+ "PositionFieldDef",
331
+ "PositionFieldDefBase",
332
+ "PositionValueDef",
333
+ "Predicate",
334
+ "PredicateComposition",
335
+ "PrimitiveValue",
336
+ "Projection",
337
+ "ProjectionConfig",
338
+ "ProjectionType",
339
+ "QuantileTransform",
340
+ "RadialGradient",
341
+ "Radius",
342
+ "Radius2",
343
+ "Radius2Datum",
344
+ "Radius2Value",
345
+ "RadiusDatum",
346
+ "RadiusValue",
347
+ "RangeConfig",
348
+ "RangeEnum",
349
+ "RangeRaw",
350
+ "RangeRawArray",
351
+ "RangeScheme",
352
+ "RectConfig",
353
+ "RegressionTransform",
354
+ "RelativeBandSize",
355
+ "RepeatChart",
356
+ "RepeatMapping",
357
+ "RepeatRef",
358
+ "RepeatSpec",
359
+ "Resolve",
360
+ "ResolveMode",
361
+ "Root",
362
+ "Row",
363
+ "RowColLayoutAlign",
364
+ "RowColboolean",
365
+ "RowColnumber",
366
+ "RowColumnEncodingFieldDef",
367
+ "SCHEMA_URL",
368
+ "SCHEMA_VERSION",
369
+ "SampleTransform",
370
+ "Scale",
371
+ "ScaleBinParams",
372
+ "ScaleBins",
373
+ "ScaleConfig",
374
+ "ScaleDatumDef",
375
+ "ScaleFieldDef",
376
+ "ScaleInterpolateEnum",
377
+ "ScaleInterpolateParams",
378
+ "ScaleInvalidDataConfig",
379
+ "ScaleInvalidDataShowAsValueangle",
380
+ "ScaleInvalidDataShowAsValuecolor",
381
+ "ScaleInvalidDataShowAsValuefill",
382
+ "ScaleInvalidDataShowAsValuefillOpacity",
383
+ "ScaleInvalidDataShowAsValueopacity",
384
+ "ScaleInvalidDataShowAsValueradius",
385
+ "ScaleInvalidDataShowAsValueshape",
386
+ "ScaleInvalidDataShowAsValuesize",
387
+ "ScaleInvalidDataShowAsValuestroke",
388
+ "ScaleInvalidDataShowAsValuestrokeDash",
389
+ "ScaleInvalidDataShowAsValuestrokeOpacity",
390
+ "ScaleInvalidDataShowAsValuestrokeWidth",
391
+ "ScaleInvalidDataShowAsValuetheta",
392
+ "ScaleInvalidDataShowAsValuex",
393
+ "ScaleInvalidDataShowAsValuexOffset",
394
+ "ScaleInvalidDataShowAsValuey",
395
+ "ScaleInvalidDataShowAsValueyOffset",
396
+ "ScaleInvalidDataShowAsangle",
397
+ "ScaleInvalidDataShowAscolor",
398
+ "ScaleInvalidDataShowAsfill",
399
+ "ScaleInvalidDataShowAsfillOpacity",
400
+ "ScaleInvalidDataShowAsopacity",
401
+ "ScaleInvalidDataShowAsradius",
402
+ "ScaleInvalidDataShowAsshape",
403
+ "ScaleInvalidDataShowAssize",
404
+ "ScaleInvalidDataShowAsstroke",
405
+ "ScaleInvalidDataShowAsstrokeDash",
406
+ "ScaleInvalidDataShowAsstrokeOpacity",
407
+ "ScaleInvalidDataShowAsstrokeWidth",
408
+ "ScaleInvalidDataShowAstheta",
409
+ "ScaleInvalidDataShowAsx",
410
+ "ScaleInvalidDataShowAsxOffset",
411
+ "ScaleInvalidDataShowAsy",
412
+ "ScaleInvalidDataShowAsyOffset",
413
+ "ScaleResolveMap",
414
+ "ScaleType",
415
+ "SchemaBase",
416
+ "SchemeParams",
417
+ "SecondaryFieldDef",
418
+ "SelectionConfig",
419
+ "SelectionExpression",
420
+ "SelectionInit",
421
+ "SelectionInitInterval",
422
+ "SelectionInitIntervalMapping",
423
+ "SelectionInitMapping",
424
+ "SelectionParameter",
425
+ "SelectionPredicateComposition",
426
+ "SelectionResolution",
427
+ "SelectionType",
428
+ "SequenceGenerator",
429
+ "SequenceParams",
430
+ "SequentialMultiHue",
431
+ "SequentialSingleHue",
432
+ "Shape",
433
+ "ShapeDatum",
434
+ "ShapeDef",
435
+ "ShapeValue",
436
+ "SharedEncoding",
437
+ "SingleDefUnitChannel",
438
+ "SingleTimeUnit",
439
+ "Size",
440
+ "SizeDatum",
441
+ "SizeValue",
442
+ "Sort",
443
+ "SortArray",
444
+ "SortByChannel",
445
+ "SortByChannelDesc",
446
+ "SortByEncoding",
447
+ "SortField",
448
+ "SortOrder",
449
+ "Spec",
450
+ "SphereGenerator",
451
+ "StackOffset",
452
+ "StackTransform",
453
+ "StandardType",
454
+ "Step",
455
+ "StepFor",
456
+ "Stream",
457
+ "StringFieldDef",
458
+ "StringFieldDefWithCondition",
459
+ "StringValueDefWithCondition",
460
+ "Stroke",
461
+ "StrokeCap",
462
+ "StrokeDash",
463
+ "StrokeDashDatum",
464
+ "StrokeDashValue",
465
+ "StrokeDatum",
466
+ "StrokeJoin",
467
+ "StrokeOpacity",
468
+ "StrokeOpacityDatum",
469
+ "StrokeOpacityValue",
470
+ "StrokeValue",
471
+ "StrokeWidth",
472
+ "StrokeWidthDatum",
473
+ "StrokeWidthValue",
474
+ "StyleConfigIndex",
475
+ "SymbolShape",
476
+ "TOPLEVEL_ONLY_KEYS",
477
+ "Text",
478
+ "TextBaseline",
479
+ "TextDatum",
480
+ "TextDef",
481
+ "TextDirection",
482
+ "TextValue",
483
+ "Then",
484
+ "Theta",
485
+ "Theta2",
486
+ "Theta2Datum",
487
+ "Theta2Value",
488
+ "ThetaDatum",
489
+ "ThetaValue",
490
+ "TickConfig",
491
+ "TickCount",
492
+ "TimeInterval",
493
+ "TimeIntervalStep",
494
+ "TimeLocale",
495
+ "TimeUnit",
496
+ "TimeUnitParams",
497
+ "TimeUnitTransform",
498
+ "TimeUnitTransformParams",
499
+ "Title",
500
+ "TitleAnchor",
501
+ "TitleConfig",
502
+ "TitleFrame",
503
+ "TitleOrient",
504
+ "TitleParams",
505
+ "Tooltip",
506
+ "TooltipContent",
507
+ "TooltipValue",
508
+ "TopLevelConcatSpec",
509
+ "TopLevelFacetSpec",
510
+ "TopLevelHConcatSpec",
511
+ "TopLevelLayerSpec",
512
+ "TopLevelMixin",
513
+ "TopLevelParameter",
514
+ "TopLevelRepeatSpec",
515
+ "TopLevelSelectionParameter",
516
+ "TopLevelSpec",
517
+ "TopLevelUnitSpec",
518
+ "TopLevelVConcatSpec",
519
+ "TopoDataFormat",
520
+ "Transform",
521
+ "Type",
522
+ "TypeForShape",
523
+ "TypedFieldDef",
524
+ "URI",
525
+ "Undefined",
526
+ "UnitSpec",
527
+ "UnitSpecWithFrame",
528
+ "Url",
529
+ "UrlData",
530
+ "UrlValue",
531
+ "UtcMultiTimeUnit",
532
+ "UtcSingleTimeUnit",
533
+ "VConcatChart",
534
+ "VConcatSpecGenericSpec",
535
+ "VEGAEMBED_VERSION",
536
+ "VEGALITE_VERSION",
537
+ "VEGA_VERSION",
538
+ "ValueChannelMixin",
539
+ "ValueDefWithConditionMarkPropFieldOrDatumDefGradientstringnull",
540
+ "ValueDefWithConditionMarkPropFieldOrDatumDefTypeForShapestringnull",
541
+ "ValueDefWithConditionMarkPropFieldOrDatumDefnumber",
542
+ "ValueDefWithConditionMarkPropFieldOrDatumDefnumberArray",
543
+ "ValueDefWithConditionMarkPropFieldOrDatumDefstringnull",
544
+ "ValueDefWithConditionStringFieldDefText",
545
+ "ValueDefnumber",
546
+ "ValueDefnumberwidthheightExprRef",
547
+ "VariableParameter",
548
+ "Vector10string",
549
+ "Vector12string",
550
+ "Vector2DateTime",
551
+ "Vector2Vector2number",
552
+ "Vector2boolean",
553
+ "Vector2number",
554
+ "Vector2string",
555
+ "Vector3number",
556
+ "Vector7string",
557
+ "VegaLite",
558
+ "VegaLiteSchema",
559
+ "ViewBackground",
560
+ "ViewConfig",
561
+ "When",
562
+ "WindowEventType",
563
+ "WindowFieldDef",
564
+ "WindowOnlyOp",
565
+ "WindowTransform",
566
+ "X",
567
+ "X2",
568
+ "X2Datum",
569
+ "X2Value",
570
+ "XDatum",
571
+ "XError",
572
+ "XError2",
573
+ "XError2Value",
574
+ "XErrorValue",
575
+ "XOffset",
576
+ "XOffsetDatum",
577
+ "XOffsetValue",
578
+ "XValue",
579
+ "Y",
580
+ "Y2",
581
+ "Y2Datum",
582
+ "Y2Value",
583
+ "YDatum",
584
+ "YError",
585
+ "YError2",
586
+ "YError2Value",
587
+ "YErrorValue",
588
+ "YOffset",
589
+ "YOffsetDatum",
590
+ "YOffsetValue",
591
+ "YValue",
592
+ "api",
593
+ "binding",
594
+ "binding_checkbox",
595
+ "binding_radio",
596
+ "binding_range",
597
+ "binding_select",
598
+ "channels",
599
+ "check_fields_and_encodings",
600
+ "compiler",
601
+ "concat",
602
+ "condition",
603
+ "core",
604
+ "data",
605
+ "data_transformers",
606
+ "datum",
607
+ "default_data_transformer",
608
+ "display",
609
+ "expr",
610
+ "graticule",
611
+ "hconcat",
612
+ "jupyter",
613
+ "layer",
614
+ "limit_rows",
615
+ "load_ipython_extension",
616
+ "load_schema",
617
+ "mixins",
618
+ "param",
619
+ "parse_shorthand",
620
+ "renderers",
621
+ "repeat",
622
+ "sample",
623
+ "schema",
624
+ "selection_interval",
625
+ "selection_point",
626
+ "sequence",
627
+ "sphere",
628
+ "theme",
629
+ "to_csv",
630
+ "to_json",
631
+ "to_values",
632
+ "topo_feature",
633
+ "typing",
634
+ "utils",
635
+ "v5",
636
+ "value",
637
+ "vconcat",
638
+ "vegalite",
639
+ "vegalite_compilers",
640
+ "when",
641
+ "with_property_setters",
642
+ ]
643
+
644
+
645
def __dir__():
    # Restrict dir(altair) to the curated public API enumerated in __all__.
    return __all__
647
+
648
+
649
+ from altair.vegalite import *
650
+ from altair.vegalite.v5.schema.core import Dict
651
+ from altair.jupyter import JupyterChart
652
+ from altair.expr import expr
653
+ from altair.utils import AltairDeprecationWarning, parse_shorthand, Undefined
654
+ from altair import typing, theme
655
+
656
+
657
def load_ipython_extension(ipython):
    """Hook called by ``%load_ext altair``: registers the ``%%vegalite`` cell magic."""
    from altair._magics import vegalite

    # Register as a cell magic only (usable as %%vegalite, not %vegalite).
    ipython.register_magic_function(vegalite, "cell")
661
+
662
+
663
def __getattr__(name: str):
    """
    Module-level attribute fallback (PEP 562).

    Serves the deprecated ``alt.themes`` registry with a one-time deprecation
    warning pointing users at the ``alt.theme`` module; any other unknown
    attribute raises AttributeError as usual.
    """
    from altair.utils.deprecation import deprecated_warn

    if name == "themes":
        # `alt.themes` was superseded by `alt.theme` in 5.5.0; emit migration
        # guidance once, then return the underlying registry for compatibility.
        deprecated_warn(
            "Most cases require only the following change:\n\n"
            "    # Deprecated\n"
            "    alt.themes.enable('quartz')\n\n"
            "    # Updated\n"
            "    alt.theme.enable('quartz')\n\n"
            "If your code registers a theme, make the following change:\n\n"
            "    # Deprecated\n"
            "    def custom_theme():\n"
            "        return {'height': 400, 'width': 700}\n"
            "    alt.themes.register('theme_name', custom_theme)\n"
            "    alt.themes.enable('theme_name')\n\n"
            "    # Updated\n"
            "    @alt.theme.register('theme_name', enable=True)\n"
            "    def custom_theme():\n"
            "        return alt.theme.ThemeConfig(\n"
            "            {'height': 400, 'width': 700}\n"
            "        )\n\n"
            "See the updated User Guide for further details:\n"
            "    https://altair-viz.github.io/user_guide/api.html#theme\n"
            "    https://altair-viz.github.io/user_guide/customization.html#chart-themes",
            version="5.5.0",
            alternative="altair.theme",
            stacklevel=3,
            action="once",
        )
        return theme._themes
    else:
        msg = f"module {__name__!r} has no attribute {name!r}"
        raise AttributeError(msg)
mgm/lib/python3.10/site-packages/altair/_magics.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Magic functions for rendering vega-lite specifications."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ import warnings
7
+ from importlib.util import find_spec
8
+ from typing import Any
9
+
10
+ from IPython.core import magic_arguments
11
+ from narwhals.stable.v1.dependencies import is_pandas_dataframe
12
+
13
+ from altair.vegalite import v5 as vegalite_v5
14
+
15
+ __all__ = ["vegalite"]
16
+
17
# Maps schema name -> major schema version -> display class used to render a spec.
RENDERERS = {
    "vega-lite": {
        "5": vegalite_v5.VegaLite,
    },
}


# Maps schema name -> major schema version -> data-transformer registry used to
# prepare DataFrame inputs before embedding them in a spec.
TRANSFORMERS = {
    "vega-lite": {
        "5": vegalite_v5.data_transformers,
    },
}
29
+
30
+
31
+ def _prepare_data(data, data_transformers):
32
+ """Convert input data to data for use within schema."""
33
+ if data is None or isinstance(data, dict):
34
+ return data
35
+ elif is_pandas_dataframe(data):
36
+ if func := data_transformers.get():
37
+ data = func(data)
38
+ return data
39
+ elif isinstance(data, str):
40
+ return {"url": data}
41
+ else:
42
+ warnings.warn(f"data of type {type(data)} not recognized", stacklevel=1)
43
+ return data
44
+
45
+
46
def _get_variable(name: str) -> Any:
    """Get a variable from the notebook namespace."""
    from IPython.core.getipython import get_ipython

    shell = get_ipython()
    # Outside of IPython there is no user namespace to look in.
    if not shell:
        msg = (
            "Magic command must be run within an IPython "
            "environment, in which get_ipython() is defined."
        )
        raise ValueError(msg)
    if name not in shell.user_ns:
        msg = f"argument '{name}' does not match the name of any defined variable"
        raise NameError(msg)
    return shell.user_ns[name]
61
+
62
+
63
@magic_arguments.magic_arguments()
@magic_arguments.argument(
    "data",
    nargs="?",
    help="local variablename of a pandas DataFrame to be used as the dataset",
)
@magic_arguments.argument("-v", "--version", dest="version", default="v5")
@magic_arguments.argument("-j", "--json", dest="json", action="store_true")
def vegalite(line, cell) -> vegalite_v5.VegaLite:
    """
    Cell magic for displaying vega-lite visualizations in CoLab.

    %%vegalite [dataframe] [--json] [--version='v5']

    Visualize the contents of the cell using Vega-Lite, optionally
    specifying a pandas DataFrame object to be used as the dataset.

    if --json is passed, then input is parsed as json rather than yaml.
    """
    args = magic_arguments.parse_argstring(vegalite, line)
    # Only the v5 schema is supported; map the user-facing flag to the
    # internal major-version key used by RENDERERS/TRANSFORMERS.
    existing_versions = {"v5": "5"}
    version = existing_versions[args.version]
    assert version in RENDERERS["vega-lite"]
    VegaLite = RENDERERS["vega-lite"][version]
    data_transformers = TRANSFORMERS["vega-lite"][version]

    if args.json:
        # Explicit --json: the cell body must be JSON.
        spec = json.loads(cell)
    elif not find_spec("yaml"):
        # No pyyaml installed: fall back to JSON, with a helpful error if
        # the cell body doesn't parse.
        try:
            spec = json.loads(cell)
        except json.JSONDecodeError as err:
            msg = (
                "%%vegalite: spec is not valid JSON. "
                "Install pyyaml to parse spec as yaml"
            )
            raise ValueError(msg) from err
    else:
        # Default path: parse the cell body as YAML (a superset of JSON).
        import yaml

        spec = yaml.load(cell, Loader=yaml.SafeLoader)

    if args.data is not None:
        # Resolve the named DataFrame from the notebook namespace and
        # inject it as the spec's dataset.
        data = _get_variable(args.data)
        spec["data"] = _prepare_data(data, data_transformers)

    return VegaLite(spec)
mgm/lib/python3.10/site-packages/altair/jupyter/__init__.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
try:
    import anywidget  # noqa: F401
except ImportError:
    # When anywidget isn't available, create stand-in JupyterChart class
    # that raises an informative import error on construction. This
    # way we can make JupyterChart available in the altair namespace
    # when anywidget is not installed
    class JupyterChart:
        """Stand-in that raises ImportError with install instructions; the real widget lives in ``jupyter_chart``."""

        def __init__(self, *args, **kwargs):
            msg = (
                "The Altair JupyterChart requires the anywidget \n"
                "Python package which may be installed using pip with\n"
                "    pip install anywidget\n"
                "or using conda with\n"
                "    conda install -c conda-forge anywidget\n"
                "Afterwards, you will need to restart your Python kernel."
            )
            raise ImportError(msg)

else:
    # anywidget is present: expose the real widget-backed implementation.
    from .jupyter_chart import JupyterChart  # noqa: F401
mgm/lib/python3.10/site-packages/altair/jupyter/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (834 Bytes). View file
 
mgm/lib/python3.10/site-packages/altair/jupyter/__pycache__/jupyter_chart.cpython-310.pyc ADDED
Binary file (10.9 kB). View file
 
mgm/lib/python3.10/site-packages/altair/jupyter/js/README.md ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # JupyterChart
2
+ This directory contains the JavaScript portion of the Altair `JupyterChart`. The `JupyterChart` is based on the [AnyWidget](https://anywidget.dev/) project.
mgm/lib/python3.10/site-packages/altair/jupyter/js/index.js ADDED
@@ -0,0 +1,230 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import vegaEmbed from "https://esm.sh/vega-embed@6?deps=vega@5&deps=vega-lite@5.20.1";
2
+ import lodashDebounce from "https://esm.sh/lodash-es@4.17.21/debounce";
3
+
4
+ // Note: For offline support, the import lines above are removed and the remaining script
5
+ // is bundled using vl-convert's javascript_bundle function. See the documentation of
6
+ // the javascript_bundle function for details on the available imports and their names.
7
+ // If an additional import is required in the future, it will need to be added to vl-convert
8
+ // in order to preserve offline support.
9
+ async function render({ model, el }) {
10
+ let finalize;
11
+
12
+ function showError(error){
13
+ el.innerHTML = (
14
+ '<div style="color:red;">'
15
+ + '<p>JavaScript Error: ' + error.message + '</p>'
16
+ + "<p>This usually means there's a typo in your chart specification. "
17
+ + "See the javascript console for the full traceback.</p>"
18
+ + '</div>'
19
+ );
20
+ }
21
+
22
+ const reembed = async () => {
23
+ if (finalize != null) {
24
+ finalize();
25
+ }
26
+
27
+ model.set("local_tz", Intl.DateTimeFormat().resolvedOptions().timeZone);
28
+
29
+ let spec = structuredClone(model.get("spec"));
30
+ if (spec == null) {
31
+ // Remove any existing chart and return
32
+ while (el.firstChild) {
33
+ el.removeChild(el.lastChild);
34
+ }
35
+ model.save_changes();
36
+ return;
37
+ }
38
+ let embedOptions = structuredClone(model.get("embed_options")) ?? undefined;
39
+
40
+ let api;
41
+ try {
42
+ api = await vegaEmbed(el, spec, embedOptions);
43
+ } catch (error) {
44
+ showError(error)
45
+ return;
46
+ }
47
+
48
+ finalize = api.finalize;
49
+
50
+ // Debounce config
51
+ const wait = model.get("debounce_wait") ?? 10;
52
+ const debounceOpts = {leading: false, trailing: true};
53
+ if (model.get("max_wait") ?? true) {
54
+ debounceOpts["maxWait"] = wait;
55
+ }
56
+
57
+ const initialSelections = {};
58
+ for (const selectionName of Object.keys(model.get("_vl_selections"))) {
59
+ const storeName = `${selectionName}_store`;
60
+ const selectionHandler = (_, value) => {
61
+ const newSelections = cleanJson(model.get("_vl_selections") ?? {});
62
+ const store = cleanJson(api.view.data(storeName) ?? []);
63
+
64
+ newSelections[selectionName] = {value, store};
65
+ model.set("_vl_selections", newSelections);
66
+ model.save_changes();
67
+ };
68
+ api.view.addSignalListener(selectionName, lodashDebounce(selectionHandler, wait, debounceOpts));
69
+
70
+ initialSelections[selectionName] = {
71
+ value: cleanJson(api.view.signal(selectionName) ?? {}),
72
+ store: cleanJson(api.view.data(storeName) ?? [])
73
+ }
74
+ }
75
+ model.set("_vl_selections", initialSelections);
76
+
77
+ const initialParams = {};
78
+ for (const paramName of Object.keys(model.get("_params"))) {
79
+ const paramHandler = (_, value) => {
80
+ const newParams = JSON.parse(JSON.stringify(model.get("_params"))) || {};
81
+ newParams[paramName] = value;
82
+ model.set("_params", newParams);
83
+ model.save_changes();
84
+ };
85
+ api.view.addSignalListener(paramName, lodashDebounce(paramHandler, wait, debounceOpts));
86
+
87
+ initialParams[paramName] = api.view.signal(paramName) ?? null
88
+ }
89
+ model.set("_params", initialParams);
90
+ model.save_changes();
91
+
92
+ // Param change callback
93
+ model.on('change:_params', async (new_params) => {
94
+ for (const [param, value] of Object.entries(new_params.changed._params)) {
95
+ api.view.signal(param, value);
96
+ }
97
+ await api.view.runAsync();
98
+ });
99
+
100
+ // Add signal/data listeners
101
+ for (const watch of model.get("_js_watch_plan") ?? []) {
102
+ if (watch.namespace === "data") {
103
+ const dataHandler = (_, value) => {
104
+ model.set("_js_to_py_updates", [{
105
+ namespace: "data",
106
+ name: watch.name,
107
+ scope: watch.scope,
108
+ value: cleanJson(value)
109
+ }]);
110
+ model.save_changes();
111
+ };
112
+ addDataListener(api.view, watch.name, watch.scope, lodashDebounce(dataHandler, wait, debounceOpts))
113
+
114
+ } else if (watch.namespace === "signal") {
115
+ const signalHandler = (_, value) => {
116
+ model.set("_js_to_py_updates", [{
117
+ namespace: "signal",
118
+ name: watch.name,
119
+ scope: watch.scope,
120
+ value: cleanJson(value)
121
+ }]);
122
+ model.save_changes();
123
+ };
124
+
125
+ addSignalListener(api.view, watch.name, watch.scope, lodashDebounce(signalHandler, wait, debounceOpts))
126
+ }
127
+ }
128
+
129
+ // Add signal/data updaters
130
+ model.on('change:_py_to_js_updates', async (updates) => {
131
+ for (const update of updates.changed._py_to_js_updates ?? []) {
132
+ if (update.namespace === "signal") {
133
+ setSignalValue(api.view, update.name, update.scope, update.value);
134
+ } else if (update.namespace === "data") {
135
+ setDataValue(api.view, update.name, update.scope, update.value);
136
+ }
137
+ }
138
+ await api.view.runAsync();
139
+ });
140
+ }
141
+
142
+ model.on('change:spec', reembed);
143
+ model.on('change:embed_options', reembed);
144
+ model.on('change:debounce_wait', reembed);
145
+ model.on('change:max_wait', reembed);
146
+ await reembed();
147
+ }
148
+
149
+ function cleanJson(data) {
150
+ return JSON.parse(JSON.stringify(data))
151
+ }
152
+
153
+ function getNestedRuntime(view, scope) {
154
+ var runtime = view._runtime;
155
+ for (const index of scope) {
156
+ runtime = runtime.subcontext[index];
157
+ }
158
+ return runtime
159
+ }
160
+
161
+ function lookupSignalOp(view, name, scope) {
162
+ let parent_runtime = getNestedRuntime(view, scope);
163
+ return parent_runtime.signals[name] ?? null;
164
+ }
165
+
166
+ function dataRef(view, name, scope) {
167
+ let parent_runtime = getNestedRuntime(view, scope);
168
+ return parent_runtime.data[name];
169
+ }
170
+
171
+ export function setSignalValue(view, name, scope, value) {
172
+ let signal_op = lookupSignalOp(view, name, scope);
173
+ view.update(signal_op, value);
174
+ }
175
+
176
+ export function setDataValue(view, name, scope, value) {
177
+ let dataset = dataRef(view, name, scope);
178
+ let changeset = view.changeset().remove(() => true).insert(value)
179
+ dataset.modified = true;
180
+ view.pulse(dataset.input, changeset);
181
+ }
182
+
183
+ export function addSignalListener(view, name, scope, handler) {
184
+ let signal_op = lookupSignalOp(view, name, scope);
185
+ return addOperatorListener(
186
+ view,
187
+ name,
188
+ signal_op,
189
+ handler,
190
+ );
191
+ }
192
+
193
+ export function addDataListener(view, name, scope, handler) {
194
+ let dataset = dataRef(view, name, scope).values;
195
+ return addOperatorListener(
196
+ view,
197
+ name,
198
+ dataset,
199
+ handler,
200
+ );
201
+ }
202
+
203
+ // Private helpers from Vega for dealing with nested signals/data
204
+ function findOperatorHandler(op, handler) {
205
+ const h = (op._targets || [])
206
+ .filter(op => op._update && op._update.handler === handler);
207
+ return h.length ? h[0] : null;
208
+ }
209
+
210
+ function addOperatorListener(view, name, op, handler) {
211
+ let h = findOperatorHandler(op, handler);
212
+ if (!h) {
213
+ h = trap(view, () => handler(name, op.value));
214
+ h.handler = handler;
215
+ view.on(op, null, h);
216
+ }
217
+ return view;
218
+ }
219
+
220
+ function trap(view, fn) {
221
+ return !fn ? null : function() {
222
+ try {
223
+ fn.apply(this, arguments);
224
+ } catch (error) {
225
+ view.error(error);
226
+ }
227
+ };
228
+ }
229
+
230
+ export default { render }
mgm/lib/python3.10/site-packages/altair/jupyter/jupyter_chart.py ADDED
@@ -0,0 +1,404 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ import pathlib
5
+ from typing import Any
6
+
7
+ import anywidget
8
+ import traitlets
9
+
10
+ import altair as alt
11
+ from altair import TopLevelSpec
12
+ from altair.utils._vegafusion_data import (
13
+ compile_to_vegafusion_chart_state,
14
+ using_vegafusion,
15
+ )
16
+ from altair.utils.selection import IndexSelection, IntervalSelection, PointSelection
17
+
18
+ _here = pathlib.Path(__file__).parent
19
+
20
+
21
+ class Params(traitlets.HasTraits):
22
+ """Traitlet class storing a JupyterChart's params."""
23
+
24
+ def __init__(self, trait_values):
25
+ super().__init__()
26
+
27
+ for key, value in trait_values.items():
28
+ if isinstance(value, (int, float)):
29
+ traitlet_type = traitlets.Float()
30
+ elif isinstance(value, str):
31
+ traitlet_type = traitlets.Unicode()
32
+ elif isinstance(value, list):
33
+ traitlet_type = traitlets.List()
34
+ elif isinstance(value, dict):
35
+ traitlet_type = traitlets.Dict()
36
+ else:
37
+ traitlet_type = traitlets.Any()
38
+
39
+ # Add the new trait.
40
+ self.add_traits(**{key: traitlet_type})
41
+
42
+ # Set the trait's value.
43
+ setattr(self, key, value)
44
+
45
+ def __repr__(self):
46
+ return f"Params({self.trait_values()})"
47
+
48
+
49
+ class Selections(traitlets.HasTraits):
50
+ """Traitlet class storing a JupyterChart's selections."""
51
+
52
+ def __init__(self, trait_values):
53
+ super().__init__()
54
+
55
+ for key, value in trait_values.items():
56
+ if isinstance(value, IndexSelection):
57
+ traitlet_type = traitlets.Instance(IndexSelection)
58
+ elif isinstance(value, PointSelection):
59
+ traitlet_type = traitlets.Instance(PointSelection)
60
+ elif isinstance(value, IntervalSelection):
61
+ traitlet_type = traitlets.Instance(IntervalSelection)
62
+ else:
63
+ msg = f"Unexpected selection type: {type(value)}"
64
+ raise ValueError(msg)
65
+
66
+ # Add the new trait.
67
+ self.add_traits(**{key: traitlet_type})
68
+
69
+ # Set the trait's value.
70
+ setattr(self, key, value)
71
+
72
+ # Make read-only
73
+ self.observe(self._make_read_only, names=key)
74
+
75
+ def __repr__(self):
76
+ return f"Selections({self.trait_values()})"
77
+
78
+ def _make_read_only(self, change):
79
+ """Workaround to make traits read-only, but still allow us to change them internally."""
80
+ if change["name"] in self.traits() and change["old"] != change["new"]:
81
+ self._set_value(change["name"], change["old"])
82
+ msg = (
83
+ "Selections may not be set from Python.\n"
84
+ f"Attempted to set select: {change['name']}"
85
+ )
86
+ raise ValueError(msg)
87
+
88
+ def _set_value(self, key, value):
89
+ self.unobserve(self._make_read_only, names=key)
90
+ setattr(self, key, value)
91
+ self.observe(self._make_read_only, names=key)
92
+
93
+
94
+ def load_js_src() -> str:
95
+ return (_here / "js" / "index.js").read_text()
96
+
97
+
98
+ class JupyterChart(anywidget.AnyWidget):
99
+ _esm = load_js_src()
100
+ _css = r"""
101
+ .vega-embed {
102
+ /* Make sure action menu isn't cut off */
103
+ overflow: visible;
104
+ }
105
+ """
106
+
107
+ # Public traitlets
108
+ chart = traitlets.Instance(TopLevelSpec, allow_none=True)
109
+ spec = traitlets.Dict(allow_none=True).tag(sync=True)
110
+ debounce_wait = traitlets.Float(default_value=10).tag(sync=True)
111
+ max_wait = traitlets.Bool(default_value=True).tag(sync=True)
112
+ local_tz = traitlets.Unicode(default_value=None, allow_none=True).tag(sync=True)
113
+ debug = traitlets.Bool(default_value=False)
114
+ embed_options = traitlets.Dict(default_value=None, allow_none=True).tag(sync=True)
115
+
116
+ # Internal selection traitlets
117
+ _selection_types = traitlets.Dict()
118
+ _vl_selections = traitlets.Dict().tag(sync=True)
119
+
120
+ # Internal param traitlets
121
+ _params = traitlets.Dict().tag(sync=True)
122
+
123
+ # Internal comm traitlets for VegaFusion support
124
+ _chart_state = traitlets.Any(allow_none=True)
125
+ _js_watch_plan = traitlets.Any(allow_none=True).tag(sync=True)
126
+ _js_to_py_updates = traitlets.Any(allow_none=True).tag(sync=True)
127
+ _py_to_js_updates = traitlets.Any(allow_none=True).tag(sync=True)
128
+
129
+ # Track whether charts are configured for offline use
130
+ _is_offline = False
131
+
132
+ @classmethod
133
+ def enable_offline(cls, offline: bool = True):
134
+ """
135
+ Configure JupyterChart's offline behavior.
136
+
137
+ Parameters
138
+ ----------
139
+ offline: bool
140
+ If True, configure JupyterChart to operate in offline mode where JavaScript
141
+ dependencies are loaded from vl-convert.
142
+ If False, configure it to operate in online mode where JavaScript dependencies
143
+ are loaded from CDN dynamically. This is the default behavior.
144
+ """
145
+ from altair.utils._importers import import_vl_convert, vl_version_for_vl_convert
146
+
147
+ if offline:
148
+ if cls._is_offline:
149
+ # Already offline
150
+ return
151
+
152
+ vlc = import_vl_convert()
153
+
154
+ src_lines = load_js_src().split("\n")
155
+
156
+ # Remove leading lines with only whitespace, comments, or imports
157
+ while src_lines and (
158
+ len(src_lines[0].strip()) == 0
159
+ or src_lines[0].startswith("import")
160
+ or src_lines[0].startswith("//")
161
+ ):
162
+ src_lines.pop(0)
163
+
164
+ src = "\n".join(src_lines)
165
+
166
+ # vl-convert's javascript_bundle function creates a self-contained JavaScript bundle
167
+ # for JavaScript snippets that import from a small set of dependencies that
168
+ # vl-convert includes. To see the available imports and their imported names, run
169
+ # import vl_convert as vlc
170
+ # help(vlc.javascript_bundle)
171
+ bundled_src = vlc.javascript_bundle(
172
+ src, vl_version=vl_version_for_vl_convert()
173
+ )
174
+ cls._esm = bundled_src
175
+ cls._is_offline = True
176
+ else:
177
+ cls._esm = load_js_src()
178
+ cls._is_offline = False
179
+
180
+ def __init__(
181
+ self,
182
+ chart: TopLevelSpec,
183
+ debounce_wait: int = 10,
184
+ max_wait: bool = True,
185
+ debug: bool = False,
186
+ embed_options: dict | None = None,
187
+ **kwargs: Any,
188
+ ):
189
+ """
190
+ Jupyter Widget for displaying and updating Altair Charts, and retrieving selection and parameter values.
191
+
192
+ Parameters
193
+ ----------
194
+ chart: Chart
195
+ Altair Chart instance
196
+ debounce_wait: int
197
+ Debouncing wait time in milliseconds. Updates will be sent from the client to the kernel
198
+ after debounce_wait milliseconds of no chart interactions.
199
+ max_wait: bool
200
+ If True (default), updates will be sent from the client to the kernel every debounce_wait
201
+ milliseconds even if there are ongoing chart interactions. If False, updates will not be
202
+ sent until chart interactions have completed.
203
+ debug: bool
204
+ If True, debug messages will be printed
205
+ embed_options: dict
206
+ Options to pass to vega-embed.
207
+ See https://github.com/vega/vega-embed?tab=readme-ov-file#options
208
+ """
209
+ self.params = Params({})
210
+ self.selections = Selections({})
211
+ super().__init__(
212
+ chart=chart,
213
+ debounce_wait=debounce_wait,
214
+ max_wait=max_wait,
215
+ debug=debug,
216
+ embed_options=embed_options,
217
+ **kwargs,
218
+ )
219
+
220
+ @traitlets.observe("chart")
221
+ def _on_change_chart(self, change): # noqa: C901
222
+ """Updates the JupyterChart's internal state when the wrapped Chart instance changes."""
223
+ new_chart = change.new
224
+ selection_watches = []
225
+ selection_types = {}
226
+ initial_params = {}
227
+ initial_vl_selections = {}
228
+ empty_selections = {}
229
+
230
+ if new_chart is None:
231
+ with self.hold_sync():
232
+ self.spec = None
233
+ self._selection_types = selection_types
234
+ self._vl_selections = initial_vl_selections
235
+ self._params = initial_params
236
+ return
237
+
238
+ params = getattr(new_chart, "params", [])
239
+
240
+ if params is not alt.Undefined:
241
+ for param in new_chart.params:
242
+ if isinstance(param.name, alt.ParameterName):
243
+ clean_name = param.name.to_json().strip('"')
244
+ else:
245
+ clean_name = param.name
246
+
247
+ select = getattr(param, "select", alt.Undefined)
248
+
249
+ if select != alt.Undefined:
250
+ if not isinstance(select, dict):
251
+ select = select.to_dict()
252
+
253
+ select_type = select["type"]
254
+ if select_type == "point":
255
+ if not (
256
+ select.get("fields", None) or select.get("encodings", None)
257
+ ):
258
+ # Point selection with no associated fields or encodings specified.
259
+ # This is an index-based selection
260
+ selection_types[clean_name] = "index"
261
+ empty_selections[clean_name] = IndexSelection(
262
+ name=clean_name, value=[], store=[]
263
+ )
264
+ else:
265
+ selection_types[clean_name] = "point"
266
+ empty_selections[clean_name] = PointSelection(
267
+ name=clean_name, value=[], store=[]
268
+ )
269
+ elif select_type == "interval":
270
+ selection_types[clean_name] = "interval"
271
+ empty_selections[clean_name] = IntervalSelection(
272
+ name=clean_name, value={}, store=[]
273
+ )
274
+ else:
275
+ msg = f"Unexpected selection type {select.type}"
276
+ raise ValueError(msg)
277
+ selection_watches.append(clean_name)
278
+ initial_vl_selections[clean_name] = {"value": None, "store": []}
279
+ else:
280
+ clean_value = param.value if param.value != alt.Undefined else None
281
+ initial_params[clean_name] = clean_value
282
+
283
+ # Handle the params generated by transforms
284
+ for param_name in collect_transform_params(new_chart):
285
+ initial_params[param_name] = None
286
+
287
+ # Setup params
288
+ self.params = Params(initial_params)
289
+
290
+ def on_param_traitlet_changed(param_change):
291
+ new_params = dict(self._params)
292
+ new_params[param_change["name"]] = param_change["new"]
293
+ self._params = new_params
294
+
295
+ self.params.observe(on_param_traitlet_changed)
296
+
297
+ # Setup selections
298
+ self.selections = Selections(empty_selections)
299
+
300
+ # Update properties all together
301
+ with self.hold_sync():
302
+ if using_vegafusion():
303
+ if self.local_tz is None:
304
+ self.spec = None
305
+
306
+ def on_local_tz_change(change):
307
+ self._init_with_vegafusion(change["new"])
308
+
309
+ self.observe(on_local_tz_change, ["local_tz"])
310
+ else:
311
+ self._init_with_vegafusion(self.local_tz)
312
+ else:
313
+ self.spec = new_chart.to_dict()
314
+ self._selection_types = selection_types
315
+ self._vl_selections = initial_vl_selections
316
+ self._params = initial_params
317
+
318
+ def _init_with_vegafusion(self, local_tz: str):
319
+ if self.chart is not None:
320
+ vegalite_spec = self.chart.to_dict(context={"pre_transform": False})
321
+ with self.hold_sync():
322
+ self._chart_state = compile_to_vegafusion_chart_state(
323
+ vegalite_spec, local_tz
324
+ )
325
+ self._js_watch_plan = self._chart_state.get_watch_plan()[
326
+ "client_to_server"
327
+ ]
328
+ self.spec = self._chart_state.get_transformed_spec()
329
+
330
+ # Callback to update chart state and send updates back to client
331
+ def on_js_to_py_updates(change):
332
+ if self.debug:
333
+ updates_str = json.dumps(change["new"], indent=2)
334
+ print(
335
+ f"JavaScript to Python VegaFusion updates:\n {updates_str}"
336
+ )
337
+ updates = self._chart_state.update(change["new"])
338
+ if self.debug:
339
+ updates_str = json.dumps(updates, indent=2)
340
+ print(
341
+ f"Python to JavaScript VegaFusion updates:\n {updates_str}"
342
+ )
343
+ self._py_to_js_updates = updates
344
+
345
+ self.observe(on_js_to_py_updates, ["_js_to_py_updates"])
346
+
347
+ @traitlets.observe("_params")
348
+ def _on_change_params(self, change):
349
+ for param_name, value in change.new.items():
350
+ setattr(self.params, param_name, value)
351
+
352
+ @traitlets.observe("_vl_selections")
353
+ def _on_change_selections(self, change):
354
+ """Updates the JupyterChart's public selections traitlet in response to changes that the JavaScript logic makes to the internal _selections traitlet."""
355
+ for selection_name, selection_dict in change.new.items():
356
+ value = selection_dict["value"]
357
+ store = selection_dict["store"]
358
+ selection_type = self._selection_types[selection_name]
359
+ if selection_type == "index":
360
+ self.selections._set_value(
361
+ selection_name,
362
+ IndexSelection.from_vega(selection_name, signal=value, store=store),
363
+ )
364
+ elif selection_type == "point":
365
+ self.selections._set_value(
366
+ selection_name,
367
+ PointSelection.from_vega(selection_name, signal=value, store=store),
368
+ )
369
+ elif selection_type == "interval":
370
+ self.selections._set_value(
371
+ selection_name,
372
+ IntervalSelection.from_vega(
373
+ selection_name, signal=value, store=store
374
+ ),
375
+ )
376
+
377
+
378
+ def collect_transform_params(chart: TopLevelSpec) -> set[str]:
379
+ """
380
+ Collect the names of params that are defined by transforms.
381
+
382
+ Parameters
383
+ ----------
384
+ chart: Chart from which to extract transform params
385
+
386
+ Returns
387
+ -------
388
+ set of param names
389
+ """
390
+ transform_params = set()
391
+
392
+ # Handle recursive case
393
+ for prop in ("layer", "concat", "hconcat", "vconcat"):
394
+ for child in getattr(chart, prop, []):
395
+ transform_params.update(collect_transform_params(child))
396
+
397
+ # Handle chart's own transforms
398
+ transforms = getattr(chart, "transform", [])
399
+ transforms = transforms if transforms != alt.Undefined else []
400
+ for tx in transforms:
401
+ if hasattr(tx, "param"):
402
+ transform_params.add(tx.param)
403
+
404
+ return transform_params
mgm/lib/python3.10/site-packages/altair/py.typed ADDED
File without changes
mgm/lib/python3.10/site-packages/altair/theme.py ADDED
@@ -0,0 +1,321 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Customizing chart configuration defaults."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from functools import wraps as _wraps
6
+ from typing import TYPE_CHECKING, Any
7
+ from typing import overload as _overload
8
+
9
+ from altair.vegalite.v5.schema._config import (
10
+ AreaConfigKwds,
11
+ AutoSizeParamsKwds,
12
+ AxisConfigKwds,
13
+ AxisResolveMapKwds,
14
+ BarConfigKwds,
15
+ BindCheckboxKwds,
16
+ BindDirectKwds,
17
+ BindInputKwds,
18
+ BindRadioSelectKwds,
19
+ BindRangeKwds,
20
+ BoxPlotConfigKwds,
21
+ BrushConfigKwds,
22
+ CompositionConfigKwds,
23
+ ConfigKwds,
24
+ DateTimeKwds,
25
+ DerivedStreamKwds,
26
+ ErrorBandConfigKwds,
27
+ ErrorBarConfigKwds,
28
+ FeatureGeometryGeoJsonPropertiesKwds,
29
+ FormatConfigKwds,
30
+ GeoJsonFeatureCollectionKwds,
31
+ GeoJsonFeatureKwds,
32
+ GeometryCollectionKwds,
33
+ GradientStopKwds,
34
+ HeaderConfigKwds,
35
+ IntervalSelectionConfigKwds,
36
+ IntervalSelectionConfigWithoutTypeKwds,
37
+ LegendConfigKwds,
38
+ LegendResolveMapKwds,
39
+ LegendStreamBindingKwds,
40
+ LinearGradientKwds,
41
+ LineConfigKwds,
42
+ LineStringKwds,
43
+ LocaleKwds,
44
+ MarkConfigKwds,
45
+ MergedStreamKwds,
46
+ MultiLineStringKwds,
47
+ MultiPointKwds,
48
+ MultiPolygonKwds,
49
+ NumberLocaleKwds,
50
+ OverlayMarkDefKwds,
51
+ PaddingKwds,
52
+ PointKwds,
53
+ PointSelectionConfigKwds,
54
+ PointSelectionConfigWithoutTypeKwds,
55
+ PolygonKwds,
56
+ ProjectionConfigKwds,
57
+ ProjectionKwds,
58
+ RadialGradientKwds,
59
+ RangeConfigKwds,
60
+ RectConfigKwds,
61
+ ResolveKwds,
62
+ RowColKwds,
63
+ ScaleConfigKwds,
64
+ ScaleInvalidDataConfigKwds,
65
+ ScaleResolveMapKwds,
66
+ SelectionConfigKwds,
67
+ StepKwds,
68
+ StyleConfigIndexKwds,
69
+ ThemeConfig,
70
+ TickConfigKwds,
71
+ TimeIntervalStepKwds,
72
+ TimeLocaleKwds,
73
+ TitleConfigKwds,
74
+ TitleParamsKwds,
75
+ TooltipContentKwds,
76
+ TopLevelSelectionParameterKwds,
77
+ VariableParameterKwds,
78
+ ViewBackgroundKwds,
79
+ ViewConfigKwds,
80
+ )
81
+ from altair.vegalite.v5.theme import themes as _themes
82
+
83
+ if TYPE_CHECKING:
84
+ import sys
85
+ from typing import Any, Callable, Literal
86
+
87
+ if sys.version_info >= (3, 11):
88
+ from typing import LiteralString
89
+ else:
90
+ from typing_extensions import LiteralString
91
+ if sys.version_info >= (3, 10):
92
+ from typing import ParamSpec
93
+ else:
94
+ from typing_extensions import ParamSpec
95
+
96
+ from altair.utils.plugin_registry import Plugin
97
+
98
+ P = ParamSpec("P")
99
+
100
+ __all__ = [
101
+ "AreaConfigKwds",
102
+ "AutoSizeParamsKwds",
103
+ "AxisConfigKwds",
104
+ "AxisResolveMapKwds",
105
+ "BarConfigKwds",
106
+ "BindCheckboxKwds",
107
+ "BindDirectKwds",
108
+ "BindInputKwds",
109
+ "BindRadioSelectKwds",
110
+ "BindRangeKwds",
111
+ "BoxPlotConfigKwds",
112
+ "BrushConfigKwds",
113
+ "CompositionConfigKwds",
114
+ "ConfigKwds",
115
+ "DateTimeKwds",
116
+ "DerivedStreamKwds",
117
+ "ErrorBandConfigKwds",
118
+ "ErrorBarConfigKwds",
119
+ "FeatureGeometryGeoJsonPropertiesKwds",
120
+ "FormatConfigKwds",
121
+ "GeoJsonFeatureCollectionKwds",
122
+ "GeoJsonFeatureKwds",
123
+ "GeometryCollectionKwds",
124
+ "GradientStopKwds",
125
+ "HeaderConfigKwds",
126
+ "IntervalSelectionConfigKwds",
127
+ "IntervalSelectionConfigWithoutTypeKwds",
128
+ "LegendConfigKwds",
129
+ "LegendResolveMapKwds",
130
+ "LegendStreamBindingKwds",
131
+ "LineConfigKwds",
132
+ "LineStringKwds",
133
+ "LinearGradientKwds",
134
+ "LocaleKwds",
135
+ "MarkConfigKwds",
136
+ "MergedStreamKwds",
137
+ "MultiLineStringKwds",
138
+ "MultiPointKwds",
139
+ "MultiPolygonKwds",
140
+ "NumberLocaleKwds",
141
+ "OverlayMarkDefKwds",
142
+ "PaddingKwds",
143
+ "PointKwds",
144
+ "PointSelectionConfigKwds",
145
+ "PointSelectionConfigWithoutTypeKwds",
146
+ "PolygonKwds",
147
+ "ProjectionConfigKwds",
148
+ "ProjectionKwds",
149
+ "RadialGradientKwds",
150
+ "RangeConfigKwds",
151
+ "RectConfigKwds",
152
+ "ResolveKwds",
153
+ "RowColKwds",
154
+ "ScaleConfigKwds",
155
+ "ScaleInvalidDataConfigKwds",
156
+ "ScaleResolveMapKwds",
157
+ "SelectionConfigKwds",
158
+ "StepKwds",
159
+ "StyleConfigIndexKwds",
160
+ "ThemeConfig",
161
+ "TickConfigKwds",
162
+ "TimeIntervalStepKwds",
163
+ "TimeLocaleKwds",
164
+ "TitleConfigKwds",
165
+ "TitleParamsKwds",
166
+ "TooltipContentKwds",
167
+ "TopLevelSelectionParameterKwds",
168
+ "VariableParameterKwds",
169
+ "ViewBackgroundKwds",
170
+ "ViewConfigKwds",
171
+ "active",
172
+ "enable",
173
+ "get",
174
+ "names",
175
+ "options",
176
+ "register",
177
+ "unregister",
178
+ ]
179
+
180
+
181
+ def register(
182
+ name: LiteralString, *, enable: bool
183
+ ) -> Callable[[Plugin[ThemeConfig]], Plugin[ThemeConfig]]:
184
+ """
185
+ Decorator for registering a theme function.
186
+
187
+ Parameters
188
+ ----------
189
+ name
190
+ Unique name assigned in registry.
191
+ enable
192
+ Auto-enable the wrapped theme.
193
+
194
+ Examples
195
+ --------
196
+ Register and enable a theme::
197
+
198
+ import altair as alt
199
+ from altair import theme
200
+
201
+
202
+ @theme.register("param_font_size", enable=True)
203
+ def custom_theme() -> theme.ThemeConfig:
204
+ sizes = 12, 14, 16, 18, 20
205
+ return {
206
+ "autosize": {"contains": "content", "resize": True},
207
+ "background": "#F3F2F1",
208
+ "config": {
209
+ "axisX": {"labelFontSize": sizes[1], "titleFontSize": sizes[1]},
210
+ "axisY": {"labelFontSize": sizes[1], "titleFontSize": sizes[1]},
211
+ "font": "'Lato', 'Segoe UI', Tahoma, Verdana, sans-serif",
212
+ "headerColumn": {"labelFontSize": sizes[1]},
213
+ "headerFacet": {"labelFontSize": sizes[1]},
214
+ "headerRow": {"labelFontSize": sizes[1]},
215
+ "legend": {"labelFontSize": sizes[0], "titleFontSize": sizes[1]},
216
+ "text": {"fontSize": sizes[0]},
217
+ "title": {"fontSize": sizes[-1]},
218
+ },
219
+ "height": {"step": 28},
220
+ "width": 350,
221
+ }
222
+
223
+ We can then see the ``name`` parameter displayed when checking::
224
+
225
+ theme.active
226
+ "param_font_size"
227
+
228
+ Until another theme has been enabled, all charts will use defaults set in ``custom_theme()``::
229
+
230
+ from vega_datasets import data
231
+
232
+ source = data.stocks()
233
+ lines = (
234
+ alt.Chart(source, title=alt.Title("Stocks"))
235
+ .mark_line()
236
+ .encode(x="date:T", y="price:Q", color="symbol:N")
237
+ )
238
+ lines.interactive(bind_y=False)
239
+
240
+ """
241
+
242
+ # HACK: See the following discussion for why `name` is required to be a `LiteralString`:
243
+ # https://github.com/vega/altair/pull/3526#discussion_r1743350127
244
+ def decorate(func: Plugin[ThemeConfig], /) -> Plugin[ThemeConfig]:
245
+ _register(name, func)
246
+ if enable:
247
+ _themes.enable(name)
248
+
249
+ @_wraps(func)
250
+ def wrapper(*args: P.args, **kwargs: P.kwargs) -> ThemeConfig:
251
+ return func(*args, **kwargs)
252
+
253
+ return wrapper
254
+
255
+ return decorate
256
+
257
+
258
+ def unregister(name: LiteralString) -> Plugin[ThemeConfig]:
259
+ """
260
+ Remove and return a previously registered theme.
261
+
262
+ Parameters
263
+ ----------
264
+ name
265
+ Unique name assigned during ``alt.theme.register``.
266
+
267
+ Raises
268
+ ------
269
+ TypeError
270
+ When ``name`` has not been registered.
271
+ """
272
+ plugin = _register(name, None)
273
+ if plugin is None:
274
+ msg = (
275
+ f"Found no theme named {name!r} in registry.\n"
276
+ f"Registered themes:\n"
277
+ f"{names()!r}"
278
+ )
279
+ raise TypeError(msg)
280
+ else:
281
+ return plugin
282
+
283
+
284
+ enable = _themes.enable
285
+ get = _themes.get
286
+ names = _themes.names
287
+ active: str
288
+ """Return the name of the currently active theme."""
289
+ options: dict[str, Any]
290
+ """Return the current themes options dictionary."""
291
+
292
+
293
+ def __dir__() -> list[str]:
294
+ return __all__
295
+
296
+
297
+ @_overload
298
+ def __getattr__(name: Literal["active"]) -> str: ... # type: ignore[misc]
299
+ @_overload
300
+ def __getattr__(name: Literal["options"]) -> dict[str, Any]: ... # type: ignore[misc]
301
+ def __getattr__(name: str) -> Any:
302
+ if name == "active":
303
+ return _themes.active
304
+ elif name == "options":
305
+ return _themes.options
306
+ else:
307
+ msg = f"module {__name__!r} has no attribute {name!r}"
308
+ raise AttributeError(msg)
309
+
310
+
311
+ def _register(
312
+ name: LiteralString, fn: Plugin[ThemeConfig] | None, /
313
+ ) -> Plugin[ThemeConfig] | None:
314
+ if fn is None:
315
+ return _themes._plugins.pop(name, None)
316
+ elif _themes.plugin_type(fn):
317
+ _themes._plugins[name] = fn
318
+ return fn
319
+ else:
320
+ msg = f"{type(fn).__name__!r} is not a callable theme\n\n{fn!r}"
321
+ raise TypeError(msg)
mgm/lib/python3.10/site-packages/altair/typing/__init__.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Public types to ease integrating with `altair`."""
2
+
3
+ from __future__ import annotations
4
+
5
+ __all__ = [
6
+ "ChannelAngle",
7
+ "ChannelColor",
8
+ "ChannelColumn",
9
+ "ChannelDescription",
10
+ "ChannelDetail",
11
+ "ChannelFacet",
12
+ "ChannelFill",
13
+ "ChannelFillOpacity",
14
+ "ChannelHref",
15
+ "ChannelKey",
16
+ "ChannelLatitude",
17
+ "ChannelLatitude2",
18
+ "ChannelLongitude",
19
+ "ChannelLongitude2",
20
+ "ChannelOpacity",
21
+ "ChannelOrder",
22
+ "ChannelRadius",
23
+ "ChannelRadius2",
24
+ "ChannelRow",
25
+ "ChannelShape",
26
+ "ChannelSize",
27
+ "ChannelStroke",
28
+ "ChannelStrokeDash",
29
+ "ChannelStrokeOpacity",
30
+ "ChannelStrokeWidth",
31
+ "ChannelText",
32
+ "ChannelTheta",
33
+ "ChannelTheta2",
34
+ "ChannelTooltip",
35
+ "ChannelUrl",
36
+ "ChannelX",
37
+ "ChannelX2",
38
+ "ChannelXError",
39
+ "ChannelXError2",
40
+ "ChannelXOffset",
41
+ "ChannelY",
42
+ "ChannelY2",
43
+ "ChannelYError",
44
+ "ChannelYError2",
45
+ "ChannelYOffset",
46
+ "ChartType",
47
+ "EncodeKwds",
48
+ "Optional",
49
+ "is_chart_type",
50
+ ]
51
+
52
+ from altair.utils.schemapi import Optional
53
+ from altair.vegalite.v5.api import ChartType, is_chart_type
54
+ from altair.vegalite.v5.schema.channels import (
55
+ ChannelAngle,
56
+ ChannelColor,
57
+ ChannelColumn,
58
+ ChannelDescription,
59
+ ChannelDetail,
60
+ ChannelFacet,
61
+ ChannelFill,
62
+ ChannelFillOpacity,
63
+ ChannelHref,
64
+ ChannelKey,
65
+ ChannelLatitude,
66
+ ChannelLatitude2,
67
+ ChannelLongitude,
68
+ ChannelLongitude2,
69
+ ChannelOpacity,
70
+ ChannelOrder,
71
+ ChannelRadius,
72
+ ChannelRadius2,
73
+ ChannelRow,
74
+ ChannelShape,
75
+ ChannelSize,
76
+ ChannelStroke,
77
+ ChannelStrokeDash,
78
+ ChannelStrokeOpacity,
79
+ ChannelStrokeWidth,
80
+ ChannelText,
81
+ ChannelTheta,
82
+ ChannelTheta2,
83
+ ChannelTooltip,
84
+ ChannelUrl,
85
+ ChannelX,
86
+ ChannelX2,
87
+ ChannelXError,
88
+ ChannelXError2,
89
+ ChannelXOffset,
90
+ ChannelY,
91
+ ChannelY2,
92
+ ChannelYError,
93
+ ChannelYError2,
94
+ ChannelYOffset,
95
+ EncodeKwds,
96
+ )
mgm/lib/python3.10/site-packages/altair/typing/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.68 kB). View file
 
mgm/lib/python3.10/site-packages/altair/utils/execeval.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import ast
4
+ import sys
5
+ from typing import TYPE_CHECKING, Any, Callable, Literal, overload
6
+
7
+ if TYPE_CHECKING:
8
+ from os import PathLike
9
+
10
+ from _typeshed import ReadableBuffer
11
+
12
+ if sys.version_info >= (3, 11):
13
+ from typing import Self
14
+ else:
15
+ from typing_extensions import Self
16
+
17
+
18
+ class _CatchDisplay:
19
+ """Class to temporarily catch sys.displayhook."""
20
+
21
+ def __init__(self) -> None:
22
+ self.output: Any | None = None
23
+
24
+ def __enter__(self) -> Self:
25
+ self.old_hook: Callable[[object], Any] = sys.displayhook
26
+ sys.displayhook = self
27
+ return self
28
+
29
+ def __exit__(self, type, value, traceback) -> Literal[False]:
30
+ sys.displayhook = self.old_hook
31
+ # Returning False will cause exceptions to propagate
32
+ return False
33
+
34
+ def __call__(self, output: Any) -> None:
35
+ self.output = output
36
+
37
+
38
+ @overload
39
+ def eval_block(
40
+ code: str | Any,
41
+ namespace: dict[str, Any] | None = ...,
42
+ filename: str | ReadableBuffer | PathLike[Any] = ...,
43
+ *,
44
+ strict: Literal[False] = ...,
45
+ ) -> Any | None: ...
46
+ @overload
47
+ def eval_block(
48
+ code: str | Any,
49
+ namespace: dict[str, Any] | None = ...,
50
+ filename: str | ReadableBuffer | PathLike[Any] = ...,
51
+ *,
52
+ strict: Literal[True] = ...,
53
+ ) -> Any: ...
54
+ def eval_block(
55
+ code: str | Any,
56
+ namespace: dict[str, Any] | None = None,
57
+ filename: str | ReadableBuffer | PathLike[Any] = "<string>",
58
+ *,
59
+ strict: bool = False,
60
+ ) -> Any | None:
61
+ """
62
+ Execute a multi-line block of code in the given namespace.
63
+
64
+ If the final statement in the code is an expression, return
65
+ the result of the expression.
66
+
67
+ If ``strict``, raise a ``TypeError`` when the return value would be ``None``.
68
+ """
69
+ tree = ast.parse(code, filename="<ast>", mode="exec")
70
+ if namespace is None:
71
+ namespace = {}
72
+ catch_display = _CatchDisplay()
73
+
74
+ if isinstance(tree.body[-1], ast.Expr):
75
+ to_exec, to_eval = tree.body[:-1], tree.body[-1:]
76
+ else:
77
+ to_exec, to_eval = tree.body, []
78
+
79
+ for node in to_exec:
80
+ compiled = compile(ast.Module([node], []), filename=filename, mode="exec")
81
+ exec(compiled, namespace)
82
+
83
+ with catch_display:
84
+ for node in to_eval:
85
+ compiled = compile(
86
+ ast.Interactive([node]), filename=filename, mode="single"
87
+ )
88
+ exec(compiled, namespace)
89
+
90
+ if strict:
91
+ output = catch_display.output
92
+ if output is None:
93
+ msg = f"Expected a non-None value but got {output!r}"
94
+ raise TypeError(msg)
95
+ else:
96
+ return output
97
+ else:
98
+ return catch_display.output
mgm/lib/python3.10/site-packages/altair/utils/server.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ A Simple server used to show altair graphics from a prompt or script.
3
+
4
+ This is adapted from the mpld3 package; see
5
+ https://github.com/mpld3/mpld3/blob/master/mpld3/_server.py
6
+ """
7
+
8
+ import itertools
9
+ import random
10
+ import socket
11
+ import sys
12
+ import threading
13
+ import webbrowser
14
+ from http import server
15
+ from io import BytesIO as IO
16
+
17
+ JUPYTER_WARNING = """
18
+ Note: if you're in the Jupyter notebook, Chart.serve() is not the best
19
+ way to view plots. Consider using Chart.display().
20
+ You must interrupt the kernel to cancel this command.
21
+ """
22
+
23
+
24
+ # Mock server used for testing
25
+
26
+
27
+ class MockRequest:
28
+ def makefile(self, *args, **kwargs):
29
+ return IO(b"GET /")
30
+
31
+ def sendall(self, response):
32
+ pass
33
+
34
+
35
+ class MockServer:
36
+ def __init__(self, ip_port, Handler):
37
+ Handler(MockRequest(), ip_port[0], self)
38
+
39
+ def serve_forever(self):
40
+ pass
41
+
42
+ def server_close(self):
43
+ pass
44
+
45
+
46
+ def generate_handler(html, files=None):
47
+ if files is None:
48
+ files = {}
49
+
50
+ class MyHandler(server.BaseHTTPRequestHandler):
51
+ def do_GET(self):
52
+ """Respond to a GET request."""
53
+ if self.path == "/":
54
+ self.send_response(200)
55
+ self.send_header("Content-type", "text/html")
56
+ self.end_headers()
57
+ self.wfile.write(html.encode())
58
+ elif self.path in files:
59
+ content_type, content = files[self.path]
60
+ self.send_response(200)
61
+ self.send_header("Content-type", content_type)
62
+ self.end_headers()
63
+ self.wfile.write(content.encode())
64
+ else:
65
+ self.send_error(404)
66
+
67
+ return MyHandler
68
+
69
+
70
+ def find_open_port(ip, port, n=50):
71
+ """Find an open port near the specified port."""
72
+ ports = itertools.chain(
73
+ (port + i for i in range(n)), (port + random.randint(-2 * n, 2 * n))
74
+ )
75
+
76
+ for port in ports:
77
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
78
+ result = s.connect_ex((ip, port))
79
+ s.close()
80
+ if result != 0:
81
+ return port
82
+ msg = "no open ports found"
83
+ raise ValueError(msg)
84
+
85
+
86
+ def serve(
87
+ html,
88
+ ip="127.0.0.1",
89
+ port=8888,
90
+ n_retries=50,
91
+ files=None,
92
+ jupyter_warning=True,
93
+ open_browser=True,
94
+ http_server=None,
95
+ ) -> None:
96
+ """
97
+ Start a server serving the given HTML, and (optionally) open a browser.
98
+
99
+ Parameters
100
+ ----------
101
+ html : string
102
+ HTML to serve
103
+ ip : string (default = '127.0.0.1')
104
+ ip address at which the HTML will be served.
105
+ port : int (default = 8888)
106
+ the port at which to serve the HTML
107
+ n_retries : int (default = 50)
108
+ the number of nearby ports to search if the specified port is in use.
109
+ files : dictionary (optional)
110
+ dictionary of extra content to serve
111
+ jupyter_warning : bool (optional)
112
+ if True (default), then print a warning if this is used within Jupyter
113
+ open_browser : bool (optional)
114
+ if True (default), then open a web browser to the given HTML
115
+ http_server : class (optional)
116
+ optionally specify an HTTPServer class to use for showing the
117
+ figure. The default is Python's basic HTTPServer.
118
+ """
119
+ port = find_open_port(ip, port, n_retries)
120
+ Handler = generate_handler(html, files)
121
+
122
+ if http_server is None:
123
+ srvr = server.HTTPServer((ip, port), Handler)
124
+ else:
125
+ srvr = http_server((ip, port), Handler)
126
+
127
+ if jupyter_warning:
128
+ try:
129
+ __IPYTHON__ # type: ignore # noqa
130
+ except NameError:
131
+ pass
132
+ else:
133
+ print(JUPYTER_WARNING)
134
+
135
+ # Start the server
136
+ print(f"Serving to http://{ip}:{port}/ [Ctrl-C to exit]")
137
+ sys.stdout.flush()
138
+
139
+ if open_browser:
140
+ # Use a thread to open a web browser pointing to the server
141
+ def b():
142
+ return webbrowser.open(f"http://{ip}:{port}")
143
+
144
+ threading.Thread(target=b).start()
145
+
146
+ try:
147
+ srvr.serve_forever()
148
+ except (KeyboardInterrupt, SystemExit):
149
+ print("\nstopping Server...")
150
+
151
+ srvr.server_close()
mgm/lib/python3.10/site-packages/nvidia/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (155 Bytes). View file
 
mgm/lib/python3.10/site-packages/nvidia/cublas/include/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (170 Bytes). View file
 
mgm/lib/python3.10/site-packages/nvidia/cublas/include/cublas.h ADDED
@@ -0,0 +1,891 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * This is the public header file for the CUBLAS library, defining the API
52
+ *
53
+ * CUBLAS is an implementation of BLAS (Basic Linear Algebra Subroutines)
54
+ * on top of the CUDA runtime.
55
+ */
56
+
57
+ #if !defined(CUBLAS_H_)
58
+ #define CUBLAS_H_
59
+
60
+ #if defined(CUBLAS_V2_H_)
61
+ #error "It is an error to include both cublas.h and cublas_v2.h"
62
+ #endif
63
+
64
+ #include <cuda_runtime.h>
65
+
66
+ #ifndef CUBLASWINAPI
67
+ #ifdef _WIN32
68
+ #define CUBLASWINAPI __stdcall
69
+ #else
70
+ #define CUBLASWINAPI
71
+ #endif
72
+ #endif
73
+
74
+ #undef CUBLASAPI
75
+ #ifdef __CUDACC__
76
+ #define CUBLASAPI __host__
77
+ #else
78
+ #define CUBLASAPI
79
+ #endif
80
+
81
+ #include "cublas_api.h"
82
+
83
+ #if defined(__cplusplus)
84
+ extern "C" {
85
+ #endif
86
+
87
+ /* CUBLAS data types */
88
+ #define cublasStatus cublasStatus_t
89
+
90
+ cublasStatus CUBLASWINAPI cublasInit(void);
91
+ cublasStatus CUBLASWINAPI cublasShutdown(void);
92
+ cublasStatus CUBLASWINAPI cublasGetError(void);
93
+
94
+ cublasStatus CUBLASWINAPI cublasGetVersion(int* version);
95
+ cublasStatus CUBLASWINAPI cublasAlloc(int n, int elemSize, void** devicePtr);
96
+
97
+ cublasStatus CUBLASWINAPI cublasFree(void* devicePtr);
98
+
99
+ cublasStatus CUBLASWINAPI cublasSetKernelStream(cudaStream_t stream);
100
+
101
+ /* ---------------- CUBLAS BLAS1 functions ---------------- */
102
+ /* NRM2 */
103
+ float CUBLASWINAPI cublasSnrm2(int n, const float* x, int incx);
104
+ double CUBLASWINAPI cublasDnrm2(int n, const double* x, int incx);
105
+ float CUBLASWINAPI cublasScnrm2(int n, const cuComplex* x, int incx);
106
+ double CUBLASWINAPI cublasDznrm2(int n, const cuDoubleComplex* x, int incx);
107
+ /*------------------------------------------------------------------------*/
108
+ /* DOT */
109
+ float CUBLASWINAPI cublasSdot(int n, const float* x, int incx, const float* y, int incy);
110
+ double CUBLASWINAPI cublasDdot(int n, const double* x, int incx, const double* y, int incy);
111
+ cuComplex CUBLASWINAPI cublasCdotu(int n, const cuComplex* x, int incx, const cuComplex* y, int incy);
112
+ cuComplex CUBLASWINAPI cublasCdotc(int n, const cuComplex* x, int incx, const cuComplex* y, int incy);
113
+ cuDoubleComplex CUBLASWINAPI cublasZdotu(int n, const cuDoubleComplex* x, int incx, const cuDoubleComplex* y, int incy);
114
+ cuDoubleComplex CUBLASWINAPI cublasZdotc(int n, const cuDoubleComplex* x, int incx, const cuDoubleComplex* y, int incy);
115
+ /*------------------------------------------------------------------------*/
116
+ /* SCAL */
117
+ void CUBLASWINAPI cublasSscal(int n, float alpha, float* x, int incx);
118
+ void CUBLASWINAPI cublasDscal(int n, double alpha, double* x, int incx);
119
+ void CUBLASWINAPI cublasCscal(int n, cuComplex alpha, cuComplex* x, int incx);
120
+ void CUBLASWINAPI cublasZscal(int n, cuDoubleComplex alpha, cuDoubleComplex* x, int incx);
121
+
122
+ void CUBLASWINAPI cublasCsscal(int n, float alpha, cuComplex* x, int incx);
123
+ void CUBLASWINAPI cublasZdscal(int n, double alpha, cuDoubleComplex* x, int incx);
124
+ /*------------------------------------------------------------------------*/
125
+ /* AXPY */
126
+ void CUBLASWINAPI cublasSaxpy(int n, float alpha, const float* x, int incx, float* y, int incy);
127
+ void CUBLASWINAPI cublasDaxpy(int n, double alpha, const double* x, int incx, double* y, int incy);
128
+ void CUBLASWINAPI cublasCaxpy(int n, cuComplex alpha, const cuComplex* x, int incx, cuComplex* y, int incy);
129
+ void CUBLASWINAPI
130
+ cublasZaxpy(int n, cuDoubleComplex alpha, const cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy);
131
+ /*------------------------------------------------------------------------*/
132
+ /* COPY */
133
+ void CUBLASWINAPI cublasScopy(int n, const float* x, int incx, float* y, int incy);
134
+ void CUBLASWINAPI cublasDcopy(int n, const double* x, int incx, double* y, int incy);
135
+ void CUBLASWINAPI cublasCcopy(int n, const cuComplex* x, int incx, cuComplex* y, int incy);
136
+ void CUBLASWINAPI cublasZcopy(int n, const cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy);
137
+ /*------------------------------------------------------------------------*/
138
+ /* SWAP */
139
+ void CUBLASWINAPI cublasSswap(int n, float* x, int incx, float* y, int incy);
140
+ void CUBLASWINAPI cublasDswap(int n, double* x, int incx, double* y, int incy);
141
+ void CUBLASWINAPI cublasCswap(int n, cuComplex* x, int incx, cuComplex* y, int incy);
142
+ void CUBLASWINAPI cublasZswap(int n, cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy);
143
+ /*------------------------------------------------------------------------*/
144
+ /* AMAX */
145
+ int CUBLASWINAPI cublasIsamax(int n, const float* x, int incx);
146
+ int CUBLASWINAPI cublasIdamax(int n, const double* x, int incx);
147
+ int CUBLASWINAPI cublasIcamax(int n, const cuComplex* x, int incx);
148
+ int CUBLASWINAPI cublasIzamax(int n, const cuDoubleComplex* x, int incx);
149
+ /*------------------------------------------------------------------------*/
150
+ /* AMIN */
151
+ int CUBLASWINAPI cublasIsamin(int n, const float* x, int incx);
152
+ int CUBLASWINAPI cublasIdamin(int n, const double* x, int incx);
153
+
154
+ int CUBLASWINAPI cublasIcamin(int n, const cuComplex* x, int incx);
155
+ int CUBLASWINAPI cublasIzamin(int n, const cuDoubleComplex* x, int incx);
156
+ /*------------------------------------------------------------------------*/
157
+ /* ASUM */
158
+ float CUBLASWINAPI cublasSasum(int n, const float* x, int incx);
159
+ double CUBLASWINAPI cublasDasum(int n, const double* x, int incx);
160
+ float CUBLASWINAPI cublasScasum(int n, const cuComplex* x, int incx);
161
+ double CUBLASWINAPI cublasDzasum(int n, const cuDoubleComplex* x, int incx);
162
+ /*------------------------------------------------------------------------*/
163
+ /* ROT */
164
+ void CUBLASWINAPI cublasSrot(int n, float* x, int incx, float* y, int incy, float sc, float ss);
165
+ void CUBLASWINAPI cublasDrot(int n, double* x, int incx, double* y, int incy, double sc, double ss);
166
+ void CUBLASWINAPI cublasCrot(int n, cuComplex* x, int incx, cuComplex* y, int incy, float c, cuComplex s);
167
+ void CUBLASWINAPI
168
+ cublasZrot(int n, cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy, double sc, cuDoubleComplex cs);
169
+ void CUBLASWINAPI cublasCsrot(int n, cuComplex* x, int incx, cuComplex* y, int incy, float c, float s);
170
+ void CUBLASWINAPI cublasZdrot(int n, cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy, double c, double s);
171
+ /*------------------------------------------------------------------------*/
172
+ /* ROTG */
173
+ void CUBLASWINAPI cublasSrotg(float* sa, float* sb, float* sc, float* ss);
174
+ void CUBLASWINAPI cublasDrotg(double* sa, double* sb, double* sc, double* ss);
175
+ void CUBLASWINAPI cublasCrotg(cuComplex* ca, cuComplex cb, float* sc, cuComplex* cs);
176
+ void CUBLASWINAPI cublasZrotg(cuDoubleComplex* ca, cuDoubleComplex cb, double* sc, cuDoubleComplex* cs);
177
+ /*------------------------------------------------------------------------*/
178
+ /* ROTM */
179
+ void CUBLASWINAPI cublasSrotm(int n, float* x, int incx, float* y, int incy, const float* sparam);
180
+ void CUBLASWINAPI cublasDrotm(int n, double* x, int incx, double* y, int incy, const double* sparam);
181
+ /*------------------------------------------------------------------------*/
182
+ /* ROTMG */
183
+ void CUBLASWINAPI cublasSrotmg(float* sd1, float* sd2, float* sx1, const float* sy1, float* sparam);
184
+ void CUBLASWINAPI cublasDrotmg(double* sd1, double* sd2, double* sx1, const double* sy1, double* sparam);
185
+
186
+ /* --------------- CUBLAS BLAS2 functions ---------------- */
187
+ /* GEMV */
188
+ void CUBLASWINAPI cublasSgemv(char trans,
189
+ int m,
190
+ int n,
191
+ float alpha,
192
+ const float* A,
193
+ int lda,
194
+ const float* x,
195
+ int incx,
196
+ float beta,
197
+ float* y,
198
+ int incy);
199
+ void CUBLASWINAPI cublasDgemv(char trans,
200
+ int m,
201
+ int n,
202
+ double alpha,
203
+ const double* A,
204
+ int lda,
205
+ const double* x,
206
+ int incx,
207
+ double beta,
208
+ double* y,
209
+ int incy);
210
+ void CUBLASWINAPI cublasCgemv(char trans,
211
+ int m,
212
+ int n,
213
+ cuComplex alpha,
214
+ const cuComplex* A,
215
+ int lda,
216
+ const cuComplex* x,
217
+ int incx,
218
+ cuComplex beta,
219
+ cuComplex* y,
220
+ int incy);
221
+ void CUBLASWINAPI cublasZgemv(char trans,
222
+ int m,
223
+ int n,
224
+ cuDoubleComplex alpha,
225
+ const cuDoubleComplex* A,
226
+ int lda,
227
+ const cuDoubleComplex* x,
228
+ int incx,
229
+ cuDoubleComplex beta,
230
+ cuDoubleComplex* y,
231
+ int incy);
232
+ /*------------------------------------------------------------------------*/
233
+ /* GBMV */
234
+ void CUBLASWINAPI cublasSgbmv(char trans,
235
+ int m,
236
+ int n,
237
+ int kl,
238
+ int ku,
239
+ float alpha,
240
+ const float* A,
241
+ int lda,
242
+ const float* x,
243
+ int incx,
244
+ float beta,
245
+ float* y,
246
+ int incy);
247
+ void CUBLASWINAPI cublasDgbmv(char trans,
248
+ int m,
249
+ int n,
250
+ int kl,
251
+ int ku,
252
+ double alpha,
253
+ const double* A,
254
+ int lda,
255
+ const double* x,
256
+ int incx,
257
+ double beta,
258
+ double* y,
259
+ int incy);
260
+ void CUBLASWINAPI cublasCgbmv(char trans,
261
+ int m,
262
+ int n,
263
+ int kl,
264
+ int ku,
265
+ cuComplex alpha,
266
+ const cuComplex* A,
267
+ int lda,
268
+ const cuComplex* x,
269
+ int incx,
270
+ cuComplex beta,
271
+ cuComplex* y,
272
+ int incy);
273
+ void CUBLASWINAPI cublasZgbmv(char trans,
274
+ int m,
275
+ int n,
276
+ int kl,
277
+ int ku,
278
+ cuDoubleComplex alpha,
279
+ const cuDoubleComplex* A,
280
+ int lda,
281
+ const cuDoubleComplex* x,
282
+ int incx,
283
+ cuDoubleComplex beta,
284
+ cuDoubleComplex* y,
285
+ int incy);
286
+ /*------------------------------------------------------------------------*/
287
+ /* TRMV */
288
+ void CUBLASWINAPI cublasStrmv(char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incx);
289
+ void CUBLASWINAPI cublasDtrmv(char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incx);
290
+ void CUBLASWINAPI
291
+ cublasCtrmv(char uplo, char trans, char diag, int n, const cuComplex* A, int lda, cuComplex* x, int incx);
292
+ void CUBLASWINAPI
293
+ cublasZtrmv(char uplo, char trans, char diag, int n, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx);
294
+ /*------------------------------------------------------------------------*/
295
+ /* TBMV */
296
+ void CUBLASWINAPI
297
+ cublasStbmv(char uplo, char trans, char diag, int n, int k, const float* A, int lda, float* x, int incx);
298
+ void CUBLASWINAPI
299
+ cublasDtbmv(char uplo, char trans, char diag, int n, int k, const double* A, int lda, double* x, int incx);
300
+ void CUBLASWINAPI
301
+ cublasCtbmv(char uplo, char trans, char diag, int n, int k, const cuComplex* A, int lda, cuComplex* x, int incx);
302
+ void CUBLASWINAPI cublasZtbmv(
303
+ char uplo, char trans, char diag, int n, int k, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx);
304
+ /*------------------------------------------------------------------------*/
305
+ /* TPMV */
306
+ void CUBLASWINAPI cublasStpmv(char uplo, char trans, char diag, int n, const float* AP, float* x, int incx);
307
+
308
+ void CUBLASWINAPI cublasDtpmv(char uplo, char trans, char diag, int n, const double* AP, double* x, int incx);
309
+
310
+ void CUBLASWINAPI cublasCtpmv(char uplo, char trans, char diag, int n, const cuComplex* AP, cuComplex* x, int incx);
311
+
312
+ void CUBLASWINAPI
313
+ cublasZtpmv(char uplo, char trans, char diag, int n, const cuDoubleComplex* AP, cuDoubleComplex* x, int incx);
314
+ /*------------------------------------------------------------------------*/
315
+ /* TRSV */
316
+ void CUBLASWINAPI cublasStrsv(char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incx);
317
+
318
+ void CUBLASWINAPI cublasDtrsv(char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incx);
319
+
320
+ void CUBLASWINAPI
321
+ cublasCtrsv(char uplo, char trans, char diag, int n, const cuComplex* A, int lda, cuComplex* x, int incx);
322
+
323
+ void CUBLASWINAPI
324
+ cublasZtrsv(char uplo, char trans, char diag, int n, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx);
325
+ /*------------------------------------------------------------------------*/
326
+ /* TPSV */
327
+ void CUBLASWINAPI cublasStpsv(char uplo, char trans, char diag, int n, const float* AP, float* x, int incx);
328
+
329
+ void CUBLASWINAPI cublasDtpsv(char uplo, char trans, char diag, int n, const double* AP, double* x, int incx);
330
+
331
+ void CUBLASWINAPI cublasCtpsv(char uplo, char trans, char diag, int n, const cuComplex* AP, cuComplex* x, int incx);
332
+
333
+ void CUBLASWINAPI
334
+ cublasZtpsv(char uplo, char trans, char diag, int n, const cuDoubleComplex* AP, cuDoubleComplex* x, int incx);
335
+ /*------------------------------------------------------------------------*/
336
+ /* TBSV */
337
+ void CUBLASWINAPI
338
+ cublasStbsv(char uplo, char trans, char diag, int n, int k, const float* A, int lda, float* x, int incx);
339
+
340
+ void CUBLASWINAPI
341
+ cublasDtbsv(char uplo, char trans, char diag, int n, int k, const double* A, int lda, double* x, int incx);
342
+ void CUBLASWINAPI
343
+ cublasCtbsv(char uplo, char trans, char diag, int n, int k, const cuComplex* A, int lda, cuComplex* x, int incx);
344
+
345
+ void CUBLASWINAPI cublasZtbsv(
346
+ char uplo, char trans, char diag, int n, int k, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx);
347
+ /*------------------------------------------------------------------------*/
348
+ /* SYMV/HEMV */
349
+ void CUBLASWINAPI cublasSsymv(
350
+ char uplo, int n, float alpha, const float* A, int lda, const float* x, int incx, float beta, float* y, int incy);
351
+ void CUBLASWINAPI cublasDsymv(char uplo,
352
+ int n,
353
+ double alpha,
354
+ const double* A,
355
+ int lda,
356
+ const double* x,
357
+ int incx,
358
+ double beta,
359
+ double* y,
360
+ int incy);
361
+ void CUBLASWINAPI cublasChemv(char uplo,
362
+ int n,
363
+ cuComplex alpha,
364
+ const cuComplex* A,
365
+ int lda,
366
+ const cuComplex* x,
367
+ int incx,
368
+ cuComplex beta,
369
+ cuComplex* y,
370
+ int incy);
371
+ void CUBLASWINAPI cublasZhemv(char uplo,
372
+ int n,
373
+ cuDoubleComplex alpha,
374
+ const cuDoubleComplex* A,
375
+ int lda,
376
+ const cuDoubleComplex* x,
377
+ int incx,
378
+ cuDoubleComplex beta,
379
+ cuDoubleComplex* y,
380
+ int incy);
381
+ /*------------------------------------------------------------------------*/
382
+ /* SBMV/HBMV */
383
+ void CUBLASWINAPI cublasSsbmv(char uplo,
384
+ int n,
385
+ int k,
386
+ float alpha,
387
+ const float* A,
388
+ int lda,
389
+ const float* x,
390
+ int incx,
391
+ float beta,
392
+ float* y,
393
+ int incy);
394
+ void CUBLASWINAPI cublasDsbmv(char uplo,
395
+ int n,
396
+ int k,
397
+ double alpha,
398
+ const double* A,
399
+ int lda,
400
+ const double* x,
401
+ int incx,
402
+ double beta,
403
+ double* y,
404
+ int incy);
405
+ void CUBLASWINAPI cublasChbmv(char uplo,
406
+ int n,
407
+ int k,
408
+ cuComplex alpha,
409
+ const cuComplex* A,
410
+ int lda,
411
+ const cuComplex* x,
412
+ int incx,
413
+ cuComplex beta,
414
+ cuComplex* y,
415
+ int incy);
416
+ void CUBLASWINAPI cublasZhbmv(char uplo,
417
+ int n,
418
+ int k,
419
+ cuDoubleComplex alpha,
420
+ const cuDoubleComplex* A,
421
+ int lda,
422
+ const cuDoubleComplex* x,
423
+ int incx,
424
+ cuDoubleComplex beta,
425
+ cuDoubleComplex* y,
426
+ int incy);
427
+ /*------------------------------------------------------------------------*/
428
+ /* SPMV/HPMV */
429
+ void CUBLASWINAPI
430
+ cublasSspmv(char uplo, int n, float alpha, const float* AP, const float* x, int incx, float beta, float* y, int incy);
431
+ void CUBLASWINAPI cublasDspmv(
432
+ char uplo, int n, double alpha, const double* AP, const double* x, int incx, double beta, double* y, int incy);
433
+ void CUBLASWINAPI cublasChpmv(char uplo,
434
+ int n,
435
+ cuComplex alpha,
436
+ const cuComplex* AP,
437
+ const cuComplex* x,
438
+ int incx,
439
+ cuComplex beta,
440
+ cuComplex* y,
441
+ int incy);
442
+ void CUBLASWINAPI cublasZhpmv(char uplo,
443
+ int n,
444
+ cuDoubleComplex alpha,
445
+ const cuDoubleComplex* AP,
446
+ const cuDoubleComplex* x,
447
+ int incx,
448
+ cuDoubleComplex beta,
449
+ cuDoubleComplex* y,
450
+ int incy);
451
+
452
+ /*------------------------------------------------------------------------*/
453
+ /* GER */
454
+ void CUBLASWINAPI
455
+ cublasSger(int m, int n, float alpha, const float* x, int incx, const float* y, int incy, float* A, int lda);
456
+ void CUBLASWINAPI
457
+ cublasDger(int m, int n, double alpha, const double* x, int incx, const double* y, int incy, double* A, int lda);
458
+
459
+ void CUBLASWINAPI cublasCgeru(
460
+ int m, int n, cuComplex alpha, const cuComplex* x, int incx, const cuComplex* y, int incy, cuComplex* A, int lda);
461
+ void CUBLASWINAPI cublasCgerc(
462
+ int m, int n, cuComplex alpha, const cuComplex* x, int incx, const cuComplex* y, int incy, cuComplex* A, int lda);
463
+ void CUBLASWINAPI cublasZgeru(int m,
464
+ int n,
465
+ cuDoubleComplex alpha,
466
+ const cuDoubleComplex* x,
467
+ int incx,
468
+ const cuDoubleComplex* y,
469
+ int incy,
470
+ cuDoubleComplex* A,
471
+ int lda);
472
+ void CUBLASWINAPI cublasZgerc(int m,
473
+ int n,
474
+ cuDoubleComplex alpha,
475
+ const cuDoubleComplex* x,
476
+ int incx,
477
+ const cuDoubleComplex* y,
478
+ int incy,
479
+ cuDoubleComplex* A,
480
+ int lda);
481
+ /*------------------------------------------------------------------------*/
482
+ /* SYR/HER */
483
+ void CUBLASWINAPI cublasSsyr(char uplo, int n, float alpha, const float* x, int incx, float* A, int lda);
484
+ void CUBLASWINAPI cublasDsyr(char uplo, int n, double alpha, const double* x, int incx, double* A, int lda);
485
+
486
+ void CUBLASWINAPI cublasCher(char uplo, int n, float alpha, const cuComplex* x, int incx, cuComplex* A, int lda);
487
+ void CUBLASWINAPI
488
+ cublasZher(char uplo, int n, double alpha, const cuDoubleComplex* x, int incx, cuDoubleComplex* A, int lda);
489
+
490
+ /*------------------------------------------------------------------------*/
491
+ /* SPR/HPR */
492
+ void CUBLASWINAPI cublasSspr(char uplo, int n, float alpha, const float* x, int incx, float* AP);
493
+ void CUBLASWINAPI cublasDspr(char uplo, int n, double alpha, const double* x, int incx, double* AP);
494
+ void CUBLASWINAPI cublasChpr(char uplo, int n, float alpha, const cuComplex* x, int incx, cuComplex* AP);
495
+ void CUBLASWINAPI cublasZhpr(char uplo, int n, double alpha, const cuDoubleComplex* x, int incx, cuDoubleComplex* AP);
496
+ /*------------------------------------------------------------------------*/
497
+ /* SYR2/HER2 */
498
+ void CUBLASWINAPI
499
+ cublasSsyr2(char uplo, int n, float alpha, const float* x, int incx, const float* y, int incy, float* A, int lda);
500
+ void CUBLASWINAPI
501
+ cublasDsyr2(char uplo, int n, double alpha, const double* x, int incx, const double* y, int incy, double* A, int lda);
502
+ void CUBLASWINAPI cublasCher2(char uplo,
503
+ int n,
504
+ cuComplex alpha,
505
+ const cuComplex* x,
506
+ int incx,
507
+ const cuComplex* y,
508
+ int incy,
509
+ cuComplex* A,
510
+ int lda);
511
+ void CUBLASWINAPI cublasZher2(char uplo,
512
+ int n,
513
+ cuDoubleComplex alpha,
514
+ const cuDoubleComplex* x,
515
+ int incx,
516
+ const cuDoubleComplex* y,
517
+ int incy,
518
+ cuDoubleComplex* A,
519
+ int lda);
520
+
521
+ /*------------------------------------------------------------------------*/
522
+ /* SPR2/HPR2 */
523
+ void CUBLASWINAPI
524
+ cublasSspr2(char uplo, int n, float alpha, const float* x, int incx, const float* y, int incy, float* AP);
525
+ void CUBLASWINAPI
526
+ cublasDspr2(char uplo, int n, double alpha, const double* x, int incx, const double* y, int incy, double* AP);
527
+ void CUBLASWINAPI cublasChpr2(
528
+ char uplo, int n, cuComplex alpha, const cuComplex* x, int incx, const cuComplex* y, int incy, cuComplex* AP);
529
+ void CUBLASWINAPI cublasZhpr2(char uplo,
530
+ int n,
531
+ cuDoubleComplex alpha,
532
+ const cuDoubleComplex* x,
533
+ int incx,
534
+ const cuDoubleComplex* y,
535
+ int incy,
536
+ cuDoubleComplex* AP);
537
+ /* ------------------------BLAS3 Functions ------------------------------- */
538
+ /* GEMM */
539
+ void CUBLASWINAPI cublasSgemm(char transa,
540
+ char transb,
541
+ int m,
542
+ int n,
543
+ int k,
544
+ float alpha,
545
+ const float* A,
546
+ int lda,
547
+ const float* B,
548
+ int ldb,
549
+ float beta,
550
+ float* C,
551
+ int ldc);
552
+ void CUBLASWINAPI cublasDgemm(char transa,
553
+ char transb,
554
+ int m,
555
+ int n,
556
+ int k,
557
+ double alpha,
558
+ const double* A,
559
+ int lda,
560
+ const double* B,
561
+ int ldb,
562
+ double beta,
563
+ double* C,
564
+ int ldc);
565
+ void CUBLASWINAPI cublasCgemm(char transa,
566
+ char transb,
567
+ int m,
568
+ int n,
569
+ int k,
570
+ cuComplex alpha,
571
+ const cuComplex* A,
572
+ int lda,
573
+ const cuComplex* B,
574
+ int ldb,
575
+ cuComplex beta,
576
+ cuComplex* C,
577
+ int ldc);
578
+ void CUBLASWINAPI cublasZgemm(char transa,
579
+ char transb,
580
+ int m,
581
+ int n,
582
+ int k,
583
+ cuDoubleComplex alpha,
584
+ const cuDoubleComplex* A,
585
+ int lda,
586
+ const cuDoubleComplex* B,
587
+ int ldb,
588
+ cuDoubleComplex beta,
589
+ cuDoubleComplex* C,
590
+ int ldc);
591
+ /* -------------------------------------------------------*/
592
+ /* SYRK */
593
+ void CUBLASWINAPI
594
+ cublasSsyrk(char uplo, char trans, int n, int k, float alpha, const float* A, int lda, float beta, float* C, int ldc);
595
+ void CUBLASWINAPI cublasDsyrk(
596
+ char uplo, char trans, int n, int k, double alpha, const double* A, int lda, double beta, double* C, int ldc);
597
+
598
+ void CUBLASWINAPI cublasCsyrk(char uplo,
599
+ char trans,
600
+ int n,
601
+ int k,
602
+ cuComplex alpha,
603
+ const cuComplex* A,
604
+ int lda,
605
+ cuComplex beta,
606
+ cuComplex* C,
607
+ int ldc);
608
+ void CUBLASWINAPI cublasZsyrk(char uplo,
609
+ char trans,
610
+ int n,
611
+ int k,
612
+ cuDoubleComplex alpha,
613
+ const cuDoubleComplex* A,
614
+ int lda,
615
+ cuDoubleComplex beta,
616
+ cuDoubleComplex* C,
617
+ int ldc);
618
+ /* ------------------------------------------------------- */
619
+ /* HERK */
620
+ void CUBLASWINAPI cublasCherk(
621
+ char uplo, char trans, int n, int k, float alpha, const cuComplex* A, int lda, float beta, cuComplex* C, int ldc);
622
+ void CUBLASWINAPI cublasZherk(char uplo,
623
+ char trans,
624
+ int n,
625
+ int k,
626
+ double alpha,
627
+ const cuDoubleComplex* A,
628
+ int lda,
629
+ double beta,
630
+ cuDoubleComplex* C,
631
+ int ldc);
632
+ /* ------------------------------------------------------- */
633
+ /* SYR2K */
634
+ void CUBLASWINAPI cublasSsyr2k(char uplo,
635
+ char trans,
636
+ int n,
637
+ int k,
638
+ float alpha,
639
+ const float* A,
640
+ int lda,
641
+ const float* B,
642
+ int ldb,
643
+ float beta,
644
+ float* C,
645
+ int ldc);
646
+
647
+ void CUBLASWINAPI cublasDsyr2k(char uplo,
648
+ char trans,
649
+ int n,
650
+ int k,
651
+ double alpha,
652
+ const double* A,
653
+ int lda,
654
+ const double* B,
655
+ int ldb,
656
+ double beta,
657
+ double* C,
658
+ int ldc);
659
+ void CUBLASWINAPI cublasCsyr2k(char uplo,
660
+ char trans,
661
+ int n,
662
+ int k,
663
+ cuComplex alpha,
664
+ const cuComplex* A,
665
+ int lda,
666
+ const cuComplex* B,
667
+ int ldb,
668
+ cuComplex beta,
669
+ cuComplex* C,
670
+ int ldc);
671
+
672
+ void CUBLASWINAPI cublasZsyr2k(char uplo,
673
+ char trans,
674
+ int n,
675
+ int k,
676
+ cuDoubleComplex alpha,
677
+ const cuDoubleComplex* A,
678
+ int lda,
679
+ const cuDoubleComplex* B,
680
+ int ldb,
681
+ cuDoubleComplex beta,
682
+ cuDoubleComplex* C,
683
+ int ldc);
684
+ /* ------------------------------------------------------- */
685
+ /* HER2K */
686
+ void CUBLASWINAPI cublasCher2k(char uplo,
687
+ char trans,
688
+ int n,
689
+ int k,
690
+ cuComplex alpha,
691
+ const cuComplex* A,
692
+ int lda,
693
+ const cuComplex* B,
694
+ int ldb,
695
+ float beta,
696
+ cuComplex* C,
697
+ int ldc);
698
+
699
+ void CUBLASWINAPI cublasZher2k(char uplo,
700
+ char trans,
701
+ int n,
702
+ int k,
703
+ cuDoubleComplex alpha,
704
+ const cuDoubleComplex* A,
705
+ int lda,
706
+ const cuDoubleComplex* B,
707
+ int ldb,
708
+ double beta,
709
+ cuDoubleComplex* C,
710
+ int ldc);
711
+
712
+ /*------------------------------------------------------------------------*/
713
+ /* SYMM*/
714
+ void CUBLASWINAPI cublasSsymm(char side,
715
+ char uplo,
716
+ int m,
717
+ int n,
718
+ float alpha,
719
+ const float* A,
720
+ int lda,
721
+ const float* B,
722
+ int ldb,
723
+ float beta,
724
+ float* C,
725
+ int ldc);
726
+ void CUBLASWINAPI cublasDsymm(char side,
727
+ char uplo,
728
+ int m,
729
+ int n,
730
+ double alpha,
731
+ const double* A,
732
+ int lda,
733
+ const double* B,
734
+ int ldb,
735
+ double beta,
736
+ double* C,
737
+ int ldc);
738
+
739
+ void CUBLASWINAPI cublasCsymm(char side,
740
+ char uplo,
741
+ int m,
742
+ int n,
743
+ cuComplex alpha,
744
+ const cuComplex* A,
745
+ int lda,
746
+ const cuComplex* B,
747
+ int ldb,
748
+ cuComplex beta,
749
+ cuComplex* C,
750
+ int ldc);
751
+
752
+ void CUBLASWINAPI cublasZsymm(char side,
753
+ char uplo,
754
+ int m,
755
+ int n,
756
+ cuDoubleComplex alpha,
757
+ const cuDoubleComplex* A,
758
+ int lda,
759
+ const cuDoubleComplex* B,
760
+ int ldb,
761
+ cuDoubleComplex beta,
762
+ cuDoubleComplex* C,
763
+ int ldc);
764
+ /*------------------------------------------------------------------------*/
765
+ /* HEMM*/
766
+ void CUBLASWINAPI cublasChemm(char side,
767
+ char uplo,
768
+ int m,
769
+ int n,
770
+ cuComplex alpha,
771
+ const cuComplex* A,
772
+ int lda,
773
+ const cuComplex* B,
774
+ int ldb,
775
+ cuComplex beta,
776
+ cuComplex* C,
777
+ int ldc);
778
+ void CUBLASWINAPI cublasZhemm(char side,
779
+ char uplo,
780
+ int m,
781
+ int n,
782
+ cuDoubleComplex alpha,
783
+ const cuDoubleComplex* A,
784
+ int lda,
785
+ const cuDoubleComplex* B,
786
+ int ldb,
787
+ cuDoubleComplex beta,
788
+ cuDoubleComplex* C,
789
+ int ldc);
790
+
791
+ /*------------------------------------------------------------------------*/
792
+ /* TRSM*/
793
+ void CUBLASWINAPI cublasStrsm(char side,
794
+ char uplo,
795
+ char transa,
796
+ char diag,
797
+ int m,
798
+ int n,
799
+ float alpha,
800
+ const float* A,
801
+ int lda,
802
+ float* B,
803
+ int ldb);
804
+
805
+ void CUBLASWINAPI cublasDtrsm(char side,
806
+ char uplo,
807
+ char transa,
808
+ char diag,
809
+ int m,
810
+ int n,
811
+ double alpha,
812
+ const double* A,
813
+ int lda,
814
+ double* B,
815
+ int ldb);
816
+
817
+ void CUBLASWINAPI cublasCtrsm(char side,
818
+ char uplo,
819
+ char transa,
820
+ char diag,
821
+ int m,
822
+ int n,
823
+ cuComplex alpha,
824
+ const cuComplex* A,
825
+ int lda,
826
+ cuComplex* B,
827
+ int ldb);
828
+
829
+ void CUBLASWINAPI cublasZtrsm(char side,
830
+ char uplo,
831
+ char transa,
832
+ char diag,
833
+ int m,
834
+ int n,
835
+ cuDoubleComplex alpha,
836
+ const cuDoubleComplex* A,
837
+ int lda,
838
+ cuDoubleComplex* B,
839
+ int ldb);
840
+ /*------------------------------------------------------------------------*/
841
+ /* TRMM*/
842
+ void CUBLASWINAPI cublasStrmm(char side,
843
+ char uplo,
844
+ char transa,
845
+ char diag,
846
+ int m,
847
+ int n,
848
+ float alpha,
849
+ const float* A,
850
+ int lda,
851
+ float* B,
852
+ int ldb);
853
+ void CUBLASWINAPI cublasDtrmm(char side,
854
+ char uplo,
855
+ char transa,
856
+ char diag,
857
+ int m,
858
+ int n,
859
+ double alpha,
860
+ const double* A,
861
+ int lda,
862
+ double* B,
863
+ int ldb);
864
+ void CUBLASWINAPI cublasCtrmm(char side,
865
+ char uplo,
866
+ char transa,
867
+ char diag,
868
+ int m,
869
+ int n,
870
+ cuComplex alpha,
871
+ const cuComplex* A,
872
+ int lda,
873
+ cuComplex* B,
874
+ int ldb);
875
+ void CUBLASWINAPI cublasZtrmm(char side,
876
+ char uplo,
877
+ char transa,
878
+ char diag,
879
+ int m,
880
+ int n,
881
+ cuDoubleComplex alpha,
882
+ const cuDoubleComplex* A,
883
+ int lda,
884
+ cuDoubleComplex* B,
885
+ int ldb);
886
+
887
+ #if defined(__cplusplus)
888
+ }
889
+ #endif /* __cplusplus */
890
+
891
+ #endif /* !defined(CUBLAS_H_) */
mgm/lib/python3.10/site-packages/nvidia/cublas/include/cublasLt.h ADDED
@@ -0,0 +1,1626 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+ #pragma once
50
+
51
+ #ifndef CUBLASAPI
52
+ #ifdef __CUDACC__
53
+ #define CUBLASAPI __host__ __device__
54
+ #else
55
+ #define CUBLASAPI
56
+ #endif
57
+ #endif
58
+
59
+ #include <cublas_api.h>
60
+
61
+ #include <stdint.h>
62
+ #include <stddef.h>
63
+ #include <stdio.h>
64
+
65
+ #if defined(__cplusplus)
66
+ extern "C" {
67
+ #endif /* __cplusplus */
68
+
69
+ /** Opaque structure holding CUBLASLT context
70
+ */
71
+ typedef struct cublasLtContext* cublasLtHandle_t;
72
+
73
+ cublasStatus_t CUBLASWINAPI cublasLtCreate(cublasLtHandle_t* lightHandle);
74
+
75
+ cublasStatus_t CUBLASWINAPI cublasLtDestroy(cublasLtHandle_t lightHandle);
76
+
77
+ const char* CUBLASWINAPI cublasLtGetStatusName(cublasStatus_t status);
78
+
79
+ const char* CUBLASWINAPI cublasLtGetStatusString(cublasStatus_t status);
80
+
81
+ size_t CUBLASWINAPI cublasLtGetVersion(void);
82
+
83
+ size_t CUBLASWINAPI cublasLtGetCudartVersion(void);
84
+
85
+ cublasStatus_t CUBLASWINAPI cublasLtGetProperty(libraryPropertyType type, int* value);
86
+
87
+ /** Semi-opaque descriptor for matrix memory layout
88
+ */
89
+ typedef struct {
90
+ uint64_t data[8];
91
+ } cublasLtMatrixLayoutOpaque_t;
92
+
93
+ /** Opaque descriptor for matrix memory layout
94
+ */
95
+ typedef cublasLtMatrixLayoutOpaque_t* cublasLtMatrixLayout_t;
96
+
97
+ /** Semi-opaque algorithm descriptor (to avoid complicated alloc/free schemes)
98
+ *
99
+ * This structure can be trivially serialized and later restored for use with the same version of cuBLAS library to save
100
+ * on selecting the right configuration again.
101
+ */
102
+ typedef struct {
103
+ uint64_t data[8];
104
+ } cublasLtMatmulAlgo_t;
105
+
106
+ /** Semi-opaque descriptor for cublasLtMatmul() operation details
107
+ */
108
+ typedef struct {
109
+ uint64_t data[12];
110
+ } cublasLtMatmulDescOpaque_t;
111
+
112
+ /** Opaque descriptor for cublasLtMatmul() operation details
113
+ */
114
+ typedef cublasLtMatmulDescOpaque_t* cublasLtMatmulDesc_t;
115
+
116
+ /** Semi-opaque descriptor for cublasLtMatrixTransform() operation details
117
+ */
118
+ typedef struct {
119
+ uint64_t data[8];
120
+ } cublasLtMatrixTransformDescOpaque_t;
121
+
122
+ /** Opaque descriptor for cublasLtMatrixTransform() operation details
123
+ */
124
+ typedef cublasLtMatrixTransformDescOpaque_t* cublasLtMatrixTransformDesc_t;
125
+
126
+ /** Semi-opaque descriptor for cublasLtMatmulPreference() operation details
127
+ */
128
+ typedef struct {
129
+ uint64_t data[10];
130
+ } cublasLtMatmulPreferenceOpaque_t;
131
+
132
+ /** Opaque descriptor for cublasLtMatmulAlgoGetHeuristic() configuration
133
+ */
134
+ typedef cublasLtMatmulPreferenceOpaque_t* cublasLtMatmulPreference_t;
135
+
136
+ /** Tile size (in C/D matrix Rows x Cols)
137
+ *
138
+ * General order of tile IDs is sorted by size first and by first dimension second.
139
+ */
140
+ typedef enum {
141
+ CUBLASLT_MATMUL_TILE_UNDEFINED = 0,
142
+ CUBLASLT_MATMUL_TILE_8x8 = 1,
143
+ CUBLASLT_MATMUL_TILE_8x16 = 2,
144
+ CUBLASLT_MATMUL_TILE_16x8 = 3,
145
+ CUBLASLT_MATMUL_TILE_8x32 = 4,
146
+ CUBLASLT_MATMUL_TILE_16x16 = 5,
147
+ CUBLASLT_MATMUL_TILE_32x8 = 6,
148
+ CUBLASLT_MATMUL_TILE_8x64 = 7,
149
+ CUBLASLT_MATMUL_TILE_16x32 = 8,
150
+ CUBLASLT_MATMUL_TILE_32x16 = 9,
151
+ CUBLASLT_MATMUL_TILE_64x8 = 10,
152
+ CUBLASLT_MATMUL_TILE_32x32 = 11,
153
+ CUBLASLT_MATMUL_TILE_32x64 = 12,
154
+ CUBLASLT_MATMUL_TILE_64x32 = 13,
155
+ CUBLASLT_MATMUL_TILE_32x128 = 14,
156
+ CUBLASLT_MATMUL_TILE_64x64 = 15,
157
+ CUBLASLT_MATMUL_TILE_128x32 = 16,
158
+ CUBLASLT_MATMUL_TILE_64x128 = 17,
159
+ CUBLASLT_MATMUL_TILE_128x64 = 18,
160
+ CUBLASLT_MATMUL_TILE_64x256 = 19,
161
+ CUBLASLT_MATMUL_TILE_128x128 = 20,
162
+ CUBLASLT_MATMUL_TILE_256x64 = 21,
163
+ CUBLASLT_MATMUL_TILE_64x512 = 22,
164
+ CUBLASLT_MATMUL_TILE_128x256 = 23,
165
+ CUBLASLT_MATMUL_TILE_256x128 = 24,
166
+ CUBLASLT_MATMUL_TILE_512x64 = 25,
167
+ CUBLASLT_MATMUL_TILE_64x96 = 26,
168
+ CUBLASLT_MATMUL_TILE_96x64 = 27,
169
+ CUBLASLT_MATMUL_TILE_96x128 = 28,
170
+ CUBLASLT_MATMUL_TILE_128x160 = 29,
171
+ CUBLASLT_MATMUL_TILE_160x128 = 30,
172
+ CUBLASLT_MATMUL_TILE_192x128 = 31,
173
+ CUBLASLT_MATMUL_TILE_END
174
+ } cublasLtMatmulTile_t;
175
+
176
+ /** Size and number of stages in which elements are read into shared memory
177
+ *
178
+ * General order of stages IDs is sorted by stage size first and by number of stages second.
179
+ */
180
+ typedef enum {
181
+ CUBLASLT_MATMUL_STAGES_UNDEFINED = 0,
182
+ CUBLASLT_MATMUL_STAGES_16x1 = 1,
183
+ CUBLASLT_MATMUL_STAGES_16x2 = 2,
184
+ CUBLASLT_MATMUL_STAGES_16x3 = 3,
185
+ CUBLASLT_MATMUL_STAGES_16x4 = 4,
186
+ CUBLASLT_MATMUL_STAGES_16x5 = 5,
187
+ CUBLASLT_MATMUL_STAGES_16x6 = 6,
188
+ CUBLASLT_MATMUL_STAGES_32x1 = 7,
189
+ CUBLASLT_MATMUL_STAGES_32x2 = 8,
190
+ CUBLASLT_MATMUL_STAGES_32x3 = 9,
191
+ CUBLASLT_MATMUL_STAGES_32x4 = 10,
192
+ CUBLASLT_MATMUL_STAGES_32x5 = 11,
193
+ CUBLASLT_MATMUL_STAGES_32x6 = 12,
194
+ CUBLASLT_MATMUL_STAGES_64x1 = 13,
195
+ CUBLASLT_MATMUL_STAGES_64x2 = 14,
196
+ CUBLASLT_MATMUL_STAGES_64x3 = 15,
197
+ CUBLASLT_MATMUL_STAGES_64x4 = 16,
198
+ CUBLASLT_MATMUL_STAGES_64x5 = 17,
199
+ CUBLASLT_MATMUL_STAGES_64x6 = 18,
200
+ CUBLASLT_MATMUL_STAGES_128x1 = 19,
201
+ CUBLASLT_MATMUL_STAGES_128x2 = 20,
202
+ CUBLASLT_MATMUL_STAGES_128x3 = 21,
203
+ CUBLASLT_MATMUL_STAGES_128x4 = 22,
204
+ CUBLASLT_MATMUL_STAGES_128x5 = 23,
205
+ CUBLASLT_MATMUL_STAGES_128x6 = 24,
206
+ CUBLASLT_MATMUL_STAGES_32x10 = 25,
207
+ CUBLASLT_MATMUL_STAGES_8x4 = 26,
208
+ CUBLASLT_MATMUL_STAGES_16x10 = 27,
209
+ CUBLASLT_MATMUL_STAGES_8x5 = 28,
210
+ CUBLASLT_MATMUL_STAGES_16x80 = 29,
211
+ CUBLASLT_MATMUL_STAGES_64x80 = 30,
212
+ CUBLASLT_MATMUL_STAGES_END
213
+ } cublasLtMatmulStages_t;
214
+
215
+ /** Pointer mode to use for alpha/beta */
216
+ typedef enum {
217
+ /** matches CUBLAS_POINTER_MODE_HOST, pointer targets a single value host memory */
218
+ CUBLASLT_POINTER_MODE_HOST = CUBLAS_POINTER_MODE_HOST,
219
+ /** matches CUBLAS_POINTER_MODE_DEVICE, pointer targets a single value device memory */
220
+ CUBLASLT_POINTER_MODE_DEVICE = CUBLAS_POINTER_MODE_DEVICE,
221
+ /** pointer targets an array in device memory */
222
+ CUBLASLT_POINTER_MODE_DEVICE_VECTOR = 2,
223
+ /** alpha pointer targets an array in device memory, beta is zero. Note:
224
+ CUBLASLT_MATMUL_DESC_ALPHA_VECTOR_BATCH_STRIDE is not supported, must be 0. */
225
+ CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_ZERO = 3,
226
+ /** alpha pointer targets an array in device memory, beta is a single value in host memory. */
227
+ CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_HOST = 4,
228
+ } cublasLtPointerMode_t;
229
+
230
+ /** Mask to define and query pointer mode capability */
231
+ typedef enum {
232
+ /** no initial filtering is performed when querying pointer mode capabilities, will use gemm pointer mode defined in
233
+ operation description **/
234
+ CUBLASLT_POINTER_MODE_MASK_NO_FILTERING = 0,
235
+ /** see CUBLASLT_POINTER_MODE_HOST */
236
+ CUBLASLT_POINTER_MODE_MASK_HOST = 1,
237
+ /** see CUBLASLT_POINTER_MODE_DEVICE */
238
+ CUBLASLT_POINTER_MODE_MASK_DEVICE = 2,
239
+ /** see CUBLASLT_POINTER_MODE_DEVICE_VECTOR */
240
+ CUBLASLT_POINTER_MODE_MASK_DEVICE_VECTOR = 4,
241
+ /** see CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_ZERO */
242
+ CUBLASLT_POINTER_MODE_MASK_ALPHA_DEVICE_VECTOR_BETA_ZERO = 8,
243
+ /** see CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_HOST */
244
+ CUBLASLT_POINTER_MODE_MASK_ALPHA_DEVICE_VECTOR_BETA_HOST = 16,
245
+ } cublasLtPointerModeMask_t;
246
+
247
+ /** Implementation details that may affect numerical behavior of algorithms. */
248
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_FMA (0x01ull << 0)
249
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_HMMA (0x02ull << 0)
250
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_IMMA (0x04ull << 0)
251
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_DMMA (0x08ull << 0)
252
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_TENSOR_OP_MASK (0xfeull << 0)
253
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_OP_TYPE_MASK (0xffull << 0)
254
+
255
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_16F (0x01ull << 8)
256
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_32F (0x02ull << 8)
257
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_64F (0x04ull << 8)
258
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_32I (0x08ull << 8)
259
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_TYPE_MASK (0xffull << 8)
260
+
261
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_16F (0x01ull << 16)
262
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_16BF (0x02ull << 16)
263
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_TF32 (0x04ull << 16)
264
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_32F (0x08ull << 16)
265
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_64F (0x10ull << 16)
266
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_8I (0x20ull << 16)
267
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_OP_INPUT_TYPE_MASK (0xffull << 16)
268
+
269
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_GAUSSIAN (0x01ull << 32)
270
+ typedef uint64_t cublasLtNumericalImplFlags_t;
271
+
272
+ /** Execute matrix multiplication (D = alpha * op(A) * op(B) + beta * C).
273
+ *
274
+ * \retval CUBLAS_STATUS_NOT_INITIALIZED if cuBLASLt handle has not been initialized
275
+ * \retval CUBLAS_STATUS_INVALID_VALUE if parameters are in conflict or in an impossible configuration; e.g.
276
+ * when workspaceSizeInBytes is less than workspace required by configured
277
+ * algo
278
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if current implementation on selected device doesn't support configured
279
+ * operation
280
+ * \retval CUBLAS_STATUS_ARCH_MISMATCH if configured operation cannot be run using selected device
281
+ * \retval CUBLAS_STATUS_EXECUTION_FAILED if cuda reported execution error from the device
282
+ * \retval CUBLAS_STATUS_SUCCESS if the operation completed successfully
283
+ */
284
+ cublasStatus_t CUBLASWINAPI cublasLtMatmul(cublasLtHandle_t lightHandle,
285
+ cublasLtMatmulDesc_t computeDesc,
286
+ const void* alpha, /* host or device pointer */
287
+ const void* A,
288
+ cublasLtMatrixLayout_t Adesc,
289
+ const void* B,
290
+ cublasLtMatrixLayout_t Bdesc,
291
+ const void* beta, /* host or device pointer */
292
+ const void* C,
293
+ cublasLtMatrixLayout_t Cdesc,
294
+ void* D,
295
+ cublasLtMatrixLayout_t Ddesc,
296
+ const cublasLtMatmulAlgo_t* algo,
297
+ void* workspace,
298
+ size_t workspaceSizeInBytes,
299
+ cudaStream_t stream);
300
+
301
+ /** Matrix layout conversion helper (C = alpha * op(A) + beta * op(B))
302
+ *
303
+ * Can be used to change memory order of data or to scale and shift the values.
304
+ *
305
+ * \retval CUBLAS_STATUS_NOT_INITIALIZED if cuBLASLt handle has not been initialized
306
+ * \retval CUBLAS_STATUS_INVALID_VALUE if parameters are in conflict or in an impossible configuration; e.g.
307
+ * when A is not NULL, but Adesc is NULL
308
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if current implementation on selected device doesn't support configured
309
+ * operation
310
+ * \retval CUBLAS_STATUS_ARCH_MISMATCH if configured operation cannot be run using selected device
311
+ * \retval CUBLAS_STATUS_EXECUTION_FAILED if cuda reported execution error from the device
312
+ * \retval CUBLAS_STATUS_SUCCESS if the operation completed successfully
313
+ */
314
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransform(cublasLtHandle_t lightHandle,
315
+ cublasLtMatrixTransformDesc_t transformDesc,
316
+ const void* alpha, /* host or device pointer */
317
+ const void* A,
318
+ cublasLtMatrixLayout_t Adesc,
319
+ const void* beta, /* host or device pointer */
320
+ const void* B,
321
+ cublasLtMatrixLayout_t Bdesc,
322
+ void* C,
323
+ cublasLtMatrixLayout_t Cdesc,
324
+ cudaStream_t stream);
325
+
326
+ /* ---------------------------------------------------------------------------------------*/
327
+ /* Helper functions for cublasLtMatrixLayout_t */
328
+ /* ---------------------------------------------------------------------------------------*/
329
+
330
+ /** Enum for data ordering */
331
+ typedef enum {
332
+ /** Column-major
333
+ *
334
+ * Leading dimension is the stride (in elements) to the beginning of next column in memory.
335
+ */
336
+ CUBLASLT_ORDER_COL = 0,
337
+ /** Row major
338
+ *
339
+ * Leading dimension is the stride (in elements) to the beginning of next row in memory.
340
+ */
341
+ CUBLASLT_ORDER_ROW = 1,
342
+ /** Column-major ordered tiles of 32 columns.
343
+ *
344
+ * Leading dimension is the stride (in elements) to the beginning of next group of 32-columns. E.g. if matrix has 33
345
+ * columns and 2 rows, ld must be at least (32) * 2 = 64.
346
+ */
347
+ CUBLASLT_ORDER_COL32 = 2,
348
+ /** Column-major ordered tiles of composite tiles with total 32 columns and 8 rows, tile composed of interleaved
349
+ * inner tiles of 4 columns within 4 even or odd rows in an alternating pattern.
350
+ *
351
+ * Leading dimension is the stride (in elements) to the beginning of the first 32 column x 8 row tile for the next
352
+ * 32-wide group of columns. E.g. if matrix has 33 columns and 1 row, ld must be at least (32 * 8) * 1 = 256.
353
+ */
354
+ CUBLASLT_ORDER_COL4_4R2_8C = 3,
355
+ /** Column-major ordered tiles of composite tiles with total 32 columns ands 32 rows.
356
+ * Element offset within the tile is calculated as (((row%8)/2*4+row/8)*2+row%2)*32+col.
357
+ *
358
+ * Leading dimension is the stride (in elements) to the beginning of the first 32 column x 32 row tile for the next
359
+ * 32-wide group of columns. E.g. if matrix has 33 columns and 1 row, ld must be at least (32*32)*1 = 1024.
360
+ */
361
+ CUBLASLT_ORDER_COL32_2R_4R4 = 4,
362
+
363
+ } cublasLtOrder_t;
364
+
365
+ /** Attributes of memory layout */
366
+ typedef enum {
367
+ /** Data type, see cudaDataType.
368
+ *
369
+ * uint32_t
370
+ */
371
+ CUBLASLT_MATRIX_LAYOUT_TYPE = 0,
372
+
373
+ /** Memory order of the data, see cublasLtOrder_t.
374
+ *
375
+ * int32_t, default: CUBLASLT_ORDER_COL
376
+ */
377
+ CUBLASLT_MATRIX_LAYOUT_ORDER = 1,
378
+
379
+ /** Number of rows.
380
+ *
381
+ * Usually only values that can be expressed as int32_t are supported.
382
+ *
383
+ * uint64_t
384
+ */
385
+ CUBLASLT_MATRIX_LAYOUT_ROWS = 2,
386
+
387
+ /** Number of columns.
388
+ *
389
+ * Usually only values that can be expressed as int32_t are supported.
390
+ *
391
+ * uint64_t
392
+ */
393
+ CUBLASLT_MATRIX_LAYOUT_COLS = 3,
394
+
395
+ /** Matrix leading dimension.
396
+ *
397
+ * For CUBLASLT_ORDER_COL this is stride (in elements) of matrix column, for more details and documentation for
398
+ * other memory orders see documentation for cublasLtOrder_t values.
399
+ *
400
+ * Currently only non-negative values are supported, must be large enough so that matrix memory locations are not
401
+ * overlapping (e.g. greater or equal to CUBLASLT_MATRIX_LAYOUT_ROWS in case of CUBLASLT_ORDER_COL).
402
+ *
403
+ * int64_t;
404
+ */
405
+ CUBLASLT_MATRIX_LAYOUT_LD = 4,
406
+
407
+ /** Number of matmul operations to perform in the batch.
408
+ *
409
+ * See also CUBLASLT_ALGO_CAP_STRIDED_BATCH_SUPPORT
410
+ *
411
+ * int32_t, default: 1
412
+ */
413
+ CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT = 5,
414
+
415
+ /** Stride (in elements) to the next matrix for strided batch operation.
416
+ *
417
+ * When matrix type is planar-complex (CUBLASLT_MATRIX_LAYOUT_PLANE_OFFSET != 0), batch stride
418
+ * is interpreted by cublasLtMatmul() in number of real valued sub-elements. E.g. for data of type CUDA_C_16F,
419
+ * offset of 1024B is encoded as a stride of value 512 (since each element of the real and imaginary matrices
420
+ * is a 2B (16bit) floating point type).
421
+ *
422
+ * NOTE: A bug in cublasLtMatrixTransform() causes it to interpret the batch stride for a planar-complex matrix
423
+ * as if it was specified in number of complex elements. Therefore an offset of 1024B must be encoded as stride
424
+ * value 256 when calling cublasLtMatrixTransform() (each complex element is 4B with real and imaginary values 2B
425
+ * each). This behavior is expected to be corrected in the next major cuBLAS version.
426
+ *
427
+ * int64_t, default: 0
428
+ */
429
+ CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET = 6,
430
+
431
+ /** Stride (in bytes) to the imaginary plane for planar complex layout.
432
+ *
433
+ * int64_t, default: 0 - 0 means that layout is regular (real and imaginary parts of complex numbers are interleaved
434
+ * in memory in each element)
435
+ */
436
+ CUBLASLT_MATRIX_LAYOUT_PLANE_OFFSET = 7,
437
+ } cublasLtMatrixLayoutAttribute_t;
438
+
439
+ /** Internal. Do not use directly.
440
+ */
441
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutInit_internal( //
442
+ cublasLtMatrixLayout_t matLayout,
443
+ size_t size,
444
+ cudaDataType type,
445
+ uint64_t rows,
446
+ uint64_t cols,
447
+ int64_t ld);
448
+
449
+ /** Initialize matrix layout descriptor in pre-allocated space.
450
+ *
451
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient
452
+ * \retval CUBLAS_STATUS_SUCCESS if desciptor was created successfully
453
+ */
454
+ static inline cublasStatus_t cublasLtMatrixLayoutInit(
455
+ cublasLtMatrixLayout_t matLayout, cudaDataType type, uint64_t rows, uint64_t cols, int64_t ld) {
456
+ return cublasLtMatrixLayoutInit_internal(matLayout, sizeof(*matLayout), type, rows, cols, ld);
457
+ }
458
+
459
+ /** Create new matrix layout descriptor.
460
+ *
461
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated
462
+ * \retval CUBLAS_STATUS_SUCCESS if desciptor was created successfully
463
+ */
464
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutCreate( //
465
+ cublasLtMatrixLayout_t* matLayout,
466
+ cudaDataType type,
467
+ uint64_t rows,
468
+ uint64_t cols,
469
+ int64_t ld);
470
+
471
+ /** Destroy matrix layout descriptor.
472
+ *
473
+ * \retval CUBLAS_STATUS_SUCCESS if operation was successful
474
+ */
475
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutDestroy(cublasLtMatrixLayout_t matLayout);
476
+
477
+ /** Set matrix layout descriptor attribute.
478
+ *
479
+ * \param[in] matLayout The descriptor
480
+ * \param[in] attr The attribute
481
+ * \param[in] buf memory address containing the new value
482
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
483
+ *
484
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
485
+ * selected attribute
486
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
487
+ */
488
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutSetAttribute( //
489
+ cublasLtMatrixLayout_t matLayout,
490
+ cublasLtMatrixLayoutAttribute_t attr,
491
+ const void* buf,
492
+ size_t sizeInBytes);
493
+
494
+ /** Get matrix layout descriptor attribute.
495
+ *
496
+ * \param[in] matLayout The descriptor
497
+ * \param[in] attr The attribute
498
+ * \param[out] buf memory address containing the new value
499
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
500
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
501
+ * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
502
+ *
503
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
504
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
505
+ * selected attribute
506
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
507
+ */
508
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutGetAttribute( //
509
+ cublasLtMatrixLayout_t matLayout,
510
+ cublasLtMatrixLayoutAttribute_t attr,
511
+ void* buf,
512
+ size_t sizeInBytes,
513
+ size_t* sizeWritten);
514
+
515
+ /* ---------------------------------------------------------------------------------------*/
516
+ /* Helper functions for cublasLtMatmulDesc_t */
517
+ /* ---------------------------------------------------------------------------------------*/
518
+
519
+ /** Matmul descriptor attributes to define details of the operation. */
520
+ typedef enum {
521
+ /** Compute type, see cudaDataType. Defines data type used for multiply and accumulate operations and the
522
+ * accumulator during matrix multiplication.
523
+ *
524
+ * int32_t
525
+ */
526
+ CUBLASLT_MATMUL_DESC_COMPUTE_TYPE = 0,
527
+
528
+ /** Scale type, see cudaDataType. Defines data type of alpha and beta. Accumulator and value from matrix C are
529
+ * typically converted to scale type before final scaling. Value is then converted from scale type to type of matrix
530
+ * D before being stored in memory.
531
+ *
532
+ * int32_t, default: same as CUBLASLT_MATMUL_DESC_COMPUTE_TYPE
533
+ */
534
+ CUBLASLT_MATMUL_DESC_SCALE_TYPE = 1,
535
+
536
+ /** Pointer mode of alpha and beta, see cublasLtPointerMode_t. When CUBLASLT_POINTER_MODE_DEVICE_VECTOR is in use,
537
+ * alpha/beta vector lenghts must match number of output matrix rows.
538
+ *
539
+ * int32_t, default: CUBLASLT_POINTER_MODE_HOST
540
+ */
541
+ CUBLASLT_MATMUL_DESC_POINTER_MODE = 2,
542
+
543
+ /** Transform of matrix A, see cublasOperation_t.
544
+ *
545
+ * int32_t, default: CUBLAS_OP_N
546
+ */
547
+ CUBLASLT_MATMUL_DESC_TRANSA = 3,
548
+
549
+ /** Transform of matrix B, see cublasOperation_t.
550
+ *
551
+ * int32_t, default: CUBLAS_OP_N
552
+ */
553
+ CUBLASLT_MATMUL_DESC_TRANSB = 4,
554
+
555
+ /** Transform of matrix C, see cublasOperation_t.
556
+ *
557
+ * Currently only CUBLAS_OP_N is supported.
558
+ *
559
+ * int32_t, default: CUBLAS_OP_N
560
+ */
561
+ CUBLASLT_MATMUL_DESC_TRANSC = 5,
562
+
563
+ /** Matrix fill mode, see cublasFillMode_t.
564
+ *
565
+ * int32_t, default: CUBLAS_FILL_MODE_FULL
566
+ */
567
+ CUBLASLT_MATMUL_DESC_FILL_MODE = 6,
568
+
569
+ /** Epilogue function, see cublasLtEpilogue_t.
570
+ *
571
+ * uint32_t, default: CUBLASLT_EPILOGUE_DEFAULT
572
+ */
573
+ CUBLASLT_MATMUL_DESC_EPILOGUE = 7,
574
+
575
+ /** Bias or bias gradient vector pointer in the device memory.
576
+ *
577
+ * Bias case. See CUBLASLT_EPILOGUE_BIAS.
578
+ * Bias vector elements are the same type as
579
+ * the output elements (Ctype) with the exception of IMMA kernels with computeType=CUDA_R_32I and Ctype=CUDA_R_8I
580
+ * where the bias vector elements are the same type as alpha, beta (CUBLASLT_MATMUL_DESC_SCALE_TYPE=CUDA_R_32F).
581
+ * Bias vector length must match matrix D rows count.
582
+ *
583
+ * Bias gradient case. See CUBLASLT_EPILOGUE_DRELU_BGRAD and CUBLASLT_EPILOGUE_DGELU_BGRAD.
584
+ * Bias gradient vector elements are the same type as the output elements
585
+ * (Ctype) with the exception of IMMA kernels (see above).
586
+ *
587
+ * Routines that don't dereference this pointer, like cublasLtMatmulAlgoGetHeuristic()
588
+ * depend on its value to determine expected pointer alignment.
589
+ *
590
+ * Bias case: const void *, default: NULL
591
+ * Bias gradient case: void *, default: NULL
592
+ */
593
+ CUBLASLT_MATMUL_DESC_BIAS_POINTER = 8,
594
+
595
+ /** Batch stride for bias or bias gradient vector.
596
+ *
597
+ * Used together with CUBLASLT_MATMUL_DESC_BIAS_POINTER when matrix D's CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT > 1.
598
+ *
599
+ * int64_t, default: 0
600
+ */
601
+ CUBLASLT_MATMUL_DESC_BIAS_BATCH_STRIDE = 10,
602
+
603
+ /** Pointer for epilogue auxiliary buffer.
604
+ *
605
+ * - Output vector for ReLu bit-mask in forward pass when CUBLASLT_EPILOGUE_RELU_AUX
606
+ * or CUBLASLT_EPILOGUE_RELU_AUX_BIAS epilogue is used.
607
+ * - Input vector for ReLu bit-mask in backward pass when
608
+ * CUBLASLT_EPILOGUE_DRELU_BGRAD epilogue is used.
609
+ *
610
+ * - Output of GELU input matrix in forward pass when
611
+ * CUBLASLT_EPILOGUE_GELU_AUX_BIAS epilogue is used.
612
+ * - Input of GELU input matrix for backward pass when
613
+ * CUBLASLT_EPILOGUE_DGELU_BGRAD epilogue is used.
614
+ *
615
+ * GELU input matrix elements type is the same as the type of elements of
616
+ * the output matrix.
617
+ *
618
+ * Routines that don't dereference this pointer, like cublasLtMatmulAlgoGetHeuristic()
619
+ * depend on its value to determine expected pointer alignment.
620
+ *
621
+ * Requires setting CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD attribute.
622
+ *
623
+ * Forward pass: void *, default: NULL
624
+ * Backward pass: const void *, default: NULL
625
+ */
626
+ CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER = 11,
627
+
628
+ /** Leading dimension for epilogue auxiliary buffer.
629
+ *
630
+ * - ReLu bit-mask matrix leading dimension in elements (i.e. bits)
631
+ * when CUBLASLT_EPILOGUE_RELU_AUX, CUBLASLT_EPILOGUE_RELU_AUX_BIAS or CUBLASLT_EPILOGUE_DRELU_BGRAD epilogue is
632
+ * used. Must be divisible by 128 and be no less than the number of rows in the output matrix.
633
+ *
634
+ * - GELU input matrix leading dimension in elements
635
+ * when CUBLASLT_EPILOGUE_GELU_AUX_BIAS or CUBLASLT_EPILOGUE_DGELU_BGRAD epilogue used.
636
+ * Must be divisible by 8 and be no less than the number of rows in the output matrix.
637
+ *
638
+ * int64_t, default: 0
639
+ */
640
+ CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD = 12,
641
+
642
+ /** Batch stride for epilogue auxiliary buffer.
643
+ *
644
+ * - ReLu bit-mask matrix batch stride in elements (i.e. bits)
645
+ * when CUBLASLT_EPILOGUE_RELU_AUX, CUBLASLT_EPILOGUE_RELU_AUX_BIAS or CUBLASLT_EPILOGUE_DRELU_BGRAD epilogue is
646
+ * used. Must be divisible by 128.
647
+ *
648
+ * - GELU input matrix batch stride in elements
649
+ * when CUBLASLT_EPILOGUE_GELU_AUX_BIAS or CUBLASLT_EPILOGUE_DGELU_BGRAD epilogue used.
650
+ * Must be divisible by 8.
651
+ *
652
+ * int64_t, default: 0
653
+ */
654
+ CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_BATCH_STRIDE = 13,
655
+
656
+ /** Batch stride for alpha vector.
657
+ *
658
+ * Used together with CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_HOST when matrix D's
659
+ * CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT > 1. If CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_ZERO is set then
660
+ * CUBLASLT_MATMUL_DESC_ALPHA_VECTOR_BATCH_STRIDE must be set to 0 as this mode doesnt supported batched alpha vector.
661
+ *
662
+ * int64_t, default: 0
663
+ */
664
+ CUBLASLT_MATMUL_DESC_ALPHA_VECTOR_BATCH_STRIDE = 14,
665
+
666
+ /** Number of SMs to target for parallel execution. Optimizes heuristics for execution on a different number of SMs
667
+ * when user expects a concurrent stream to be using some of the device resources.
668
+ *
669
+ * int32_t, default: 0 - use the number reported by the device.
670
+ */
671
+ CUBLASLT_MATMUL_DESC_SM_COUNT_TARGET = 15,
672
+ } cublasLtMatmulDescAttributes_t;
673
+
674
+ /** Internal. Do not use directly.
675
+ */
676
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulDescInit_internal( //
677
+ cublasLtMatmulDesc_t matmulDesc,
678
+ size_t size,
679
+ cublasComputeType_t computeType,
680
+ cudaDataType_t scaleType);
681
+
682
+ /** Initialize matmul operation descriptor in pre-allocated space.
683
+ *
684
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient
685
+ * \retval CUBLAS_STATUS_SUCCESS if desciptor was initialized successfully
686
+ */
687
+ static inline cublasStatus_t cublasLtMatmulDescInit( //
688
+ cublasLtMatmulDesc_t matmulDesc,
689
+ cublasComputeType_t computeType,
690
+ cudaDataType_t scaleType) {
691
+ return cublasLtMatmulDescInit_internal(matmulDesc, sizeof(*matmulDesc), computeType, scaleType);
692
+ }
693
+
694
+ /** Create new matmul operation descriptor.
695
+ *
696
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated
697
+ * \retval CUBLAS_STATUS_SUCCESS if desciptor was created successfully
698
+ */
699
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulDescCreate(cublasLtMatmulDesc_t* matmulDesc,
700
+ cublasComputeType_t computeType,
701
+ cudaDataType_t scaleType);
702
+
703
+ /** Destroy matmul operation descriptor.
704
+ *
705
+ * \retval CUBLAS_STATUS_SUCCESS if operation was successful
706
+ */
707
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulDescDestroy(cublasLtMatmulDesc_t matmulDesc);
708
+
709
+ /** Set matmul operation descriptor attribute.
710
+ *
711
+ * \param[in] matmulDesc The descriptor
712
+ * \param[in] attr The attribute
713
+ * \param[in] buf memory address containing the new value
714
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
715
+ *
716
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
717
+ * selected attribute
718
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
719
+ */
720
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulDescSetAttribute( //
721
+ cublasLtMatmulDesc_t matmulDesc,
722
+ cublasLtMatmulDescAttributes_t attr,
723
+ const void* buf,
724
+ size_t sizeInBytes);
725
+
726
+ /** Get matmul operation descriptor attribute.
727
+ *
728
+ * \param[in] matmulDesc The descriptor
729
+ * \param[in] attr The attribute
730
+ * \param[out] buf memory address containing the new value
731
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
732
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
733
+ * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
734
+ *
735
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
736
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
737
+ * selected attribute
738
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
739
+ */
740
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulDescGetAttribute( //
741
+ cublasLtMatmulDesc_t matmulDesc,
742
+ cublasLtMatmulDescAttributes_t attr,
743
+ void* buf,
744
+ size_t sizeInBytes,
745
+ size_t* sizeWritten);
746
+
747
+ /* ---------------------------------------------------------------------------------------*/
748
+ /* Helper functions for cublasLtMatrixTransformDesc_t */
749
+ /* ---------------------------------------------------------------------------------------*/
750
+
751
+ /** Matrix transform descriptor attributes to define details of the operation.
752
+ */
753
+ typedef enum {
754
+ /** Scale type, see cudaDataType. Inputs are converted to scale type for scaling and summation and results are then
755
+ * converted to output type to store in memory.
756
+ *
757
+ * int32_t
758
+ */
759
+ CUBLASLT_MATRIX_TRANSFORM_DESC_SCALE_TYPE,
760
+
761
+ /** Pointer mode of alpha and beta, see cublasLtPointerMode_t.
762
+ *
763
+ * int32_t, default: CUBLASLT_POINTER_MODE_HOST
764
+ */
765
+ CUBLASLT_MATRIX_TRANSFORM_DESC_POINTER_MODE,
766
+
767
+ /** Transform of matrix A, see cublasOperation_t.
768
+ *
769
+ * int32_t, default: CUBLAS_OP_N
770
+ */
771
+ CUBLASLT_MATRIX_TRANSFORM_DESC_TRANSA,
772
+
773
+ /** Transform of matrix B, see cublasOperation_t.
774
+ *
775
+ * int32_t, default: CUBLAS_OP_N
776
+ */
777
+ CUBLASLT_MATRIX_TRANSFORM_DESC_TRANSB,
778
+ } cublasLtMatrixTransformDescAttributes_t;
779
+
780
+ /** Internal. Do not use directly.
781
+ */
782
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescInit_internal(cublasLtMatrixTransformDesc_t transformDesc,
783
+ size_t size,
784
+ cudaDataType scaleType);
785
+
786
+ /** Initialize matrix transform operation descriptor in pre-allocated space.
787
+ *
788
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient
789
+ * \retval CUBLAS_STATUS_SUCCESS if desciptor was created successfully
790
+ */
791
+ static inline cublasStatus_t cublasLtMatrixTransformDescInit(cublasLtMatrixTransformDesc_t transformDesc,
792
+ cudaDataType scaleType) {
793
+ return cublasLtMatrixTransformDescInit_internal(transformDesc, sizeof(*transformDesc), scaleType);
794
+ }
795
+
796
+ /** Create new matrix transform operation descriptor.
797
+ *
798
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated
799
+ * \retval CUBLAS_STATUS_SUCCESS if desciptor was created successfully
800
+ */
801
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescCreate(cublasLtMatrixTransformDesc_t* transformDesc,
802
+ cudaDataType scaleType);
803
+
804
+ /** Destroy matrix transform operation descriptor.
805
+ *
806
+ * \retval CUBLAS_STATUS_SUCCESS if operation was successful
807
+ */
808
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescDestroy(cublasLtMatrixTransformDesc_t transformDesc);
809
+
810
+ /** Set matrix transform operation descriptor attribute.
811
+ *
812
+ * \param[in] transformDesc The descriptor
813
+ * \param[in] attr The attribute
814
+ * \param[in] buf memory address containing the new value
815
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
816
+ *
817
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
818
+ * selected attribute
819
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
820
+ */
821
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescSetAttribute( //
822
+ cublasLtMatrixTransformDesc_t transformDesc,
823
+ cublasLtMatrixTransformDescAttributes_t attr,
824
+ const void* buf,
825
+ size_t sizeInBytes);
826
+
827
+ /** Get matrix transform operation descriptor attribute.
828
+ *
829
+ * \param[in] transformDesc The descriptor
830
+ * \param[in] attr The attribute
831
+ * \param[out] buf memory address containing the new value
832
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
833
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number
834
+ * of bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
835
+ *
836
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
837
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
838
+ * selected attribute
839
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
840
+ */
841
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescGetAttribute( //
842
+ cublasLtMatrixTransformDesc_t transformDesc,
843
+ cublasLtMatrixTransformDescAttributes_t attr,
844
+ void* buf,
845
+ size_t sizeInBytes,
846
+ size_t* sizeWritten);
847
+
848
+ /** For computation with complex numbers, this enum allows to apply the Gauss Complexity reduction algorithm
849
+ */
850
+ typedef enum {
851
+ CUBLASLT_3M_MODE_DISALLOWED = 0,
852
+ CUBLASLT_3M_MODE_ALLOWED = 1,
853
+ } cublasLt3mMode_t;
854
+
855
+ /** Reduction scheme for portions of the dot-product calculated in parallel (a. k. a. "split - K").
856
+ */
857
+ typedef enum {
858
+ /** No reduction scheme, dot-product shall be performed in one sequence.
859
+ */
860
+ CUBLASLT_REDUCTION_SCHEME_NONE = 0,
861
+
862
+ /** Reduction is performed "in place" - using the output buffer (and output data type) and counters (in workspace) to
863
+ * guarantee the sequentiality.
864
+ */
865
+ CUBLASLT_REDUCTION_SCHEME_INPLACE = 1,
866
+
867
+ /** Intermediate results are stored in compute type in the workspace and reduced in a separate step.
868
+ */
869
+ CUBLASLT_REDUCTION_SCHEME_COMPUTE_TYPE = 2,
870
+
871
+ /** Intermediate results are stored in output type in the workspace and reduced in a separate step.
872
+ */
873
+ CUBLASLT_REDUCTION_SCHEME_OUTPUT_TYPE = 4,
874
+
875
+ CUBLASLT_REDUCTION_SCHEME_MASK = 0x7,
876
+ } cublasLtReductionScheme_t;
877
+
878
+ /** Postprocessing options for the epilogue
879
+ */
880
+ typedef enum {
881
+ /** No special postprocessing, just scale and quantize results if necessary.
882
+ */
883
+ CUBLASLT_EPILOGUE_DEFAULT = 1,
884
+
885
+ /** ReLu, apply ReLu point-wise transform to the results (x:=max(x, 0)).
886
+ */
887
+ CUBLASLT_EPILOGUE_RELU = 2,
888
+
889
+ /** ReLu, apply ReLu point-wise transform to the results (x:=max(x, 0)).
890
+ *
891
+ * This epilogue mode produces an extra output, a ReLu bit-mask matrix,
892
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
893
+ */
894
+ CUBLASLT_EPILOGUE_RELU_AUX = (CUBLASLT_EPILOGUE_RELU | 128),
895
+
896
+ /** Bias, apply (broadcasted) Bias from bias vector. Bias vector length must match matrix D rows, it must be packed
897
+ * (stride between vector elements is 1). Bias vector is broadcasted to all columns and added before applying final
898
+ * postprocessing.
899
+ */
900
+ CUBLASLT_EPILOGUE_BIAS = 4,
901
+
902
+ /** ReLu and Bias, apply Bias and then ReLu transform
903
+ */
904
+ CUBLASLT_EPILOGUE_RELU_BIAS = (CUBLASLT_EPILOGUE_RELU | CUBLASLT_EPILOGUE_BIAS),
905
+
906
+ /** ReLu and Bias, apply Bias and then ReLu transform
907
+ *
908
+ * This epilogue mode produces an extra output, a ReLu bit-mask matrix,
909
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
910
+ */
911
+ CUBLASLT_EPILOGUE_RELU_AUX_BIAS = (CUBLASLT_EPILOGUE_RELU_AUX | CUBLASLT_EPILOGUE_BIAS),
912
+
913
+ /* ReLu gradient. Apply ReLu gradient to matmul output. Store ReLu gradient in the output matrix.
914
+ *
915
+ * This epilogue mode requires an extra input,
916
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
917
+ */
918
+ CUBLASLT_EPILOGUE_DRELU = 8 | 128,
919
+
920
+ /* ReLu and Bias gradients. Apply independently ReLu and Bias gradient to
921
+ * matmul output. Store ReLu gradient in the output matrix, and Bias gradient
922
+ * in the auxiliary output (see CUBLASLT_MATMUL_DESC_BIAS_POINTER).
923
+ *
924
+ * This epilogue mode requires an extra input,
925
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
926
+ */
927
+ CUBLASLT_EPILOGUE_DRELU_BGRAD = CUBLASLT_EPILOGUE_DRELU | 16,
928
+
929
+ /** GELU, apply GELU point-wise transform to the results (x:=GELU(x)).
930
+ */
931
+ CUBLASLT_EPILOGUE_GELU = 32,
932
+
933
+ /** GELU, apply GELU point-wise transform to the results (x:=GELU(x)).
934
+ *
935
+ * This epilogue mode outputs GELU input as a separate matrix (useful for training).
936
+ * See CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
937
+ */
938
+ CUBLASLT_EPILOGUE_GELU_AUX = (CUBLASLT_EPILOGUE_GELU | 128),
939
+
940
+ /** GELU and Bias, apply Bias and then GELU transform
941
+ */
942
+ CUBLASLT_EPILOGUE_GELU_BIAS = (CUBLASLT_EPILOGUE_GELU | CUBLASLT_EPILOGUE_BIAS),
943
+
944
+ /** GELU and Bias, apply Bias and then GELU transform
945
+ *
946
+ * This epilogue mode outputs GELU input as a separate matrix (useful for training).
947
+ * See CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
948
+ */
949
+ CUBLASLT_EPILOGUE_GELU_AUX_BIAS = (CUBLASLT_EPILOGUE_GELU_AUX | CUBLASLT_EPILOGUE_BIAS),
950
+
951
+ /* GELU gradient. Apply GELU gradient to matmul output. Store GELU gradient in the output matrix.
952
+ *
953
+ * This epilogue mode requires an extra input,
954
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
955
+ */
956
+ CUBLASLT_EPILOGUE_DGELU = 64 | 128,
957
+
958
+ /* GELU and Bias gradients. Apply independently GELU and Bias gradient to
959
+ * matmul output. Store GELU gradient in the output matrix, and Bias gradient
960
+ * in the auxiliary output (see CUBLASLT_MATMUL_DESC_BIAS_POINTER).
961
+ *
962
+ * This epilogue mode requires an extra input,
963
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
964
+ */
965
+ CUBLASLT_EPILOGUE_DGELU_BGRAD = CUBLASLT_EPILOGUE_DGELU | 16,
966
+
967
+ /** Bias gradient based on the input matrix A.
968
+ *
969
+ * The bias size corresponds to the number of rows of the matrix D.
970
+ * The reduction happens over the GEMM's "k" dimension.
971
+ *
972
+ * Stores Bias gradient in the auxiliary output
973
+ * (see CUBLASLT_MATMUL_DESC_BIAS_POINTER).
974
+ */
975
+ CUBLASLT_EPILOGUE_BGRADA = 256,
976
+
977
+ /** Bias gradient based on the input matrix B.
978
+ *
979
+ * The bias size corresponds to the number of columns of the matrix D.
980
+ * The reduction happens over the GEMM's "k" dimension.
981
+ *
982
+ * Stores Bias gradient in the auxiliary output
983
+ * (see CUBLASLT_MATMUL_DESC_BIAS_POINTER).
984
+ */
985
+ CUBLASLT_EPILOGUE_BGRADB = 512,
986
+ } cublasLtEpilogue_t;
987
+
988
+ /** Matmul heuristic search mode
989
+ */
990
+ typedef enum {
991
+ /** ask heuristics for best algo for given usecase
992
+ */
993
+ CUBLASLT_SEARCH_BEST_FIT = 0,
994
+ /** only try to find best config for preconfigured algo id
995
+ */
996
+ CUBLASLT_SEARCH_LIMITED_BY_ALGO_ID = 1,
997
+ /** reserved for future use
998
+ */
999
+ CUBLASLT_SEARCH_RESERVED_02 = 2,
1000
+ /** reserved for future use
1001
+ */
1002
+ CUBLASLT_SEARCH_RESERVED_03 = 3,
1003
+ /** reserved for future use
1004
+ */
1005
+ CUBLASLT_SEARCH_RESERVED_04 = 4,
1006
+ /** reserved for future use
1007
+ */
1008
+ CUBLASLT_SEARCH_RESERVED_05 = 5,
1009
+ } cublasLtMatmulSearch_t;
1010
+
1011
+ /** Algo search preference to fine tune the heuristic function. */
1012
+ typedef enum {
1013
+ /** Search mode, see cublasLtMatmulSearch_t.
1014
+ *
1015
+ * uint32_t, default: CUBLASLT_SEARCH_BEST_FIT
1016
+ */
1017
+ CUBLASLT_MATMUL_PREF_SEARCH_MODE = 0,
1018
+
1019
+ /** Maximum allowed workspace size in bytes.
1020
+ *
1021
+ * uint64_t, default: 0 - no workspace allowed
1022
+ */
1023
+ CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES = 1,
1024
+
1025
+ /** Math mode mask, see cublasMath_t.
1026
+ *
1027
+ * Only algorithms with CUBLASLT_ALGO_CAP_MATHMODE_IMPL that is not masked out by this attribute are allowed.
1028
+ *
1029
+ * uint32_t, default: 1 (allows both default and tensor op math)
1030
+ * DEPRECATED, will be removed in a future release, see cublasLtNumericalImplFlags_t for replacement
1031
+ */
1032
+ CUBLASLT_MATMUL_PREF_MATH_MODE_MASK = 2,
1033
+
1034
+ /** Reduction scheme mask, see cublasLtReductionScheme_t. Filters heuristic result to only include algo configs that
1035
+ * use one of the required modes.
1036
+ *
1037
+ * E.g. mask value of 0x03 will allow only INPLACE and COMPUTE_TYPE reduction schemes.
1038
+ *
1039
+ * uint32_t, default: CUBLASLT_REDUCTION_SCHEME_MASK (allows all reduction schemes)
1040
+ */
1041
+ CUBLASLT_MATMUL_PREF_REDUCTION_SCHEME_MASK = 3,
1042
+
1043
+ /** Gaussian mode mask, see cublasLt3mMode_t.
1044
+ *
1045
+ * Only algorithms with CUBLASLT_ALGO_CAP_GAUSSIAN_IMPL that is not masked out by this attribute are allowed.
1046
+ *
1047
+ * uint32_t, default: CUBLASLT_3M_MODE_ALLOWED (allows both gaussian and non-gaussian algorithms)
1048
+ * DEPRECATED, will be removed in a future release, see cublasLtNumericalImplFlags_t for replacement
1049
+ */
1050
+ CUBLASLT_MATMUL_PREF_GAUSSIAN_MODE_MASK = 4,
1051
+
1052
+ /** Minimum buffer alignment for matrix A (in bytes).
1053
+ *
1054
+ * Selecting a smaller value will exclude algorithms that can not work with matrix A that is not as strictly aligned
1055
+ * as they need.
1056
+ *
1057
+ * uint32_t, default: 256
1058
+ */
1059
+ CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_A_BYTES = 5,
1060
+
1061
+ /** Minimum buffer alignment for matrix B (in bytes).
1062
+ *
1063
+ * Selecting a smaller value will exclude algorithms that can not work with matrix B that is not as strictly aligned
1064
+ * as they need.
1065
+ *
1066
+ * uint32_t, default: 256
1067
+ */
1068
+ CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_B_BYTES = 6,
1069
+
1070
+ /** Minimum buffer alignment for matrix C (in bytes).
1071
+ *
1072
+ * Selecting a smaller value will exclude algorithms that can not work with matrix C that is not as strictly aligned
1073
+ * as they need.
1074
+ *
1075
+ * uint32_t, default: 256
1076
+ */
1077
+ CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_C_BYTES = 7,
1078
+
1079
+ /** Minimum buffer alignment for matrix D (in bytes).
1080
+ *
1081
+ * Selecting a smaller value will exclude algorithms that can not work with matrix D that is not as strictly aligned
1082
+ * as they need.
1083
+ *
1084
+ * uint32_t, default: 256
1085
+ */
1086
+ CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_D_BYTES = 8,
1087
+
1088
+ /** Maximum wave count.
1089
+ *
1090
+ * See cublasLtMatmulHeuristicResult_t::wavesCount.
1091
+ *
1092
+ * Selecting a non-zero value will exclude algorithms that report device utilization higher than specified.
1093
+ *
1094
+ * float, default: 0.0f
1095
+ */
1096
+ CUBLASLT_MATMUL_PREF_MAX_WAVES_COUNT = 9,
1097
+
1098
+ /** Pointer mode mask, see cublasLtPointerModeMask_t. Filters heuristic result to only include algorithms that support
1099
+ * all required modes.
1100
+ *
1101
+ * uint32_t, default: (CUBLASLT_POINTER_MODE_MASK_HOST | CUBLASLT_POINTER_MODE_MASK_DEVICE) (only allows algorithms
1102
+ * that support both regular host and device pointers)
1103
+ */
1104
+ CUBLASLT_MATMUL_PREF_POINTER_MODE_MASK = 10,
1105
+
1106
+ /** Epilogue selector mask, see cublasLtEpilogue_t. Filters heuristic result to only include algorithms that support
1107
+ * all required operations.
1108
+ *
1109
+ * uint32_t, default: CUBLASLT_EPILOGUE_DEFAULT (only allows algorithms that support default epilogue)
1110
+ */
1111
+ CUBLASLT_MATMUL_PREF_EPILOGUE_MASK = 11,
1112
+
1113
+ /** Numerical implementation details mask, see cublasLtNumericalImplFlags_t. Filters heuristic result to only include
1114
+ * algorithms that use the allowed implementations.
1115
+ *
1116
+ * uint64_t, default: uint64_t(-1) (allow everything)
1117
+ */
1118
+ CUBLASLT_MATMUL_PREF_IMPL_MASK = 12,
1119
+
1120
+ /** Number of SMs to target for parallel execution. Optimizes heuristics for execution on a different number of SMs
1121
+ * when user expects a concurrent stream to be using some of the device resources.
1122
+ *
1123
+ * Overrides the SM count target set in the matrix multiplication descriptor (see cublasLtMatmulDescAttributes_t).
1124
+ *
1125
+ * int32_t, default: 0 - use the number reported by the device.
1126
+ * DEPRECATED, will be removed in a future release, see cublasLtMatmulDescAttributes_t for replacement
1127
+ */
1128
+ CUBLASLT_MATMUL_PREF_SM_COUNT_TARGET = 13,
1129
+ } cublasLtMatmulPreferenceAttributes_t;
1130
+
1131
+ /** Internal. Do not use directly.
1132
+ */
1133
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceInit_internal(cublasLtMatmulPreference_t pref, size_t size);
1134
+
1135
+ /** Initialize matmul heuristic search preference descriptor in pre-allocated space.
1136
+ *
1137
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient
1138
+ * \retval CUBLAS_STATUS_SUCCESS if desciptor was created successfully
1139
+ */
1140
+ static inline cublasStatus_t cublasLtMatmulPreferenceInit(cublasLtMatmulPreference_t pref) {
1141
+ return cublasLtMatmulPreferenceInit_internal(pref, sizeof(*pref));
1142
+ }
1143
+
1144
/** Create new matmul heuristic search preference descriptor.
 *
 * \param[out] pref  receives the newly allocated preference descriptor; release with
 *                   cublasLtMatmulPreferenceDestroy()
 *
 * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated
 * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
 */
cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceCreate(cublasLtMatmulPreference_t* pref);
1150
+
1151
+ /** Destroy matmul heuristic search preference descriptor.
1152
+ *
1153
+ * \retval CUBLAS_STATUS_SUCCESS if operation was successful
1154
+ */
1155
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceDestroy(cublasLtMatmulPreference_t pref);
1156
+
1157
+ /** Set matmul heuristic search preference descriptor attribute.
1158
+ *
1159
+ * \param[in] pref The descriptor
1160
+ * \param[in] attr The attribute
1161
+ * \param[in] buf memory address containing the new value
1162
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1163
+ *
1164
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
1165
+ * selected attribute
1166
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
1167
+ */
1168
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceSetAttribute( //
1169
+ cublasLtMatmulPreference_t pref,
1170
+ cublasLtMatmulPreferenceAttributes_t attr,
1171
+ const void* buf,
1172
+ size_t sizeInBytes);
1173
+
1174
+ /** Get matmul heuristic search preference descriptor attribute.
1175
+ *
1176
+ * \param[in] pref The descriptor
1177
+ * \param[in] attr The attribute
1178
+ * \param[out] buf memory address containing the new value
1179
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1180
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
1181
+ * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
1182
+ *
1183
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
1184
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
1185
+ * selected attribute
1186
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
1187
+ */
1188
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceGetAttribute( //
1189
+ cublasLtMatmulPreference_t pref,
1190
+ cublasLtMatmulPreferenceAttributes_t attr,
1191
+ void* buf,
1192
+ size_t sizeInBytes,
1193
+ size_t* sizeWritten);
1194
+
1195
/** Results structure used by cublasLtMatmulGetAlgo.
 *
 * Holds returned configured algo descriptor and its runtime properties.
 */
typedef struct {
  /** Matmul algorithm descriptor.
   *
   * Must be initialized with cublasLtMatmulAlgoInit() if preferences' CUBLASLT_MATMUL_PREF_SEARCH_MODE is set to
   * CUBLASLT_SEARCH_LIMITED_BY_ALGO_ID
   */
  cublasLtMatmulAlgo_t algo;

  /** Actual size of workspace memory required.
   */
  size_t workspaceSize;

  /** Result status, other fields are only valid if after call to cublasLtMatmulAlgoGetHeuristic() this member is set to
   * CUBLAS_STATUS_SUCCESS.
   */
  cublasStatus_t state;

  /** Waves count - a device utilization metric.
   *
   * wavesCount value of 1.0f suggests that when kernel is launched it will fully occupy the GPU.
   */
  float wavesCount;

  /* Reserved; padding that keeps the struct size/ABI stable across releases. */
  int reserved[4];
} cublasLtMatmulHeuristicResult_t;
1224
+
1225
/** Query cublasLt heuristic for algorithm appropriate for given use case.
 *
 * \param[in]      lightHandle           Pointer to the allocated cuBLASLt handle for the cuBLASLt
 *                                       context. See cublasLtHandle_t.
 * \param[in]      operationDesc         Handle to the matrix multiplication descriptor.
 * \param[in]      Adesc                 Handle to the layout descriptors for matrix A.
 * \param[in]      Bdesc                 Handle to the layout descriptors for matrix B.
 * \param[in]      Cdesc                 Handle to the layout descriptors for matrix C.
 * \param[in]      Ddesc                 Handle to the layout descriptors for matrix D.
 * \param[in]      preference            Pointer to the structure holding the heuristic search
 *                                       preferences descriptor. See cublasLtMatmulPreference_t.
 * \param[in]      requestedAlgoCount    Size of heuristicResultsArray (in elements) and requested
 *                                       maximum number of algorithms to return.
 * \param[in, out] heuristicResultsArray Output algorithms and associated runtime characteristics,
 *                                       ordered in increasing estimated compute time.
 * \param[out]     returnAlgoCount       The number of heuristicResultsArray elements written.
 *
 * \retval CUBLAS_STATUS_INVALID_VALUE if requestedAlgoCount is less or equal to zero
 * \retval CUBLAS_STATUS_NOT_SUPPORTED if no heuristic function available for current configuration
 * \retval CUBLAS_STATUS_SUCCESS       if query was successful, inspect
 *                                     heuristicResultsArray[0 to (returnAlgoCount - 1)].state
 *                                     for detail status of results
 */
cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoGetHeuristic(cublasLtHandle_t lightHandle,
                                                           cublasLtMatmulDesc_t operationDesc,
                                                           cublasLtMatrixLayout_t Adesc,
                                                           cublasLtMatrixLayout_t Bdesc,
                                                           cublasLtMatrixLayout_t Cdesc,
                                                           cublasLtMatrixLayout_t Ddesc,
                                                           cublasLtMatmulPreference_t preference,
                                                           int requestedAlgoCount,
                                                           cublasLtMatmulHeuristicResult_t heuristicResultsArray[],
                                                           int* returnAlgoCount);
1258
+
1259
+ /* ---------------------------------------------------------------------------------------*/
1260
+ /* Lower level API to be able to implement own Heuristic and Find routines */
1261
+ /* ---------------------------------------------------------------------------------------*/
1262
+
1263
/** Routine to get all algo IDs that can potentially run
 *
 * \param[in]  requestedAlgoCount requested number of algos (must be less or equal to size of algoIdsArray
 *                                (in elements))
 * \param[out] algoIdsArray       array to write algoIds to
 * \param[out] returnAlgoCount    number of algoIds actually written
 *
 * \retval CUBLAS_STATUS_INVALID_VALUE if requestedAlgoCount is less or equal to zero
 * \retval CUBLAS_STATUS_SUCCESS if query was successful, inspect returnAlgoCount to get actual number of IDs
 *                               available
 */
cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoGetIds(cublasLtHandle_t lightHandle,
                                                     cublasComputeType_t computeType,
                                                     cudaDataType_t scaleType,
                                                     cudaDataType_t Atype,
                                                     cudaDataType_t Btype,
                                                     cudaDataType_t Ctype,
                                                     cudaDataType_t Dtype,
                                                     int requestedAlgoCount,
                                                     int algoIdsArray[],
                                                     int* returnAlgoCount);
1283
+
1284
+ /** Initialize algo structure
1285
+ *
1286
+ * \retval CUBLAS_STATUS_INVALID_VALUE if algo is NULL or algoId is outside of recognized range
1287
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if algoId is not supported for given combination of data types
1288
+ * \retval CUBLAS_STATUS_SUCCESS if the structure was successfully initialized
1289
+ */
1290
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoInit(cublasLtHandle_t lightHandle,
1291
+ cublasComputeType_t computeType,
1292
+ cudaDataType_t scaleType,
1293
+ cudaDataType_t Atype,
1294
+ cudaDataType_t Btype,
1295
+ cudaDataType_t Ctype,
1296
+ cudaDataType_t Dtype,
1297
+ int algoId,
1298
+ cublasLtMatmulAlgo_t* algo);
1299
+
1300
+ /** Check configured algo descriptor for correctness and support on current device.
1301
+ *
1302
+ * Result includes required workspace size and calculated wave count.
1303
+ *
1304
+ * CUBLAS_STATUS_SUCCESS doesn't fully guarantee algo will run (will fail if e.g. buffers are not correctly aligned);
1305
+ * but if cublasLtMatmulAlgoCheck fails, the algo will not run.
1306
+ *
1307
+ * \param[in] algo algo configuration to check
1308
+ * \param[out] result result structure to report algo runtime characteristics; algo field is never updated
1309
+ *
1310
+ * \retval CUBLAS_STATUS_INVALID_VALUE if matrix layout descriptors or operation descriptor don't match algo
1311
+ * descriptor
1312
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if algo configuration or data type combination is not currently supported on
1313
+ * given device
1314
+ * \retval CUBLAS_STATUS_ARCH_MISMATCH if algo configuration cannot be run using the selected device
1315
+ * \retval CUBLAS_STATUS_SUCCESS if check was successful
1316
+ */
1317
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoCheck( //
1318
+ cublasLtHandle_t lightHandle,
1319
+ cublasLtMatmulDesc_t operationDesc,
1320
+ cublasLtMatrixLayout_t Adesc,
1321
+ cublasLtMatrixLayout_t Bdesc,
1322
+ cublasLtMatrixLayout_t Cdesc,
1323
+ cublasLtMatrixLayout_t Ddesc,
1324
+ const cublasLtMatmulAlgo_t* algo, ///< may point to result->algo
1325
+ cublasLtMatmulHeuristicResult_t* result);
1326
+
1327
+ /** Capabilities Attributes that can be retrieved from an initialized Algo structure
1328
+ */
1329
+ typedef enum {
1330
+ /** support for split K, see CUBLASLT_ALGO_CONFIG_SPLITK_NUM
1331
+ *
1332
+ * int32_t, 0 means no support, supported otherwise
1333
+ */
1334
+ CUBLASLT_ALGO_CAP_SPLITK_SUPPORT = 0,
1335
+ /** reduction scheme mask, see cublasLtReductionScheme_t; shows supported reduction schemes, if reduction scheme is
1336
+ * not masked out it is supported.
1337
+ *
1338
+ * e.g. int isReductionSchemeComputeTypeSupported ? (reductionSchemeMask & CUBLASLT_REDUCTION_SCHEME_COMPUTE_TYPE) ==
1339
+ * CUBLASLT_REDUCTION_SCHEME_COMPUTE_TYPE ? 1 : 0;
1340
+ *
1341
+ * uint32_t
1342
+ */
1343
+ CUBLASLT_ALGO_CAP_REDUCTION_SCHEME_MASK = 1,
1344
+ /** support for cta swizzling, see CUBLASLT_ALGO_CONFIG_CTA_SWIZZLING
1345
+ *
1346
+ * uint32_t, 0 means no support, 1 means supported value of 1, other values are reserved
1347
+ */
1348
+ CUBLASLT_ALGO_CAP_CTA_SWIZZLING_SUPPORT = 2,
1349
+ /** support strided batch
1350
+ *
1351
+ * int32_t, 0 means no support, supported otherwise
1352
+ */
1353
+ CUBLASLT_ALGO_CAP_STRIDED_BATCH_SUPPORT = 3,
1354
+ /** support results out of place (D != C in D = alpha.A.B + beta.C)
1355
+ *
1356
+ * int32_t, 0 means no support, supported otherwise
1357
+ */
1358
+ CUBLASLT_ALGO_CAP_OUT_OF_PLACE_RESULT_SUPPORT = 4,
1359
+ /** syrk/herk support (on top of regular gemm)
1360
+ *
1361
+ * int32_t, 0 means no support, supported otherwise
1362
+ */
1363
+ CUBLASLT_ALGO_CAP_UPLO_SUPPORT = 5,
1364
+ /** tile ids possible to use, see cublasLtMatmulTile_t; if no tile ids are supported use
1365
+ * CUBLASLT_MATMUL_TILE_UNDEFINED
1366
+ *
1367
+ * use cublasLtMatmulAlgoCapGetAttribute() with sizeInBytes=0 to query actual count
1368
+ *
1369
+ * array of uint32_t
1370
+ */
1371
+ CUBLASLT_ALGO_CAP_TILE_IDS = 6,
1372
+ /** custom option range is from 0 to CUBLASLT_ALGO_CAP_CUSTOM_OPTION_MAX (inclusive), see
1373
+ * CUBLASLT_ALGO_CONFIG_CUSTOM_OPTION
1374
+ *
1375
+ * int32_t
1376
+ */
1377
+ CUBLASLT_ALGO_CAP_CUSTOM_OPTION_MAX = 7,
1378
+ /** whether algorithm is using regular compute or tensor operations
1379
+ *
1380
+ * int32_t 0 means regular compute, 1 means tensor operations;
1381
+ * DEPRECATED
1382
+ */
1383
+ CUBLASLT_ALGO_CAP_MATHMODE_IMPL = 8,
1384
+ /** whether algorithm implements gaussian optimization of complex matrix multiplication, see cublasMath_t
1385
+ *
1386
+ * int32_t 0 means regular compute, 1 means gaussian;
1387
+ * DEPRECATED
1388
+ */
1389
+ CUBLASLT_ALGO_CAP_GAUSSIAN_IMPL = 9,
1390
+ /** whether algorithm supports custom (not COL or ROW memory order), see cublasLtOrder_t
1391
+ *
1392
+ * int32_t 0 means only COL and ROW memory order is allowed, non-zero means that algo might have different
1393
+ * requirements;
1394
+ */
1395
+ CUBLASLT_ALGO_CAP_CUSTOM_MEMORY_ORDER = 10,
1396
+
1397
+ /** bitmask enumerating pointer modes algorithm supports
1398
+ *
1399
+ * uint32_t, see cublasLtPointerModeMask_t
1400
+ */
1401
+ CUBLASLT_ALGO_CAP_POINTER_MODE_MASK = 11,
1402
+
1403
+ /** bitmask enumerating kinds of postprocessing algorithm supports in the epilogue
1404
+ *
1405
+ * uint32_t, see cublasLtEpilogue_t
1406
+ */
1407
+ CUBLASLT_ALGO_CAP_EPILOGUE_MASK = 12,
1408
+ /** stages ids possible to use, see cublasLtMatmulStages_t; if no stages ids are supported use
1409
+ * CUBLASLT_MATMUL_STAGES_UNDEFINED
1410
+ *
1411
+ * use cublasLtMatmulAlgoCapGetAttribute() with sizeInBytes=0 to query actual count
1412
+ *
1413
+ * array of uint32_t
1414
+ */
1415
+ CUBLASLT_ALGO_CAP_STAGES_IDS = 13,
1416
+ /** support for negative ld for all of the matrices
1417
+ *
1418
+ * int32_t 0 means no support, supported otherwise
1419
+ */
1420
+ CUBLASLT_ALGO_CAP_LD_NEGATIVE = 14,
1421
+ /** details about algorithm's implementation that affect it's numerical behavior
1422
+ *
1423
+ * uint64_t, see cublasLtNumericalImplFlags_t
1424
+ */
1425
+ CUBLASLT_ALGO_CAP_NUMERICAL_IMPL_FLAGS = 15,
1426
+ /** minimum alignment required for A matrix in bytes
1427
+ * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
1428
+ *
1429
+ * uint32_t
1430
+ */
1431
+ CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_A_BYTES = 16,
1432
+ /** minimum alignment required for B matrix in bytes
1433
+ * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
1434
+ *
1435
+ * uint32_t
1436
+ */
1437
+ CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_B_BYTES = 17,
1438
+ /** minimum alignment required for C matrix in bytes
1439
+ * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
1440
+ *
1441
+ * uint32_t
1442
+ */
1443
+ CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_C_BYTES = 18,
1444
+ /** minimum alignment required for D matrix in bytes
1445
+ * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
1446
+ *
1447
+ * uint32_t
1448
+ */
1449
+ CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_D_BYTES = 19,
1450
+ } cublasLtMatmulAlgoCapAttributes_t;
1451
+
1452
/** Get algo capability attribute.
 *
 * E.g. to get list of supported Tile IDs:
 *      cublasLtMatmulTile_t tiles[CUBLASLT_MATMUL_TILE_END];
 *      size_t num_tiles, size_written;
 *      if (cublasLtMatmulAlgoCapGetAttribute(algo, CUBLASLT_ALGO_CAP_TILE_IDS, tiles, sizeof(tiles), &size_written) ==
 *          CUBLAS_STATUS_SUCCESS) {
 *        num_tiles = size_written / sizeof(tiles[0]);
 *      }
 *
 * \param[in]  algo         The algo descriptor
 * \param[in]  attr         The attribute
 * \param[out] buf          memory address containing the new value
 * \param[in]  sizeInBytes  size of buf buffer for verification (in bytes)
 * \param[out] sizeWritten  only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
 *                          bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
 *
 * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
 *                                     and buf is NULL or sizeInBytes doesn't match size of internal storage for
 *                                     selected attribute
 * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
 */
cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoCapGetAttribute(const cublasLtMatmulAlgo_t* algo,
                                                              cublasLtMatmulAlgoCapAttributes_t attr,
                                                              void* buf,
                                                              size_t sizeInBytes,
                                                              size_t* sizeWritten);
1478
+
1479
+ /** Algo Configuration Attributes that can be set according to the Algo capabilities
1480
+ */
1481
+ typedef enum {
1482
+ /** algorithm index, see cublasLtMatmulAlgoGetIds()
1483
+ *
1484
+ * readonly, set by cublasLtMatmulAlgoInit()
1485
+ * int32_t
1486
+ */
1487
+ CUBLASLT_ALGO_CONFIG_ID = 0,
1488
+ /** tile id, see cublasLtMatmulTile_t
1489
+ *
1490
+ * uint32_t, default: CUBLASLT_MATMUL_TILE_UNDEFINED
1491
+ */
1492
+ CUBLASLT_ALGO_CONFIG_TILE_ID = 1,
1493
+ /** number of K splits, if != 1, SPLITK_NUM parts of matrix multiplication will be computed in parallel,
1494
+ * and then results accumulated according to REDUCTION_SCHEME
1495
+ *
1496
+ * uint32_t, default: 1
1497
+ */
1498
+ CUBLASLT_ALGO_CONFIG_SPLITK_NUM = 2,
1499
+ /** reduction scheme, see cublasLtReductionScheme_t
1500
+ *
1501
+ * uint32_t, default: CUBLASLT_REDUCTION_SCHEME_NONE
1502
+ */
1503
+ CUBLASLT_ALGO_CONFIG_REDUCTION_SCHEME = 3,
1504
+ /** cta swizzling, change mapping from CUDA grid coordinates to parts of the matrices
1505
+ *
1506
+ * possible values: 0, 1, other values reserved
1507
+ *
1508
+ * uint32_t, default: 0
1509
+ */
1510
+ CUBLASLT_ALGO_CONFIG_CTA_SWIZZLING = 4,
1511
+ /** custom option, each algorithm can support some custom options that don't fit description of the other config
1512
+ * attributes, see CUBLASLT_ALGO_CAP_CUSTOM_OPTION_MAX to get accepted range for any specific case
1513
+ *
1514
+ * uint32_t, default: 0
1515
+ */
1516
+ CUBLASLT_ALGO_CONFIG_CUSTOM_OPTION = 5,
1517
+ /** stages id, see cublasLtMatmulStages_t
1518
+ *
1519
+ * uint32_t, default: CUBLASLT_MATMUL_STAGES_UNDEFINED
1520
+ */
1521
+ CUBLASLT_ALGO_CONFIG_STAGES_ID = 6,
1522
+ } cublasLtMatmulAlgoConfigAttributes_t;
1523
+
1524
+ /** Set algo configuration attribute.
1525
+ *
1526
+ * \param[in] algo The algo descriptor
1527
+ * \param[in] attr The attribute
1528
+ * \param[in] buf memory address containing the new value
1529
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1530
+ *
1531
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
1532
+ * selected attribute
1533
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
1534
+ */
1535
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoConfigSetAttribute(cublasLtMatmulAlgo_t* algo,
1536
+ cublasLtMatmulAlgoConfigAttributes_t attr,
1537
+ const void* buf,
1538
+ size_t sizeInBytes);
1539
+
1540
+ /** Get algo configuration attribute.
1541
+ *
1542
+ * \param[in] algo The algo descriptor
1543
+ * \param[in] attr The attribute
1544
+ * \param[out] buf memory address containing the new value
1545
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1546
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
1547
+ * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
1548
+ *
1549
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
1550
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
1551
+ * selected attribute
1552
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
1553
+ */
1554
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoConfigGetAttribute(const cublasLtMatmulAlgo_t* algo,
1555
+ cublasLtMatmulAlgoConfigAttributes_t attr,
1556
+ void* buf,
1557
+ size_t sizeInBytes,
1558
+ size_t* sizeWritten);
1559
+
1560
+ /** Experimental: Logger callback type.
1561
+ */
1562
+ typedef void (*cublasLtLoggerCallback_t)(int logLevel, const char* functionName, const char* message);
1563
+
1564
+ /** Experimental: Logger callback setter.
1565
+ *
1566
+ * \param[in] callback a user defined callback function to be called by the logger
1567
+ *
1568
+ * \retval CUBLAS_STATUS_SUCCESS if callback was set successfully
1569
+ */
1570
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerSetCallback(cublasLtLoggerCallback_t callback);
1571
+
1572
+ /** Experimental: Log file setter.
1573
+ *
1574
+ * \param[in] file an open file with write permissions
1575
+ *
1576
+ * \retval CUBLAS_STATUS_SUCCESS if log file was set successfully
1577
+ */
1578
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerSetFile(FILE* file);
1579
+
1580
+ /** Experimental: Open log file.
1581
+ *
1582
+ * \param[in] logFile log file path. if the log file does not exist, it will be created
1583
+ *
1584
+ * \retval CUBLAS_STATUS_SUCCESS if log file was created successfully
1585
+ */
1586
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerOpenFile(const char* logFile);
1587
+
1588
+ /** Experimental: Log level setter.
1589
+ *
1590
+ * \param[in] level log level, should be one of the following:
1591
+ * 0. Off
1592
+ * 1. Errors
1593
+ * 2. Performance Trace
1594
+ * 3. Performance Hints
1595
+ * 4. Heuristics Trace
1596
+ * 5. API Trace
1597
+ *
1598
+ * \retval CUBLAS_STATUS_INVALID_VALUE if log level is not one of the above levels
1599
+ *
1600
+ * \retval CUBLAS_STATUS_SUCCESS if log level was set successfully
1601
+ */
1602
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerSetLevel(int level);
1603
+
1604
+ /** Experimental: Log mask setter.
1605
+ *
1606
+ * \param[in] mask log mask, should be a combination of the following masks:
1607
+ * 0. Off
1608
+ * 1. Errors
1609
+ * 2. Performance Trace
1610
+ * 4. Performance Hints
1611
+ * 8. Heuristics Trace
1612
+ * 16. API Trace
1613
+ *
1614
+ * \retval CUBLAS_STATUS_SUCCESS if log mask was set successfully
1615
+ */
1616
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerSetMask(int mask);
1617
+
1618
+ /** Experimental: Disable logging for the entire session.
1619
+ *
1620
+ * \retval CUBLAS_STATUS_SUCCESS if disabled logging
1621
+ */
1622
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerForceDisable();
1623
+
1624
+ #if defined(__cplusplus)
1625
+ }
1626
+ #endif /* __cplusplus */
mgm/lib/python3.10/site-packages/nvidia/cublas/include/cublasXt.h ADDED
@@ -0,0 +1,693 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /* cublasXt : Host API, Out of Core and Multi-GPU BLAS Library
51
+
52
+ */
53
+
54
+ #if !defined(CUBLAS_XT_H_)
55
+ #define CUBLAS_XT_H_
56
+
57
+ #include "driver_types.h"
58
+ #include "cuComplex.h" /* import complex data type */
59
+
60
+ #include "cublas_v2.h"
61
+
62
+ #if defined(__cplusplus)
63
+ extern "C" {
64
+ #endif /* __cplusplus */
65
+
66
+ struct cublasXtContext;
67
+ typedef struct cublasXtContext* cublasXtHandle_t;
68
+
69
+ cublasStatus_t CUBLASWINAPI cublasXtCreate(cublasXtHandle_t* handle);
70
+ cublasStatus_t CUBLASWINAPI cublasXtDestroy(cublasXtHandle_t handle);
71
+ cublasStatus_t CUBLASWINAPI cublasXtGetNumBoards(int nbDevices, int deviceId[], int* nbBoards);
72
+ cublasStatus_t CUBLASWINAPI cublasXtMaxBoards(int* nbGpuBoards);
73
+ /* This routine selects the GPUs that the user wants to use for CUBLAS-XT */
74
+ cublasStatus_t CUBLASWINAPI cublasXtDeviceSelect(cublasXtHandle_t handle, int nbDevices, int deviceId[]);
75
+
76
+ /* This routine allows to change the dimension of the tiles ( blockDim x blockDim ) */
77
+ cublasStatus_t CUBLASWINAPI cublasXtSetBlockDim(cublasXtHandle_t handle, int blockDim);
78
+ cublasStatus_t CUBLASWINAPI cublasXtGetBlockDim(cublasXtHandle_t handle, int* blockDim);
79
+
80
+ typedef enum { CUBLASXT_PINNING_DISABLED = 0, CUBLASXT_PINNING_ENABLED = 1 } cublasXtPinnedMemMode_t;
81
+ /* This routine allows CUBLAS-XT to pin the Host memory if it finds out that some of the matrices passed
82
+ are not pinned : Pinning/Unpinning the Host memory is still a costly operation
83
+ It is better if the user controls the memory on its own (by pinning/unpinning only when necessary)
84
+ */
85
+ cublasStatus_t CUBLASWINAPI cublasXtGetPinningMemMode(cublasXtHandle_t handle, cublasXtPinnedMemMode_t* mode);
86
+ cublasStatus_t CUBLASWINAPI cublasXtSetPinningMemMode(cublasXtHandle_t handle, cublasXtPinnedMemMode_t mode);
87
+
88
+ /* This routine is to provide CPU BLAS routines, used for too-small sizes or hybrid computation */
89
+ typedef enum {
90
+ CUBLASXT_FLOAT = 0,
91
+ CUBLASXT_DOUBLE = 1,
92
+ CUBLASXT_COMPLEX = 2,
93
+ CUBLASXT_DOUBLECOMPLEX = 3,
94
+ } cublasXtOpType_t;
95
+
96
+ typedef enum {
97
+ CUBLASXT_GEMM = 0,
98
+ CUBLASXT_SYRK = 1,
99
+ CUBLASXT_HERK = 2,
100
+ CUBLASXT_SYMM = 3,
101
+ CUBLASXT_HEMM = 4,
102
+ CUBLASXT_TRSM = 5,
103
+ CUBLASXT_SYR2K = 6,
104
+ CUBLASXT_HER2K = 7,
105
+
106
+ CUBLASXT_SPMM = 8,
107
+ CUBLASXT_SYRKX = 9,
108
+ CUBLASXT_HERKX = 10,
109
+ CUBLASXT_TRMM = 11,
110
+ CUBLASXT_ROUTINE_MAX = 12,
111
+ } cublasXtBlasOp_t;
112
+
113
+ /* Currently only 32-bit integer BLAS routines are supported */
114
+ cublasStatus_t CUBLASWINAPI cublasXtSetCpuRoutine(cublasXtHandle_t handle,
115
+ cublasXtBlasOp_t blasOp,
116
+ cublasXtOpType_t type,
117
+ void* blasFunctor);
118
+
119
+ /* Specifies the percentage of work that should be done by the CPU, default is 0 (no work) */
120
+ cublasStatus_t CUBLASWINAPI cublasXtSetCpuRatio(cublasXtHandle_t handle,
121
+ cublasXtBlasOp_t blasOp,
122
+ cublasXtOpType_t type,
123
+ float ratio);
124
+
125
+ /* GEMM */
126
+ cublasStatus_t CUBLASWINAPI cublasXtSgemm(cublasXtHandle_t handle,
127
+ cublasOperation_t transa,
128
+ cublasOperation_t transb,
129
+ size_t m,
130
+ size_t n,
131
+ size_t k,
132
+ const float* alpha,
133
+ const float* A,
134
+ size_t lda,
135
+ const float* B,
136
+ size_t ldb,
137
+ const float* beta,
138
+ float* C,
139
+ size_t ldc);
140
+
141
+ cublasStatus_t CUBLASWINAPI cublasXtDgemm(cublasXtHandle_t handle,
142
+ cublasOperation_t transa,
143
+ cublasOperation_t transb,
144
+ size_t m,
145
+ size_t n,
146
+ size_t k,
147
+ const double* alpha,
148
+ const double* A,
149
+ size_t lda,
150
+ const double* B,
151
+ size_t ldb,
152
+ const double* beta,
153
+ double* C,
154
+ size_t ldc);
155
+
156
+ cublasStatus_t CUBLASWINAPI cublasXtCgemm(cublasXtHandle_t handle,
157
+ cublasOperation_t transa,
158
+ cublasOperation_t transb,
159
+ size_t m,
160
+ size_t n,
161
+ size_t k,
162
+ const cuComplex* alpha,
163
+ const cuComplex* A,
164
+ size_t lda,
165
+ const cuComplex* B,
166
+ size_t ldb,
167
+ const cuComplex* beta,
168
+ cuComplex* C,
169
+ size_t ldc);
170
+
171
+ cublasStatus_t CUBLASWINAPI cublasXtZgemm(cublasXtHandle_t handle,
172
+ cublasOperation_t transa,
173
+ cublasOperation_t transb,
174
+ size_t m,
175
+ size_t n,
176
+ size_t k,
177
+ const cuDoubleComplex* alpha,
178
+ const cuDoubleComplex* A,
179
+ size_t lda,
180
+ const cuDoubleComplex* B,
181
+ size_t ldb,
182
+ const cuDoubleComplex* beta,
183
+ cuDoubleComplex* C,
184
+ size_t ldc);
185
+ /* ------------------------------------------------------- */
186
+ /* SYRK */
187
+ cublasStatus_t CUBLASWINAPI cublasXtSsyrk(cublasXtHandle_t handle,
188
+ cublasFillMode_t uplo,
189
+ cublasOperation_t trans,
190
+ size_t n,
191
+ size_t k,
192
+ const float* alpha,
193
+ const float* A,
194
+ size_t lda,
195
+ const float* beta,
196
+ float* C,
197
+ size_t ldc);
198
+
199
+ cublasStatus_t CUBLASWINAPI cublasXtDsyrk(cublasXtHandle_t handle,
200
+ cublasFillMode_t uplo,
201
+ cublasOperation_t trans,
202
+ size_t n,
203
+ size_t k,
204
+ const double* alpha,
205
+ const double* A,
206
+ size_t lda,
207
+ const double* beta,
208
+ double* C,
209
+ size_t ldc);
210
+
211
+ cublasStatus_t CUBLASWINAPI cublasXtCsyrk(cublasXtHandle_t handle,
212
+ cublasFillMode_t uplo,
213
+ cublasOperation_t trans,
214
+ size_t n,
215
+ size_t k,
216
+ const cuComplex* alpha,
217
+ const cuComplex* A,
218
+ size_t lda,
219
+ const cuComplex* beta,
220
+ cuComplex* C,
221
+ size_t ldc);
222
+
223
+ cublasStatus_t CUBLASWINAPI cublasXtZsyrk(cublasXtHandle_t handle,
224
+ cublasFillMode_t uplo,
225
+ cublasOperation_t trans,
226
+ size_t n,
227
+ size_t k,
228
+ const cuDoubleComplex* alpha,
229
+ const cuDoubleComplex* A,
230
+ size_t lda,
231
+ const cuDoubleComplex* beta,
232
+ cuDoubleComplex* C,
233
+ size_t ldc);
234
+ /* -------------------------------------------------------------------- */
235
+ /* HERK */
236
+ cublasStatus_t CUBLASWINAPI cublasXtCherk(cublasXtHandle_t handle,
237
+ cublasFillMode_t uplo,
238
+ cublasOperation_t trans,
239
+ size_t n,
240
+ size_t k,
241
+ const float* alpha,
242
+ const cuComplex* A,
243
+ size_t lda,
244
+ const float* beta,
245
+ cuComplex* C,
246
+ size_t ldc);
247
+
248
+ cublasStatus_t CUBLASWINAPI cublasXtZherk(cublasXtHandle_t handle,
249
+ cublasFillMode_t uplo,
250
+ cublasOperation_t trans,
251
+ size_t n,
252
+ size_t k,
253
+ const double* alpha,
254
+ const cuDoubleComplex* A,
255
+ size_t lda,
256
+ const double* beta,
257
+ cuDoubleComplex* C,
258
+ size_t ldc);
259
+ /* -------------------------------------------------------------------- */
260
+ /* SYR2K */
261
+ cublasStatus_t CUBLASWINAPI cublasXtSsyr2k(cublasXtHandle_t handle,
262
+ cublasFillMode_t uplo,
263
+ cublasOperation_t trans,
264
+ size_t n,
265
+ size_t k,
266
+ const float* alpha,
267
+ const float* A,
268
+ size_t lda,
269
+ const float* B,
270
+ size_t ldb,
271
+ const float* beta,
272
+ float* C,
273
+ size_t ldc);
274
+
275
+ cublasStatus_t CUBLASWINAPI cublasXtDsyr2k(cublasXtHandle_t handle,
276
+ cublasFillMode_t uplo,
277
+ cublasOperation_t trans,
278
+ size_t n,
279
+ size_t k,
280
+ const double* alpha,
281
+ const double* A,
282
+ size_t lda,
283
+ const double* B,
284
+ size_t ldb,
285
+ const double* beta,
286
+ double* C,
287
+ size_t ldc);
288
+
289
+ cublasStatus_t CUBLASWINAPI cublasXtCsyr2k(cublasXtHandle_t handle,
290
+ cublasFillMode_t uplo,
291
+ cublasOperation_t trans,
292
+ size_t n,
293
+ size_t k,
294
+ const cuComplex* alpha,
295
+ const cuComplex* A,
296
+ size_t lda,
297
+ const cuComplex* B,
298
+ size_t ldb,
299
+ const cuComplex* beta,
300
+ cuComplex* C,
301
+ size_t ldc);
302
+
303
+ cublasStatus_t CUBLASWINAPI cublasXtZsyr2k(cublasXtHandle_t handle,
304
+ cublasFillMode_t uplo,
305
+ cublasOperation_t trans,
306
+ size_t n,
307
+ size_t k,
308
+ const cuDoubleComplex* alpha,
309
+ const cuDoubleComplex* A,
310
+ size_t lda,
311
+ const cuDoubleComplex* B,
312
+ size_t ldb,
313
+ const cuDoubleComplex* beta,
314
+ cuDoubleComplex* C,
315
+ size_t ldc);
316
+ /* -------------------------------------------------------------------- */
317
+ /* HERKX : variant extension of HERK */
318
+ cublasStatus_t CUBLASWINAPI cublasXtCherkx(cublasXtHandle_t handle,
319
+ cublasFillMode_t uplo,
320
+ cublasOperation_t trans,
321
+ size_t n,
322
+ size_t k,
323
+ const cuComplex* alpha,
324
+ const cuComplex* A,
325
+ size_t lda,
326
+ const cuComplex* B,
327
+ size_t ldb,
328
+ const float* beta,
329
+ cuComplex* C,
330
+ size_t ldc);
331
+
332
+ cublasStatus_t CUBLASWINAPI cublasXtZherkx(cublasXtHandle_t handle,
333
+ cublasFillMode_t uplo,
334
+ cublasOperation_t trans,
335
+ size_t n,
336
+ size_t k,
337
+ const cuDoubleComplex* alpha,
338
+ const cuDoubleComplex* A,
339
+ size_t lda,
340
+ const cuDoubleComplex* B,
341
+ size_t ldb,
342
+ const double* beta,
343
+ cuDoubleComplex* C,
344
+ size_t ldc);
345
+
346
+ /* -------------------------------------------------------------------- */
347
+ /* TRSM */
348
+ cublasStatus_t CUBLASWINAPI cublasXtStrsm(cublasXtHandle_t handle,
349
+ cublasSideMode_t side,
350
+ cublasFillMode_t uplo,
351
+ cublasOperation_t trans,
352
+ cublasDiagType_t diag,
353
+ size_t m,
354
+ size_t n,
355
+ const float* alpha,
356
+ const float* A,
357
+ size_t lda,
358
+ float* B,
359
+ size_t ldb);
360
+
361
+ cublasStatus_t CUBLASWINAPI cublasXtDtrsm(cublasXtHandle_t handle,
362
+ cublasSideMode_t side,
363
+ cublasFillMode_t uplo,
364
+ cublasOperation_t trans,
365
+ cublasDiagType_t diag,
366
+ size_t m,
367
+ size_t n,
368
+ const double* alpha,
369
+ const double* A,
370
+ size_t lda,
371
+ double* B,
372
+ size_t ldb);
373
+
374
+ cublasStatus_t CUBLASWINAPI cublasXtCtrsm(cublasXtHandle_t handle,
375
+ cublasSideMode_t side,
376
+ cublasFillMode_t uplo,
377
+ cublasOperation_t trans,
378
+ cublasDiagType_t diag,
379
+ size_t m,
380
+ size_t n,
381
+ const cuComplex* alpha,
382
+ const cuComplex* A,
383
+ size_t lda,
384
+ cuComplex* B,
385
+ size_t ldb);
386
+
387
+ cublasStatus_t CUBLASWINAPI cublasXtZtrsm(cublasXtHandle_t handle,
388
+ cublasSideMode_t side,
389
+ cublasFillMode_t uplo,
390
+ cublasOperation_t trans,
391
+ cublasDiagType_t diag,
392
+ size_t m,
393
+ size_t n,
394
+ const cuDoubleComplex* alpha,
395
+ const cuDoubleComplex* A,
396
+ size_t lda,
397
+ cuDoubleComplex* B,
398
+ size_t ldb);
399
+ /* -------------------------------------------------------------------- */
400
+ /* SYMM : Symmetric Multiply Matrix*/
401
+ cublasStatus_t CUBLASWINAPI cublasXtSsymm(cublasXtHandle_t handle,
402
+ cublasSideMode_t side,
403
+ cublasFillMode_t uplo,
404
+ size_t m,
405
+ size_t n,
406
+ const float* alpha,
407
+ const float* A,
408
+ size_t lda,
409
+ const float* B,
410
+ size_t ldb,
411
+ const float* beta,
412
+ float* C,
413
+ size_t ldc);
414
+
415
+ cublasStatus_t CUBLASWINAPI cublasXtDsymm(cublasXtHandle_t handle,
416
+ cublasSideMode_t side,
417
+ cublasFillMode_t uplo,
418
+ size_t m,
419
+ size_t n,
420
+ const double* alpha,
421
+ const double* A,
422
+ size_t lda,
423
+ const double* B,
424
+ size_t ldb,
425
+ const double* beta,
426
+ double* C,
427
+ size_t ldc);
428
+
429
+ cublasStatus_t CUBLASWINAPI cublasXtCsymm(cublasXtHandle_t handle,
430
+ cublasSideMode_t side,
431
+ cublasFillMode_t uplo,
432
+ size_t m,
433
+ size_t n,
434
+ const cuComplex* alpha,
435
+ const cuComplex* A,
436
+ size_t lda,
437
+ const cuComplex* B,
438
+ size_t ldb,
439
+ const cuComplex* beta,
440
+ cuComplex* C,
441
+ size_t ldc);
442
+
443
+ cublasStatus_t CUBLASWINAPI cublasXtZsymm(cublasXtHandle_t handle,
444
+ cublasSideMode_t side,
445
+ cublasFillMode_t uplo,
446
+ size_t m,
447
+ size_t n,
448
+ const cuDoubleComplex* alpha,
449
+ const cuDoubleComplex* A,
450
+ size_t lda,
451
+ const cuDoubleComplex* B,
452
+ size_t ldb,
453
+ const cuDoubleComplex* beta,
454
+ cuDoubleComplex* C,
455
+ size_t ldc);
456
+ /* -------------------------------------------------------------------- */
457
+ /* HEMM : Hermitian Matrix Multiply */
458
+ cublasStatus_t CUBLASWINAPI cublasXtChemm(cublasXtHandle_t handle,
459
+ cublasSideMode_t side,
460
+ cublasFillMode_t uplo,
461
+ size_t m,
462
+ size_t n,
463
+ const cuComplex* alpha,
464
+ const cuComplex* A,
465
+ size_t lda,
466
+ const cuComplex* B,
467
+ size_t ldb,
468
+ const cuComplex* beta,
469
+ cuComplex* C,
470
+ size_t ldc);
471
+
472
+ cublasStatus_t CUBLASWINAPI cublasXtZhemm(cublasXtHandle_t handle,
473
+ cublasSideMode_t side,
474
+ cublasFillMode_t uplo,
475
+ size_t m,
476
+ size_t n,
477
+ const cuDoubleComplex* alpha,
478
+ const cuDoubleComplex* A,
479
+ size_t lda,
480
+ const cuDoubleComplex* B,
481
+ size_t ldb,
482
+ const cuDoubleComplex* beta,
483
+ cuDoubleComplex* C,
484
+ size_t ldc);
485
+
486
+ /* -------------------------------------------------------------------- */
487
+ /* SYRKX : variant extension of SYRK */
488
+ cublasStatus_t CUBLASWINAPI cublasXtSsyrkx(cublasXtHandle_t handle,
489
+ cublasFillMode_t uplo,
490
+ cublasOperation_t trans,
491
+ size_t n,
492
+ size_t k,
493
+ const float* alpha,
494
+ const float* A,
495
+ size_t lda,
496
+ const float* B,
497
+ size_t ldb,
498
+ const float* beta,
499
+ float* C,
500
+ size_t ldc);
501
+
502
+ cublasStatus_t CUBLASWINAPI cublasXtDsyrkx(cublasXtHandle_t handle,
503
+ cublasFillMode_t uplo,
504
+ cublasOperation_t trans,
505
+ size_t n,
506
+ size_t k,
507
+ const double* alpha,
508
+ const double* A,
509
+ size_t lda,
510
+ const double* B,
511
+ size_t ldb,
512
+ const double* beta,
513
+ double* C,
514
+ size_t ldc);
515
+
516
+ cublasStatus_t CUBLASWINAPI cublasXtCsyrkx(cublasXtHandle_t handle,
517
+ cublasFillMode_t uplo,
518
+ cublasOperation_t trans,
519
+ size_t n,
520
+ size_t k,
521
+ const cuComplex* alpha,
522
+ const cuComplex* A,
523
+ size_t lda,
524
+ const cuComplex* B,
525
+ size_t ldb,
526
+ const cuComplex* beta,
527
+ cuComplex* C,
528
+ size_t ldc);
529
+
530
+ cublasStatus_t CUBLASWINAPI cublasXtZsyrkx(cublasXtHandle_t handle,
531
+ cublasFillMode_t uplo,
532
+ cublasOperation_t trans,
533
+ size_t n,
534
+ size_t k,
535
+ const cuDoubleComplex* alpha,
536
+ const cuDoubleComplex* A,
537
+ size_t lda,
538
+ const cuDoubleComplex* B,
539
+ size_t ldb,
540
+ const cuDoubleComplex* beta,
541
+ cuDoubleComplex* C,
542
+ size_t ldc);
543
+ /* -------------------------------------------------------------------- */
544
+ /* HER2K : variant extension of HERK */
545
+ cublasStatus_t CUBLASWINAPI cublasXtCher2k(cublasXtHandle_t handle,
546
+ cublasFillMode_t uplo,
547
+ cublasOperation_t trans,
548
+ size_t n,
549
+ size_t k,
550
+ const cuComplex* alpha,
551
+ const cuComplex* A,
552
+ size_t lda,
553
+ const cuComplex* B,
554
+ size_t ldb,
555
+ const float* beta,
556
+ cuComplex* C,
557
+ size_t ldc);
558
+
559
+ cublasStatus_t CUBLASWINAPI cublasXtZher2k(cublasXtHandle_t handle,
560
+ cublasFillMode_t uplo,
561
+ cublasOperation_t trans,
562
+ size_t n,
563
+ size_t k,
564
+ const cuDoubleComplex* alpha,
565
+ const cuDoubleComplex* A,
566
+ size_t lda,
567
+ const cuDoubleComplex* B,
568
+ size_t ldb,
569
+ const double* beta,
570
+ cuDoubleComplex* C,
571
+ size_t ldc);
572
+
573
+ /* -------------------------------------------------------------------- */
574
+ /* SPMM : Symmetric Packed Multiply Matrix*/
575
+ cublasStatus_t CUBLASWINAPI cublasXtSspmm(cublasXtHandle_t handle,
576
+ cublasSideMode_t side,
577
+ cublasFillMode_t uplo,
578
+ size_t m,
579
+ size_t n,
580
+ const float* alpha,
581
+ const float* AP,
582
+ const float* B,
583
+ size_t ldb,
584
+ const float* beta,
585
+ float* C,
586
+ size_t ldc);
587
+
588
+ cublasStatus_t CUBLASWINAPI cublasXtDspmm(cublasXtHandle_t handle,
589
+ cublasSideMode_t side,
590
+ cublasFillMode_t uplo,
591
+ size_t m,
592
+ size_t n,
593
+ const double* alpha,
594
+ const double* AP,
595
+ const double* B,
596
+ size_t ldb,
597
+ const double* beta,
598
+ double* C,
599
+ size_t ldc);
600
+
601
+ cublasStatus_t CUBLASWINAPI cublasXtCspmm(cublasXtHandle_t handle,
602
+ cublasSideMode_t side,
603
+ cublasFillMode_t uplo,
604
+ size_t m,
605
+ size_t n,
606
+ const cuComplex* alpha,
607
+ const cuComplex* AP,
608
+ const cuComplex* B,
609
+ size_t ldb,
610
+ const cuComplex* beta,
611
+ cuComplex* C,
612
+ size_t ldc);
613
+
614
+ cublasStatus_t CUBLASWINAPI cublasXtZspmm(cublasXtHandle_t handle,
615
+ cublasSideMode_t side,
616
+ cublasFillMode_t uplo,
617
+ size_t m,
618
+ size_t n,
619
+ const cuDoubleComplex* alpha,
620
+ const cuDoubleComplex* AP,
621
+ const cuDoubleComplex* B,
622
+ size_t ldb,
623
+ const cuDoubleComplex* beta,
624
+ cuDoubleComplex* C,
625
+ size_t ldc);
626
+
627
+ /* -------------------------------------------------------------------- */
628
+ /* TRMM */
629
+ cublasStatus_t CUBLASWINAPI cublasXtStrmm(cublasXtHandle_t handle,
630
+ cublasSideMode_t side,
631
+ cublasFillMode_t uplo,
632
+ cublasOperation_t trans,
633
+ cublasDiagType_t diag,
634
+ size_t m,
635
+ size_t n,
636
+ const float* alpha,
637
+ const float* A,
638
+ size_t lda,
639
+ const float* B,
640
+ size_t ldb,
641
+ float* C,
642
+ size_t ldc);
643
+
644
+ cublasStatus_t CUBLASWINAPI cublasXtDtrmm(cublasXtHandle_t handle,
645
+ cublasSideMode_t side,
646
+ cublasFillMode_t uplo,
647
+ cublasOperation_t trans,
648
+ cublasDiagType_t diag,
649
+ size_t m,
650
+ size_t n,
651
+ const double* alpha,
652
+ const double* A,
653
+ size_t lda,
654
+ const double* B,
655
+ size_t ldb,
656
+ double* C,
657
+ size_t ldc);
658
+
659
+ cublasStatus_t CUBLASWINAPI cublasXtCtrmm(cublasXtHandle_t handle,
660
+ cublasSideMode_t side,
661
+ cublasFillMode_t uplo,
662
+ cublasOperation_t trans,
663
+ cublasDiagType_t diag,
664
+ size_t m,
665
+ size_t n,
666
+ const cuComplex* alpha,
667
+ const cuComplex* A,
668
+ size_t lda,
669
+ const cuComplex* B,
670
+ size_t ldb,
671
+ cuComplex* C,
672
+ size_t ldc);
673
+
674
+ cublasStatus_t CUBLASWINAPI cublasXtZtrmm(cublasXtHandle_t handle,
675
+ cublasSideMode_t side,
676
+ cublasFillMode_t uplo,
677
+ cublasOperation_t trans,
678
+ cublasDiagType_t diag,
679
+ size_t m,
680
+ size_t n,
681
+ const cuDoubleComplex* alpha,
682
+ const cuDoubleComplex* A,
683
+ size_t lda,
684
+ const cuDoubleComplex* B,
685
+ size_t ldb,
686
+ cuDoubleComplex* C,
687
+ size_t ldc);
688
+
689
+ #if defined(__cplusplus)
690
+ }
691
+ #endif /* __cplusplus */
692
+
693
+ #endif /* !defined(CUBLAS_XT_H_) */
mgm/lib/python3.10/site-packages/nvidia/cublas/include/cublas_api.h ADDED
The diff for this file is too large to render. See raw diff
 
mgm/lib/python3.10/site-packages/nvidia/cublas/include/cublas_v2.h ADDED
@@ -0,0 +1,273 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * This is the public header file for the new CUBLAS library API, it mapped the generic
52
+ * Cublas name functions to the actual _v2 implementations.
53
+ */
54
+
55
+ #if !defined(CUBLAS_V2_H_)
56
+ #define CUBLAS_V2_H_
57
+
58
+ #undef CUBLASAPI
59
+ #ifdef __CUDACC__
60
+ #define CUBLASAPI __host__ __device__
61
+ #else
62
+ #define CUBLASAPI
63
+ #endif
64
+
65
+ #include "cublas_api.h"
66
+
67
+ #define cublasCreate cublasCreate_v2
68
+ #define cublasDestroy cublasDestroy_v2
69
+ #define cublasGetVersion cublasGetVersion_v2
70
+ #define cublasSetWorkspace cublasSetWorkspace_v2
71
+ #define cublasSetStream cublasSetStream_v2
72
+ #define cublasGetStream cublasGetStream_v2
73
+ #define cublasGetPointerMode cublasGetPointerMode_v2
74
+ #define cublasSetPointerMode cublasSetPointerMode_v2
75
+
76
+ /* Blas3 Routines */
77
+
78
+ #define cublasSnrm2 cublasSnrm2_v2
79
+ #define cublasDnrm2 cublasDnrm2_v2
80
+ #define cublasScnrm2 cublasScnrm2_v2
81
+ #define cublasDznrm2 cublasDznrm2_v2
82
+
83
+ #define cublasSdot cublasSdot_v2
84
+ #define cublasDdot cublasDdot_v2
85
+ #define cublasCdotu cublasCdotu_v2
86
+ #define cublasCdotc cublasCdotc_v2
87
+ #define cublasZdotu cublasZdotu_v2
88
+ #define cublasZdotc cublasZdotc_v2
89
+
90
+ #define cublasSscal cublasSscal_v2
91
+ #define cublasDscal cublasDscal_v2
92
+ #define cublasCscal cublasCscal_v2
93
+ #define cublasCsscal cublasCsscal_v2
94
+ #define cublasZscal cublasZscal_v2
95
+ #define cublasZdscal cublasZdscal_v2
96
+
97
+ #define cublasSaxpy cublasSaxpy_v2
98
+ #define cublasDaxpy cublasDaxpy_v2
99
+ #define cublasCaxpy cublasCaxpy_v2
100
+ #define cublasZaxpy cublasZaxpy_v2
101
+
102
+ #define cublasScopy cublasScopy_v2
103
+ #define cublasDcopy cublasDcopy_v2
104
+ #define cublasCcopy cublasCcopy_v2
105
+ #define cublasZcopy cublasZcopy_v2
106
+
107
+ #define cublasSswap cublasSswap_v2
108
+ #define cublasDswap cublasDswap_v2
109
+ #define cublasCswap cublasCswap_v2
110
+ #define cublasZswap cublasZswap_v2
111
+
112
+ #define cublasIsamax cublasIsamax_v2
113
+ #define cublasIdamax cublasIdamax_v2
114
+ #define cublasIcamax cublasIcamax_v2
115
+ #define cublasIzamax cublasIzamax_v2
116
+
117
+ #define cublasIsamin cublasIsamin_v2
118
+ #define cublasIdamin cublasIdamin_v2
119
+ #define cublasIcamin cublasIcamin_v2
120
+ #define cublasIzamin cublasIzamin_v2
121
+
122
+ #define cublasSasum cublasSasum_v2
123
+ #define cublasDasum cublasDasum_v2
124
+ #define cublasScasum cublasScasum_v2
125
+ #define cublasDzasum cublasDzasum_v2
126
+
127
+ #define cublasSrot cublasSrot_v2
128
+ #define cublasDrot cublasDrot_v2
129
+ #define cublasCrot cublasCrot_v2
130
+ #define cublasCsrot cublasCsrot_v2
131
+ #define cublasZrot cublasZrot_v2
132
+ #define cublasZdrot cublasZdrot_v2
133
+
134
+ #define cublasSrotg cublasSrotg_v2
135
+ #define cublasDrotg cublasDrotg_v2
136
+ #define cublasCrotg cublasCrotg_v2
137
+ #define cublasZrotg cublasZrotg_v2
138
+
139
+ #define cublasSrotm cublasSrotm_v2
140
+ #define cublasDrotm cublasDrotm_v2
141
+
142
+ #define cublasSrotmg cublasSrotmg_v2
143
+ #define cublasDrotmg cublasDrotmg_v2
144
+
145
+ /* Blas2 Routines */
146
+
147
+ #define cublasSgemv cublasSgemv_v2
148
+ #define cublasDgemv cublasDgemv_v2
149
+ #define cublasCgemv cublasCgemv_v2
150
+ #define cublasZgemv cublasZgemv_v2
151
+
152
+ #define cublasSgbmv cublasSgbmv_v2
153
+ #define cublasDgbmv cublasDgbmv_v2
154
+ #define cublasCgbmv cublasCgbmv_v2
155
+ #define cublasZgbmv cublasZgbmv_v2
156
+
157
+ #define cublasStrmv cublasStrmv_v2
158
+ #define cublasDtrmv cublasDtrmv_v2
159
+ #define cublasCtrmv cublasCtrmv_v2
160
+ #define cublasZtrmv cublasZtrmv_v2
161
+
162
+ #define cublasStbmv cublasStbmv_v2
163
+ #define cublasDtbmv cublasDtbmv_v2
164
+ #define cublasCtbmv cublasCtbmv_v2
165
+ #define cublasZtbmv cublasZtbmv_v2
166
+
167
+ #define cublasStpmv cublasStpmv_v2
168
+ #define cublasDtpmv cublasDtpmv_v2
169
+ #define cublasCtpmv cublasCtpmv_v2
170
+ #define cublasZtpmv cublasZtpmv_v2
171
+
172
+ #define cublasStrsv cublasStrsv_v2
173
+ #define cublasDtrsv cublasDtrsv_v2
174
+ #define cublasCtrsv cublasCtrsv_v2
175
+ #define cublasZtrsv cublasZtrsv_v2
176
+
177
+ #define cublasStpsv cublasStpsv_v2
178
+ #define cublasDtpsv cublasDtpsv_v2
179
+ #define cublasCtpsv cublasCtpsv_v2
180
+ #define cublasZtpsv cublasZtpsv_v2
181
+
182
+ #define cublasStbsv cublasStbsv_v2
183
+ #define cublasDtbsv cublasDtbsv_v2
184
+ #define cublasCtbsv cublasCtbsv_v2
185
+ #define cublasZtbsv cublasZtbsv_v2
186
+
187
+ #define cublasSsymv cublasSsymv_v2
188
+ #define cublasDsymv cublasDsymv_v2
189
+ #define cublasCsymv cublasCsymv_v2
190
+ #define cublasZsymv cublasZsymv_v2
191
+ #define cublasChemv cublasChemv_v2
192
+ #define cublasZhemv cublasZhemv_v2
193
+
194
+ #define cublasSsbmv cublasSsbmv_v2
195
+ #define cublasDsbmv cublasDsbmv_v2
196
+ #define cublasChbmv cublasChbmv_v2
197
+ #define cublasZhbmv cublasZhbmv_v2
198
+
199
+ #define cublasSspmv cublasSspmv_v2
200
+ #define cublasDspmv cublasDspmv_v2
201
+ #define cublasChpmv cublasChpmv_v2
202
+ #define cublasZhpmv cublasZhpmv_v2
203
+
204
+ #define cublasSger cublasSger_v2
205
+ #define cublasDger cublasDger_v2
206
+ #define cublasCgeru cublasCgeru_v2
207
+ #define cublasCgerc cublasCgerc_v2
208
+ #define cublasZgeru cublasZgeru_v2
209
+ #define cublasZgerc cublasZgerc_v2
210
+
211
+ #define cublasSsyr cublasSsyr_v2
212
+ #define cublasDsyr cublasDsyr_v2
213
+ #define cublasCsyr cublasCsyr_v2
214
+ #define cublasZsyr cublasZsyr_v2
215
+ #define cublasCher cublasCher_v2
216
+ #define cublasZher cublasZher_v2
217
+
218
+ #define cublasSspr cublasSspr_v2
219
+ #define cublasDspr cublasDspr_v2
220
+ #define cublasChpr cublasChpr_v2
221
+ #define cublasZhpr cublasZhpr_v2
222
+
223
+ #define cublasSsyr2 cublasSsyr2_v2
224
+ #define cublasDsyr2 cublasDsyr2_v2
225
+ #define cublasCsyr2 cublasCsyr2_v2
226
+ #define cublasZsyr2 cublasZsyr2_v2
227
+ #define cublasCher2 cublasCher2_v2
228
+ #define cublasZher2 cublasZher2_v2
229
+
230
+ #define cublasSspr2 cublasSspr2_v2
231
+ #define cublasDspr2 cublasDspr2_v2
232
+ #define cublasChpr2 cublasChpr2_v2
233
+ #define cublasZhpr2 cublasZhpr2_v2
234
+
235
+ /* Blas3 Routines */
236
+
237
+ #define cublasSgemm cublasSgemm_v2
238
+ #define cublasDgemm cublasDgemm_v2
239
+ #define cublasCgemm cublasCgemm_v2
240
+ #define cublasZgemm cublasZgemm_v2
241
+
242
+ #define cublasSsyrk cublasSsyrk_v2
243
+ #define cublasDsyrk cublasDsyrk_v2
244
+ #define cublasCsyrk cublasCsyrk_v2
245
+ #define cublasZsyrk cublasZsyrk_v2
246
+ #define cublasCherk cublasCherk_v2
247
+ #define cublasZherk cublasZherk_v2
248
+
249
+ #define cublasSsyr2k cublasSsyr2k_v2
250
+ #define cublasDsyr2k cublasDsyr2k_v2
251
+ #define cublasCsyr2k cublasCsyr2k_v2
252
+ #define cublasZsyr2k cublasZsyr2k_v2
253
+ #define cublasCher2k cublasCher2k_v2
254
+ #define cublasZher2k cublasZher2k_v2
255
+
256
+ #define cublasSsymm cublasSsymm_v2
257
+ #define cublasDsymm cublasDsymm_v2
258
+ #define cublasCsymm cublasCsymm_v2
259
+ #define cublasZsymm cublasZsymm_v2
260
+ #define cublasChemm cublasChemm_v2
261
+ #define cublasZhemm cublasZhemm_v2
262
+
263
+ #define cublasStrsm cublasStrsm_v2
264
+ #define cublasDtrsm cublasDtrsm_v2
265
+ #define cublasCtrsm cublasCtrsm_v2
266
+ #define cublasZtrsm cublasZtrsm_v2
267
+
268
+ #define cublasStrmm cublasStrmm_v2
269
+ #define cublasDtrmm cublasDtrmm_v2
270
+ #define cublasCtrmm cublasCtrmm_v2
271
+ #define cublasZtrmm cublasZtrmm_v2
272
+
273
+ #endif /* !defined(CUBLAS_V2_H_) */
mgm/lib/python3.10/site-packages/nvidia/cublas/include/nvblas.h ADDED
@@ -0,0 +1,824 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(NVBLAS_H_)
51
+ #define NVBLAS_H_
52
+
53
+ #include "driver_types.h"
54
+ #include "cuComplex.h" /* import complex data type */
55
+
56
+ #if defined(__cplusplus)
57
+ extern "C" {
58
+ #endif
59
+
60
+ /* GEMM */
61
+ void sgemm_(const char* transa,
62
+ const char* transb,
63
+ const int* m,
64
+ const int* n,
65
+ const int* k,
66
+ const float* alpha,
67
+ const float* a,
68
+ const int* lda,
69
+ const float* b,
70
+ const int* ldb,
71
+ const float* beta,
72
+ float* c,
73
+ const int* ldc);
74
+
75
+ void dgemm_(const char* transa,
76
+ const char* transb,
77
+ const int* m,
78
+ const int* n,
79
+ const int* k,
80
+ const double* alpha,
81
+ const double* a,
82
+ const int* lda,
83
+ const double* b,
84
+ const int* ldb,
85
+ const double* beta,
86
+ double* c,
87
+ const int* ldc);
88
+
89
+ void cgemm_(const char* transa,
90
+ const char* transb,
91
+ const int* m,
92
+ const int* n,
93
+ const int* k,
94
+ const cuComplex* alpha,
95
+ const cuComplex* a,
96
+ const int* lda,
97
+ const cuComplex* b,
98
+ const int* ldb,
99
+ const cuComplex* beta,
100
+ cuComplex* c,
101
+ const int* ldc);
102
+
103
+ void zgemm_(const char* transa,
104
+ const char* transb,
105
+ const int* m,
106
+ const int* n,
107
+ const int* k,
108
+ const cuDoubleComplex* alpha,
109
+ const cuDoubleComplex* a,
110
+ const int* lda,
111
+ const cuDoubleComplex* b,
112
+ const int* ldb,
113
+ const cuDoubleComplex* beta,
114
+ cuDoubleComplex* c,
115
+ const int* ldc);
116
+
117
+ void sgemm(const char* transa,
118
+ const char* transb,
119
+ const int* m,
120
+ const int* n,
121
+ const int* k,
122
+ const float* alpha,
123
+ const float* a,
124
+ const int* lda,
125
+ const float* b,
126
+ const int* ldb,
127
+ const float* beta,
128
+ float* c,
129
+ const int* ldc);
130
+
131
+ void dgemm(const char* transa,
132
+ const char* transb,
133
+ const int* m,
134
+ const int* n,
135
+ const int* k,
136
+ const double* alpha,
137
+ const double* a,
138
+ const int* lda,
139
+ const double* b,
140
+ const int* ldb,
141
+ const double* beta,
142
+ double* c,
143
+ const int* ldc);
144
+
145
+ void cgemm(const char* transa,
146
+ const char* transb,
147
+ const int* m,
148
+ const int* n,
149
+ const int* k,
150
+ const cuComplex* alpha,
151
+ const cuComplex* a,
152
+ const int* lda,
153
+ const cuComplex* b,
154
+ const int* ldb,
155
+ const cuComplex* beta,
156
+ cuComplex* c,
157
+ const int* ldc);
158
+
159
+ void zgemm(const char* transa,
160
+ const char* transb,
161
+ const int* m,
162
+ const int* n,
163
+ const int* k,
164
+ const cuDoubleComplex* alpha,
165
+ const cuDoubleComplex* a,
166
+ const int* lda,
167
+ const cuDoubleComplex* b,
168
+ const int* ldb,
169
+ const cuDoubleComplex* beta,
170
+ cuDoubleComplex* c,
171
+ const int* ldc);
172
+
173
+ /* SYRK */
174
+ void ssyrk_(const char* uplo,
175
+ const char* trans,
176
+ const int* n,
177
+ const int* k,
178
+ const float* alpha,
179
+ const float* a,
180
+ const int* lda,
181
+ const float* beta,
182
+ float* c,
183
+ const int* ldc);
184
+
185
+ void dsyrk_(const char* uplo,
186
+ const char* trans,
187
+ const int* n,
188
+ const int* k,
189
+ const double* alpha,
190
+ const double* a,
191
+ const int* lda,
192
+ const double* beta,
193
+ double* c,
194
+ const int* ldc);
195
+
196
+ void csyrk_(const char* uplo,
197
+ const char* trans,
198
+ const int* n,
199
+ const int* k,
200
+ const cuComplex* alpha,
201
+ const cuComplex* a,
202
+ const int* lda,
203
+ const cuComplex* beta,
204
+ cuComplex* c,
205
+ const int* ldc);
206
+
207
+ void zsyrk_(const char* uplo,
208
+ const char* trans,
209
+ const int* n,
210
+ const int* k,
211
+ const cuDoubleComplex* alpha,
212
+ const cuDoubleComplex* a,
213
+ const int* lda,
214
+ const cuDoubleComplex* beta,
215
+ cuDoubleComplex* c,
216
+ const int* ldc);
217
+
218
+ void ssyrk(const char* uplo,
219
+ const char* trans,
220
+ const int* n,
221
+ const int* k,
222
+ const float* alpha,
223
+ const float* a,
224
+ const int* lda,
225
+ const float* beta,
226
+ float* c,
227
+ const int* ldc);
228
+
229
+ void dsyrk(const char* uplo,
230
+ const char* trans,
231
+ const int* n,
232
+ const int* k,
233
+ const double* alpha,
234
+ const double* a,
235
+ const int* lda,
236
+ const double* beta,
237
+ double* c,
238
+ const int* ldc);
239
+
240
+ void csyrk(const char* uplo,
241
+ const char* trans,
242
+ const int* n,
243
+ const int* k,
244
+ const cuComplex* alpha,
245
+ const cuComplex* a,
246
+ const int* lda,
247
+ const cuComplex* beta,
248
+ cuComplex* c,
249
+ const int* ldc);
250
+
251
+ void zsyrk(const char* uplo,
252
+ const char* trans,
253
+ const int* n,
254
+ const int* k,
255
+ const cuDoubleComplex* alpha,
256
+ const cuDoubleComplex* a,
257
+ const int* lda,
258
+ const cuDoubleComplex* beta,
259
+ cuDoubleComplex* c,
260
+ const int* ldc);
261
+
262
+ /* HERK */
263
+ void cherk_(const char* uplo,
264
+ const char* trans,
265
+ const int* n,
266
+ const int* k,
267
+ const float* alpha,
268
+ const cuComplex* a,
269
+ const int* lda,
270
+ const float* beta,
271
+ cuComplex* c,
272
+ const int* ldc);
273
+
274
+ void zherk_(const char* uplo,
275
+ const char* trans,
276
+ const int* n,
277
+ const int* k,
278
+ const double* alpha,
279
+ const cuDoubleComplex* a,
280
+ const int* lda,
281
+ const double* beta,
282
+ cuDoubleComplex* c,
283
+ const int* ldc);
284
+
285
+ void cherk(const char* uplo,
286
+ const char* trans,
287
+ const int* n,
288
+ const int* k,
289
+ const float* alpha,
290
+ const cuComplex* a,
291
+ const int* lda,
292
+ const float* beta,
293
+ cuComplex* c,
294
+ const int* ldc);
295
+
296
+ void zherk(const char* uplo,
297
+ const char* trans,
298
+ const int* n,
299
+ const int* k,
300
+ const double* alpha,
301
+ const cuDoubleComplex* a,
302
+ const int* lda,
303
+ const double* beta,
304
+ cuDoubleComplex* c,
305
+ const int* ldc);
306
+
307
+ /* TRSM */
308
+ void strsm_(const char* side,
309
+ const char* uplo,
310
+ const char* transa,
311
+ const char* diag,
312
+ const int* m,
313
+ const int* n,
314
+ const float* alpha,
315
+ const float* a,
316
+ const int* lda,
317
+ float* b,
318
+ const int* ldb);
319
+
320
+ void dtrsm_(const char* side,
321
+ const char* uplo,
322
+ const char* transa,
323
+ const char* diag,
324
+ const int* m,
325
+ const int* n,
326
+ const double* alpha,
327
+ const double* a,
328
+ const int* lda,
329
+ double* b,
330
+ const int* ldb);
331
+
332
+ void ctrsm_(const char* side,
333
+ const char* uplo,
334
+ const char* transa,
335
+ const char* diag,
336
+ const int* m,
337
+ const int* n,
338
+ const cuComplex* alpha,
339
+ const cuComplex* a,
340
+ const int* lda,
341
+ cuComplex* b,
342
+ const int* ldb);
343
+
344
+ void ztrsm_(const char* side,
345
+ const char* uplo,
346
+ const char* transa,
347
+ const char* diag,
348
+ const int* m,
349
+ const int* n,
350
+ const cuDoubleComplex* alpha,
351
+ const cuDoubleComplex* a,
352
+ const int* lda,
353
+ cuDoubleComplex* b,
354
+ const int* ldb);
355
+
356
+ void strsm(const char* side,
357
+ const char* uplo,
358
+ const char* transa,
359
+ const char* diag,
360
+ const int* m,
361
+ const int* n,
362
+ const float* alpha,
363
+ const float* a,
364
+ const int* lda,
365
+ float* b,
366
+ const int* ldb);
367
+
368
+ void dtrsm(const char* side,
369
+ const char* uplo,
370
+ const char* transa,
371
+ const char* diag,
372
+ const int* m,
373
+ const int* n,
374
+ const double* alpha,
375
+ const double* a,
376
+ const int* lda,
377
+ double* b,
378
+ const int* ldb);
379
+
380
+ void ctrsm(const char* side,
381
+ const char* uplo,
382
+ const char* transa,
383
+ const char* diag,
384
+ const int* m,
385
+ const int* n,
386
+ const cuComplex* alpha,
387
+ const cuComplex* a,
388
+ const int* lda,
389
+ cuComplex* b,
390
+ const int* ldb);
391
+
392
+ void ztrsm(const char* side,
393
+ const char* uplo,
394
+ const char* transa,
395
+ const char* diag,
396
+ const int* m,
397
+ const int* n,
398
+ const cuDoubleComplex* alpha,
399
+ const cuDoubleComplex* a,
400
+ const int* lda,
401
+ cuDoubleComplex* b,
402
+ const int* ldb);
403
+
404
+ /* SYMM */
405
+ void ssymm_(const char* side,
406
+ const char* uplo,
407
+ const int* m,
408
+ const int* n,
409
+ const float* alpha,
410
+ const float* a,
411
+ const int* lda,
412
+ const float* b,
413
+ const int* ldb,
414
+ const float* beta,
415
+ float* c,
416
+ const int* ldc);
417
+
418
+ void dsymm_(const char* side,
419
+ const char* uplo,
420
+ const int* m,
421
+ const int* n,
422
+ const double* alpha,
423
+ const double* a,
424
+ const int* lda,
425
+ const double* b,
426
+ const int* ldb,
427
+ const double* beta,
428
+ double* c,
429
+ const int* ldc);
430
+
431
+ void csymm_(const char* side,
432
+ const char* uplo,
433
+ const int* m,
434
+ const int* n,
435
+ const cuComplex* alpha,
436
+ const cuComplex* a,
437
+ const int* lda,
438
+ const cuComplex* b,
439
+ const int* ldb,
440
+ const cuComplex* beta,
441
+ cuComplex* c,
442
+ const int* ldc);
443
+
444
+ void zsymm_(const char* side,
445
+ const char* uplo,
446
+ const int* m,
447
+ const int* n,
448
+ const cuDoubleComplex* alpha,
449
+ const cuDoubleComplex* a,
450
+ const int* lda,
451
+ const cuDoubleComplex* b,
452
+ const int* ldb,
453
+ const cuDoubleComplex* beta,
454
+ cuDoubleComplex* c,
455
+ const int* ldc);
456
+
457
+ void ssymm(const char* side,
458
+ const char* uplo,
459
+ const int* m,
460
+ const int* n,
461
+ const float* alpha,
462
+ const float* a,
463
+ const int* lda,
464
+ const float* b,
465
+ const int* ldb,
466
+ const float* beta,
467
+ float* c,
468
+ const int* ldc);
469
+
470
+ void dsymm(const char* side,
471
+ const char* uplo,
472
+ const int* m,
473
+ const int* n,
474
+ const double* alpha,
475
+ const double* a,
476
+ const int* lda,
477
+ const double* b,
478
+ const int* ldb,
479
+ const double* beta,
480
+ double* c,
481
+ const int* ldc);
482
+
483
+ void csymm(const char* side,
484
+ const char* uplo,
485
+ const int* m,
486
+ const int* n,
487
+ const cuComplex* alpha,
488
+ const cuComplex* a,
489
+ const int* lda,
490
+ const cuComplex* b,
491
+ const int* ldb,
492
+ const cuComplex* beta,
493
+ cuComplex* c,
494
+ const int* ldc);
495
+
496
+ void zsymm(const char* side,
497
+ const char* uplo,
498
+ const int* m,
499
+ const int* n,
500
+ const cuDoubleComplex* alpha,
501
+ const cuDoubleComplex* a,
502
+ const int* lda,
503
+ const cuDoubleComplex* b,
504
+ const int* ldb,
505
+ const cuDoubleComplex* beta,
506
+ cuDoubleComplex* c,
507
+ const int* ldc);
508
+
509
+ /* HEMM */
510
+ void chemm_(const char* side,
511
+ const char* uplo,
512
+ const int* m,
513
+ const int* n,
514
+ const cuComplex* alpha,
515
+ const cuComplex* a,
516
+ const int* lda,
517
+ const cuComplex* b,
518
+ const int* ldb,
519
+ const cuComplex* beta,
520
+ cuComplex* c,
521
+ const int* ldc);
522
+
523
+ void zhemm_(const char* side,
524
+ const char* uplo,
525
+ const int* m,
526
+ const int* n,
527
+ const cuDoubleComplex* alpha,
528
+ const cuDoubleComplex* a,
529
+ const int* lda,
530
+ const cuDoubleComplex* b,
531
+ const int* ldb,
532
+ const cuDoubleComplex* beta,
533
+ cuDoubleComplex* c,
534
+ const int* ldc);
535
+
536
+ /* HEMM with no underscore*/
537
+ void chemm(const char* side,
538
+ const char* uplo,
539
+ const int* m,
540
+ const int* n,
541
+ const cuComplex* alpha,
542
+ const cuComplex* a,
543
+ const int* lda,
544
+ const cuComplex* b,
545
+ const int* ldb,
546
+ const cuComplex* beta,
547
+ cuComplex* c,
548
+ const int* ldc);
549
+
550
+ void zhemm(const char* side,
551
+ const char* uplo,
552
+ const int* m,
553
+ const int* n,
554
+ const cuDoubleComplex* alpha,
555
+ const cuDoubleComplex* a,
556
+ const int* lda,
557
+ const cuDoubleComplex* b,
558
+ const int* ldb,
559
+ const cuDoubleComplex* beta,
560
+ cuDoubleComplex* c,
561
+ const int* ldc);
562
+
563
+ /* SYR2K */
564
+ void ssyr2k_(const char* uplo,
565
+ const char* trans,
566
+ const int* n,
567
+ const int* k,
568
+ const float* alpha,
569
+ const float* a,
570
+ const int* lda,
571
+ const float* b,
572
+ const int* ldb,
573
+ const float* beta,
574
+ float* c,
575
+ const int* ldc);
576
+
577
+ void dsyr2k_(const char* uplo,
578
+ const char* trans,
579
+ const int* n,
580
+ const int* k,
581
+ const double* alpha,
582
+ const double* a,
583
+ const int* lda,
584
+ const double* b,
585
+ const int* ldb,
586
+ const double* beta,
587
+ double* c,
588
+ const int* ldc);
589
+
590
+ void csyr2k_(const char* uplo,
591
+ const char* trans,
592
+ const int* n,
593
+ const int* k,
594
+ const cuComplex* alpha,
595
+ const cuComplex* a,
596
+ const int* lda,
597
+ const cuComplex* b,
598
+ const int* ldb,
599
+ const cuComplex* beta,
600
+ cuComplex* c,
601
+ const int* ldc);
602
+
603
+ void zsyr2k_(const char* uplo,
604
+ const char* trans,
605
+ const int* n,
606
+ const int* k,
607
+ const cuDoubleComplex* alpha,
608
+ const cuDoubleComplex* a,
609
+ const int* lda,
610
+ const cuDoubleComplex* b,
611
+ const int* ldb,
612
+ const cuDoubleComplex* beta,
613
+ cuDoubleComplex* c,
614
+ const int* ldc);
615
+
616
+ /* SYR2K no_underscore*/
617
+ void ssyr2k(const char* uplo,
618
+ const char* trans,
619
+ const int* n,
620
+ const int* k,
621
+ const float* alpha,
622
+ const float* a,
623
+ const int* lda,
624
+ const float* b,
625
+ const int* ldb,
626
+ const float* beta,
627
+ float* c,
628
+ const int* ldc);
629
+
630
+ void dsyr2k(const char* uplo,
631
+ const char* trans,
632
+ const int* n,
633
+ const int* k,
634
+ const double* alpha,
635
+ const double* a,
636
+ const int* lda,
637
+ const double* b,
638
+ const int* ldb,
639
+ const double* beta,
640
+ double* c,
641
+ const int* ldc);
642
+
643
+ void csyr2k(const char* uplo,
644
+ const char* trans,
645
+ const int* n,
646
+ const int* k,
647
+ const cuComplex* alpha,
648
+ const cuComplex* a,
649
+ const int* lda,
650
+ const cuComplex* b,
651
+ const int* ldb,
652
+ const cuComplex* beta,
653
+ cuComplex* c,
654
+ const int* ldc);
655
+
656
+ void zsyr2k(const char* uplo,
657
+ const char* trans,
658
+ const int* n,
659
+ const int* k,
660
+ const cuDoubleComplex* alpha,
661
+ const cuDoubleComplex* a,
662
+ const int* lda,
663
+ const cuDoubleComplex* b,
664
+ const int* ldb,
665
+ const cuDoubleComplex* beta,
666
+ cuDoubleComplex* c,
667
+ const int* ldc);
668
+
669
+ /* HERK */
670
+ void cher2k_(const char* uplo,
671
+ const char* trans,
672
+ const int* n,
673
+ const int* k,
674
+ const cuComplex* alpha,
675
+ const cuComplex* a,
676
+ const int* lda,
677
+ const cuComplex* b,
678
+ const int* ldb,
679
+ const float* beta,
680
+ cuComplex* c,
681
+ const int* ldc);
682
+
683
+ void zher2k_(const char* uplo,
684
+ const char* trans,
685
+ const int* n,
686
+ const int* k,
687
+ const cuDoubleComplex* alpha,
688
+ const cuDoubleComplex* a,
689
+ const int* lda,
690
+ const cuDoubleComplex* b,
691
+ const int* ldb,
692
+ const double* beta,
693
+ cuDoubleComplex* c,
694
+ const int* ldc);
695
+
696
+ /* HER2K with no underscore */
697
+ void cher2k(const char* uplo,
698
+ const char* trans,
699
+ const int* n,
700
+ const int* k,
701
+ const cuComplex* alpha,
702
+ const cuComplex* a,
703
+ const int* lda,
704
+ const cuComplex* b,
705
+ const int* ldb,
706
+ const float* beta,
707
+ cuComplex* c,
708
+ const int* ldc);
709
+
710
+ void zher2k(const char* uplo,
711
+ const char* trans,
712
+ const int* n,
713
+ const int* k,
714
+ const cuDoubleComplex* alpha,
715
+ const cuDoubleComplex* a,
716
+ const int* lda,
717
+ const cuDoubleComplex* b,
718
+ const int* ldb,
719
+ const double* beta,
720
+ cuDoubleComplex* c,
721
+ const int* ldc);
722
+
723
+ /* TRMM */
724
+ void strmm_(const char* side,
725
+ const char* uplo,
726
+ const char* transa,
727
+ const char* diag,
728
+ const int* m,
729
+ const int* n,
730
+ const float* alpha,
731
+ const float* a,
732
+ const int* lda,
733
+ float* b,
734
+ const int* ldb);
735
+
736
+ void dtrmm_(const char* side,
737
+ const char* uplo,
738
+ const char* transa,
739
+ const char* diag,
740
+ const int* m,
741
+ const int* n,
742
+ const double* alpha,
743
+ const double* a,
744
+ const int* lda,
745
+ double* b,
746
+ const int* ldb);
747
+
748
+ void ctrmm_(const char* side,
749
+ const char* uplo,
750
+ const char* transa,
751
+ const char* diag,
752
+ const int* m,
753
+ const int* n,
754
+ const cuComplex* alpha,
755
+ const cuComplex* a,
756
+ const int* lda,
757
+ cuComplex* b,
758
+ const int* ldb);
759
+
760
+ void ztrmm_(const char* side,
761
+ const char* uplo,
762
+ const char* transa,
763
+ const char* diag,
764
+ const int* m,
765
+ const int* n,
766
+ const cuDoubleComplex* alpha,
767
+ const cuDoubleComplex* a,
768
+ const int* lda,
769
+ cuDoubleComplex* b,
770
+ const int* ldb);
771
+
772
+ void strmm(const char* side,
773
+ const char* uplo,
774
+ const char* transa,
775
+ const char* diag,
776
+ const int* m,
777
+ const int* n,
778
+ const float* alpha,
779
+ const float* a,
780
+ const int* lda,
781
+ float* b,
782
+ const int* ldb);
783
+
784
+ void dtrmm(const char* side,
785
+ const char* uplo,
786
+ const char* transa,
787
+ const char* diag,
788
+ const int* m,
789
+ const int* n,
790
+ const double* alpha,
791
+ const double* a,
792
+ const int* lda,
793
+ double* b,
794
+ const int* ldb);
795
+
796
+ void ctrmm(const char* side,
797
+ const char* uplo,
798
+ const char* transa,
799
+ const char* diag,
800
+ const int* m,
801
+ const int* n,
802
+ const cuComplex* alpha,
803
+ const cuComplex* a,
804
+ const int* lda,
805
+ cuComplex* b,
806
+ const int* ldb);
807
+
808
+ void ztrmm(const char* side,
809
+ const char* uplo,
810
+ const char* transa,
811
+ const char* diag,
812
+ const int* m,
813
+ const int* n,
814
+ const cuDoubleComplex* alpha,
815
+ const cuDoubleComplex* a,
816
+ const int* lda,
817
+ cuDoubleComplex* b,
818
+ const int* ldb);
819
+
820
+ #if defined(__cplusplus)
821
+ }
822
+ #endif /* __cplusplus */
823
+
824
+ #endif /* !defined(NVBLAS_H_) */
mgm/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.11 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:797d41e6650bbab42bfebff997eb589ce726ed2fcfed8e79587b647142325430
3
+ size 733032
mgm/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.12 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7c2a58dc54154208392301d0fe3d53a120e4c1ebeab9e80ce91fe9948baeadc9
3
+ size 757496
mgm/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so.11.2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e301ec7626aa04df4c22711657eed167870c8e990a0d334ad3b11412e8e46c4b
3
+ size 45791368
mgm/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn.so.9 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3f8c3b8f7c994be7fb1ce60bd567a2b49e169891d3515cc1e59533e9703b6f2c
3
+ size 4724176
mgm/lib/python3.10/site-packages/nvidia/cufft/__init__.py ADDED
File without changes
mgm/lib/python3.10/site-packages/nvidia/cufft/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (161 Bytes). View file
 
mgm/lib/python3.10/site-packages/nvidia/cufft/include/__init__.py ADDED
File without changes
mgm/lib/python3.10/site-packages/nvidia/cufft/include/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (169 Bytes). View file
 
mgm/lib/python3.10/site-packages/nvidia/cufft/include/cudalibxt.h ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2013,2014 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ /*!
50
+ * \file cudalibxt.h
51
+ * \brief Public header file for the NVIDIA library multi-GPU support structures
52
+ */
53
+
54
+ #ifndef _CUDA_LIB_XT_H_
55
+ #define _CUDA_LIB_XT_H_
56
+ #include <cuda_runtime.h>
57
+
58
+ #define CUDA_XT_DESCRIPTOR_VERSION 0x01000000 // This is added to CUDART_VERSION
59
+
60
+ enum cudaXtCopyType_t {
61
+ LIB_XT_COPY_HOST_TO_DEVICE,
62
+ LIB_XT_COPY_DEVICE_TO_HOST,
63
+ LIB_XT_COPY_DEVICE_TO_DEVICE
64
+ } ;
65
+ typedef enum cudaXtCopyType_t cudaLibXtCopyType;
66
+
67
+ enum libFormat_t {
68
+ LIB_FORMAT_CUFFT = 0x0,
69
+ LIB_FORMAT_UNDEFINED = 0x1
70
+ };
71
+
72
+ typedef enum libFormat_t libFormat;
73
+
74
+ #define MAX_CUDA_DESCRIPTOR_GPUS 64
75
+
76
+ struct cudaXtDesc_t{
77
+ int version; //descriptor version
78
+ int nGPUs; //number of GPUs
79
+ int GPUs[MAX_CUDA_DESCRIPTOR_GPUS]; //array of device IDs
80
+ void *data[MAX_CUDA_DESCRIPTOR_GPUS]; //array of pointers to data, one per GPU
81
+ size_t size[MAX_CUDA_DESCRIPTOR_GPUS]; //array of data sizes, one per GPU
82
+ void *cudaXtState; //opaque CUDA utility structure
83
+ };
84
+ typedef struct cudaXtDesc_t cudaXtDesc;
85
+
86
+ struct cudaLibXtDesc_t{
87
+ int version; //descriptor version
88
+ cudaXtDesc *descriptor; //multi-GPU memory descriptor
89
+ libFormat library; //which library recognizes the format
90
+ int subFormat; //library specific enumerator of sub formats
91
+ void *libDescriptor; //library specific descriptor e.g. FFT transform plan object
92
+ };
93
+ typedef struct cudaLibXtDesc_t cudaLibXtDesc;
94
+
95
+
96
+ #endif
97
+
mgm/lib/python3.10/site-packages/nvidia/cufft/include/cufft.h ADDED
@@ -0,0 +1,322 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2005-2021 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ /*!
50
+ * \file cufft.h
51
+ * \brief Public header file for the NVIDIA CUDA FFT library (CUFFT)
52
+ */
53
+
54
+ #ifndef _CUFFT_H_
55
+ #define _CUFFT_H_
56
+
57
+
58
+ #include "cuComplex.h"
59
+ #include "driver_types.h"
60
+ #include "library_types.h"
61
+
62
+ #ifndef CUFFTAPI
63
+ #ifdef _WIN32
64
+ #define CUFFTAPI __stdcall
65
+ #elif __GNUC__ >= 4
66
+ #define CUFFTAPI __attribute__ ((visibility ("default")))
67
+ #else
68
+ #define CUFFTAPI
69
+ #endif
70
+ #endif
71
+
72
+ #ifdef __cplusplus
73
+ extern "C" {
74
+ #endif
75
+
76
+ #define CUFFT_VER_MAJOR 10
77
+ #define CUFFT_VER_MINOR 9
78
+ #define CUFFT_VER_PATCH 0
79
+ #define CUFFT_VER_BUILD 58
80
+
81
+ // cuFFT library version
82
+ //
83
+ // CUFFT_VERSION / 1000 - major version
84
+ // CUFFT_VERSION / 100 % 100 - minor version
85
+ // CUFFT_VERSION % 100 - patch level
86
+ #define CUFFT_VERSION 10900
87
+
88
+ // CUFFT API function return values
89
+ typedef enum cufftResult_t {
90
+ CUFFT_SUCCESS = 0x0,
91
+ CUFFT_INVALID_PLAN = 0x1,
92
+ CUFFT_ALLOC_FAILED = 0x2,
93
+ CUFFT_INVALID_TYPE = 0x3,
94
+ CUFFT_INVALID_VALUE = 0x4,
95
+ CUFFT_INTERNAL_ERROR = 0x5,
96
+ CUFFT_EXEC_FAILED = 0x6,
97
+ CUFFT_SETUP_FAILED = 0x7,
98
+ CUFFT_INVALID_SIZE = 0x8,
99
+ CUFFT_UNALIGNED_DATA = 0x9,
100
+ CUFFT_INCOMPLETE_PARAMETER_LIST = 0xA,
101
+ CUFFT_INVALID_DEVICE = 0xB,
102
+ CUFFT_PARSE_ERROR = 0xC,
103
+ CUFFT_NO_WORKSPACE = 0xD,
104
+ CUFFT_NOT_IMPLEMENTED = 0xE,
105
+ CUFFT_LICENSE_ERROR = 0x0F,
106
+ CUFFT_NOT_SUPPORTED = 0x10
107
+
108
+ } cufftResult;
109
+
110
+ #define MAX_CUFFT_ERROR 0x11
111
+
112
+
113
+ // CUFFT defines and supports the following data types
114
+
115
+
116
+ // cufftReal is a single-precision, floating-point real data type.
117
+ // cufftDoubleReal is a double-precision, real data type.
118
+ typedef float cufftReal;
119
+ typedef double cufftDoubleReal;
120
+
121
+ // cufftComplex is a single-precision, floating-point complex data type that
122
+ // consists of interleaved real and imaginary components.
123
+ // cufftDoubleComplex is the double-precision equivalent.
124
+ typedef cuComplex cufftComplex;
125
+ typedef cuDoubleComplex cufftDoubleComplex;
126
+
127
+ // CUFFT transform directions
128
+ #define CUFFT_FORWARD -1 // Forward FFT
129
+ #define CUFFT_INVERSE 1 // Inverse FFT
130
+
131
+ // CUFFT supports the following transform types
132
+ typedef enum cufftType_t {
133
+ CUFFT_R2C = 0x2a, // Real to Complex (interleaved)
134
+ CUFFT_C2R = 0x2c, // Complex (interleaved) to Real
135
+ CUFFT_C2C = 0x29, // Complex to Complex, interleaved
136
+ CUFFT_D2Z = 0x6a, // Double to Double-Complex
137
+ CUFFT_Z2D = 0x6c, // Double-Complex to Double
138
+ CUFFT_Z2Z = 0x69 // Double-Complex to Double-Complex
139
+ } cufftType;
140
+
141
+ // CUFFT supports the following data layouts
142
+ typedef enum cufftCompatibility_t {
143
+ CUFFT_COMPATIBILITY_FFTW_PADDING = 0x01 // The default value
144
+ } cufftCompatibility;
145
+
146
+ #define CUFFT_COMPATIBILITY_DEFAULT CUFFT_COMPATIBILITY_FFTW_PADDING
147
+
148
+ //
149
+ // structure definition used by the shim between old and new APIs
150
+ //
151
+ #define MAX_SHIM_RANK 3
152
+
153
+ // cufftHandle is a handle type used to store and access CUFFT plans.
154
+ typedef int cufftHandle;
155
+
156
+
157
+ cufftResult CUFFTAPI cufftPlan1d(cufftHandle *plan,
158
+ int nx,
159
+ cufftType type,
160
+ int batch);
161
+
162
+ cufftResult CUFFTAPI cufftPlan2d(cufftHandle *plan,
163
+ int nx, int ny,
164
+ cufftType type);
165
+
166
+ cufftResult CUFFTAPI cufftPlan3d(cufftHandle *plan,
167
+ int nx, int ny, int nz,
168
+ cufftType type);
169
+
170
+ cufftResult CUFFTAPI cufftPlanMany(cufftHandle *plan,
171
+ int rank,
172
+ int *n,
173
+ int *inembed, int istride, int idist,
174
+ int *onembed, int ostride, int odist,
175
+ cufftType type,
176
+ int batch);
177
+
178
+ cufftResult CUFFTAPI cufftMakePlan1d(cufftHandle plan,
179
+ int nx,
180
+ cufftType type,
181
+ int batch,
182
+ size_t *workSize);
183
+
184
+ cufftResult CUFFTAPI cufftMakePlan2d(cufftHandle plan,
185
+ int nx, int ny,
186
+ cufftType type,
187
+ size_t *workSize);
188
+
189
+ cufftResult CUFFTAPI cufftMakePlan3d(cufftHandle plan,
190
+ int nx, int ny, int nz,
191
+ cufftType type,
192
+ size_t *workSize);
193
+
194
+ cufftResult CUFFTAPI cufftMakePlanMany(cufftHandle plan,
195
+ int rank,
196
+ int *n,
197
+ int *inembed, int istride, int idist,
198
+ int *onembed, int ostride, int odist,
199
+ cufftType type,
200
+ int batch,
201
+ size_t *workSize);
202
+
203
+ cufftResult CUFFTAPI cufftMakePlanMany64(cufftHandle plan,
204
+ int rank,
205
+ long long int *n,
206
+ long long int *inembed,
207
+ long long int istride,
208
+ long long int idist,
209
+ long long int *onembed,
210
+ long long int ostride, long long int odist,
211
+ cufftType type,
212
+ long long int batch,
213
+ size_t * workSize);
214
+
215
+ cufftResult CUFFTAPI cufftGetSizeMany64(cufftHandle plan,
216
+ int rank,
217
+ long long int *n,
218
+ long long int *inembed,
219
+ long long int istride, long long int idist,
220
+ long long int *onembed,
221
+ long long int ostride, long long int odist,
222
+ cufftType type,
223
+ long long int batch,
224
+ size_t *workSize);
225
+
226
+
227
+
228
+
229
+ cufftResult CUFFTAPI cufftEstimate1d(int nx,
230
+ cufftType type,
231
+ int batch,
232
+ size_t *workSize);
233
+
234
+ cufftResult CUFFTAPI cufftEstimate2d(int nx, int ny,
235
+ cufftType type,
236
+ size_t *workSize);
237
+
238
+ cufftResult CUFFTAPI cufftEstimate3d(int nx, int ny, int nz,
239
+ cufftType type,
240
+ size_t *workSize);
241
+
242
+ cufftResult CUFFTAPI cufftEstimateMany(int rank,
243
+ int *n,
244
+ int *inembed, int istride, int idist,
245
+ int *onembed, int ostride, int odist,
246
+ cufftType type,
247
+ int batch,
248
+ size_t *workSize);
249
+
250
+ cufftResult CUFFTAPI cufftCreate(cufftHandle * handle);
251
+
252
+ cufftResult CUFFTAPI cufftGetSize1d(cufftHandle handle,
253
+ int nx,
254
+ cufftType type,
255
+ int batch,
256
+ size_t *workSize );
257
+
258
+ cufftResult CUFFTAPI cufftGetSize2d(cufftHandle handle,
259
+ int nx, int ny,
260
+ cufftType type,
261
+ size_t *workSize);
262
+
263
+ cufftResult CUFFTAPI cufftGetSize3d(cufftHandle handle,
264
+ int nx, int ny, int nz,
265
+ cufftType type,
266
+ size_t *workSize);
267
+
268
+ cufftResult CUFFTAPI cufftGetSizeMany(cufftHandle handle,
269
+ int rank, int *n,
270
+ int *inembed, int istride, int idist,
271
+ int *onembed, int ostride, int odist,
272
+ cufftType type, int batch, size_t *workArea);
273
+
274
+ cufftResult CUFFTAPI cufftGetSize(cufftHandle handle, size_t *workSize);
275
+
276
+ cufftResult CUFFTAPI cufftSetWorkArea(cufftHandle plan, void *workArea);
277
+
278
+ cufftResult CUFFTAPI cufftSetAutoAllocation(cufftHandle plan, int autoAllocate);
279
+
280
+ cufftResult CUFFTAPI cufftExecC2C(cufftHandle plan,
281
+ cufftComplex *idata,
282
+ cufftComplex *odata,
283
+ int direction);
284
+
285
+ cufftResult CUFFTAPI cufftExecR2C(cufftHandle plan,
286
+ cufftReal *idata,
287
+ cufftComplex *odata);
288
+
289
+ cufftResult CUFFTAPI cufftExecC2R(cufftHandle plan,
290
+ cufftComplex *idata,
291
+ cufftReal *odata);
292
+
293
+ cufftResult CUFFTAPI cufftExecZ2Z(cufftHandle plan,
294
+ cufftDoubleComplex *idata,
295
+ cufftDoubleComplex *odata,
296
+ int direction);
297
+
298
+ cufftResult CUFFTAPI cufftExecD2Z(cufftHandle plan,
299
+ cufftDoubleReal *idata,
300
+ cufftDoubleComplex *odata);
301
+
302
+ cufftResult CUFFTAPI cufftExecZ2D(cufftHandle plan,
303
+ cufftDoubleComplex *idata,
304
+ cufftDoubleReal *odata);
305
+
306
+
307
+ // utility functions
308
+ cufftResult CUFFTAPI cufftSetStream(cufftHandle plan,
309
+ cudaStream_t stream);
310
+
311
+ cufftResult CUFFTAPI cufftDestroy(cufftHandle plan);
312
+
313
+ cufftResult CUFFTAPI cufftGetVersion(int *version);
314
+
315
+ cufftResult CUFFTAPI cufftGetProperty(libraryPropertyType type,
316
+ int *value);
317
+
318
+ #ifdef __cplusplus
319
+ }
320
+ #endif
321
+
322
+ #endif /* _CUFFT_H_ */
mgm/lib/python3.10/site-packages/nvidia/cufft/include/cufftXt.h ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ /* Copyright 2005-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * The source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * The Licensed Deliverables contained herein are PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*!
51
+ * \file cufftXt.h
52
+ * \brief Public header file for the NVIDIA CUDA FFT library (CUFFT)
53
+ */
54
+
55
+ #ifndef _CUFFTXT_H_
56
+ #define _CUFFTXT_H_
57
+ #include "cudalibxt.h"
58
+ #include "cufft.h"
59
+
60
+
61
+ #ifndef CUFFTAPI
62
+ #ifdef _WIN32
63
+ #define CUFFTAPI __stdcall
64
+ #else
65
+ #define CUFFTAPI
66
+ #endif
67
+ #endif
68
+
69
+ #ifdef __cplusplus
70
+ extern "C" {
71
+ #endif
72
+
73
+ //
74
+ // cufftXtSubFormat identifies the data layout of
75
+ // a memory descriptor owned by cufft.
76
+ // note that multi GPU cufft does not yet support out-of-place transforms
77
+ //
78
+
79
+ typedef enum cufftXtSubFormat_t {
80
+ CUFFT_XT_FORMAT_INPUT = 0x00, //by default input is in linear order across GPUs
81
+ CUFFT_XT_FORMAT_OUTPUT = 0x01, //by default output is in scrambled order depending on transform
82
+ CUFFT_XT_FORMAT_INPLACE = 0x02, //by default inplace is input order, which is linear across GPUs
83
+ CUFFT_XT_FORMAT_INPLACE_SHUFFLED = 0x03, //shuffled output order after execution of the transform
84
+ CUFFT_XT_FORMAT_1D_INPUT_SHUFFLED = 0x04, //shuffled input order prior to execution of 1D transforms
85
+ CUFFT_XT_FORMAT_DISTRIBUTED_INPUT = 0x05,
86
+ CUFFT_XT_FORMAT_DISTRIBUTED_OUTPUT = 0x06,
87
+ CUFFT_FORMAT_UNDEFINED = 0x07
88
+ } cufftXtSubFormat;
89
+
90
+ //
91
+ // cufftXtCopyType specifies the type of copy for cufftXtMemcpy
92
+ //
93
+ typedef enum cufftXtCopyType_t {
94
+ CUFFT_COPY_HOST_TO_DEVICE = 0x00,
95
+ CUFFT_COPY_DEVICE_TO_HOST = 0x01,
96
+ CUFFT_COPY_DEVICE_TO_DEVICE = 0x02,
97
+ CUFFT_COPY_UNDEFINED = 0x03
98
+ } cufftXtCopyType;
99
+
100
+ //
101
+ // cufftXtQueryType specifies the type of query for cufftXtQueryPlan
102
+ //
103
+ typedef enum cufftXtQueryType_t {
104
+ CUFFT_QUERY_1D_FACTORS = 0x00,
105
+ CUFFT_QUERY_UNDEFINED = 0x01
106
+ } cufftXtQueryType;
107
+
108
+ typedef struct cufftXt1dFactors_t {
109
+ long long int size;
110
+ long long int stringCount;
111
+ long long int stringLength;
112
+ long long int substringLength;
113
+ long long int factor1;
114
+ long long int factor2;
115
+ long long int stringMask;
116
+ long long int substringMask;
117
+ long long int factor1Mask;
118
+ long long int factor2Mask;
119
+ int stringShift;
120
+ int substringShift;
121
+ int factor1Shift;
122
+ int factor2Shift;
123
+ } cufftXt1dFactors;
124
+
125
+ //
126
+ // cufftXtWorkAreaPolicy specifies policy for cufftXtSetWorkAreaPolicy
127
+ //
128
+ typedef enum cufftXtWorkAreaPolicy_t {
129
+ CUFFT_WORKAREA_MINIMAL = 0, /* maximum reduction */
130
+ CUFFT_WORKAREA_USER = 1, /* use workSize parameter as limit */
131
+ CUFFT_WORKAREA_PERFORMANCE = 2, /* default - 1x overhead or more, maximum performance */
132
+ } cufftXtWorkAreaPolicy;
133
+
134
+ // multi-GPU routines
135
+ cufftResult CUFFTAPI cufftXtSetGPUs(cufftHandle handle, int nGPUs, int *whichGPUs);
136
+
137
+ cufftResult CUFFTAPI cufftXtMalloc(cufftHandle plan,
138
+ cudaLibXtDesc ** descriptor,
139
+ cufftXtSubFormat format);
140
+
141
+ cufftResult CUFFTAPI cufftXtMemcpy(cufftHandle plan,
142
+ void *dstPointer,
143
+ void *srcPointer,
144
+ cufftXtCopyType type);
145
+
146
+ cufftResult CUFFTAPI cufftXtFree(cudaLibXtDesc *descriptor);
147
+
148
+ cufftResult CUFFTAPI cufftXtSetWorkArea(cufftHandle plan, void **workArea);
149
+
150
+ cufftResult CUFFTAPI cufftXtExecDescriptorC2C(cufftHandle plan,
151
+ cudaLibXtDesc *input,
152
+ cudaLibXtDesc *output,
153
+ int direction);
154
+
155
+ cufftResult CUFFTAPI cufftXtExecDescriptorR2C(cufftHandle plan,
156
+ cudaLibXtDesc *input,
157
+ cudaLibXtDesc *output);
158
+
159
+ cufftResult CUFFTAPI cufftXtExecDescriptorC2R(cufftHandle plan,
160
+ cudaLibXtDesc *input,
161
+ cudaLibXtDesc *output);
162
+
163
+ cufftResult CUFFTAPI cufftXtExecDescriptorZ2Z(cufftHandle plan,
164
+ cudaLibXtDesc *input,
165
+ cudaLibXtDesc *output,
166
+ int direction);
167
+
168
+ cufftResult CUFFTAPI cufftXtExecDescriptorD2Z(cufftHandle plan,
169
+ cudaLibXtDesc *input,
170
+ cudaLibXtDesc *output);
171
+
172
+ cufftResult CUFFTAPI cufftXtExecDescriptorZ2D(cufftHandle plan,
173
+ cudaLibXtDesc *input,
174
+ cudaLibXtDesc *output);
175
+
176
+ // Utility functions
177
+
178
+ cufftResult CUFFTAPI cufftXtQueryPlan(cufftHandle plan, void *queryStruct, cufftXtQueryType queryType);
179
+
180
+
181
+ // callbacks
182
+
183
+
184
+ typedef enum cufftXtCallbackType_t {
185
+ CUFFT_CB_LD_COMPLEX = 0x0,
186
+ CUFFT_CB_LD_COMPLEX_DOUBLE = 0x1,
187
+ CUFFT_CB_LD_REAL = 0x2,
188
+ CUFFT_CB_LD_REAL_DOUBLE = 0x3,
189
+ CUFFT_CB_ST_COMPLEX = 0x4,
190
+ CUFFT_CB_ST_COMPLEX_DOUBLE = 0x5,
191
+ CUFFT_CB_ST_REAL = 0x6,
192
+ CUFFT_CB_ST_REAL_DOUBLE = 0x7,
193
+ CUFFT_CB_UNDEFINED = 0x8
194
+
195
+ } cufftXtCallbackType;
196
+
197
+ typedef cufftComplex (*cufftCallbackLoadC)(void *dataIn, size_t offset, void *callerInfo, void *sharedPointer);
198
+ typedef cufftDoubleComplex (*cufftCallbackLoadZ)(void *dataIn, size_t offset, void *callerInfo, void *sharedPointer);
199
+ typedef cufftReal (*cufftCallbackLoadR)(void *dataIn, size_t offset, void *callerInfo, void *sharedPointer);
200
+ typedef cufftDoubleReal(*cufftCallbackLoadD)(void *dataIn, size_t offset, void *callerInfo, void *sharedPointer);
201
+
202
+ typedef void (*cufftCallbackStoreC)(void *dataOut, size_t offset, cufftComplex element, void *callerInfo, void *sharedPointer);
203
+ typedef void (*cufftCallbackStoreZ)(void *dataOut, size_t offset, cufftDoubleComplex element, void *callerInfo, void *sharedPointer);
204
+ typedef void (*cufftCallbackStoreR)(void *dataOut, size_t offset, cufftReal element, void *callerInfo, void *sharedPointer);
205
+ typedef void (*cufftCallbackStoreD)(void *dataOut, size_t offset, cufftDoubleReal element, void *callerInfo, void *sharedPointer);
206
+
207
+
208
+ cufftResult CUFFTAPI cufftXtSetCallback(cufftHandle plan, void **callback_routine, cufftXtCallbackType cbType, void **caller_info);
209
+ cufftResult CUFFTAPI cufftXtClearCallback(cufftHandle plan, cufftXtCallbackType cbType);
210
+ cufftResult CUFFTAPI cufftXtSetCallbackSharedSize(cufftHandle plan, cufftXtCallbackType cbType, size_t sharedSize);
211
+
212
+ cufftResult CUFFTAPI cufftXtMakePlanMany(cufftHandle plan,
213
+ int rank,
214
+ long long int *n,
215
+ long long int *inembed,
216
+ long long int istride,
217
+ long long int idist,
218
+ cudaDataType inputtype,
219
+ long long int *onembed,
220
+ long long int ostride,
221
+ long long int odist,
222
+ cudaDataType outputtype,
223
+ long long int batch,
224
+ size_t *workSize,
225
+ cudaDataType executiontype);
226
+
227
+ cufftResult CUFFTAPI cufftXtGetSizeMany(cufftHandle plan,
228
+ int rank,
229
+ long long int *n,
230
+ long long int *inembed,
231
+ long long int istride,
232
+ long long int idist,
233
+ cudaDataType inputtype,
234
+ long long int *onembed,
235
+ long long int ostride,
236
+ long long int odist,
237
+ cudaDataType outputtype,
238
+ long long int batch,
239
+ size_t *workSize,
240
+ cudaDataType executiontype);
241
+
242
+
243
+ cufftResult CUFFTAPI cufftXtExec(cufftHandle plan,
244
+ void *input,
245
+ void *output,
246
+ int direction);
247
+
248
+ cufftResult CUFFTAPI cufftXtExecDescriptor(cufftHandle plan,
249
+ cudaLibXtDesc *input,
250
+ cudaLibXtDesc *output,
251
+ int direction);
252
+
253
+ cufftResult CUFFTAPI cufftXtSetWorkAreaPolicy(cufftHandle plan, cufftXtWorkAreaPolicy policy, size_t *workSize);
254
+
255
+ typedef struct cufftBox3d_t {
256
+ size_t lower[3];
257
+ size_t upper[3];
258
+ size_t strides[3];
259
+ } cufftBox3d;
260
+
261
+ cufftResult CUFFTAPI cufftXtSetDistribution(cufftHandle plan,
262
+ const cufftBox3d *box_in,
263
+ const cufftBox3d *box_out);
264
+
265
+ #ifdef __cplusplus
266
+ }
267
+ #endif
268
+
269
+ #endif
mgm/lib/python3.10/site-packages/nvidia/cufft/include/cufftw.h ADDED
@@ -0,0 +1,454 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ /* Copyright 2005-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * The source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * The Licensed Deliverables contained herein are PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*!
51
+ * \file cufftw.h
52
+ * \brief Public header file for the NVIDIA CUDA FFTW library (CUFFTW)
53
+ */
54
+
55
+ #ifndef _CUFFTW_H_
56
+ #define _CUFFTW_H_
57
+
58
+
59
+ #include <stdio.h>
60
+ #include "cufft.h"
61
+
62
+ #ifdef __cplusplus
63
+ extern "C" {
64
+ #endif
65
+
66
+ // transform direction
67
+ #define FFTW_FORWARD -1
68
+ #define FFTW_INVERSE 1
69
+ #define FFTW_BACKWARD 1
70
+
71
+ // Planner flags
72
+
73
+ #define FFTW_ESTIMATE 0x01
74
+ #define FFTW_MEASURE 0x02
75
+ #define FFTW_PATIENT 0x03
76
+ #define FFTW_EXHAUSTIVE 0x04
77
+ #define FFTW_WISDOM_ONLY 0x05
78
+
79
+ //Algorithm restriction flags
80
+
81
+ #define FFTW_DESTROY_INPUT 0x08
82
+ #define FFTW_PRESERVE_INPUT 0x0C
83
+ #define FFTW_UNALIGNED 0x10
84
+
85
+ // CUFFTW defines and supports the following data types
86
+
87
+ // note if complex.h has been included we use the C99 complex types
88
+ #if !defined(FFTW_NO_Complex) && defined(_Complex_I) && defined (complex)
89
+ typedef double _Complex fftw_complex;
90
+ typedef float _Complex fftwf_complex;
91
+ #else
92
+ typedef double fftw_complex[2];
93
+ typedef float fftwf_complex[2];
94
+ #endif
95
+
96
+ typedef void *fftw_plan;
97
+
98
+ typedef void *fftwf_plan;
99
+
100
+ typedef struct {
101
+ int n;
102
+ int is;
103
+ int os;
104
+ } fftw_iodim;
105
+
106
+ typedef fftw_iodim fftwf_iodim;
107
+
108
+ typedef struct {
109
+ ptrdiff_t n;
110
+ ptrdiff_t is;
111
+ ptrdiff_t os;
112
+ } fftw_iodim64;
113
+
114
+ typedef fftw_iodim64 fftwf_iodim64;
115
+
116
+
117
+ // CUFFTW defines and supports the following double precision APIs
118
+
119
+
120
+ fftw_plan CUFFTAPI fftw_plan_dft_1d(int n,
121
+ fftw_complex *in,
122
+ fftw_complex *out,
123
+ int sign,
124
+ unsigned flags);
125
+
126
+ fftw_plan CUFFTAPI fftw_plan_dft_2d(int n0,
127
+ int n1,
128
+ fftw_complex *in,
129
+ fftw_complex *out,
130
+ int sign,
131
+ unsigned flags);
132
+
133
+ fftw_plan CUFFTAPI fftw_plan_dft_3d(int n0,
134
+ int n1,
135
+ int n2,
136
+ fftw_complex *in,
137
+ fftw_complex *out,
138
+ int sign,
139
+ unsigned flags);
140
+
141
+ fftw_plan CUFFTAPI fftw_plan_dft(int rank,
142
+ const int *n,
143
+ fftw_complex *in,
144
+ fftw_complex *out,
145
+ int sign,
146
+ unsigned flags);
147
+
148
+ fftw_plan CUFFTAPI fftw_plan_dft_r2c_1d(int n,
149
+ double *in,
150
+ fftw_complex *out,
151
+ unsigned flags);
152
+
153
+ fftw_plan CUFFTAPI fftw_plan_dft_r2c_2d(int n0,
154
+ int n1,
155
+ double *in,
156
+ fftw_complex *out,
157
+ unsigned flags);
158
+
159
+ fftw_plan CUFFTAPI fftw_plan_dft_r2c_3d(int n0,
160
+ int n1,
161
+ int n2,
162
+ double *in,
163
+ fftw_complex *out,
164
+ unsigned flags);
165
+
166
+ fftw_plan CUFFTAPI fftw_plan_dft_r2c(int rank,
167
+ const int *n,
168
+ double *in,
169
+ fftw_complex *out,
170
+ unsigned flags);
171
+
172
+ fftw_plan CUFFTAPI fftw_plan_dft_c2r_1d(int n,
173
+ fftw_complex *in,
174
+ double *out,
175
+ unsigned flags);
176
+
177
+ fftw_plan CUFFTAPI fftw_plan_dft_c2r_2d(int n0,
178
+ int n1,
179
+ fftw_complex *in,
180
+ double *out,
181
+ unsigned flags);
182
+
183
+ fftw_plan CUFFTAPI fftw_plan_dft_c2r_3d(int n0,
184
+ int n1,
185
+ int n2,
186
+ fftw_complex *in,
187
+ double *out,
188
+ unsigned flags);
189
+
190
+ fftw_plan CUFFTAPI fftw_plan_dft_c2r(int rank,
191
+ const int *n,
192
+ fftw_complex *in,
193
+ double *out,
194
+ unsigned flags);
195
+
196
+
197
+ fftw_plan CUFFTAPI fftw_plan_many_dft(int rank,
198
+ const int *n,
199
+ int batch,
200
+ fftw_complex *in,
201
+ const int *inembed, int istride, int idist,
202
+ fftw_complex *out,
203
+ const int *onembed, int ostride, int odist,
204
+ int sign, unsigned flags);
205
+
206
+ fftw_plan CUFFTAPI fftw_plan_many_dft_r2c(int rank,
207
+ const int *n,
208
+ int batch,
209
+ double *in,
210
+ const int *inembed, int istride, int idist,
211
+ fftw_complex *out,
212
+ const int *onembed, int ostride, int odist,
213
+ unsigned flags);
214
+
215
+ fftw_plan CUFFTAPI fftw_plan_many_dft_c2r(int rank,
216
+ const int *n,
217
+ int batch,
218
+ fftw_complex *in,
219
+ const int *inembed, int istride, int idist,
220
+ double *out,
221
+ const int *onembed, int ostride, int odist,
222
+ unsigned flags);
223
+
224
+ fftw_plan CUFFTAPI fftw_plan_guru_dft(int rank, const fftw_iodim *dims,
225
+ int batch_rank, const fftw_iodim *batch_dims,
226
+ fftw_complex *in, fftw_complex *out,
227
+ int sign, unsigned flags);
228
+
229
+ fftw_plan CUFFTAPI fftw_plan_guru_dft_r2c(int rank, const fftw_iodim *dims,
230
+ int batch_rank, const fftw_iodim *batch_dims,
231
+ double *in, fftw_complex *out,
232
+ unsigned flags);
233
+
234
+ fftw_plan CUFFTAPI fftw_plan_guru_dft_c2r(int rank, const fftw_iodim *dims,
235
+ int batch_rank, const fftw_iodim *batch_dims,
236
+ fftw_complex *in, double *out,
237
+ unsigned flags);
238
+
239
+ void CUFFTAPI fftw_execute(const fftw_plan plan);
240
+
241
+ void CUFFTAPI fftw_execute_dft(const fftw_plan plan,
242
+ fftw_complex *idata,
243
+ fftw_complex *odata);
244
+
245
+ void CUFFTAPI fftw_execute_dft_r2c(const fftw_plan plan,
246
+ double *idata,
247
+ fftw_complex *odata);
248
+
249
+ void CUFFTAPI fftw_execute_dft_c2r(const fftw_plan plan,
250
+ fftw_complex *idata,
251
+ double *odata);
252
+
253
+
254
+ // CUFFTW defines and supports the following single precision APIs
255
+
256
+ fftwf_plan CUFFTAPI fftwf_plan_dft_1d(int n,
257
+ fftwf_complex *in,
258
+ fftwf_complex *out,
259
+ int sign,
260
+ unsigned flags);
261
+
262
+ fftwf_plan CUFFTAPI fftwf_plan_dft_2d(int n0,
263
+ int n1,
264
+ fftwf_complex *in,
265
+ fftwf_complex *out,
266
+ int sign,
267
+ unsigned flags);
268
+
269
+ fftwf_plan CUFFTAPI fftwf_plan_dft_3d(int n0,
270
+ int n1,
271
+ int n2,
272
+ fftwf_complex *in,
273
+ fftwf_complex *out,
274
+ int sign,
275
+ unsigned flags);
276
+
277
+ fftwf_plan CUFFTAPI fftwf_plan_dft(int rank,
278
+ const int *n,
279
+ fftwf_complex *in,
280
+ fftwf_complex *out,
281
+ int sign,
282
+ unsigned flags);
283
+
284
+ fftwf_plan CUFFTAPI fftwf_plan_dft_r2c_1d(int n,
285
+ float *in,
286
+ fftwf_complex *out,
287
+ unsigned flags);
288
+
289
+ fftwf_plan CUFFTAPI fftwf_plan_dft_r2c_2d(int n0,
290
+ int n1,
291
+ float *in,
292
+ fftwf_complex *out,
293
+ unsigned flags);
294
+
295
+ fftwf_plan CUFFTAPI fftwf_plan_dft_r2c_3d(int n0,
296
+ int n1,
297
+ int n2,
298
+ float *in,
299
+ fftwf_complex *out,
300
+ unsigned flags);
301
+
302
+ fftwf_plan CUFFTAPI fftwf_plan_dft_r2c(int rank,
303
+ const int *n,
304
+ float *in,
305
+ fftwf_complex *out,
306
+ unsigned flags);
307
+
308
+ fftwf_plan CUFFTAPI fftwf_plan_dft_c2r_1d(int n,
309
+ fftwf_complex *in,
310
+ float *out,
311
+ unsigned flags);
312
+
313
+ fftwf_plan CUFFTAPI fftwf_plan_dft_c2r_2d(int n0,
314
+ int n1,
315
+ fftwf_complex *in,
316
+ float *out,
317
+ unsigned flags);
318
+
319
+ fftwf_plan CUFFTAPI fftwf_plan_dft_c2r_3d(int n0,
320
+ int n1,
321
+ int n2,
322
+ fftwf_complex *in,
323
+ float *out,
324
+ unsigned flags);
325
+
326
+ fftwf_plan CUFFTAPI fftwf_plan_dft_c2r(int rank,
327
+ const int *n,
328
+ fftwf_complex *in,
329
+ float *out,
330
+ unsigned flags);
331
+
332
+ fftwf_plan CUFFTAPI fftwf_plan_many_dft(int rank,
333
+ const int *n,
334
+ int batch,
335
+ fftwf_complex *in,
336
+ const int *inembed, int istride, int idist,
337
+ fftwf_complex *out,
338
+ const int *onembed, int ostride, int odist,
339
+ int sign, unsigned flags);
340
+
341
+ fftwf_plan CUFFTAPI fftwf_plan_many_dft_r2c(int rank,
342
+ const int *n,
343
+ int batch,
344
+ float *in,
345
+ const int *inembed, int istride, int idist,
346
+ fftwf_complex *out,
347
+ const int *onembed, int ostride, int odist,
348
+ unsigned flags);
349
+
350
+ fftwf_plan CUFFTAPI fftwf_plan_many_dft_c2r(int rank,
351
+ const int *n,
352
+ int batch,
353
+ fftwf_complex *in,
354
+ const int *inembed, int istride, int idist,
355
+ float *out,
356
+ const int *onembed, int ostride, int odist,
357
+ unsigned flags);
358
+
359
+ fftwf_plan CUFFTAPI fftwf_plan_guru_dft(int rank, const fftwf_iodim *dims,
360
+ int batch_rank, const fftwf_iodim *batch_dims,
361
+ fftwf_complex *in, fftwf_complex *out,
362
+ int sign, unsigned flags);
363
+
364
+ fftwf_plan CUFFTAPI fftwf_plan_guru_dft_r2c(int rank, const fftwf_iodim *dims,
365
+ int batch_rank, const fftwf_iodim *batch_dims,
366
+ float *in, fftwf_complex *out,
367
+ unsigned flags);
368
+
369
+ fftwf_plan CUFFTAPI fftwf_plan_guru_dft_c2r(int rank, const fftwf_iodim *dims,
370
+ int batch_rank, const fftwf_iodim *batch_dims,
371
+ fftwf_complex *in, float *out,
372
+ unsigned flags);
373
+
374
+ void CUFFTAPI fftwf_execute(const fftw_plan plan);
375
+
376
+ void CUFFTAPI fftwf_execute_dft(const fftwf_plan plan,
377
+ fftwf_complex *idata,
378
+ fftwf_complex *odata);
379
+
380
+ void CUFFTAPI fftwf_execute_dft_r2c(const fftwf_plan plan,
381
+ float *idata,
382
+ fftwf_complex *odata);
383
+
384
+ void CUFFTAPI fftwf_execute_dft_c2r(const fftwf_plan plan,
385
+ fftwf_complex *idata,
386
+ float *odata);
387
+
388
+ /// CUFFTW 64-bit Guru Interface
389
+ /// dp
390
+ fftw_plan CUFFTAPI fftw_plan_guru64_dft(int rank, const fftw_iodim64* dims, int batch_rank, const fftw_iodim64* batch_dims, fftw_complex* in, fftw_complex* out, int sign, unsigned flags);
391
+
392
+ fftw_plan CUFFTAPI fftw_plan_guru64_dft_r2c(int rank, const fftw_iodim64* dims, int batch_rank, const fftw_iodim64* batch_dims, double* in, fftw_complex* out, unsigned flags);
393
+
394
+ fftw_plan CUFFTAPI fftw_plan_guru64_dft_c2r(int rank, const fftw_iodim64* dims, int batch_rank, const fftw_iodim64* batch_dims, fftw_complex* in, double* out, unsigned flags);
395
+
396
+ /// sp
397
+ fftwf_plan CUFFTAPI fftwf_plan_guru64_dft(int rank, const fftwf_iodim64* dims, int batch_rank, const fftwf_iodim64* batch_dims, fftwf_complex* in, fftwf_complex* out, int sign, unsigned flags);
398
+
399
+ fftwf_plan CUFFTAPI fftwf_plan_guru64_dft_r2c(int rank, const fftwf_iodim64* dims, int batch_rank, const fftwf_iodim64* batch_dims, float* in, fftwf_complex* out, unsigned flags);
400
+
401
+ fftwf_plan CUFFTAPI fftwf_plan_guru64_dft_c2r(int rank, const fftwf_iodim64* dims, int batch_rank, const fftwf_iodim64* batch_dims, fftwf_complex* in, float* out, unsigned flags);
402
+
403
+ #ifdef _WIN32
404
+ #define _CUFFTAPI(T) T CUFFTAPI
405
+ #else
406
+ #define _CUFFTAPI(T) CUFFTAPI T
407
+ #endif
408
+
409
+ // CUFFTW defines and supports the following support APIs
410
+ _CUFFTAPI(void *) fftw_malloc(size_t n);
411
+
412
+ _CUFFTAPI(void *) fftwf_malloc(size_t n);
413
+
414
+ void CUFFTAPI fftw_free(void *pointer);
415
+
416
+ void CUFFTAPI fftwf_free(void *pointer);
417
+
418
+ void CUFFTAPI fftw_export_wisdom_to_file(FILE * output_file);
419
+
420
+ void CUFFTAPI fftwf_export_wisdom_to_file(FILE * output_file);
421
+
422
+ void CUFFTAPI fftw_import_wisdom_from_file(FILE * input_file);
423
+
424
+ void CUFFTAPI fftwf_import_wisdom_from_file(FILE * input_file);
425
+
426
+ void CUFFTAPI fftw_print_plan(const fftw_plan plan);
427
+
428
+ void CUFFTAPI fftwf_print_plan(const fftwf_plan plan);
429
+
430
+ void CUFFTAPI fftw_set_timelimit(double seconds);
431
+
432
+ void CUFFTAPI fftwf_set_timelimit(double seconds);
433
+
434
+ double CUFFTAPI fftw_cost(const fftw_plan plan);
435
+
436
+ double CUFFTAPI fftwf_cost(const fftw_plan plan);
437
+
438
+ void CUFFTAPI fftw_flops(const fftw_plan plan, double *add, double *mul, double *fma);
439
+
440
+ void CUFFTAPI fftwf_flops(const fftw_plan plan, double *add, double *mul, double *fma);
441
+
442
+ void CUFFTAPI fftw_destroy_plan(fftw_plan plan);
443
+
444
+ void CUFFTAPI fftwf_destroy_plan(fftwf_plan plan);
445
+
446
+ void CUFFTAPI fftw_cleanup(void);
447
+
448
+ void CUFFTAPI fftwf_cleanup(void);
449
+
450
+ #ifdef __cplusplus
451
+ }
452
+ #endif
453
+
454
+ #endif /* _CUFFTW_H_ */
mgm/lib/python3.10/site-packages/nvidia/cufft/lib/__init__.py ADDED
File without changes
mgm/lib/python3.10/site-packages/nvidia/cufft/lib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (165 Bytes). View file
 
mgm/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.10 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1a592a5b2f359a9077550ee1fdadd58eb2cf9cc0bfab8fe397a374fb949da143
3
+ size 1618440
mgm/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2307a5acfccc9b40f989384038218cfead564cd43633701d30c893047e744f44
3
+ size 974888
mgm/lib/python3.10/site-packages/nvidia/curand/__init__.py ADDED
File without changes
mgm/lib/python3.10/site-packages/nvidia/curand/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (162 Bytes). View file
 
mgm/lib/python3.10/site-packages/nvidia/curand/include/__init__.py ADDED
File without changes
mgm/lib/python3.10/site-packages/nvidia/curand/include/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (170 Bytes). View file
 
mgm/lib/python3.10/site-packages/nvidia/curand/include/curand.h ADDED
@@ -0,0 +1,1077 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * The source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * The Licensed Deliverables contained herein are PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(CURAND_H_)
51
+ #define CURAND_H_
52
+
53
+ /**
54
+ * \defgroup HOST Host API
55
+ *
56
+ * @{
57
+ */
58
+ #ifndef __CUDACC_RTC__
59
+ #include <cuda_runtime.h>
60
+ #endif
61
+
62
+ #ifndef CURANDAPI
63
+ #ifdef _WIN32
64
+ #define CURANDAPI __stdcall
65
+ #else
66
+ #define CURANDAPI
67
+ #endif
68
+ #endif
69
+
70
+ #if defined(__cplusplus)
71
+ extern "C" {
72
+ #endif /* __cplusplus */
73
+
74
+ #define CURAND_VER_MAJOR 10
75
+ #define CURAND_VER_MINOR 2
76
+ #define CURAND_VER_PATCH 10
77
+ #define CURAND_VER_BUILD 91
78
+ #define CURAND_VERSION (CURAND_VER_MAJOR * 1000 + \
79
+ CURAND_VER_MINOR * 100 + \
80
+ CURAND_VER_PATCH)
81
+ /* CURAND Host API datatypes */
82
+
83
+ /**
84
+ * @{
85
+ */
86
+
87
+ /**
88
+ * CURAND function call status types
89
+ */
90
+ enum curandStatus {
91
+ CURAND_STATUS_SUCCESS = 0, ///< No errors
92
+ CURAND_STATUS_VERSION_MISMATCH = 100, ///< Header file and linked library version do not match
93
+ CURAND_STATUS_NOT_INITIALIZED = 101, ///< Generator not initialized
94
+ CURAND_STATUS_ALLOCATION_FAILED = 102, ///< Memory allocation failed
95
+ CURAND_STATUS_TYPE_ERROR = 103, ///< Generator is wrong type
96
+ CURAND_STATUS_OUT_OF_RANGE = 104, ///< Argument out of range
97
+ CURAND_STATUS_LENGTH_NOT_MULTIPLE = 105, ///< Length requested is not a multiple of dimension
98
+ CURAND_STATUS_DOUBLE_PRECISION_REQUIRED = 106, ///< GPU does not have double precision required by MRG32k3a
99
+ CURAND_STATUS_LAUNCH_FAILURE = 201, ///< Kernel launch failure
100
+ CURAND_STATUS_PREEXISTING_FAILURE = 202, ///< Preexisting failure on library entry
101
+ CURAND_STATUS_INITIALIZATION_FAILED = 203, ///< Initialization of CUDA failed
102
+ CURAND_STATUS_ARCH_MISMATCH = 204, ///< Architecture mismatch, GPU does not support requested feature
103
+ CURAND_STATUS_INTERNAL_ERROR = 999 ///< Internal library error
104
+ };
105
+
106
+ /*
107
+ * CURAND function call status types
108
+ */
109
+ /** \cond UNHIDE_TYPEDEFS */
110
+ typedef enum curandStatus curandStatus_t;
111
+ /** \endcond */
112
+
113
+ /**
114
+ * CURAND generator types
115
+ */
116
+ enum curandRngType {
117
+ CURAND_RNG_TEST = 0,
118
+ CURAND_RNG_PSEUDO_DEFAULT = 100, ///< Default pseudorandom generator
119
+ CURAND_RNG_PSEUDO_XORWOW = 101, ///< XORWOW pseudorandom generator
120
+ CURAND_RNG_PSEUDO_MRG32K3A = 121, ///< MRG32k3a pseudorandom generator
121
+ CURAND_RNG_PSEUDO_MTGP32 = 141, ///< Mersenne Twister MTGP32 pseudorandom generator
122
+ CURAND_RNG_PSEUDO_MT19937 = 142, ///< Mersenne Twister MT19937 pseudorandom generator
123
+ CURAND_RNG_PSEUDO_PHILOX4_32_10 = 161, ///< PHILOX-4x32-10 pseudorandom generator
124
+ CURAND_RNG_QUASI_DEFAULT = 200, ///< Default quasirandom generator
125
+ CURAND_RNG_QUASI_SOBOL32 = 201, ///< Sobol32 quasirandom generator
126
+ CURAND_RNG_QUASI_SCRAMBLED_SOBOL32 = 202, ///< Scrambled Sobol32 quasirandom generator
127
+ CURAND_RNG_QUASI_SOBOL64 = 203, ///< Sobol64 quasirandom generator
128
+ CURAND_RNG_QUASI_SCRAMBLED_SOBOL64 = 204 ///< Scrambled Sobol64 quasirandom generator
129
+ };
130
+
131
+ /*
132
+ * CURAND generator types
133
+ */
134
+ /** \cond UNHIDE_TYPEDEFS */
135
+ typedef enum curandRngType curandRngType_t;
136
+ /** \endcond */
137
+
138
+ /**
139
+ * CURAND ordering of results in memory
140
+ */
141
+ enum curandOrdering {
142
+ CURAND_ORDERING_PSEUDO_BEST = 100, ///< Best ordering for pseudorandom results
143
+ CURAND_ORDERING_PSEUDO_DEFAULT = 101, ///< Specific default thread sequence for pseudorandom results, same as CURAND_ORDERING_PSEUDO_BEST
144
+ CURAND_ORDERING_PSEUDO_SEEDED = 102, ///< Specific seeding pattern for fast lower quality pseudorandom results
145
+ CURAND_ORDERING_PSEUDO_LEGACY = 103, ///< Specific legacy sequence for pseudorandom results, guaranteed to remain the same for all cuRAND releases
146
+ CURAND_ORDERING_PSEUDO_DYNAMIC = 104, ///< Specific ordering adjusted to the device it is being executed on, provides the best performance
147
+ CURAND_ORDERING_QUASI_DEFAULT = 201 ///< Specific n-dimensional ordering for quasirandom results
148
+ };
149
+
150
+ /*
151
+ * CURAND ordering of results in memory
152
+ */
153
+ /** \cond UNHIDE_TYPEDEFS */
154
+ typedef enum curandOrdering curandOrdering_t;
155
+ /** \endcond */
156
+
157
+ /**
158
+ * CURAND choice of direction vector set
159
+ */
160
+ enum curandDirectionVectorSet {
161
+ CURAND_DIRECTION_VECTORS_32_JOEKUO6 = 101, ///< Specific set of 32-bit direction vectors generated from polynomials recommended by S. Joe and F. Y. Kuo, for up to 20,000 dimensions
162
+ CURAND_SCRAMBLED_DIRECTION_VECTORS_32_JOEKUO6 = 102, ///< Specific set of 32-bit direction vectors generated from polynomials recommended by S. Joe and F. Y. Kuo, for up to 20,000 dimensions, and scrambled
163
+ CURAND_DIRECTION_VECTORS_64_JOEKUO6 = 103, ///< Specific set of 64-bit direction vectors generated from polynomials recommended by S. Joe and F. Y. Kuo, for up to 20,000 dimensions
164
+ CURAND_SCRAMBLED_DIRECTION_VECTORS_64_JOEKUO6 = 104 ///< Specific set of 64-bit direction vectors generated from polynomials recommended by S. Joe and F. Y. Kuo, for up to 20,000 dimensions, and scrambled
165
+ };
166
+
167
+ /*
168
+ * CURAND choice of direction vector set
169
+ */
170
+ /** \cond UNHIDE_TYPEDEFS */
171
+ typedef enum curandDirectionVectorSet curandDirectionVectorSet_t;
172
+ /** \endcond */
173
+
174
+ /**
175
+ * CURAND array of 32-bit direction vectors
176
+ */
177
+ /** \cond UNHIDE_TYPEDEFS */
178
+ typedef unsigned int curandDirectionVectors32_t[32];
179
+ /** \endcond */
180
+
181
+ /**
182
+ * CURAND array of 64-bit direction vectors
183
+ */
184
+ /** \cond UNHIDE_TYPEDEFS */
185
+ typedef unsigned long long curandDirectionVectors64_t[64];
186
+ /** \endcond **/
187
+
188
+ /**
189
+ * CURAND generator (opaque)
190
+ */
191
+ struct curandGenerator_st;
192
+
193
+ /**
194
+ * CURAND generator
195
+ */
196
+ /** \cond UNHIDE_TYPEDEFS */
197
+ typedef struct curandGenerator_st *curandGenerator_t;
198
+ /** \endcond */
199
+
200
+ /**
201
+ * CURAND distribution
202
+ */
203
+ /** \cond UNHIDE_TYPEDEFS */
204
+ typedef double curandDistribution_st;
205
+ typedef curandDistribution_st *curandDistribution_t;
206
+ typedef struct curandDistributionShift_st *curandDistributionShift_t;
207
+ /** \endcond */
208
+ /**
209
+ * CURAND distribution M2
210
+ */
211
+ /** \cond UNHIDE_TYPEDEFS */
212
+ typedef struct curandDistributionM2Shift_st *curandDistributionM2Shift_t;
213
+ typedef struct curandHistogramM2_st *curandHistogramM2_t;
214
+ typedef unsigned int curandHistogramM2K_st;
215
+ typedef curandHistogramM2K_st *curandHistogramM2K_t;
216
+ typedef curandDistribution_st curandHistogramM2V_st;
217
+ typedef curandHistogramM2V_st *curandHistogramM2V_t;
218
+
219
+ typedef struct curandDiscreteDistribution_st *curandDiscreteDistribution_t;
220
+ /** \endcond */
221
+
222
+ /*
223
+ * CURAND METHOD
224
+ */
225
+ /** \cond UNHIDE_ENUMS */
226
+ enum curandMethod {
227
+ CURAND_CHOOSE_BEST = 0, // choose best depends on args
228
+ CURAND_ITR = 1,
229
+ CURAND_KNUTH = 2,
230
+ CURAND_HITR = 3,
231
+ CURAND_M1 = 4,
232
+ CURAND_M2 = 5,
233
+ CURAND_BINARY_SEARCH = 6,
234
+ CURAND_DISCRETE_GAUSS = 7,
235
+ CURAND_REJECTION = 8,
236
+ CURAND_DEVICE_API = 9,
237
+ CURAND_FAST_REJECTION = 10,
238
+ CURAND_3RD = 11,
239
+ CURAND_DEFINITION = 12,
240
+ CURAND_POISSON = 13
241
+ };
242
+
243
+ typedef enum curandMethod curandMethod_t;
244
+ /** \endcond */
245
+
246
+
247
+ #ifndef __CUDACC_RTC__
248
+
249
+ /**
250
+ * @}
251
+ */
252
+
253
+ /**
254
+ * \brief Create new random number generator.
255
+ *
256
+ * Creates a new random number generator of type \p rng_type
257
+ * and returns it in \p *generator.
258
+ *
259
+ * Legal values for \p rng_type are:
260
+ * - CURAND_RNG_PSEUDO_DEFAULT
261
+ * - CURAND_RNG_PSEUDO_XORWOW
262
+ * - CURAND_RNG_PSEUDO_MRG32K3A
263
+ * - CURAND_RNG_PSEUDO_MTGP32
264
+ * - CURAND_RNG_PSEUDO_MT19937
265
+ * - CURAND_RNG_PSEUDO_PHILOX4_32_10
266
+ * - CURAND_RNG_QUASI_DEFAULT
267
+ * - CURAND_RNG_QUASI_SOBOL32
268
+ * - CURAND_RNG_QUASI_SCRAMBLED_SOBOL32
269
+ * - CURAND_RNG_QUASI_SOBOL64
270
+ * - CURAND_RNG_QUASI_SCRAMBLED_SOBOL64
271
+ *
272
+ * When \p rng_type is CURAND_RNG_PSEUDO_DEFAULT, the type chosen
273
+ * is CURAND_RNG_PSEUDO_XORWOW. \n
274
+ * When \p rng_type is CURAND_RNG_QUASI_DEFAULT,
275
+ * the type chosen is CURAND_RNG_QUASI_SOBOL32.
276
+ *
277
+ * The default values for \p rng_type = CURAND_RNG_PSEUDO_XORWOW are:
278
+ * - \p seed = 0
279
+ * - \p offset = 0
280
+ * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
281
+ *
282
+ * The default values for \p rng_type = CURAND_RNG_PSEUDO_MRG32K3A are:
283
+ * - \p seed = 0
284
+ * - \p offset = 0
285
+ * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
286
+ *
287
+ * The default values for \p rng_type = CURAND_RNG_PSEUDO_MTGP32 are:
288
+ * - \p seed = 0
289
+ * - \p offset = 0
290
+ * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
291
+ *
292
+ * The default values for \p rng_type = CURAND_RNG_PSEUDO_MT19937 are:
293
+ * - \p seed = 0
294
+ * - \p offset = 0
295
+ * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
296
+ *
297
+ * The default values for \p rng_type = CURAND_RNG_PSEUDO_PHILOX4_32_10 are:
298
+ * - \p seed = 0
299
+ * - \p offset = 0
300
+ * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
301
+ *
302
+ * The default values for \p rng_type = CURAND_RNG_QUASI_SOBOL32 are:
303
+ * - \p dimensions = 1
304
+ * - \p offset = 0
305
+ * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
306
+ *
307
+ * The default values for \p rng_type = CURAND_RNG_QUASI_SOBOL64 are:
308
+ * - \p dimensions = 1
309
+ * - \p offset = 0
310
+ * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
311
+ *
312
+ * The default values for \p rng_type = CURAND_RNG_QUASI_SCRAMBLED_SOBOL32 are:
313
+ * - \p dimensions = 1
314
+ * - \p offset = 0
315
+ * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
316
+ *
317
+ * The default values for \p rng_type = CURAND_RNG_QUASI_SCRAMBLED_SOBOL64 are:
318
+ * - \p dimensions = 1
319
+ * - \p offset = 0
320
+ * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
321
+ *
322
+ * \param generator - Pointer to generator
323
+ * \param rng_type - Type of generator to create
324
+ *
325
+ * \return
326
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
327
+ * - CURAND_STATUS_INITIALIZATION_FAILED if there was a problem setting up the GPU \n
328
+ * - CURAND_STATUS_VERSION_MISMATCH if the header file version does not match the
329
+ * dynamically linked library version \n
330
+ * - CURAND_STATUS_TYPE_ERROR if the value for \p rng_type is invalid \n
331
+ * - CURAND_STATUS_SUCCESS if generator was created successfully \n
332
+ *
333
+ */
334
+ curandStatus_t CURANDAPI
335
+ curandCreateGenerator(curandGenerator_t *generator, curandRngType_t rng_type);
336
+
337
+ /**
338
+ * \brief Create new host CPU random number generator.
339
+ *
340
+ * Creates a new host CPU random number generator of type \p rng_type
341
+ * and returns it in \p *generator.
342
+ *
343
+ * Legal values for \p rng_type are:
344
+ * - CURAND_RNG_PSEUDO_DEFAULT
345
+ * - CURAND_RNG_PSEUDO_XORWOW
346
+ * - CURAND_RNG_PSEUDO_MRG32K3A
347
+ * - CURAND_RNG_PSEUDO_MTGP32
348
+ * - CURAND_RNG_PSEUDO_MT19937
349
+ * - CURAND_RNG_PSEUDO_PHILOX4_32_10
350
+ * - CURAND_RNG_QUASI_DEFAULT
351
+ * - CURAND_RNG_QUASI_SOBOL32
352
+ *
353
+ * When \p rng_type is CURAND_RNG_PSEUDO_DEFAULT, the type chosen
354
+ * is CURAND_RNG_PSEUDO_XORWOW. \n
355
+ * When \p rng_type is CURAND_RNG_QUASI_DEFAULT,
356
+ * the type chosen is CURAND_RNG_QUASI_SOBOL32.
357
+ *
358
+ * The default values for \p rng_type = CURAND_RNG_PSEUDO_XORWOW are:
359
+ * - \p seed = 0
360
+ * - \p offset = 0
361
+ * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
362
+ *
363
+ * The default values for \p rng_type = CURAND_RNG_PSEUDO_MRG32K3A are:
364
+ * - \p seed = 0
365
+ * - \p offset = 0
366
+ * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
367
+ *
368
+ * The default values for \p rng_type = CURAND_RNG_PSEUDO_MTGP32 are:
369
+ * - \p seed = 0
370
+ * - \p offset = 0
371
+ * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
372
+ *
373
+ * The default values for \p rng_type = CURAND_RNG_PSEUDO_MT19937 are:
374
+ * - \p seed = 0
375
+ * - \p offset = 0
376
+ * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
377
+ *
378
+ * The default values for \p rng_type = CURAND_RNG_PSEUDO_PHILOX4_32_10 are:
379
+ * - \p seed = 0
380
+ * - \p offset = 0
381
+ * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
382
+ *
383
+ * The default values for \p rng_type = CURAND_RNG_QUASI_SOBOL32 are:
384
+ * - \p dimensions = 1
385
+ * - \p offset = 0
386
+ * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
387
+ *
388
+ * The default values for \p rng_type = CURAND_RNG_QUASI_SOBOL64 are:
389
+ * - \p dimensions = 1
390
+ * - \p offset = 0
391
+ * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
392
+ *
393
+ * The default values for \p rng_type = CURAND_RNG_QUASI_SCRAMBLED_SOBOL32 are:
394
+ * - \p dimensions = 1
395
+ * - \p offset = 0
396
+ * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
397
+ *
398
+ * The default values for \p rng_type = CURAND_RNG_QUASI_SCRAMBLED_SOBOL64 are:
399
+ * - \p dimensions = 1
400
+ * - \p offset = 0
401
+ * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
402
+ *
403
+ * \param generator - Pointer to generator
404
+ * \param rng_type - Type of generator to create
405
+ *
406
+ * \return
407
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
408
+ * - CURAND_STATUS_INITIALIZATION_FAILED if there was a problem setting up the GPU \n
409
+ * - CURAND_STATUS_VERSION_MISMATCH if the header file version does not match the
410
+ * dynamically linked library version \n
411
+ * - CURAND_STATUS_TYPE_ERROR if the value for \p rng_type is invalid \n
412
+ * - CURAND_STATUS_SUCCESS if generator was created successfully \n
413
+ */
414
+ curandStatus_t CURANDAPI
415
+ curandCreateGeneratorHost(curandGenerator_t *generator, curandRngType_t rng_type);
416
+
417
+ /**
418
+ * \brief Destroy an existing generator.
419
+ *
420
+ * Destroy an existing generator and free all memory associated with its state.
421
+ *
422
+ * \param generator - Generator to destroy
423
+ *
424
+ * \return
425
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
426
+ * - CURAND_STATUS_SUCCESS if generator was destroyed successfully \n
427
+ */
428
+ curandStatus_t CURANDAPI
429
+ curandDestroyGenerator(curandGenerator_t generator);
430
+
431
+ /**
432
+ * \brief Return the version number of the library.
433
+ *
434
+ * Return in \p *version the version number of the dynamically linked CURAND
435
+ * library. The format is the same as CUDART_VERSION from the CUDA Runtime.
436
+ * The only supported configuration is CURAND version equal to CUDA Runtime
437
+ * version.
438
+ *
439
+ * \param version - CURAND library version
440
+ *
441
+ * \return
442
+ * - CURAND_STATUS_SUCCESS if the version number was successfully returned \n
443
+ */
444
+ curandStatus_t CURANDAPI
445
+ curandGetVersion(int *version);
446
+
447
+ /**
448
+ * \brief Return the value of the curand property.
449
+ *
450
+ * Return in \p *value the number for the property described by \p type of the
451
+ * dynamically linked CURAND library.
452
+ *
453
+ * \param type - CUDA library property
454
+ * \param value - integer value for the requested property
455
+ *
456
+ * \return
457
+ * - CURAND_STATUS_SUCCESS if the property value was successfully returned \n
458
+ * - CURAND_STATUS_OUT_OF_RANGE if the property type is not recognized \n
459
+ */
460
+ curandStatus_t CURANDAPI
461
+ curandGetProperty(libraryPropertyType type, int *value);
462
+
463
+
464
+ /**
465
+ * \brief Set the current stream for CURAND kernel launches.
466
+ *
467
+ * Set the current stream for CURAND kernel launches. All library functions
468
+ * will use this stream until set again.
469
+ *
470
+ * \param generator - Generator to modify
471
+ * \param stream - Stream to use or ::NULL for null stream
472
+ *
473
+ * \return
474
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
475
+ * - CURAND_STATUS_SUCCESS if stream was set successfully \n
476
+ */
477
+ curandStatus_t CURANDAPI
478
+ curandSetStream(curandGenerator_t generator, cudaStream_t stream);
479
+
480
+ /**
481
+ * \brief Set the seed value of the pseudo-random number generator.
482
+ *
483
+ * Set the seed value of the pseudorandom number generator.
484
+ * All values of seed are valid. Different seeds will produce different sequences.
485
+ * Different seeds will often not be statistically correlated with each other,
486
+ * but some pairs of seed values may generate sequences which are statistically correlated.
487
+ *
488
+ * \param generator - Generator to modify
489
+ * \param seed - Seed value
490
+ *
491
+ * \return
492
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
493
+ * - CURAND_STATUS_TYPE_ERROR if the generator is not a pseudorandom number generator \n
494
+ * - CURAND_STATUS_SUCCESS if generator seed was set successfully \n
495
+ */
496
+ curandStatus_t CURANDAPI
497
+ curandSetPseudoRandomGeneratorSeed(curandGenerator_t generator, unsigned long long seed);
498
+
499
+ /**
500
+ * \brief Set the absolute offset of the pseudo or quasirandom number generator.
501
+ *
502
+ * Set the absolute offset of the pseudo or quasirandom number generator.
503
+ *
504
+ * All values of offset are valid. The offset position is absolute, not
505
+ * relative to the current position in the sequence.
506
+ *
507
+ * \param generator - Generator to modify
508
+ * \param offset - Absolute offset position
509
+ *
510
+ * \return
511
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
512
+ * - CURAND_STATUS_SUCCESS if generator offset was set successfully \n
513
+ */
514
+ curandStatus_t CURANDAPI
515
+ curandSetGeneratorOffset(curandGenerator_t generator, unsigned long long offset);
516
+
517
+ /**
518
+ * \brief Set the ordering of results of the pseudo or quasirandom number generator.
519
+ *
520
+ * Set the ordering of results of the pseudo or quasirandom number generator.
521
+ *
522
+ * Legal values of \p order for pseudorandom generators are:
523
+ * - CURAND_ORDERING_PSEUDO_DEFAULT
524
+ * - CURAND_ORDERING_PSEUDO_BEST
525
+ * - CURAND_ORDERING_PSEUDO_SEEDED
526
+ * - CURAND_ORDERING_PSEUDO_LEGACY
527
+ *
528
+ * Legal values of \p order for quasirandom generators are:
529
+ * - CURAND_ORDERING_QUASI_DEFAULT
530
+ *
531
+ * \param generator - Generator to modify
532
+ * \param order - Ordering of results
533
+ *
534
+ * \return
535
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
536
+ * - CURAND_STATUS_OUT_OF_RANGE if the ordering is not valid \n
537
+ * - CURAND_STATUS_SUCCESS if generator ordering was set successfully \n
538
+ */
539
+ curandStatus_t CURANDAPI
540
+ curandSetGeneratorOrdering(curandGenerator_t generator, curandOrdering_t order);
541
+
542
+ /**
543
+ * \brief Set the number of dimensions.
544
+ *
545
+ * Set the number of dimensions to be generated by the quasirandom number
546
+ * generator.
547
+ *
548
+ * Legal values for \p num_dimensions are 1 to 20000.
549
+ *
550
+ * \param generator - Generator to modify
551
+ * \param num_dimensions - Number of dimensions
552
+ *
553
+ * \return
554
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
555
+ * - CURAND_STATUS_OUT_OF_RANGE if num_dimensions is not valid \n
556
+ * - CURAND_STATUS_TYPE_ERROR if the generator is not a quasirandom number generator \n
557
+ * - CURAND_STATUS_SUCCESS if generator ordering was set successfully \n
558
+ */
559
+ curandStatus_t CURANDAPI
560
+ curandSetQuasiRandomGeneratorDimensions(curandGenerator_t generator, unsigned int num_dimensions);
561
+
562
+ /**
563
+ * \brief Generate 32-bit pseudo or quasirandom numbers.
564
+ *
565
+ * Use \p generator to generate \p num 32-bit results into the device memory at
566
+ * \p outputPtr. The device memory must have been previously allocated and be
567
+ * large enough to hold all the results. Launches are done with the stream
568
+ * set using ::curandSetStream(), or the null stream if no stream has been set.
569
+ *
570
+ * Results are 32-bit values with every bit random.
571
+ *
572
+ * \param generator - Generator to use
573
+ * \param outputPtr - Pointer to device memory to store CUDA-generated results, or
574
+ * Pointer to host memory to store CPU-generated results
575
+ * \param num - Number of random 32-bit values to generate
576
+ *
577
+ * \return
578
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
579
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
580
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
581
+ * a previous kernel launch \n
582
+ * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
583
+ * not a multiple of the quasirandom dimension \n
584
+ * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
585
+ * - CURAND_STATUS_TYPE_ERROR if the generator is a 64 bit quasirandom generator.
586
+ * (use ::curandGenerateLongLong() with 64 bit quasirandom generators)
587
+ * - CURAND_STATUS_SUCCESS if the results were generated successfully \n
588
+ */
589
+ curandStatus_t CURANDAPI
590
+ curandGenerate(curandGenerator_t generator, unsigned int *outputPtr, size_t num);
591
+
592
+ /**
593
+ * \brief Generate 64-bit quasirandom numbers.
594
+ *
595
+ * Use \p generator to generate \p num 64-bit results into the device memory at
596
+ * \p outputPtr. The device memory must have been previously allocated and be
597
+ * large enough to hold all the results. Launches are done with the stream
598
+ * set using ::curandSetStream(), or the null stream if no stream has been set.
599
+ *
600
+ * Results are 64-bit values with every bit random.
601
+ *
602
+ * \param generator - Generator to use
603
+ * \param outputPtr - Pointer to device memory to store CUDA-generated results, or
604
+ * Pointer to host memory to store CPU-generated results
605
+ * \param num - Number of random 64-bit values to generate
606
+ *
607
+ * \return
608
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
609
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
610
+ * a previous kernel launch \n
611
+ * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
612
+ * not a multiple of the quasirandom dimension \n
613
+ * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
614
+ * - CURAND_STATUS_TYPE_ERROR if the generator is not a 64 bit quasirandom generator\n
615
+ * - CURAND_STATUS_SUCCESS if the results were generated successfully \n
616
+ */
617
+ curandStatus_t CURANDAPI
618
+ curandGenerateLongLong(curandGenerator_t generator, unsigned long long *outputPtr, size_t num);
619
+
620
+ /**
621
+ * \brief Generate uniformly distributed floats.
622
+ *
623
+ * Use \p generator to generate \p num float results into the device memory at
624
+ * \p outputPtr. The device memory must have been previously allocated and be
625
+ * large enough to hold all the results. Launches are done with the stream
626
+ * set using ::curandSetStream(), or the null stream if no stream has been set.
627
+ *
628
+ * Results are 32-bit floating point values between \p 0.0f and \p 1.0f,
629
+ * excluding \p 0.0f and including \p 1.0f.
630
+ *
631
+ * \param generator - Generator to use
632
+ * \param outputPtr - Pointer to device memory to store CUDA-generated results, or
633
+ * Pointer to host memory to store CPU-generated results
634
+ * \param num - Number of floats to generate
635
+ *
636
+ * \return
637
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
638
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
639
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
640
+ * a previous kernel launch \n
641
+ * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
642
+ * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
643
+ * not a multiple of the quasirandom dimension \n
644
+ * - CURAND_STATUS_SUCCESS if the results were generated successfully \n
645
+ */
646
+ curandStatus_t CURANDAPI
647
+ curandGenerateUniform(curandGenerator_t generator, float *outputPtr, size_t num);
648
+
649
+ /**
650
+ * \brief Generate uniformly distributed doubles.
651
+ *
652
+ * Use \p generator to generate \p num double results into the device memory at
653
+ * \p outputPtr. The device memory must have been previously allocated and be
654
+ * large enough to hold all the results. Launches are done with the stream
655
+ * set using ::curandSetStream(), or the null stream if no stream has been set.
656
+ *
657
+ * Results are 64-bit double precision floating point values between
658
+ * \p 0.0 and \p 1.0, excluding \p 0.0 and including \p 1.0.
659
+ *
660
+ * \param generator - Generator to use
661
+ * \param outputPtr - Pointer to device memory to store CUDA-generated results, or
662
+ * Pointer to host memory to store CPU-generated results
663
+ * \param num - Number of doubles to generate
664
+ *
665
+ * \return
666
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
667
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
668
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
669
+ * a previous kernel launch \n
670
+ * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
671
+ * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
672
+ * not a multiple of the quasirandom dimension \n
673
+ * - CURAND_STATUS_DOUBLE_PRECISION_REQUIRED if the GPU does not support double precision \n
674
+ * - CURAND_STATUS_SUCCESS if the results were generated successfully \n
675
+ */
676
+ curandStatus_t CURANDAPI
677
+ curandGenerateUniformDouble(curandGenerator_t generator, double *outputPtr, size_t num);
678
+
679
+ /**
680
+ * \brief Generate normally distributed floats.
681
+ *
682
+ * Use \p generator to generate \p n float results into the device memory at
683
+ * \p outputPtr. The device memory must have been previously allocated and be
684
+ * large enough to hold all the results. Launches are done with the stream
685
+ * set using ::curandSetStream(), or the null stream if no stream has been set.
686
+ *
687
+ * Results are 32-bit floating point values with mean \p mean and standard
688
+ * deviation \p stddev.
689
+ *
690
+ * Normally distributed results are generated from pseudorandom generators
691
+ * with a Box-Muller transform, and so require \p n to be even.
692
+ * Quasirandom generators use an inverse cumulative distribution
693
+ * function to preserve dimensionality.
694
+ *
695
+ * There may be slight numerical differences between results generated
696
+ * on the GPU with generators created with ::curandCreateGenerator()
697
+ * and results calculated on the CPU with generators created with
698
+ * ::curandCreateGeneratorHost(). These differences arise because of
699
+ * differences in results for transcendental functions. In addition,
700
+ * future versions of CURAND may use newer versions of the CUDA math
701
+ * library, so different versions of CURAND may give slightly different
702
+ * numerical values.
703
+ *
704
+ * \param generator - Generator to use
705
+ * \param outputPtr - Pointer to device memory to store CUDA-generated results, or
706
+ * Pointer to host memory to store CPU-generated results
707
+ * \param n - Number of floats to generate
708
+ * \param mean - Mean of normal distribution
709
+ * \param stddev - Standard deviation of normal distribution
710
+ *
711
+ * \return
712
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
713
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
714
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
715
+ * a previous kernel launch \n
716
+ * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
717
+ * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
718
+ * not a multiple of the quasirandom dimension, or is not a multiple
719
+ * of two for pseudorandom generators \n
720
+ * - CURAND_STATUS_SUCCESS if the results were generated successfully \n
721
+ */
722
+ curandStatus_t CURANDAPI
723
+ curandGenerateNormal(curandGenerator_t generator, float *outputPtr,
724
+ size_t n, float mean, float stddev);
725
+
726
+ /**
727
+ * \brief Generate normally distributed doubles.
728
+ *
729
+ * Use \p generator to generate \p n double results into the device memory at
730
+ * \p outputPtr. The device memory must have been previously allocated and be
731
+ * large enough to hold all the results. Launches are done with the stream
732
+ * set using ::curandSetStream(), or the null stream if no stream has been set.
733
+ *
734
+ * Results are 64-bit floating point values with mean \p mean and standard
735
+ * deviation \p stddev.
736
+ *
737
+ * Normally distributed results are generated from pseudorandom generators
738
+ * with a Box-Muller transform, and so require \p n to be even.
739
+ * Quasirandom generators use an inverse cumulative distribution
740
+ * function to preserve dimensionality.
741
+ *
742
+ * There may be slight numerical differences between results generated
743
+ * on the GPU with generators created with ::curandCreateGenerator()
744
+ * and results calculated on the CPU with generators created with
745
+ * ::curandCreateGeneratorHost(). These differences arise because of
746
+ * differences in results for transcendental functions. In addition,
747
+ * future versions of CURAND may use newer versions of the CUDA math
748
+ * library, so different versions of CURAND may give slightly different
749
+ * numerical values.
750
+ *
751
+ * \param generator - Generator to use
752
+ * \param outputPtr - Pointer to device memory to store CUDA-generated results, or
753
+ * Pointer to host memory to store CPU-generated results
754
+ * \param n - Number of doubles to generate
755
+ * \param mean - Mean of normal distribution
756
+ * \param stddev - Standard deviation of normal distribution
757
+ *
758
+ * \return
759
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
760
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
761
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
762
+ * a previous kernel launch \n
763
+ * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
764
+ * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
765
+ * not a multiple of the quasirandom dimension, or is not a multiple
766
+ * of two for pseudorandom generators \n
767
+ * - CURAND_STATUS_DOUBLE_PRECISION_REQUIRED if the GPU does not support double precision \n
768
+ * - CURAND_STATUS_SUCCESS if the results were generated successfully \n
769
+ */
770
+ curandStatus_t CURANDAPI
771
+ curandGenerateNormalDouble(curandGenerator_t generator, double *outputPtr,
772
+ size_t n, double mean, double stddev);
773
+
774
+ /**
775
+ * \brief Generate log-normally distributed floats.
776
+ *
777
+ * Use \p generator to generate \p n float results into the device memory at
778
+ * \p outputPtr. The device memory must have been previously allocated and be
779
+ * large enough to hold all the results. Launches are done with the stream
780
+ * set using ::curandSetStream(), or the null stream if no stream has been set.
781
+ *
782
+ * Results are 32-bit floating point values with log-normal distribution based on
783
+ * an associated normal distribution with mean \p mean and standard deviation \p stddev.
784
+ *
785
+ * Normally distributed results are generated from pseudorandom generators
786
+ * with a Box-Muller transform, and so require \p n to be even.
787
+ * Quasirandom generators use an inverse cumulative distribution
788
+ * function to preserve dimensionality.
789
+ * The normally distributed results are transformed into log-normal distribution.
790
+ *
791
+ * There may be slight numerical differences between results generated
792
+ * on the GPU with generators created with ::curandCreateGenerator()
793
+ * and results calculated on the CPU with generators created with
794
+ * ::curandCreateGeneratorHost(). These differences arise because of
795
+ * differences in results for transcendental functions. In addition,
796
+ * future versions of CURAND may use newer versions of the CUDA math
797
+ * library, so different versions of CURAND may give slightly different
798
+ * numerical values.
799
+ *
800
+ * \param generator - Generator to use
801
+ * \param outputPtr - Pointer to device memory to store CUDA-generated results, or
802
+ * Pointer to host memory to store CPU-generated results
803
+ * \param n - Number of floats to generate
804
+ * \param mean - Mean of associated normal distribution
805
+ * \param stddev - Standard deviation of associated normal distribution
806
+ *
807
+ * \return
808
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
809
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
810
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
811
+ * a previous kernel launch \n
812
+ * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
813
+ * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
814
+ * not a multiple of the quasirandom dimension, or is not a multiple
815
+ * of two for pseudorandom generators \n
816
+ * - CURAND_STATUS_SUCCESS if the results were generated successfully \n
817
+ */
818
+ curandStatus_t CURANDAPI
819
+ curandGenerateLogNormal(curandGenerator_t generator, float *outputPtr,
820
+ size_t n, float mean, float stddev);
821
+
822
+ /**
823
+ * \brief Generate log-normally distributed doubles.
824
+ *
825
+ * Use \p generator to generate \p n double results into the device memory at
826
+ * \p outputPtr. The device memory must have been previously allocated and be
827
+ * large enough to hold all the results. Launches are done with the stream
828
+ * set using ::curandSetStream(), or the null stream if no stream has been set.
829
+ *
830
+ * Results are 64-bit floating point values with log-normal distribution based on
831
+ * an associated normal distribution with mean \p mean and standard deviation \p stddev.
832
+ *
833
+ * Normally distributed results are generated from pseudorandom generators
834
+ * with a Box-Muller transform, and so require \p n to be even.
835
+ * Quasirandom generators use an inverse cumulative distribution
836
+ * function to preserve dimensionality.
837
+ * The normally distributed results are transformed into log-normal distribution.
838
+ *
839
+ * There may be slight numerical differences between results generated
840
+ * on the GPU with generators created with ::curandCreateGenerator()
841
+ * and results calculated on the CPU with generators created with
842
+ * ::curandCreateGeneratorHost(). These differences arise because of
843
+ * differences in results for transcendental functions. In addition,
844
+ * future versions of CURAND may use newer versions of the CUDA math
845
+ * library, so different versions of CURAND may give slightly different
846
+ * numerical values.
847
+ *
848
+ * \param generator - Generator to use
849
+ * \param outputPtr - Pointer to device memory to store CUDA-generated results, or
850
+ * Pointer to host memory to store CPU-generated results
851
+ * \param n - Number of doubles to generate
852
+ * \param mean - Mean of normal distribution
853
+ * \param stddev - Standard deviation of normal distribution
854
+ *
855
+ * \return
856
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
857
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
858
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
859
+ * a previous kernel launch \n
860
+ * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
861
+ * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
862
+ * not a multiple of the quasirandom dimension, or is not a multiple
863
+ * of two for pseudorandom generators \n
864
+ * - CURAND_STATUS_DOUBLE_PRECISION_REQUIRED if the GPU does not support double precision \n
865
+ * - CURAND_STATUS_SUCCESS if the results were generated successfully \n
866
+ */
867
+ curandStatus_t CURANDAPI
868
+ curandGenerateLogNormalDouble(curandGenerator_t generator, double *outputPtr,
869
+ size_t n, double mean, double stddev);
870
+
871
+ /**
872
+ * \brief Construct the histogram array for a Poisson distribution.
873
+ *
874
+ * Construct the histogram array for the Poisson distribution with lambda \p lambda.
875
+ * For lambda greater than 2000, an approximation with a normal distribution is used.
876
+ *
877
+ * \param lambda - lambda for the Poisson distribution
878
+ *
879
+ *
880
+ * \param discrete_distribution - pointer to the histogram in device memory
881
+ *
882
+ * \return
883
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
884
+ * - CURAND_STATUS_DOUBLE_PRECISION_REQUIRED if the GPU does not support double precision \n
885
+ * - CURAND_STATUS_INITIALIZATION_FAILED if there was a problem setting up the GPU \n
886
+ * - CURAND_STATUS_NOT_INITIALIZED if the distribution pointer was null \n
887
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
888
+ * a previous kernel launch \n
889
+ * - CURAND_STATUS_OUT_OF_RANGE if lambda is non-positive or greater than 400,000 \n
890
+ * - CURAND_STATUS_SUCCESS if the histogram was generated successfully \n
891
+ */
892
+
893
+ curandStatus_t CURANDAPI
894
+ curandCreatePoissonDistribution(double lambda, curandDiscreteDistribution_t *discrete_distribution);
895
+
896
+
897
+
898
+ /**
899
+ * \brief Destroy the histogram array for a discrete distribution (e.g. Poisson).
900
+ *
901
+ * Destroy the histogram array for a discrete distribution created by curandCreatePoissonDistribution.
902
+ *
903
+ * \param discrete_distribution - pointer to device memory where the histogram is stored
904
+ *
905
+ * \return
906
+ * - CURAND_STATUS_NOT_INITIALIZED if the histogram was never created \n
907
+ * - CURAND_STATUS_SUCCESS if the histogram was destroyed successfully \n
908
+ */
909
+ curandStatus_t CURANDAPI
910
+ curandDestroyDistribution(curandDiscreteDistribution_t discrete_distribution);
911
+
912
+
913
+ /**
914
+ * \brief Generate Poisson-distributed unsigned ints.
915
+ *
916
+ * Use \p generator to generate \p n unsigned int results into device memory at
917
+ * \p outputPtr. The device memory must have been previously allocated and must be
918
+ * large enough to hold all the results. Launches are done with the stream
919
+ * set using ::curandSetStream(), or the null stream if no stream has been set.
920
+ *
921
+ * Results are 32-bit unsigned int point values with Poisson distribution, with lambda \p lambda.
922
+ *
923
+ * \param generator - Generator to use
924
+ * \param outputPtr - Pointer to device memory to store CUDA-generated results, or
925
+ * Pointer to host memory to store CPU-generated results
926
+ * \param n - Number of unsigned ints to generate
927
+ * \param lambda - lambda for the Poisson distribution
928
+ *
929
+ * \return
930
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
931
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
932
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
933
+ * a previous kernel launch \n
934
+ * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
935
+ * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
936
+ * not a multiple of the quasirandom dimension\n
937
+ * - CURAND_STATUS_DOUBLE_PRECISION_REQUIRED if the GPU or sm does not support double precision \n
938
+ * - CURAND_STATUS_OUT_OF_RANGE if lambda is non-positive or greater than 400,000 \n
939
+ * - CURAND_STATUS_SUCCESS if the results were generated successfully \n
940
+ */
941
+
942
+ curandStatus_t CURANDAPI
943
+ curandGeneratePoisson(curandGenerator_t generator, unsigned int *outputPtr,
944
+ size_t n, double lambda);
945
+ // just for internal usage
946
+ curandStatus_t CURANDAPI
947
+ curandGeneratePoissonMethod(curandGenerator_t generator, unsigned int *outputPtr,
948
+ size_t n, double lambda, curandMethod_t method);
949
+
950
+
951
+ curandStatus_t CURANDAPI
952
+ curandGenerateBinomial(curandGenerator_t generator, unsigned int *outputPtr,
953
+ size_t num, unsigned int n, double p);
954
+ // just for internal usage
955
+ curandStatus_t CURANDAPI
956
+ curandGenerateBinomialMethod(curandGenerator_t generator,
957
+ unsigned int *outputPtr,
958
+ size_t num, unsigned int n, double p,
959
+ curandMethod_t method);
960
+
961
+
962
+ /**
963
+ * \brief Setup starting states.
964
+ *
965
+ * Generate the starting state of the generator. This function is
966
+ * automatically called by generation functions such as
967
+ * ::curandGenerate() and ::curandGenerateUniform().
968
+ * It can be called manually for performance testing reasons to separate
969
+ * timings for starting state generation and random number generation.
970
+ *
971
+ * \param generator - Generator to update
972
+ *
973
+ * \return
974
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
975
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
976
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
977
+ * a previous kernel launch \n
978
+ * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
979
+ * - CURAND_STATUS_SUCCESS if the seeds were generated successfully \n
980
+ */
981
+ curandStatus_t CURANDAPI
982
+ curandGenerateSeeds(curandGenerator_t generator);
983
+
984
+ /**
985
+ * \brief Get direction vectors for 32-bit quasirandom number generation.
986
+ *
987
+ * Get a pointer to an array of direction vectors that can be used
988
+ * for quasirandom number generation. The resulting pointer will
989
+ * reference an array of direction vectors in host memory.
990
+ *
991
+ * The array contains vectors for many dimensions. Each dimension
992
+ * has 32 vectors. Each individual vector is an unsigned int.
993
+ *
994
+ * Legal values for \p set are:
995
+ * - CURAND_DIRECTION_VECTORS_32_JOEKUO6 (20,000 dimensions)
996
+ * - CURAND_SCRAMBLED_DIRECTION_VECTORS_32_JOEKUO6 (20,000 dimensions)
997
+ *
998
+ * \param vectors - Address of pointer in which to return direction vectors
999
+ * \param set - Which set of direction vectors to use
1000
+ *
1001
+ * \return
1002
+ * - CURAND_STATUS_OUT_OF_RANGE if the choice of set is invalid \n
1003
+ * - CURAND_STATUS_SUCCESS if the pointer was set successfully \n
1004
+ */
1005
+ curandStatus_t CURANDAPI
1006
+ curandGetDirectionVectors32(curandDirectionVectors32_t *vectors[], curandDirectionVectorSet_t set);
1007
+
1008
+ /**
1009
+ * \brief Get scramble constants for 32-bit scrambled Sobol' .
1010
+ *
1011
+ * Get a pointer to an array of scramble constants that can be used
1012
+ * for quasirandom number generation. The resulting pointer will
1013
+ * reference an array of unsinged ints in host memory.
1014
+ *
1015
+ * The array contains constants for many dimensions. Each dimension
1016
+ * has a single unsigned int constant.
1017
+ *
1018
+ * \param constants - Address of pointer in which to return scramble constants
1019
+ *
1020
+ * \return
1021
+ * - CURAND_STATUS_SUCCESS if the pointer was set successfully \n
1022
+ */
1023
+ curandStatus_t CURANDAPI
1024
+ curandGetScrambleConstants32(unsigned int * * constants);
1025
+
1026
+ /**
1027
+ * \brief Get direction vectors for 64-bit quasirandom number generation.
1028
+ *
1029
+ * Get a pointer to an array of direction vectors that can be used
1030
+ * for quasirandom number generation. The resulting pointer will
1031
+ * reference an array of direction vectors in host memory.
1032
+ *
1033
+ * The array contains vectors for many dimensions. Each dimension
1034
+ * has 64 vectors. Each individual vector is an unsigned long long.
1035
+ *
1036
+ * Legal values for \p set are:
1037
+ * - CURAND_DIRECTION_VECTORS_64_JOEKUO6 (20,000 dimensions)
1038
+ * - CURAND_SCRAMBLED_DIRECTION_VECTORS_64_JOEKUO6 (20,000 dimensions)
1039
+ *
1040
+ * \param vectors - Address of pointer in which to return direction vectors
1041
+ * \param set - Which set of direction vectors to use
1042
+ *
1043
+ * \return
1044
+ * - CURAND_STATUS_OUT_OF_RANGE if the choice of set is invalid \n
1045
+ * - CURAND_STATUS_SUCCESS if the pointer was set successfully \n
1046
+ */
1047
+ curandStatus_t CURANDAPI
1048
+ curandGetDirectionVectors64(curandDirectionVectors64_t *vectors[], curandDirectionVectorSet_t set);
1049
+
1050
+ /**
1051
+ * \brief Get scramble constants for 64-bit scrambled Sobol' .
1052
+ *
1053
+ * Get a pointer to an array of scramble constants that can be used
1054
+ * for quasirandom number generation. The resulting pointer will
1055
+ * reference an array of unsinged long longs in host memory.
1056
+ *
1057
+ * The array contains constants for many dimensions. Each dimension
1058
+ * has a single unsigned long long constant.
1059
+ *
1060
+ * \param constants - Address of pointer in which to return scramble constants
1061
+ *
1062
+ * \return
1063
+ * - CURAND_STATUS_SUCCESS if the pointer was set successfully \n
1064
+ */
1065
+ curandStatus_t CURANDAPI
1066
+ curandGetScrambleConstants64(unsigned long long * * constants);
1067
+
1068
+ /** @} */
1069
+
1070
+ #endif // __CUDACC_RTC__
1071
+
1072
+ #if defined(__cplusplus)
1073
+ }
1074
+ #endif /* __cplusplus */
1075
+
1076
+
1077
+ #endif /* !defined(CURAND_H_) */
mgm/lib/python3.10/site-packages/nvidia/curand/include/curand_discrete.h ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #if !defined(CURANDDISCRETE_H_)
50
+ #define CURANDDISCRETE_H_
51
+
52
+ struct curandDistributionShift_st {
53
+ curandDistribution_t probability;
54
+ curandDistribution_t host_probability;
55
+ unsigned int shift;
56
+ unsigned int length;
57
+ unsigned int host_gen;
58
+ };
59
+
60
+ struct curandHistogramM2_st {
61
+ curandHistogramM2V_t V;
62
+ curandHistogramM2V_t host_V;
63
+ curandHistogramM2K_t K;
64
+ curandHistogramM2K_t host_K;
65
+ unsigned int host_gen;
66
+ };
67
+
68
+
69
+ struct curandDistributionM2Shift_st {
70
+ curandHistogramM2_t histogram;
71
+ curandHistogramM2_t host_histogram;
72
+ unsigned int shift;
73
+ unsigned int length;
74
+ unsigned int host_gen;
75
+ };
76
+
77
+ struct curandDiscreteDistribution_st {
78
+ curandDiscreteDistribution_t self_host_ptr;
79
+ curandDistributionM2Shift_t M2;
80
+ curandDistributionM2Shift_t host_M2;
81
+ double stddev;
82
+ double mean;
83
+ curandMethod_t method;
84
+ unsigned int host_gen;
85
+ };
86
+
87
+ #endif // !defined(CURANDDISCRETE_H_)
mgm/lib/python3.10/site-packages/nvidia/curand/include/curand_discrete2.h ADDED
@@ -0,0 +1,253 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * The source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * The Licensed Deliverables contained herein are PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+
51
+ #if !defined(CURAND_DISCRETE_H_)
52
+ #define CURAND_DISCRETE_H_
53
+
54
+ /**
55
+ * \defgroup DEVICE Device API
56
+ *
57
+ * @{
58
+ */
59
+
60
+ #ifndef __CUDACC_RTC__
61
+ #include <math.h>
62
+ #endif // __CUDACC_RTC__
63
+
64
+ #include "curand_mrg32k3a.h"
65
+ #include "curand_mtgp32_kernel.h"
66
+ #include "curand_philox4x32_x.h"
67
+
68
+
69
+ template <typename T>
70
+ QUALIFIERS unsigned int _curand_discrete(T x, curandDiscreteDistribution_t discrete_distribution){
71
+ if (discrete_distribution->method == CURAND_M2){
72
+ return _curand_M2_double(x, discrete_distribution->M2);
73
+ }
74
+ return (unsigned int)((discrete_distribution->stddev * _curand_normal_icdf_double(x)) + discrete_distribution->mean + 0.5);
75
+ }
76
+
77
+
78
+ template <typename STATE>
79
+ QUALIFIERS unsigned int curand__discrete(STATE state, curandDiscreteDistribution_t discrete_distribution){
80
+ if (discrete_distribution->method == CURAND_M2){
81
+ return curand_M2_double(state, discrete_distribution->M2);
82
+ }
83
+ return (unsigned int)((discrete_distribution->stddev * curand_normal_double(state)) + discrete_distribution->mean + 0.5); //Round to nearest
84
+ }
85
+
86
+ template <typename STATE>
87
+ QUALIFIERS uint4 curand__discrete4(STATE state, curandDiscreteDistribution_t discrete_distribution){
88
+ if (discrete_distribution->method == CURAND_M2){
89
+ return curand_M2_double4(state, discrete_distribution->M2);
90
+ }
91
+ double4 _res;
92
+ uint4 result;
93
+ _res = curand_normal4_double(state);
94
+ result.x = (unsigned int)((discrete_distribution->stddev * _res.x) + discrete_distribution->mean + 0.5); //Round to nearest
95
+ result.y = (unsigned int)((discrete_distribution->stddev * _res.y) + discrete_distribution->mean + 0.5); //Round to nearest
96
+ result.z = (unsigned int)((discrete_distribution->stddev * _res.z) + discrete_distribution->mean + 0.5); //Round to nearest
97
+ result.w = (unsigned int)((discrete_distribution->stddev * _res.w) + discrete_distribution->mean + 0.5); //Round to nearest
98
+ return result;
99
+ }
100
+
101
+ /*
102
+ * \brief Return a discrete distributed unsigned int from a XORWOW generator.
103
+ *
104
+ * Return a single discrete distributed unsigned int derived from a
105
+ * distribution defined by \p discrete_distribution from the XORWOW generator in \p state,
106
+ * increment position of generator by one.
107
+ *
108
+ * \param state - Pointer to state to update
109
+ * \param discrete_distribution - ancillary structure for discrete distribution
110
+ *
111
+ * \return unsigned int distributed by distribution defined by \p discrete_distribution.
112
+ */
113
+ QUALIFIERS unsigned int curand_discrete(curandStateXORWOW_t *state, curandDiscreteDistribution_t discrete_distribution)
114
+ {
115
+ return curand__discrete(state, discrete_distribution);
116
+ }
117
+
118
+ /*
119
+ * \brief Return a discrete distributed unsigned int from a Philox4_32_10 generator.
120
+ *
121
+ * Return a single discrete distributed unsigned int derived from a
122
+ * distribution defined by \p discrete_distribution from the Philox4_32_10 generator in \p state,
123
+ * increment position of generator by one.
124
+ *
125
+ * \param state - Pointer to state to update
126
+ * \param discrete_distribution - ancillary structure for discrete distribution
127
+ *
128
+ * \return unsigned int distributed by distribution defined by \p discrete_distribution.
129
+ */
130
+ QUALIFIERS unsigned int curand_discrete(curandStatePhilox4_32_10_t *state, curandDiscreteDistribution_t discrete_distribution)
131
+ {
132
+ return curand__discrete(state, discrete_distribution);
133
+ }
134
+
135
+ /*
136
+ * \brief Return four discrete distributed unsigned ints from a Philox4_32_10 generator.
137
+ *
138
+ * Return four single discrete distributed unsigned ints derived from a
139
+ * distribution defined by \p discrete_distribution from the Philox4_32_10 generator in \p state,
140
+ * increment position of generator by one.
141
+ *
142
+ * \param state - Pointer to state to update
143
+ * \param discrete_distribution - ancillary structure for discrete distribution
144
+ *
145
+ * \return unsigned int distributed by distribution defined by \p discrete_distribution.
146
+ */
147
+ QUALIFIERS uint4 curand_discrete4(curandStatePhilox4_32_10_t *state, curandDiscreteDistribution_t discrete_distribution)
148
+ {
149
+ return curand__discrete4(state, discrete_distribution);
150
+ }
151
+ /*
152
+ * \brief Return a discrete distributed unsigned int from a MRG32k3a generator.
153
+ *
154
+ * Re turn a single discrete distributed unsigned int derived from a
155
+ * distribution defined by \p discrete_distribution from the MRG32k3a generator in \p state,
156
+ * increment position of generator by one.
157
+ *
158
+ * \param state - Pointer to state to update
159
+ * \param discrete_distribution - ancillary structure for discrete distribution
160
+ *
161
+ * \return unsigned int distributed by distribution defined by \p discrete_distribution.
162
+ */
163
+ QUALIFIERS unsigned int curand_discrete(curandStateMRG32k3a_t *state, curandDiscreteDistribution_t discrete_distribution)
164
+ {
165
+ return curand__discrete(state, discrete_distribution);
166
+ }
167
+
168
+ /*
169
+ * \brief Return a discrete distributed unsigned int from a MTGP32 generator.
170
+ *
171
+ * Return a single discrete distributed unsigned int derived from a
172
+ * distribution defined by \p discrete_distribution from the MTGP32 generator in \p state,
173
+ * increment position of generator by one.
174
+ *
175
+ * \param state - Pointer to state to update
176
+ * \param discrete_distribution - ancillary structure for discrete distribution
177
+ *
178
+ * \return unsigned int distributed by distribution defined by \p discrete_distribution.
179
+ */
180
+ QUALIFIERS unsigned int curand_discrete(curandStateMtgp32_t *state, curandDiscreteDistribution_t discrete_distribution)
181
+ {
182
+ return curand__discrete(state, discrete_distribution);
183
+ }
184
+
185
+ /*
186
+ * \brief Return a discrete distributed unsigned int from a Sobol32 generator.
187
+ *
188
+ * Return a single discrete distributed unsigned int derived from a
189
+ * distribution defined by \p discrete_distribution from the Sobol32 generator in \p state,
190
+ * increment position of generator by one.
191
+ *
192
+ * \param state - Pointer to state to update
193
+ * \param discrete_distribution - ancillary structure for discrete distribution
194
+ *
195
+ * \return unsigned int distributed by distribution defined by \p discrete_distribution.
196
+ */
197
+ QUALIFIERS unsigned int curand_discrete(curandStateSobol32_t *state, curandDiscreteDistribution_t discrete_distribution)
198
+ {
199
+ return curand__discrete(state, discrete_distribution);
200
+ }
201
+
202
+ /*
203
+ * \brief Return a discrete distributed unsigned int from a scrambled Sobol32 generator.
204
+ *
205
+ * Return a single discrete distributed unsigned int derived from a
206
+ * distribution defined by \p discrete_distribution from the scrambled Sobol32 generator in \p state,
207
+ * increment position of generator by one.
208
+ *
209
+ * \param state - Pointer to state to update
210
+ * \param discrete_distribution - ancillary structure for discrete distribution
211
+ *
212
+ * \return unsigned int distributed by distribution defined by \p discrete_distribution.
213
+ */
214
+ QUALIFIERS unsigned int curand_discrete(curandStateScrambledSobol32_t *state, curandDiscreteDistribution_t discrete_distribution)
215
+ {
216
+ return curand__discrete(state, discrete_distribution);
217
+ }
218
+
219
+ /*
220
+ * \brief Return a discrete distributed unsigned int from a Sobol64 generator.
221
+ *
222
+ * Return a single discrete distributed unsigned int derived from a
223
+ * distribution defined by \p discrete_distribution from the Sobol64 generator in \p state,
224
+ * increment position of generator by one.
225
+ *
226
+ * \param state - Pointer to state to update
227
+ * \param discrete_distribution - ancillary structure for discrete distribution
228
+ *
229
+ * \return unsigned int distributed by distribution defined by \p discrete_distribution.
230
+ */
231
+ QUALIFIERS unsigned int curand_discrete(curandStateSobol64_t *state, curandDiscreteDistribution_t discrete_distribution)
232
+ {
233
+ return curand__discrete(state, discrete_distribution);
234
+ }
235
+
236
+ /*
237
+ * \brief Return a discrete distributed unsigned int from a scrambled Sobol64 generator.
238
+ *
239
+ * Return a single discrete distributed unsigned int derived from a
240
+ * distribution defined by \p discrete_distribution from the scrambled Sobol64 generator in \p state,
241
+ * increment position of generator by one.
242
+ *
243
+ * \param state - Pointer to state to update
244
+ * \param discrete_distribution - ancillary structure for discrete distribution
245
+ *
246
+ * \return unsigned int distributed by distribution defined by \p discrete_distribution.
247
+ */
248
+ QUALIFIERS unsigned int curand_discrete(curandStateScrambledSobol64_t *state, curandDiscreteDistribution_t discrete_distribution)
249
+ {
250
+ return curand__discrete(state, discrete_distribution);
251
+ }
252
+
253
+ #endif // !defined(CURAND_DISCRETE_H_)
mgm/lib/python3.10/site-packages/nvidia/curand/include/curand_globals.h ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
#ifndef CURAND_GLOBALS_H
#define CURAND_GLOBALS_H

/* Number of 32-bit state words in the largest XORSHIFT-family generator
   (XORWOW uses 5); sizes the scratch matrices used by skipahead. */
#define MAX_XOR_N (5)
/* skipahead consumes the skip count SKIPAHEAD_BLOCKSIZE bits at a time. */
#define SKIPAHEAD_BLOCKSIZE (4)
#define SKIPAHEAD_MASK ((1<<SKIPAHEAD_BLOCKSIZE)-1)

/* Powers of two and their reciprocals used when mapping integer draws
   to floating-point values, plus pi-related constants. */
#define CURAND_2POW32 (4294967296.f)
#define CURAND_2POW32_DOUBLE (4294967296.)
#define CURAND_2POW32_INV (2.3283064e-10f)
#define CURAND_2POW32_INV_DOUBLE (2.3283064365386963e-10)
#define CURAND_2POW53_INV_DOUBLE (1.1102230246251565e-16)
#define CURAND_2POW32_INV_2PI (2.3283064e-10f * 6.2831855f)
#define CURAND_2PI (6.2831855f)
#define CURAND_2POW53_INV_2PI_DOUBLE (1.1102230246251565e-16 * 6.2831853071795860)
#define CURAND_PI_DOUBLE (3.1415926535897932)
#define CURAND_2PI_DOUBLE (6.2831853071795860)
/* NOTE(review): the sqrt(2) constants are defined NEGATIVE; the sign is
   presumably folded into the downstream formulas that consume them —
   confirm before reusing these constants elsewhere. */
#define CURAND_SQRT2 (-1.4142135f)
#define CURAND_SQRT2_DOUBLE (-1.4142135623730951)

/* Tuning thresholds; exact roles are not evident from this header —
   see the generator implementations that consume them (not in view). */
#define SOBOL64_ITR_BINARY_DIVIDE 2
#define SOBOL_M2_BINARY_DIVIDE 10
#define MTGP32_M2_BINARY_DIVIDE 32
/* Poisson-related limits — NOTE(review): presumably the supported lambda
   ceiling and the switch-over point to a Gaussian approximation; confirm
   against the Poisson implementation (not in view). */
#define MAX_LAMBDA 400000
#define MIN_GAUSS_LAMBDA 2000

/* Argument block for normal-distribution parameters (single precision). */
struct normal_args_st {
    float mean;   /* mean of the distribution */
    float stddev; /* standard deviation of the distribution */
};

typedef struct normal_args_st normal_args_t;

/* Argument block for normal-distribution parameters (double precision). */
struct normal_args_double_st {
    double mean;   /* mean of the distribution */
    double stddev; /* standard deviation of the distribution */
};

typedef struct normal_args_double_st normal_args_double_t;

#endif
mgm/lib/python3.10/site-packages/nvidia/curand/include/curand_kernel.h ADDED
@@ -0,0 +1,1665 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * The source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * The Licensed Deliverables contained herein are PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+
51
+ #if !defined(CURAND_KERNEL_H_)
52
+ #define CURAND_KERNEL_H_
53
+
54
+ /**
55
+ * \defgroup DEVICE Device API
56
+ *
57
+ * @{
58
+ */
59
+
60
+ #if !defined(QUALIFIERS)
61
+ #define QUALIFIERS static __forceinline__ __device__
62
+ #endif
63
+
64
+
65
+ #ifdef __CUDACC_RTC__
66
+ #define CURAND_DETAIL_USE_CUDA_STL
67
+ #endif
68
+
69
+ #if __cplusplus >= 201103L
70
+ # ifdef CURAND_DETAIL_USE_CUDA_STL
71
+ # define CURAND_STD cuda::std
72
+ # include <cuda/std/type_traits>
73
+ # else
74
+ # define CURAND_STD std
75
+ # include <type_traits>
76
+ # endif // CURAND_DETAIL_USE_CUDA_STL
77
+ #else
78
+ // To support C++03 compilation
79
+ # define CURAND_STD curand_detail
80
// Minimal C++03 stand-ins for std::enable_if and std::is_same, used when
// neither <type_traits> nor <cuda/std/type_traits> is available
// (pre-C++11 compilation path; see CURAND_STD above).
namespace curand_detail {
template<bool B, class T = void>
struct enable_if {};

// Only the true specialization exposes ::type, enabling SFINAE.
template<class T>
struct enable_if<true, T> { typedef T type; };

template<class T, class U>
struct is_same { static const bool value = false; };

// Partial specialization selected when both type arguments are identical.
template<class T>
struct is_same<T, T> { static const bool value = true; };
} // namespace curand_detail
93
+ #endif // __cplusplus >= 201103L
94
+
95
+ #ifndef __CUDACC_RTC__
96
+ #include <math.h>
97
+ #endif // __CUDACC_RTC__
98
+
99
+ #include "curand.h"
100
+ #include "curand_discrete.h"
101
+ #include "curand_precalc.h"
102
+ #include "curand_mrg32k3a.h"
103
+ #include "curand_mtgp32_kernel.h"
104
+ #include "curand_philox4x32_x.h"
105
+ #include "curand_globals.h"
106
+
107
+ /* Test RNG */
108
+ /* This generator uses the formula:
109
+ x_n = x_(n-1) + 1 mod 2^32
110
+ x_0 = (unsigned int)seed * 3
111
+ Subsequences are spaced 31337 steps apart.
112
+ */
113
/* State for the trivial test RNG: x_n = x_(n-1) + 1 mod 2^32
   (see the generator comment above). */
struct curandStateTest {
    unsigned int v; /* current counter value */
};

/** \cond UNHIDE_TYPEDEFS */
typedef struct curandStateTest curandStateTest_t;
/** \endcond */
120
+
121
+ /* XORSHIFT FAMILY RNGs */
122
+ /* These generators are a family proposed by Marsaglia. They keep state
123
+ in 32 bit chunks, then use repeated shift and xor operations to scramble
124
+ the bits. The following generators are a combination of a simple Weyl
125
+ generator with an N variable XORSHIFT generator.
126
+ */
127
+
128
+ /* XORSHIFT RNG */
129
+ /* This generator uses the xorwow formula of
130
+ www.jstatsoft.org/v08/i14/paper page 5
131
+ Has period 2^192 - 2^32.
132
+ */
133
+ /**
134
+ * CURAND XORWOW state
135
+ */
136
+ struct curandStateXORWOW;
137
+
138
/*
 * Implementation details not in reference documentation */
struct curandStateXORWOW {
    /* d: Weyl-sequence counter; v: five 32-bit words of xorshift state */
    unsigned int d, v[5];
    /* Flags/caches — presumably hold the second output of a Box-Muller
       pair between normal draws (set-up code zeroes them in
       _curand_init_scratch); confirm against the normal generators
       (not in view). */
    int boxmuller_flag;
    int boxmuller_flag_double;
    float boxmuller_extra;
    double boxmuller_extra_double;
};

/*
 * CURAND XORWOW state
 */
/** \cond UNHIDE_TYPEDEFS */
typedef struct curandStateXORWOW curandStateXORWOW_t;
153
+
154
+ #define EXTRA_FLAG_NORMAL 0x00000001
155
+ #define EXTRA_FLAG_LOG_NORMAL 0x00000002
156
+ /** \endcond */
157
+
158
+ /* Combined Multiple Recursive Generators */
159
+ /* These generators are a family proposed by L'Ecuyer. They keep state
160
+ in sets of doubles, then use repeated modular arithmetic multiply operations
161
+ to scramble the bits in each set, and combine the result.
162
+ */
163
+
164
+ /* MRG32k3a RNG */
165
+ /* This generator uses the MRG32k3A formula of
166
+ http://www.iro.umontreal.ca/~lecuyer/myftp/streams00/c++/streams4.pdf
167
+ Has period 2^191.
168
+ */
169
+
170
/* moduli for the two MRG32k3a component recursions */
/** \cond UNHIDE_DEFINES */
#define MRG32K3A_MOD1 4294967087.
#define MRG32K3A_MOD2 4294944443.

/* Recurrence multipliers used in generation (A13N/A23N are the
   negated coefficients). */

#define MRG32K3A_A12 1403580.
#define MRG32K3A_A13N 810728.
#define MRG32K3A_A21 527612.
#define MRG32K3A_A23N 1370589.
/* 1/(MOD1+1): maps a state value to (0,1). */
#define MRG32K3A_NORM (2.3283065498378288e-10)
//
// #define MRG32K3A_BITS_NORM ((double)((POW32_DOUBLE-1.0)/MOD1))
// above constant, used verbatim, rounds differently on some host systems.
#define MRG32K3A_BITS_NORM 1.000000048662

/** \endcond */
188
+
189
+
190
+
191
+
192
+ /**
193
+ * CURAND MRG32K3A state
194
+ */
195
+ struct curandStateMRG32k3a;
196
+
197
/* Implementation details not in reference documentation */
struct curandStateMRG32k3a {
    unsigned int s1[3]; /* state of the first recurrence (values < MRG32K3A_MOD1) */
    unsigned int s2[3]; /* state of the second recurrence (values < MRG32K3A_MOD2) */
    /* Box-Muller caches — presumably the pending second normal draw;
       confirm against the normal generators (not in view). */
    int boxmuller_flag;
    int boxmuller_flag_double;
    float boxmuller_extra;
    double boxmuller_extra_double;
};

/*
 * CURAND MRG32K3A state
 */
/** \cond UNHIDE_TYPEDEFS */
typedef struct curandStateMRG32k3a curandStateMRG32k3a_t;
212
+ /** \endcond */
213
+
214
+ /* SOBOL QRNG */
215
+ /**
216
+ * CURAND Sobol32 state
217
+ */
218
+ struct curandStateSobol32;
219
+
220
/* Implementation details not in reference documentation */
struct curandStateSobol32 {
    /* i, x, c: generator position, current draw, and auxiliary counter —
       NOTE(review): exact roles are set by the Sobol32 curand_init,
       which is not in this chunk; confirm there. */
    unsigned int i, x, c;
    unsigned int direction_vectors[32]; /* one direction number per output bit */
};

/*
 * CURAND Sobol32 state
 */
/** \cond UNHIDE_TYPEDEFS */
typedef struct curandStateSobol32 curandStateSobol32_t;
231
+ /** \endcond */
232
+
233
+ /**
234
+ * CURAND Scrambled Sobol32 state
235
+ */
236
+ struct curandStateScrambledSobol32;
237
+
238
/* Implementation details not in reference documentation.
   Layout intentionally matches curandStateSobol32. */
struct curandStateScrambledSobol32 {
    /* i, x, c: generator position, current draw, and auxiliary counter —
       NOTE(review): roles set by the scrambled Sobol32 init (not in view). */
    unsigned int i, x, c;
    unsigned int direction_vectors[32]; /* one direction number per output bit */
};

/*
 * CURAND Scrambled Sobol32 state
 */
/** \cond UNHIDE_TYPEDEFS */
typedef struct curandStateScrambledSobol32 curandStateScrambledSobol32_t;
249
+ /** \endcond */
250
+
251
+ /**
252
+ * CURAND Sobol64 state
253
+ */
254
+ struct curandStateSobol64;
255
+
256
/* Implementation details not in reference documentation.
   64-bit variant of curandStateSobol32. */
struct curandStateSobol64 {
    /* i, x, c: generator position, current draw, and auxiliary counter —
       NOTE(review): roles set by the Sobol64 init (not in view). */
    unsigned long long i, x, c;
    unsigned long long direction_vectors[64]; /* one direction number per output bit */
};

/*
 * CURAND Sobol64 state
 */
/** \cond UNHIDE_TYPEDEFS */
typedef struct curandStateSobol64 curandStateSobol64_t;
267
+ /** \endcond */
268
+
269
+ /**
270
+ * CURAND Scrambled Sobol64 state
271
+ */
272
+ struct curandStateScrambledSobol64;
273
+
274
/* Implementation details not in reference documentation.
   Layout intentionally matches curandStateSobol64. */
struct curandStateScrambledSobol64 {
    /* i, x, c: generator position, current draw, and auxiliary counter —
       NOTE(review): roles set by the scrambled Sobol64 init (not in view). */
    unsigned long long i, x, c;
    unsigned long long direction_vectors[64]; /* one direction number per output bit */
};

/*
 * CURAND Scrambled Sobol64 state
 */
/** \cond UNHIDE_TYPEDEFS */
typedef struct curandStateScrambledSobol64 curandStateScrambledSobol64_t;
285
+ /** \endcond */
286
+
287
+ /*
288
+ * Default RNG
289
+ */
290
+ /** \cond UNHIDE_TYPEDEFS */
291
+ typedef struct curandStateXORWOW curandState_t;
292
+ typedef struct curandStateXORWOW curandState;
293
+ /** \endcond */
294
+
295
+ /****************************************************************************/
296
+ /* Utility functions needed by RNGs */
297
+ /****************************************************************************/
298
+ /** \cond UNHIDE_UTILITIES */
299
+ /*
300
+ multiply vector by matrix, store in result
301
+ matrix is n x n, measured in 32 bit units
302
+ matrix is stored in row major order
303
+ vector and result cannot be same pointer
304
+ */
305
/* Multiply vector by an (N*32 x N*32) GF(2) matrix, in place.
   The matrix is stored row-major, 32 state bits packed per word.
   A local accumulator is used, so vector may be updated safely. */
template<int N>
QUALIFIERS void __curand_matvec_inplace(unsigned int *vector, unsigned int *matrix)
{
    unsigned int result[N] = { 0 };
    for(int i = 0; i < N; i++) {
#ifdef __CUDA_ARCH__
#pragma unroll 16
#endif
        for(int j = 0; j < 32; j++) {
            /* Each set input bit XORs one matrix row into the product. */
            if(vector[i] & (1 << j)) {
                for(int k = 0; k < N; k++) {
                    result[k] ^= matrix[N * (i * 32 + j) + k];
                }
            }
        }
    }
    /* Write the product back over the input. */
    for(int i = 0; i < N; i++) {
        vector[i] = result[i];
    }
}
325
+
326
/* Multiply a bit vector (n words, 32 bits each) by an (n*32 x n*32)
   GF(2) matrix stored row-major, writing the product into result.
   vector and result must not be the same pointer. */
QUALIFIERS void __curand_matvec(unsigned int *vector, unsigned int *matrix,
                                unsigned int *result, int n)
{
    for(int w = 0; w < n; w++) {
        result[w] = 0;
    }
    /* Each set bit of the input selects one matrix row to XOR in. */
    for(int w = 0; w < n; w++) {
        unsigned int bits = vector[w];
        for(int b = 0; b < 32; b++) {
            if(bits & (1u << b)) {
                unsigned int *row = matrix + n * (w * 32 + b);
                for(int k = 0; k < n; k++) {
                    result[k] ^= row[k];
                }
            }
        }
    }
}
342
+
343
/* Fill matrix with the (n*32 x n*32) GF(2) identity, stored row-major
   with 32 bits packed per word. */
QUALIFIERS void __curand_matidentity(unsigned int *matrix, int n)
{
    for(int row = 0; row < n * 32; row++) {
        int word = row / 32;   /* which word of the row holds the 1 bit */
        int bit = row & 31;    /* which bit within that word */
        for(int col = 0; col < n; col++) {
            matrix[row * n + col] = (col == word) ? (1u << bit) : 0u;
        }
    }
}
358
+
359
+ /* multiply matrixA by matrixB, store back in matrixA
360
+ matrixA and matrixB must not be same matrix */
361
+ QUALIFIERS void __curand_matmat(unsigned int *matrixA, unsigned int *matrixB, int n)
362
+ {
363
+ unsigned int result[MAX_XOR_N];
364
+ for(int i = 0; i < n * 32; i++) {
365
+ __curand_matvec(matrixA + i * n, matrixB, result, n);
366
+ for(int j = 0; j < n; j++) {
367
+ matrixA[i * n + j] = result[j];
368
+ }
369
+ }
370
+ }
371
+
372
/* Copy the n-word bit vector vectorA into vector. */
QUALIFIERS void __curand_veccopy(unsigned int *vector, unsigned int *vectorA, int n)
{
    int idx = 0;
    while(idx < n) {
        vector[idx] = vectorA[idx];
        idx++;
    }
}
379
+
380
/* Copy an (n*32 x n*32) packed GF(2) matrix (n*n*32 words) from
   matrixA into matrix. */
QUALIFIERS void __curand_matcopy(unsigned int *matrix, unsigned int *matrixA, int n)
{
    const int total = n * n * 32;
    for(int idx = 0; idx < total; idx++) {
        matrix[idx] = matrixA[idx];
    }
}
387
+
388
/* Compute matrixA raised to the power p over GF(2) by square-and-multiply,
   storing the result in matrix. Scratch matrices are sized for the
   largest supported generator (MAX_XOR_N words of state). */
QUALIFIERS void __curand_matpow(unsigned int *matrix, unsigned int *matrixA,
                                unsigned long long p, int n)
{
    unsigned int matrixR[MAX_XOR_N * MAX_XOR_N * 32]; /* running power A^(2^k) */
    unsigned int matrixS[MAX_XOR_N * MAX_XOR_N * 32]; /* copy buffer for squaring */
    __curand_matidentity(matrix, n);
    __curand_matcopy(matrixR, matrixA, n);
    while(p) {
        /* Multiply in the current power when its bit of p is set. */
        if(p & 1) {
            __curand_matmat(matrix, matrixR, n);
        }
        /* Square matrixR via the copy (operands of __curand_matmat
           must be distinct). */
        __curand_matcopy(matrixS, matrixR, n);
        __curand_matmat(matrixR, matrixS, n);
        p >>= 1;
    }
}
405
+
406
+ /****************************************************************************/
407
+ /* Utility functions needed by MRG32k3a RNG */
408
+ /* Matrix operations modulo some integer less than 2**32, done in */
409
+ /* double precision floating point, with care not to overflow 53 bits */
410
+ /****************************************************************************/
411
+
412
+ /* return i mod m. */
413
+ /* assumes i and m are integers represented accurately in doubles */
414
+
415
+ QUALIFIERS double curand_MRGmod(double i, double m)
416
+ {
417
+ double quo;
418
+ double rem;
419
+ quo = floor(i/m);
420
+ rem = i - (quo*m);
421
+ if (rem < 0.0) rem += m;
422
+ return rem;
423
+ }
424
+
425
/* Multiplication modulo m. Inputs i and j less than 2**32. */
/* Splits i into high/low 17-bit-scaled halves so every intermediate
   product stays below 2**53 and remains exact in a double. */

QUALIFIERS double curand_MRGmodMul(double i, double j, double m)
{
    double tempHi;
    double tempLo;

    /* i = tempHi * 2^17 + tempLo (131072 == 2^17). */
    tempHi = floor(i/131072.0);
    tempLo = i - (tempHi*131072.0);
    /* (i * j) mod m assembled from the two exact partial products. */
    tempLo = curand_MRGmod( curand_MRGmod( (tempHi * j), m) * 131072.0 + curand_MRGmod(tempLo * j, m),m);

    if (tempLo < 0.0) tempLo += m;
    return tempLo;
}
440
+
441
/* Multiply 3x3 matrices of (double-representable) integers modulo m:
   o = i1 * i2 mod m. The full product is buffered in a local array
   before being written out, so o may alias i1 and/or i2 (relied on by
   curnand_MRGmatPow2Pow3x3 below). */

QUALIFIERS void curand_MRGmatMul3x3(unsigned int i1[][3],unsigned int i2[][3],unsigned int o[][3],double m)
{
    int i,j;
    double temp[3][3];
    for (i=0; i<3; i++){
        for (j=0; j<3; j++){
            /* Dot product of row i of i1 with column j of i2, each
               term reduced mod m to stay within double precision. */
            temp[i][j] = ( curand_MRGmodMul(i1[i][0], i2[0][j], m) +
                           curand_MRGmodMul(i1[i][1], i2[1][j], m) +
                           curand_MRGmodMul(i1[i][2], i2[2][j], m));
            temp[i][j] = curand_MRGmod( temp[i][j], m );
        }
    }
    /* Write back only after the whole product is computed (aliasing safety). */
    for (i=0; i<3; i++){
        for (j=0; j<3; j++){
            o[i][j] = (unsigned int)temp[i][j];
        }
    }
}
461
+
462
/* Multiply a 3x3 matrix by a 3x1 vector modulo m, in place: v = i*v mod m.
   The product is buffered locally before writing back, so v may be a
   row of i (relied on by curand_MRGmatPow3x3 below). */

QUALIFIERS void curand_MRGmatVecMul3x3( unsigned int i[][3], unsigned int v[], double m)
{
    int k;
    double t[3];
    for (k = 0; k < 3; k++) {
        t[k] = ( curand_MRGmodMul(i[k][0], v[0], m) +
                 curand_MRGmodMul(i[k][1], v[1], m) +
                 curand_MRGmodMul(i[k][2], v[2], m) );
        t[k] = curand_MRGmod( t[k], m );
    }
    /* Write back after all three components are computed. */
    for (k = 0; k < 3; k++) {
        v[k] = (unsigned int)t[k];
    }

}
479
+
480
/* Raise a 3x3 matrix to a 64-bit integer power pow, modulo m.
   in is an array of precomputed 3x3 matrices with in[index] = in[0]**(2**index);
   the result (in[0]**pow mod m) is accumulated into o via binary
   decomposition of pow. */

QUALIFIERS void curand_MRGmatPow3x3( unsigned int in[][3][3], unsigned int o[][3], double m, unsigned long long pow )
{
    int i,j;
    /* Start from the identity matrix. */
    for ( i = 0; i < 3; i++ ) {
        for ( j = 0; j < 3; j++ ) {
            o[i][j] = 0;
            if ( i == j ) o[i][j] = 1;
        }
    }
    i = 0;
    /* NOTE(review): multiplying the identity by its own first row
       (1,0,0) leaves it unchanged — this call appears to be a no-op,
       preserved verbatim from the original. */
    curand_MRGmatVecMul3x3(o,o[0],m);
    while (pow) {
        /* Multiply in the precomputed power for each set bit of pow. */
        if ( pow & 1ll ) {
            curand_MRGmatMul3x3(in[i], o, o, m);
        }
        i++;
        pow >>= 1;
    }
}
503
+
504
/* Raise a 3x3 matrix to the power 2**(pow mod 191), modulo m, by
   repeated squaring (pow squarings of the input). The exponent is
   reduced mod 191 because the generator period is 2**191.
   NOTE(review): the "curnand_" spelling is a typo in the original
   header; it is part of the public interface and must not be renamed. */

QUALIFIERS void curnand_MRGmatPow2Pow3x3( double in[][3], double o[][3], double m, unsigned long pow )
{
    unsigned int temp[3][3];
    int i,j;
    pow = pow % 191;
    for ( i = 0; i < 3; i++ ) {
        for ( j = 0; j < 3; j++ ) {
            temp[i][j] = (unsigned int)in[i][j];
        }
    }
    /* Square pow times. Passing temp as all three arguments is safe:
       curand_MRGmatMul3x3 buffers the product before writing back. */
    while (pow) {
        curand_MRGmatMul3x3(temp, temp, temp, m);
        pow--;
    }
    for ( i = 0; i < 3; i++ ) {
        for ( j = 0; j < 3; j++ ) {
            o[i][j] = temp[i][j];
        }
    }
}
527
+
528
+ /** \endcond */
529
+
530
+ /****************************************************************************/
531
+ /* Kernel implementations of RNGs */
532
+ /****************************************************************************/
533
+
534
+ /* Test RNG */
535
+
536
+ QUALIFIERS void curand_init(unsigned long long seed,
537
+ unsigned long long subsequence,
538
+ unsigned long long offset,
539
+ curandStateTest_t *state)
540
+ {
541
+ state->v = (unsigned int)(seed * 3) + (unsigned int)(subsequence * 31337) + \
542
+ (unsigned int)offset;
543
+ }
544
+
545
+
546
+ QUALIFIERS unsigned int curand(curandStateTest_t *state)
547
+ {
548
+ unsigned int r = state->v++;
549
+ return r;
550
+ }
551
+
552
QUALIFIERS void skipahead(unsigned long long n, curandStateTest_t *state)
{
    /* The test generator advances by exactly 1 per draw, so skipping n
       draws just adds n (mod 2^32). */
    state->v += (unsigned int)n;
}
556
+
557
+ /* XORWOW RNG */
558
+
559
/* Build the one-step transition matrix of an XORSHIFT-family generator.
   With the Weyl counter d zeroed, the v[] update is linear over GF(2),
   so the matrix is recovered by stepping each single-bit basis state.
   T must expose fields d and v[n] and be accepted by curand(T*);
   matrix receives n*32 rows of n packed words each. */
template <typename T, int n>
QUALIFIERS void __curand_generate_skipahead_matrix_xor(unsigned int matrix[])
{
    T state;
    // Generate matrix that advances one step
    // matrix has n * n * 32 32-bit elements
    // solve for matrix by stepping single bit states
    for(int i = 0; i < 32 * n; i++) {
        state.d = 0;
        for(int j = 0; j < n; j++) {
            state.v[j] = 0;
        }
        /* Basis state: only bit i of the packed state is set. */
        state.v[i / 32] = (1 << (i & 31));
        curand(&state);
        /* The advanced basis state is row i of the transition matrix. */
        for(int j = 0; j < n; j++) {
            matrix[i * n + j] = state.v[j];
        }
    }
}
578
+
579
/* Advance an XORSHIFT-family state by x steps within its sequence.
   Caller provides scratch memory of at least 2*n*n*32 + 2*n words,
   carved up below into two matrices and two vectors.
   The low bits of x are handled with precalculated offset matrices
   (device or host tables); any remaining high bits fall back to
   square-and-multiply starting from the largest precomputed matrix. */
template <typename T, int n>
QUALIFIERS void _skipahead_scratch(unsigned long long x, T *state, unsigned int *scratch)
{
    // unsigned int matrix[n * n * 32];
    unsigned int *matrix = scratch;
    // unsigned int matrixA[n * n * 32];
    unsigned int *matrixA = scratch + (n * n * 32);
    // unsigned int vector[n];
    unsigned int *vector = scratch + (n * n * 32) + (n * n * 32);
    // unsigned int result[n];
    unsigned int *result = scratch + (n * n * 32) + (n * n * 32) + n;
    unsigned long long p = x;
    for(int i = 0; i < n; i++) {
        vector[i] = state->v[i];
    }
    /* Consume PRECALC_BLOCK_SIZE bits of the skip count per precalculated
       matrix: apply matrix[matrix_num] (p & mask) times. */
    int matrix_num = 0;
    while(p && (matrix_num < PRECALC_NUM_MATRICES - 1)) {
        for(unsigned int t = 0; t < (p & PRECALC_BLOCK_MASK); t++) {
#ifdef __CUDA_ARCH__
            __curand_matvec(vector, precalc_xorwow_offset_matrix[matrix_num], result, n);
#else
            __curand_matvec(vector, precalc_xorwow_offset_matrix_host[matrix_num], result, n);
#endif
            __curand_veccopy(vector, result, n);
        }
        p >>= PRECALC_BLOCK_SIZE;
        matrix_num++;
    }
    /* Bits beyond the precalculated tables: seed both scratch matrices
       with the largest precomputed power. */
    if(p) {
#ifdef __CUDA_ARCH__
        __curand_matcopy(matrix, precalc_xorwow_offset_matrix[PRECALC_NUM_MATRICES - 1], n);
        __curand_matcopy(matrixA, precalc_xorwow_offset_matrix[PRECALC_NUM_MATRICES - 1], n);
#else
        __curand_matcopy(matrix, precalc_xorwow_offset_matrix_host[PRECALC_NUM_MATRICES - 1], n);
        __curand_matcopy(matrixA, precalc_xorwow_offset_matrix_host[PRECALC_NUM_MATRICES - 1], n);
#endif
    }
    /* Square-and-multiply over SKIPAHEAD_BLOCKSIZE-bit groups of the
       remaining count. */
    while(p) {
        for(unsigned int t = 0; t < (p & SKIPAHEAD_MASK); t++) {
            __curand_matvec(vector, matrixA, result, n);
            __curand_veccopy(vector, result, n);
        }
        p >>= SKIPAHEAD_BLOCKSIZE;
        if(p) {
            /* matrixA <- matrixA^(2^SKIPAHEAD_BLOCKSIZE) */
            for(int i = 0; i < SKIPAHEAD_BLOCKSIZE; i++) {
                __curand_matmat(matrix, matrixA, n);
                __curand_matcopy(matrixA, matrix, n);
            }
        }
    }
    for(int i = 0; i < n; i++) {
        state->v[i] = vector[i];
    }
    /* The Weyl counter advances by a fixed constant (362437) per step. */
    state->d += 362437 * (unsigned int)x;
}
634
+
635
/* Advance an XORSHIFT-family state by x whole subsequences, using the
   precalculated sequence matrices (precalc_xorwow_matrix*) instead of
   the per-element offset matrices. Scratch layout matches
   _skipahead_scratch: two n*n*32-word matrices then two n-word vectors. */
template <typename T, int n>
QUALIFIERS void _skipahead_sequence_scratch(unsigned long long x, T *state, unsigned int *scratch)
{
    // unsigned int matrix[n * n * 32];
    unsigned int *matrix = scratch;
    // unsigned int matrixA[n * n * 32];
    unsigned int *matrixA = scratch + (n * n * 32);
    // unsigned int vector[n];
    unsigned int *vector = scratch + (n * n * 32) + (n * n * 32);
    // unsigned int result[n];
    unsigned int *result = scratch + (n * n * 32) + (n * n * 32) + n;
    unsigned long long p = x;
    for(int i = 0; i < n; i++) {
        vector[i] = state->v[i];
    }
    /* Consume PRECALC_BLOCK_SIZE bits of the skip count per precalculated
       sequence matrix. */
    int matrix_num = 0;
    while(p && matrix_num < PRECALC_NUM_MATRICES - 1) {
        for(unsigned int t = 0; t < (p & PRECALC_BLOCK_MASK); t++) {
#ifdef __CUDA_ARCH__
            __curand_matvec(vector, precalc_xorwow_matrix[matrix_num], result, n);
#else
            __curand_matvec(vector, precalc_xorwow_matrix_host[matrix_num], result, n);
#endif
            __curand_veccopy(vector, result, n);
        }
        p >>= PRECALC_BLOCK_SIZE;
        matrix_num++;
    }
    /* Remaining high bits: square-and-multiply from the largest
       precomputed matrix. */
    if(p) {
#ifdef __CUDA_ARCH__
        __curand_matcopy(matrix, precalc_xorwow_matrix[PRECALC_NUM_MATRICES - 1], n);
        __curand_matcopy(matrixA, precalc_xorwow_matrix[PRECALC_NUM_MATRICES - 1], n);
#else
        __curand_matcopy(matrix, precalc_xorwow_matrix_host[PRECALC_NUM_MATRICES - 1], n);
        __curand_matcopy(matrixA, precalc_xorwow_matrix_host[PRECALC_NUM_MATRICES - 1], n);
#endif
    }
    while(p) {
        for(unsigned int t = 0; t < (p & SKIPAHEAD_MASK); t++) {
            __curand_matvec(vector, matrixA, result, n);
            __curand_veccopy(vector, result, n);
        }
        p >>= SKIPAHEAD_BLOCKSIZE;
        if(p) {
            /* matrixA <- matrixA^(2^SKIPAHEAD_BLOCKSIZE) */
            for(int i = 0; i < SKIPAHEAD_BLOCKSIZE; i++) {
                __curand_matmat(matrix, matrixA, n);
                __curand_matcopy(matrixA, matrix, n);
            }
        }
    }
    for(int i = 0; i < n; i++) {
        state->v[i] = vector[i];
    }
    /* No update of state->d needed, guaranteed to be a multiple of 2^32 */
}
690
+
691
/* Advance an XORSHIFT-family state by x steps without scratch memory,
   applying only the precalculated offset matrices in place.
   NOTE(review): unlike _skipahead_scratch, the loop does not bound
   matrix_num by PRECALC_NUM_MATRICES — presumably the tables cover all
   64 bits of x for this path; confirm against curand_precalc.h (not in view). */
template <typename T, int N>
QUALIFIERS void _skipahead_inplace(const unsigned long long x, T *state)
{
    unsigned long long p = x;
    int matrix_num = 0;
    while(p) {
        /* Apply matrix[matrix_num] (p & mask) times for this bit group. */
        for(unsigned int t = 0; t < (p & PRECALC_BLOCK_MASK); t++) {
#ifdef __CUDA_ARCH__
            __curand_matvec_inplace<N>(state->v, precalc_xorwow_offset_matrix[matrix_num]);
#else
            __curand_matvec_inplace<N>(state->v, precalc_xorwow_offset_matrix_host[matrix_num]);
#endif
        }
        p >>= PRECALC_BLOCK_SIZE;
        matrix_num++;
    }
    /* The Weyl counter advances by a fixed constant (362437) per step. */
    state->d += 362437 * (unsigned int)x;
}
709
+
710
/* Advance an XORSHIFT-family state by x whole subsequences without
   scratch memory, applying the precalculated sequence matrices in place. */
template <typename T, int N>
QUALIFIERS void _skipahead_sequence_inplace(unsigned long long x, T *state)
{
    int matrix_num = 0;
    while(x) {
        /* Apply matrix[matrix_num] (x & mask) times for this bit group. */
        for(unsigned int t = 0; t < (x & PRECALC_BLOCK_MASK); t++) {
#ifdef __CUDA_ARCH__
            __curand_matvec_inplace<N>(state->v, precalc_xorwow_matrix[matrix_num]);
#else
            __curand_matvec_inplace<N>(state->v, precalc_xorwow_matrix_host[matrix_num]);
#endif
        }
        x >>= PRECALC_BLOCK_SIZE;
        matrix_num++;
    }
    /* No update of state->d needed, guaranteed to be a multiple of 2^32 */
}
727
+
728
+ /**
729
+ * \brief Update XORWOW state to skip \p n elements.
730
+ *
731
+ * Update the XORWOW state in \p state to skip ahead \p n elements.
732
+ *
733
+ * All values of \p n are valid. Large values require more computation and so
734
+ * will take more time to complete.
735
+ *
736
+ * \param n - Number of elements to skip
737
+ * \param state - Pointer to state to update
738
+ */
739
QUALIFIERS void skipahead(unsigned long long n, curandStateXORWOW_t *state)
{
    /* XORWOW has 5 words of xorshift state; delegate to the generic
       in-place skipahead. */
    _skipahead_inplace<curandStateXORWOW_t, 5>(n, state);
}
743
+
744
/**
 * \brief Update XORWOW state to skip ahead \p n subsequences.
 *
 * Update the XORWOW state in \p state to skip ahead \p n subsequences. Each
 * subsequence is \xmlonly<ph outputclass="xmlonly">2<sup>67</sup></ph>\endxmlonly elements long, so this means the function will skip ahead
 * \xmlonly<ph outputclass="xmlonly">2<sup>67</sup></ph>\endxmlonly * n elements.
 *
 * All values of \p n are valid. Large values require more computation and so
 * will take more time to complete.
 *
 * \param n - Number of subsequences to skip
 * \param state - Pointer to state to update
 */
QUALIFIERS void skipahead_sequence(unsigned long long n, curandStateXORWOW_t *state)
{
    /* Delegates to the in-place matrix-power skip over the 5-word state. */
    _skipahead_sequence_inplace<curandStateXORWOW_t, 5>(n, state);
}
761
+
762
/* Internal: seed an XORWOW state, using caller-provided scratch memory for
 * the skip-ahead matrix work. Mirrors _curand_init_inplace except that the
 * subsequence/offset skips go through the *_scratch helpers. */
QUALIFIERS void _curand_init_scratch(unsigned long long seed,
                                     unsigned long long subsequence,
                                     unsigned long long offset,
                                     curandStateXORWOW_t *state,
                                     unsigned int *scratch)
{
    // Break up seed, apply salt
    // Constants are arbitrary nonzero values
    unsigned int s0 = ((unsigned int)seed) ^ 0xaad26b49UL;
    unsigned int s1 = (unsigned int)(seed >> 32) ^ 0xf7dcefddUL;
    // Simple multiplication to mix up bits
    // Constants are arbitrary odd values
    unsigned int t0 = 1099087573UL * s0;
    unsigned int t1 = 2591861531UL * s1;
    // Base state: the classic xorwow constants perturbed by the seed hash
    state->d = 6615241 + t1 + t0;
    state->v[0] = 123456789UL + t0;
    state->v[1] = 362436069UL ^ t0;
    state->v[2] = 521288629UL + t1;
    state->v[3] = 88675123UL ^ t1;
    state->v[4] = 5783321UL + t0;
    // Position the generator at the requested subsequence/offset
    _skipahead_sequence_scratch<curandStateXORWOW_t, 5>(subsequence, state, scratch);
    _skipahead_scratch<curandStateXORWOW_t, 5>(offset, state, scratch);
    // Clear the Box-Muller caches used by the normal/log-normal generators
    state->boxmuller_flag = 0;
    state->boxmuller_flag_double = 0;
    state->boxmuller_extra = 0.f;
    state->boxmuller_extra_double = 0.;
}
789
+
790
/* Internal: seed an XORWOW state entirely in place (no scratch memory). */
QUALIFIERS void _curand_init_inplace(unsigned long long seed,
                                     unsigned long long subsequence,
                                     unsigned long long offset,
                                     curandStateXORWOW_t *state)
{
    // Break up seed, apply salt
    // Constants are arbitrary nonzero values
    unsigned int s0 = ((unsigned int)seed) ^ 0xaad26b49UL;
    unsigned int s1 = (unsigned int)(seed >> 32) ^ 0xf7dcefddUL;
    // Simple multiplication to mix up bits
    // Constants are arbitrary odd values
    unsigned int t0 = 1099087573UL * s0;
    unsigned int t1 = 2591861531UL * s1;
    // Base state: the classic xorwow constants perturbed by the seed hash
    state->d = 6615241 + t1 + t0;
    state->v[0] = 123456789UL + t0;
    state->v[1] = 362436069UL ^ t0;
    state->v[2] = 521288629UL + t1;
    state->v[3] = 88675123UL ^ t1;
    state->v[4] = 5783321UL + t0;
    // Position the generator at the requested subsequence/offset
    _skipahead_sequence_inplace<curandStateXORWOW_t, 5>(subsequence, state);
    _skipahead_inplace<curandStateXORWOW_t, 5>(offset, state);
    // Clear the Box-Muller caches used by the normal/log-normal generators
    state->boxmuller_flag = 0;
    state->boxmuller_flag_double = 0;
    state->boxmuller_extra = 0.f;
    state->boxmuller_extra_double = 0.;
}
816
+
817
/**
 * \brief Initialize XORWOW state.
 *
 * Initialize XORWOW state in \p state with the given \p seed, \p subsequence,
 * and \p offset.
 *
 * All input values of \p seed, \p subsequence, and \p offset are legal. Large
 * values for \p subsequence and \p offset require more computation and so will
 * take more time to complete.
 *
 * A value of 0 for \p seed sets the state to the values of the original
 * published version of the \p xorwow algorithm.
 *
 * \param seed - Arbitrary bits to use as a seed
 * \param subsequence - Subsequence to start at
 * \param offset - Absolute offset into sequence
 * \param state - Pointer to state to initialize
 */
QUALIFIERS void curand_init(unsigned long long seed,
                            unsigned long long subsequence,
                            unsigned long long offset,
                            curandStateXORWOW_t *state)
{
    /* Thin public wrapper over the in-place (scratch-free) initializer. */
    _curand_init_inplace(seed, subsequence, offset, state);
}
842
+
843
/**
 * \brief Return 32-bits of pseudorandomness from an XORWOW generator.
 *
 * Return 32-bits of pseudorandomness from the XORWOW generator in \p state,
 * increment position of generator by one.
 *
 * \param state - Pointer to state to update
 *
 * \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use.
 */
QUALIFIERS unsigned int curand(curandStateXORWOW_t *state)
{
    unsigned int t;
    /* One xorshift step: shift the 5-word window and fold v[0] into v[4]. */
    t = (state->v[0] ^ (state->v[0] >> 2));
    state->v[0] = state->v[1];
    state->v[1] = state->v[2];
    state->v[2] = state->v[3];
    state->v[3] = state->v[4];
    state->v[4] = (state->v[4] ^ (state->v[4] <<4)) ^ (t ^ (t << 1));
    /* d is an additive counter (step 362437) combined with the xorshift output. */
    state->d += 362437;
    return state->v[4] + state->d;
}
865
+
866
+
867
/**
 * \brief Return 32-bits of pseudorandomness from an Philox4_32_10 generator.
 *
 * Return 32-bits of pseudorandomness from the Philox4_32_10 generator in \p state,
 * increment position of generator by one.
 *
 * \param state - Pointer to state to update
 *
 * \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use.
 */

QUALIFIERS unsigned int curand(curandStatePhilox4_32_10_t *state)
{
    // Maintain the invariant: output[STATE] is always "good" and
    // is the next value to be returned by curand.
    // STATE in [0,3] indexes into the buffered 128-bit block state->output.
    unsigned int ret;
    switch(state->STATE++){
    default:    // STATE == 0 (default keeps the compiler happy for any value)
        ret = state->output.x;
        break;
    case 1:
        ret = state->output.y;
        break;
    case 2:
        ret = state->output.z;
        break;
    case 3:
        ret = state->output.w;
        break;
    }
    // Buffer exhausted: bump the 128-bit counter and generate the next block.
    if(state->STATE == 4){
        Philox_State_Incr(state);
        state->output = curand_Philox4x32_10(state->ctr,state->key);
        state->STATE = 0;
    }
    return ret;
}
904
+
905
/**
 * \brief Return tuple of 4 32-bit pseudorandoms from a Philox4_32_10 generator.
 *
 * Return 128 bits of pseudorandomness from the Philox4_32_10 generator in \p state,
 * increment position of generator by four.
 *
 * \param state - Pointer to state to update
 *
 * \return 128-bits of pseudorandomness as a uint4, all bits valid to use.
 */

QUALIFIERS uint4 curand4(curandStatePhilox4_32_10_t *state)
{
    uint4 r;

    // The next four values may straddle the current buffered block and the
    // next one, depending on the sub-position STATE; stitch them together.
    uint4 tmp = state->output;
    Philox_State_Incr(state);
    state->output= curand_Philox4x32_10(state->ctr,state->key);
    switch(state->STATE){
    case 0:
        // Aligned: the old block is exactly the four values requested.
        return tmp;
    case 1:
        r.x = tmp.y;
        r.y = tmp.z;
        r.z = tmp.w;
        r.w = state->output.x;
        break;
    case 2:
        r.x = tmp.z;
        r.y = tmp.w;
        r.z = state->output.x;
        r.w = state->output.y;
        break;
    case 3:
        r.x = tmp.w;
        r.y = state->output.x;
        r.z = state->output.y;
        r.w = state->output.z;
        break;
    default:
        // NOT possible but needed to avoid compiler warnings
        return tmp;
    }
    return r;
}
950
+
951
/**
 * \brief Update Philox4_32_10 state to skip \p n elements.
 *
 * Update the Philox4_32_10 state in \p state to skip ahead \p n elements.
 *
 * All values of \p n are valid.
 *
 * \param n - Number of elements to skip
 * \param state - Pointer to state to update
 */
QUALIFIERS void skipahead(unsigned long long n, curandStatePhilox4_32_10_t *state)
{
    // Split n into whole 128-bit blocks (counter increments) plus a
    // sub-position within the buffered block, carrying if STATE overflows.
    state->STATE += (n & 3);
    n /= 4;
    if( state->STATE > 3 ){
        n += 1;
        state->STATE -= 4;
    }
    Philox_State_Incr(state, n);
    // Refresh the buffered block for the new counter value.
    state->output = curand_Philox4x32_10(state->ctr,state->key);
}
972
+
973
/**
 * \brief Update Philox4_32_10 state to skip ahead \p n subsequences.
 *
 * Update the Philox4_32_10 state in \p state to skip ahead \p n subsequences. Each
 * subsequence is \xmlonly<ph outputclass="xmlonly">2<sup>66</sup></ph>\endxmlonly elements long, so this means the function will skip ahead
 * \xmlonly<ph outputclass="xmlonly">2<sup>66</sup></ph>\endxmlonly * n elements.
 *
 * All values of \p n are valid.
 *
 * \param n - Number of subsequences to skip
 * \param state - Pointer to state to update
 */
QUALIFIERS void skipahead_sequence(unsigned long long n, curandStatePhilox4_32_10_t *state)
{
    // Add n to the high half of the 128-bit counter, then refresh the buffer.
    Philox_State_Incr_hi(state, n);
    state->output = curand_Philox4x32_10(state->ctr,state->key);
}
990
+
991
/**
 * \brief Initialize Philox4_32_10 state.
 *
 * Initialize Philox4_32_10 state in \p state with the given \p seed, \p subsequence,
 * and \p offset.
 *
 * All input values for \p seed, \p subsequence and \p offset are legal. Each of the
 * \xmlonly<ph outputclass="xmlonly">2<sup>64</sup></ph>\endxmlonly possible
 * values of seed selects an independent sequence of length
 * \xmlonly<ph outputclass="xmlonly">2<sup>130</sup></ph>\endxmlonly.
 * The first
 * \xmlonly<ph outputclass="xmlonly">2<sup>66</sup> * subsequence + offset</ph>\endxmlonly.
 * values of the sequence are skipped.
 * I.e., subsequences are of length
 * \xmlonly<ph outputclass="xmlonly">2<sup>66</sup></ph>\endxmlonly.
 *
 * \param seed - Arbitrary bits to use as a seed
 * \param subsequence - Subsequence to start at
 * \param offset - Absolute offset into subsequence
 * \param state - Pointer to state to initialize
 */
QUALIFIERS void curand_init(unsigned long long seed,
                            unsigned long long subsequence,
                            unsigned long long offset,
                            curandStatePhilox4_32_10_t *state)
{
    // Counter starts at zero; the 64-bit seed becomes the 2-word Philox key.
    state->ctr = make_uint4(0, 0, 0, 0);
    state->key.x = (unsigned int)seed;
    state->key.y = (unsigned int)(seed>>32);
    state->STATE = 0;
    // Clear the Box-Muller caches used by the normal/log-normal generators.
    state->boxmuller_flag = 0;
    state->boxmuller_flag_double = 0;
    state->boxmuller_extra = 0.f;
    state->boxmuller_extra_double = 0.;
    // Position the counter; each call also regenerates state->output.
    skipahead_sequence(subsequence, state);
    skipahead(offset, state);
}
1028
+
1029
+
1030
/* MRG32k3a RNG */

/* Base generator for MRG32k3a.
 *
 * One step of L'Ecuyer's combined multiple recursive generator: advances the
 * two order-3 recurrences s1 (mod m1 = 2^32-209) and s2 (mod m2 = 2^32-22853)
 * and returns their difference reduced into (0, m1] as a double.
 *
 * Three implementations are selected at compile time:
 *   - sm_61+ devices: 32x32->64 bit PTX mad/mul with fast partial reduction,
 *   - older devices:  double-precision arithmetic with fma-based reduction,
 *   - host:           portable double arithmetic via curand_MRGmod.
 * All three must produce identical sequences.
 */
#if __CUDA_ARCH__ > 600
QUALIFIERS unsigned long long __curand_umad(unsigned int a, unsigned int b, unsigned long long c)
{
    // r = (unsigned long long)a * b + c via a single wide-mad instruction.
    unsigned long long r;
    asm("mad.wide.u32 %0, %1, %2, %3;"
        : "=l"(r) : "r"(a), "r"(b), "l"(c));
    return r;
}
QUALIFIERS unsigned long long __curand_umul(unsigned int a, unsigned int b)
{
    // r = (unsigned long long)a * b via a single wide-mul instruction.
    unsigned long long r;
    asm("mul.wide.u32 %0, %1, %2;"
        : "=l"(r) : "r"(a), "r"(b));
    return r;
}

QUALIFIERS double curand_MRG32k3a (curandStateMRG32k3a_t *state)
{
    const unsigned int m1 = 4294967087u;
    const unsigned int m2 = 4294944443u;
    const unsigned int m1c = 209u;      // 2^32 mod m1
    const unsigned int m2c = 22853u;    // 2^32 mod m2
    const unsigned int a12 = 1403580u;
    const unsigned int a13n = 810728u;
    const unsigned int a21 = 527612u;
    const unsigned int a23n = 1370589u;

    unsigned long long p1, p2;
    // -a13n * s1[0] is computed as a13n * (m1 - s1[0]) to stay non-negative.
    const unsigned long long p3 = __curand_umul(a13n, m1 - state->s1[0]);
    p1 = __curand_umad(a12, state->s1[1], p3);

    // Putting addition inside and changing umul to umad
    // slowed this function down on GV100
    p1 = __curand_umul(p1 >> 32, m1c) + (p1 & 0xffffffff);
    if (p1 >= m1) p1 -= m1;

    state->s1[0] = state->s1[1]; state->s1[1] = state->s1[2]; state->s1[2] = p1;
    const unsigned long long p4 = __curand_umul(a23n, m2 - state->s2[0]);
    p2 = __curand_umad(a21, state->s2[2], p4);

    // Putting addition inside and changing umul to umad
    // slowed this function down on GV100
    p2 = __curand_umul(p2 >> 32, m2c) + (p2 & 0xffffffff);
    p2 = __curand_umul(p2 >> 32, m2c) + (p2 & 0xffffffff);
    if (p2 >= m2) p2 -= m2;

    state->s2[0] = state->s2[1]; state->s2[1] = state->s2[2]; state->s2[2] = p2;

    // Combine the two components; keep the result in (0, m1].
    const unsigned int p5 = (unsigned int)p1 - (unsigned int)p2;
    if(p1 <= p2) return p5 + m1;
    return p5;
}
#elif __CUDA_ARCH__ > 0
/* nj's implementation */
QUALIFIERS double curand_MRG32k3a (curandStateMRG32k3a_t *state)
{
    const double m1 = 4294967087.;
    const double m2 = 4294944443.;
    const double a12 = 1403580.;
    const double a13n = 810728.;
    const double a21 = 527612.;
    const double a23n = 1370589.;

    // 1/m split into high/low parts so fma gives a correctly rounded quotient.
    const double rh1 = 2.3283065498378290e-010; /* (1.0 / m1)__hi */
    const double rl1 = -1.7354913086174288e-026; /* (1.0 / m1)__lo */
    const double rh2 = 2.3283188252407387e-010; /* (1.0 / m2)__hi */
    const double rl2 = 2.4081018096503646e-026; /* (1.0 / m2)__lo */

    double q, p1, p2;
    p1 = a12 * state->s1[1] - a13n * state->s1[0];
    q = trunc (fma (p1, rh1, p1 * rl1));
    p1 -= q * m1;
    if (p1 < 0.0) p1 += m1;
    state->s1[0] = state->s1[1]; state->s1[1] = state->s1[2]; state->s1[2] = (unsigned int)p1;
    p2 = a21 * state->s2[2] - a23n * state->s2[0];
    q = trunc (fma (p2, rh2, p2 * rl2));
    p2 -= q * m2;
    if (p2 < 0.0) p2 += m2;
    state->s2[0] = state->s2[1]; state->s2[1] = state->s2[2]; state->s2[2] = (unsigned int)p2;
    if (p1 <= p2) return (p1 - p2 + m1);
    else return (p1 - p2);
}
/* end nj's implementation */
#else
QUALIFIERS double curand_MRG32k3a(curandStateMRG32k3a_t *state)
{
    double p1,p2,r;
    p1 = (MRG32K3A_A12 * state->s1[1]) - (MRG32K3A_A13N * state->s1[0]);
    p1 = curand_MRGmod(p1, MRG32K3A_MOD1);
    if (p1 < 0.0) p1 += MRG32K3A_MOD1;
    state->s1[0] = state->s1[1];
    state->s1[1] = state->s1[2];
    state->s1[2] = (unsigned int)p1;
    p2 = (MRG32K3A_A21 * state->s2[2]) - (MRG32K3A_A23N * state->s2[0]);
    p2 = curand_MRGmod(p2, MRG32K3A_MOD2);
    if (p2 < 0) p2 += MRG32K3A_MOD2;
    state->s2[0] = state->s2[1];
    state->s2[1] = state->s2[2];
    state->s2[2] = (unsigned int)p2;
    r = p1 - p2;
    if (r <= 0) r += MRG32K3A_MOD1;
    return r;
}
#endif
1137
+
1138
+
1139
/**
 * \brief Return 32-bits of pseudorandomness from an MRG32k3a generator.
 *
 * Return 32-bits of pseudorandomness from the MRG32k3a generator in \p state,
 * increment position of generator by one.
 *
 * \param state - Pointer to state to update
 *
 * \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use.
 */
QUALIFIERS unsigned int curand(curandStateMRG32k3a_t *state)
{
    double dRet;
    /* Scale the base generator's value in (0, m1] onto the 32-bit range. */
    dRet = (double)curand_MRG32k3a(state)*(double)MRG32K3A_BITS_NORM;
    return (unsigned int)dRet;
}
1155
+
1156
+
1157
+
1158
/**
 * \brief Update MRG32k3a state to skip \p n elements.
 *
 * Update the MRG32k3a state in \p state to skip ahead \p n elements.
 *
 * All values of \p n are valid. Large values require more computation and so
 * will take more time to complete.
 *
 * \param n - Number of elements to skip
 * \param state - Pointer to state to update
 */
QUALIFIERS void skipahead(unsigned long long n, curandStateMRG32k3a_t *state)
{
    /* t receives the 3x3 one-step transition matrix raised to the n-th power
       (mod m); applying it to s1/s2 advances each component recurrence by n. */
    unsigned int t[3][3];
#ifdef __CUDA_ARCH__
    curand_MRGmatPow3x3( mrg32k3aM1, t, MRG32K3A_MOD1, n);
    curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
    curand_MRGmatPow3x3(mrg32k3aM2, t, MRG32K3A_MOD2, n);
    curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
#else
    curand_MRGmatPow3x3( mrg32k3aM1Host, t, MRG32K3A_MOD1, n);
    curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
    curand_MRGmatPow3x3(mrg32k3aM2Host, t, MRG32K3A_MOD2, n);
    curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
#endif
}
1184
+
1185
/**
 * \brief Update MRG32k3a state to skip ahead \p n subsequences.
 *
 * Update the MRG32k3a state in \p state to skip ahead \p n subsequences. Each
 * subsequence is \xmlonly<ph outputclass="xmlonly">2<sup>76</sup></ph>\endxmlonly elements long, so this means the function will skip ahead
 * \xmlonly<ph outputclass="xmlonly">2<sup>76</sup></ph>\endxmlonly * n elements.
 *
 * Valid values of \p n are 0 to \xmlonly<ph outputclass="xmlonly">2<sup>51</sup></ph>\endxmlonly. Note \p n will be masked to 51 bits
 *
 * \param n - Number of subsequences to skip
 * \param state - Pointer to state to update
 */
QUALIFIERS void skipahead_subsequence(unsigned long long n, curandStateMRG32k3a_t *state)
{
    /* Same matrix-power scheme as skipahead(), but starting from the
       precomputed per-subsequence (2^76-step) transition matrices. */
    unsigned int t[3][3];
#ifdef __CUDA_ARCH__
    curand_MRGmatPow3x3( mrg32k3aM1SubSeq, t, MRG32K3A_MOD1, n);
    curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
    curand_MRGmatPow3x3( mrg32k3aM2SubSeq, t, MRG32K3A_MOD2, n);
    curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
#else
    curand_MRGmatPow3x3( mrg32k3aM1SubSeqHost, t, MRG32K3A_MOD1, n);
    curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
    curand_MRGmatPow3x3( mrg32k3aM2SubSeqHost, t, MRG32K3A_MOD2, n);
    curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
#endif
}
1214
+
1215
/**
 * \brief Update MRG32k3a state to skip ahead \p n sequences.
 *
 * Update the MRG32k3a state in \p state to skip ahead \p n sequences. Each
 * sequence is \xmlonly<ph outputclass="xmlonly">2<sup>127</sup></ph>\endxmlonly elements long, so this means the function will skip ahead
 * \xmlonly<ph outputclass="xmlonly">2<sup>127</sup></ph>\endxmlonly * n elements.
 *
 * All values of \p n are valid. Large values require more computation and so
 * will take more time to complete.
 *
 * \param n - Number of sequences to skip
 * \param state - Pointer to state to update
 */
QUALIFIERS void skipahead_sequence(unsigned long long n, curandStateMRG32k3a_t *state)
{
    /* Same matrix-power scheme as skipahead(), but starting from the
       precomputed per-sequence (2^127-step) transition matrices. */
    unsigned int t[3][3];
#ifdef __CUDA_ARCH__
    curand_MRGmatPow3x3( mrg32k3aM1Seq, t, MRG32K3A_MOD1, n);
    curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
    curand_MRGmatPow3x3( mrg32k3aM2Seq, t, MRG32K3A_MOD2, n);
    curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
#else
    curand_MRGmatPow3x3( mrg32k3aM1SeqHost, t, MRG32K3A_MOD1, n);
    curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
    curand_MRGmatPow3x3( mrg32k3aM2SeqHost, t, MRG32K3A_MOD2, n);
    curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
#endif
}
1243
+
1244
+
1245
/**
 * \brief Initialize MRG32k3a state.
 *
 * Initialize MRG32k3a state in \p state with the given \p seed, \p subsequence,
 * and \p offset.
 *
 * All input values of \p seed, \p subsequence, and \p offset are legal.
 * \p subsequence will be truncated to 51 bits to avoid running into the next sequence
 *
 * A value of 0 for \p seed sets the state to the values of the original
 * published version of the \p MRG32k3a algorithm.
 *
 * \param seed - Arbitrary bits to use as a seed
 * \param subsequence - Subsequence to start at
 * \param offset - Absolute offset into sequence
 * \param state - Pointer to state to initialize
 */
QUALIFIERS void curand_init(unsigned long long seed,
                            unsigned long long subsequence,
                            unsigned long long offset,
                            curandStateMRG32k3a_t *state)
{
    int i;
    /* The canonical published MRG32k3a start state is all 12345. */
    for ( i=0; i<3; i++ ) {
        state->s1[i] = 12345u;
        state->s2[i] = 12345u;
    }
    if (seed != 0ull) {
        /* Perturb the start state by multiplying each word (mod m) by a
           salted half of the seed; keeps every word nonzero. */
        unsigned int x1 = ((unsigned int)seed) ^ 0x55555555UL;
        unsigned int x2 = (unsigned int)((seed >> 32) ^ 0xAAAAAAAAUL);
        state->s1[0] = (unsigned int)curand_MRGmodMul(x1, state->s1[0], MRG32K3A_MOD1);
        state->s1[1] = (unsigned int)curand_MRGmodMul(x2, state->s1[1], MRG32K3A_MOD1);
        state->s1[2] = (unsigned int)curand_MRGmodMul(x1, state->s1[2], MRG32K3A_MOD1);
        state->s2[0] = (unsigned int)curand_MRGmodMul(x2, state->s2[0], MRG32K3A_MOD2);
        state->s2[1] = (unsigned int)curand_MRGmodMul(x1, state->s2[1], MRG32K3A_MOD2);
        state->s2[2] = (unsigned int)curand_MRGmodMul(x2, state->s2[2], MRG32K3A_MOD2);
    }
    skipahead_subsequence( subsequence, state );
    skipahead( offset, state );
    /* Clear the Box-Muller caches used by the normal/log-normal generators. */
    state->boxmuller_flag = 0;
    state->boxmuller_flag_double = 0;
    state->boxmuller_extra = 0.f;
    state->boxmuller_extra_double = 0.;
}
1289
+
1290
/**
 * \brief Update Sobol32 state to skip \p n elements.
 *
 * Update the Sobol32 state in \p state to skip ahead \p n elements.
 *
 * All values of \p n are valid.
 *
 * \param n - Number of elements to skip
 * \param state - Pointer to state to update
 */
template <typename T>
QUALIFIERS
typename CURAND_STD::enable_if<CURAND_STD::is_same<curandStateSobol32_t*, T>::value || CURAND_STD::is_same<curandStateScrambledSobol32_t*, T>::value>::type
skipahead(unsigned int n, T state)
{
    /* Rebuild x from scratch at the new index: start from the scramble
       constant and xor in the direction vector for every set bit of the
       gray code of the new position. */
    state->x = state->c;
    state->i += n;
    unsigned int gray = state->i ^ (state->i >> 1);
    for(unsigned int k = 0; gray != 0; k++, gray >>= 1) {
        if(gray & 1u) {
            state->x ^= state->direction_vectors[k];
        }
    }
}
1317
+
1318
/**
 * \brief Update Sobol64 state to skip \p n elements.
 *
 * Update the Sobol64 state in \p state to skip ahead \p n elements.
 *
 * All values of \p n are valid.
 *
 * \param n - Number of elements to skip
 * \param state - Pointer to state to update
 */
template <typename T>
QUALIFIERS
typename CURAND_STD::enable_if<CURAND_STD::is_same<curandStateSobol64_t*, T>::value || CURAND_STD::is_same<curandStateScrambledSobol64_t*, T>::value>::type
skipahead(unsigned long long n, T state)
{
    /* Rebuild x from scratch at the new index: start from the scramble
       constant and xor in the direction vector for every set bit of the
       gray code of the new position. */
    state->x = state->c;
    state->i += n;
    unsigned long long gray = state->i ^ (state->i >> 1);
    for(unsigned int k = 0; gray != 0; k++, gray >>= 1) {
        if(gray & 1ULL) {
            state->x ^= state->direction_vectors[k];
        }
    }
}
1345
+
1346
/**
 * \brief Initialize Sobol32 state.
 *
 * Initialize Sobol32 state in \p state with the given \p direction \p vectors and
 * \p offset.
 *
 * The direction vector is a device pointer to an array of 32 unsigned ints.
 * All input values of \p offset are legal.
 *
 * \param direction_vectors - Pointer to array of 32 unsigned ints representing the
 * direction vectors for the desired dimension
 * \param offset - Absolute offset into sequence
 * \param state - Pointer to state to initialize
 */
QUALIFIERS void curand_init(curandDirectionVectors32_t direction_vectors,
                            unsigned int offset,
                            curandStateSobol32_t *state)
{
    /* Unscrambled generator: index, scramble constant and value all start at 0. */
    state->i = 0;
    state->c = 0;
    state->x = 0;
    for(int k = 0; k < 32; k++) {
        state->direction_vectors[k] = direction_vectors[k];
    }
    /* Position the generator at the requested offset. */
    skipahead<curandStateSobol32_t *>(offset, state);
}
1372
/**
 * \brief Initialize Scrambled Sobol32 state.
 *
 * Initialize Sobol32 state in \p state with the given \p direction \p vectors and
 * \p offset.
 *
 * The direction vector is a device pointer to an array of 32 unsigned ints.
 * All input values of \p offset are legal.
 *
 * \param direction_vectors - Pointer to array of 32 unsigned ints representing the
 direction vectors for the desired dimension
 * \param scramble_c Scramble constant
 * \param offset - Absolute offset into sequence
 * \param state - Pointer to state to initialize
 */
QUALIFIERS void curand_init(curandDirectionVectors32_t direction_vectors,
                            unsigned int scramble_c,
                            unsigned int offset,
                            curandStateScrambledSobol32_t *state)
{
    /* Scrambled generator: the value stream starts from the scramble constant. */
    state->i = 0;
    state->c = scramble_c;
    state->x = scramble_c;
    for(int k = 0; k < 32; k++) {
        state->direction_vectors[k] = direction_vectors[k];
    }
    /* Position the generator at the requested offset. */
    skipahead<curandStateScrambledSobol32_t *>(offset, state);
}
1400
+
1401
/* Position of the lowest zero bit of x (0-based); returns 31 when x is all ones. */
QUALIFIERS int __curand_find_trailing_zero(unsigned int x)
{
#if __CUDA_ARCH__ > 0
    int y = __ffs(~x);
    if(y)
        return y - 1;
    return 31;
#else
    /* Host fallback: count trailing one-bits, saturating at bit 31. */
    int pos = 0;
    while((x & 1u) && (pos < 31)) {
        x >>= 1;
        pos++;
    }
    return pos;
#endif
}
1418
+
1419
/* Position of the lowest zero bit of x (0-based); returns 63 when x is all ones. */
QUALIFIERS int __curand_find_trailing_zero(unsigned long long x)
{
#if __CUDA_ARCH__ > 0
    int y = __ffsll(~x);
    if(y)
        return y - 1;
    return 63;
#else
    /* Host fallback: count trailing one-bits, saturating at bit 63. */
    int pos = 0;
    while((x & 1ULL) && (pos < 63)) {
        x >>= 1;
        pos++;
    }
    return pos;
#endif
}
1436
+
1437
/**
 * \brief Initialize Sobol64 state.
 *
 * Initialize Sobol64 state in \p state with the given \p direction \p vectors and
 * \p offset.
 *
 * The direction vector is a device pointer to an array of 64 unsigned long longs.
 * All input values of \p offset are legal.
 *
 * \param direction_vectors - Pointer to array of 64 unsigned long longs representing the
 direction vectors for the desired dimension
 * \param offset - Absolute offset into sequence
 * \param state - Pointer to state to initialize
 */
QUALIFIERS void curand_init(curandDirectionVectors64_t direction_vectors,
                            unsigned long long offset,
                            curandStateSobol64_t *state)
{
    /* Unscrambled generator: index, scramble constant and value all start at 0. */
    state->i = 0;
    state->c = 0;
    state->x = 0;
    for(int k = 0; k < 64; k++) {
        state->direction_vectors[k] = direction_vectors[k];
    }
    /* Position the generator at the requested offset. */
    skipahead<curandStateSobol64_t *>(offset, state);
}
1463
+
1464
/**
 * \brief Initialize Scrambled Sobol64 state.
 *
 * Initialize Sobol64 state in \p state with the given \p direction \p vectors and
 * \p offset.
 *
 * The direction vector is a device pointer to an array of 64 unsigned long longs.
 * All input values of \p offset are legal.
 *
 * \param direction_vectors - Pointer to array of 64 unsigned long longs representing the
 direction vectors for the desired dimension
 * \param scramble_c Scramble constant
 * \param offset - Absolute offset into sequence
 * \param state - Pointer to state to initialize
 */
QUALIFIERS void curand_init(curandDirectionVectors64_t direction_vectors,
                            unsigned long long scramble_c,
                            unsigned long long offset,
                            curandStateScrambledSobol64_t *state)
{
    /* Scrambled generator: the value stream starts from the scramble constant. */
    state->i = 0;
    state->c = scramble_c;
    state->x = scramble_c;
    for(int k = 0; k < 64; k++) {
        state->direction_vectors[k] = direction_vectors[k];
    }
    /* Position the generator at the requested offset. */
    skipahead<curandStateScrambledSobol64_t *>(offset, state);
}
1492
+
1493
/**
 * \brief Return 32-bits of quasirandomness from a Sobol32 generator.
 *
 * Return 32-bits of quasirandomness from the Sobol32 generator in \p state,
 * increment position of generator by one.
 *
 * \param state - Pointer to state to update
 *
 * \return 32-bits of quasirandomness as an unsigned int, all bits valid to use.
 */

QUALIFIERS unsigned int curand(curandStateSobol32_t * state)
{
    /* Stepping from index i to i+1 flips exactly one gray-code bit — the
       trailing-zero bit of i — so xor in that single direction vector. */
    unsigned int out = state->x;
    state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)];
    state->i++;
    return out;
}
1514
+
1515
/**
 * \brief Return 32-bits of quasirandomness from a scrambled Sobol32 generator.
 *
 * Return 32-bits of quasirandomness from the scrambled Sobol32 generator in \p state,
 * increment position of generator by one.
 *
 * \param state - Pointer to state to update
 *
 * \return 32-bits of quasirandomness as an unsigned int, all bits valid to use.
 */

QUALIFIERS unsigned int curand(curandStateScrambledSobol32_t * state)
{
    /* Stepping from index i to i+1 flips exactly one gray-code bit — the
       trailing-zero bit of i — so xor in that single direction vector. */
    unsigned int out = state->x;
    state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)];
    state->i++;
    return out;
}
1536
+
1537
/**
 * \brief Return 64-bits of quasirandomness from a Sobol64 generator.
 *
 * Return 64-bits of quasirandomness from the Sobol64 generator in \p state,
 * increment position of generator by one.
 *
 * \param state - Pointer to state to update
 *
 * \return 64-bits of quasirandomness as an unsigned long long, all bits valid to use.
 */

QUALIFIERS unsigned long long curand(curandStateSobol64_t * state)
{
    /* Stepping from index i to i+1 flips exactly one gray-code bit — the
       trailing-zero bit of i — so xor in that single direction vector. */
    unsigned long long out = state->x;
    state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)];
    state->i++;
    return out;
}
1558
+
1559
/**
 * \brief Return 64-bits of quasirandomness from a scrambled Sobol64 generator.
 *
 * Return 64-bits of quasirandomness from the scrambled Sobol64 generator in \p state,
 * increment position of generator by one.
 *
 * \param state - Pointer to state to update
 *
 * \return 64-bits of quasirandomness as an unsigned long long, all bits valid to use.
 */

QUALIFIERS unsigned long long curand(curandStateScrambledSobol64_t * state)
{
    /* Stepping from index i to i+1 flips exactly one gray-code bit — the
       trailing-zero bit of i — so xor in that single direction vector. */
    unsigned long long out = state->x;
    state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)];
    state->i++;
    return out;
}
1580
+
1581
+ #include "curand_uniform.h"
1582
+ #include "curand_normal.h"
1583
+ #include "curand_lognormal.h"
1584
+ #include "curand_poisson.h"
1585
+ #include "curand_discrete2.h"
1586
+
1587
/* Internal helper exposing the device-side precomputed XORWOW skip matrices.
 * NOTE(review): the branch structure (only n==0 and n==2 special-cased, and
 * n==2 indexing into the *offset* matrix table) looks odd but matches the
 * shipped vendor header; it appears to exist mainly to keep the matrix
 * symbols referenced — confirm before changing. */
__device__ static inline unsigned int *__get_precalculated_matrix(int n)
{
    if(n == 0) {
        return precalc_xorwow_matrix[n];
    }
    if(n == 2) {
        return precalc_xorwow_offset_matrix[n];
    }
    return precalc_xorwow_matrix[n];
}
1597
+
1598
#ifndef __CUDACC_RTC__
/* Host-side counterpart of __get_precalculated_matrix.
 * NOTE(review): only n==1 and n==3 are special-cased here (vs 0/2 on the
 * device side); preserved as shipped — appears to serve symbol-reference
 * purposes rather than general lookup. Excluded under NVRTC, which has no
 * host compilation. */
__host__ static inline unsigned int *__get_precalculated_matrix_host(int n)
{
    if(n == 1) {
        return precalc_xorwow_matrix_host[n];
    }
    if(n == 3) {
        return precalc_xorwow_offset_matrix_host[n];
    }
    return precalc_xorwow_matrix_host[n];
}
#endif // #ifndef __CUDACC_RTC__
1610
+
1611
/* Internal helper exposing the device-side precomputed MRG32k3a skip
 * matrices (element / subsequence / sequence, for both moduli).
 * NOTE(review): as with __get_precalculated_matrix, the even-valued
 * special cases mirror the shipped vendor header and look symbol-reference
 * motivated — confirm before changing. */
__device__ static inline unsigned int *__get_mrg32k3a_matrix(int n)
{
    if(n == 0) {
        return mrg32k3aM1[n][0];
    }
    if(n == 2) {
        return mrg32k3aM2[n][0];
    }
    if(n == 4) {
        return mrg32k3aM1SubSeq[n][0];
    }
    if(n == 6) {
        return mrg32k3aM2SubSeq[n][0];
    }
    if(n == 8) {
        return mrg32k3aM1Seq[n][0];
    }
    if(n == 10) {
        return mrg32k3aM2Seq[n][0];
    }
    return mrg32k3aM1[n][0];
}
1633
+
1634
#ifndef __CUDACC_RTC__
/* Host-side counterpart of __get_mrg32k3a_matrix (odd-valued selectors).
 * Excluded under NVRTC, which has no host compilation. */
__host__ static inline unsigned int *__get_mrg32k3a_matrix_host(int n)
{
    if(n == 1) {
        return mrg32k3aM1Host[n][0];
    }
    if(n == 3) {
        return mrg32k3aM2Host[n][0];
    }
    if(n == 5) {
        return mrg32k3aM1SubSeqHost[n][0];
    }
    if(n == 7) {
        return mrg32k3aM2SubSeqHost[n][0];
    }
    if(n == 9) {
        return mrg32k3aM1SeqHost[n][0];
    }
    if(n == 11) {
        return mrg32k3aM2SeqHost[n][0];
    }
    return mrg32k3aM1Host[n][0];
}
1657
+
1658
/* Host accessor for the lgamma lookup table used by the Poisson path. */
__host__ static inline double *__get__cr_lgamma_table_host(void) {
    return __cr_lgamma_table;
}
1661
+ #endif // #ifndef __CUDACC_RTC__
1662
+
1663
+ /** @} */
1664
+
1665
+ #endif // !defined(CURAND_KERNEL_H_)
mgm/lib/python3.10/site-packages/nvidia/curand/include/curand_lognormal.h ADDED
@@ -0,0 +1,697 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * The source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * The Licensed Deliverables contained herein are PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+
51
+ #if !defined(CURAND_LOGNORMAL_H_)
52
+ #define CURAND_LOGNORMAL_H_
53
+
54
+ /**
55
+ * \defgroup DEVICE Device API
56
+ *
57
+ * @{
58
+ */
59
+
60
+ #ifndef __CUDACC_RTC__
61
+ #include <math.h>
62
+ #endif // __CUDACC_RTC__
63
+
64
+ #include "curand_mrg32k3a.h"
65
+ #include "curand_mtgp32_kernel.h"
66
+ #include "curand_philox4x32_x.h"
67
+
68
+ /**
69
+ * \brief Return a log-normally distributed float from an XORWOW generator.
70
+ *
71
+ * Return a single log-normally distributed float derived from a normal
72
+ * distribution with mean \p mean and standard deviation \p stddev
73
+ * from the XORWOW generator in \p state,
74
+ * increment position of generator by one.
75
+ *
76
+ * The implementation uses a Box-Muller transform to generate two
77
+ * normally distributed results, transforms them to log-normal distribution,
78
+ * then returns them one at a time.
79
+ * See ::curand_log_normal2() for a more efficient version that returns
80
+ * both results at once.
81
+ *
82
+ * \param state - Pointer to state to update
83
+ * \param mean - Mean of the related normal distribution
84
+ * \param stddev - Standard deviation of the related normal distribution
85
+ *
86
+ * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
87
+ */
88
+ QUALIFIERS float curand_log_normal(curandStateXORWOW_t *state, float mean, float stddev)
89
+ {
90
+ if(state->boxmuller_flag != EXTRA_FLAG_LOG_NORMAL) {
91
+ unsigned int x, y;
92
+ x = curand(state);
93
+ y = curand(state);
94
+ float2 v = _curand_box_muller(x, y);
95
+ state->boxmuller_extra = expf(mean + (stddev * v.y));
96
+ state->boxmuller_flag = EXTRA_FLAG_LOG_NORMAL;
97
+ return expf(mean + (stddev * v.x));
98
+ }
99
+ state->boxmuller_flag = 0;
100
+ return state->boxmuller_extra;
101
+ }
102
+
103
+ /**
104
+ * \brief Return a log-normally distributed float from an Philox4_32_10 generator.
105
+ *
106
+ * Return a single log-normally distributed float derived from a normal
107
+ * distribution with mean \p mean and standard deviation \p stddev
108
+ * from the Philox4_32_10 generator in \p state,
109
+ * increment position of generator by one.
110
+ *
111
+ * The implementation uses a Box-Muller transform to generate two
112
+ * normally distributed results, transforms them to log-normal distribution,
113
+ * then returns them one at a time.
114
+ * See ::curand_log_normal2() for a more efficient version that returns
115
+ * both results at once.
116
+ *
117
+ * \param state - Pointer to state to update
118
+ * \param mean - Mean of the related normal distribution
119
+ * \param stddev - Standard deviation of the related normal distribution
120
+ *
121
+ * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
122
+ */
123
+
124
+ QUALIFIERS float curand_log_normal(curandStatePhilox4_32_10_t *state, float mean, float stddev)
125
+ {
126
+ if(state->boxmuller_flag != EXTRA_FLAG_LOG_NORMAL) {
127
+ unsigned int x, y;
128
+ x = curand(state);
129
+ y = curand(state);
130
+ float2 v = _curand_box_muller(x, y);
131
+ state->boxmuller_extra = expf(mean + (stddev * v.y));
132
+ state->boxmuller_flag = EXTRA_FLAG_LOG_NORMAL;
133
+ return expf(mean + (stddev * v.x));
134
+ }
135
+ state->boxmuller_flag = 0;
136
+ return state->boxmuller_extra;
137
+ }
138
+
139
+ /**
140
+ * \brief Return two normally distributed floats from an XORWOW generator.
141
+ *
142
+ * Return two log-normally distributed floats derived from a normal
143
+ * distribution with mean \p mean and standard deviation \p stddev
144
+ * from the XORWOW generator in \p state,
145
+ * increment position of generator by two.
146
+ *
147
+ * The implementation uses a Box-Muller transform to generate two
148
+ * normally distributed results, then transforms them to log-normal.
149
+ *
150
+ * \param state - Pointer to state to update
151
+ * \param mean - Mean of the related normal distribution
152
+ * \param stddev - Standard deviation of the related normal distribution
153
+ *
154
+ * \return Log-normally distributed float2 where each element is from a
155
+ * distribution with mean \p mean and standard deviation \p stddev
156
+ */
157
+ QUALIFIERS float2 curand_log_normal2(curandStateXORWOW_t *state, float mean, float stddev)
158
+ {
159
+ float2 v = curand_box_muller(state);
160
+ v.x = expf(mean + (stddev * v.x));
161
+ v.y = expf(mean + (stddev * v.y));
162
+ return v;
163
+ }
164
+
165
+ /**
166
+ * \brief Return two normally distributed floats from an Philox4_32_10 generator.
167
+ *
168
+ * Return two log-normally distributed floats derived from a normal
169
+ * distribution with mean \p mean and standard deviation \p stddev
170
+ * from the Philox4_32_10 generator in \p state,
171
+ * increment position of generator by two.
172
+ *
173
+ * The implementation uses a Box-Muller transform to generate two
174
+ * normally distributed results, then transforms them to log-normal.
175
+ *
176
+ * \param state - Pointer to state to update
177
+ * \param mean - Mean of the related normal distribution
178
+ * \param stddev - Standard deviation of the related normal distribution
179
+ *
180
+ * \return Log-normally distributed float2 where each element is from a
181
+ * distribution with mean \p mean and standard deviation \p stddev
182
+ */
183
+ QUALIFIERS float2 curand_log_normal2(curandStatePhilox4_32_10_t *state, float mean, float stddev)
184
+ {
185
+ float2 v = curand_box_muller(state);
186
+ v.x = expf(mean + (stddev * v.x));
187
+ v.y = expf(mean + (stddev * v.y));
188
+ return v;
189
+ }
190
+ /**
191
+ * \brief Return four normally distributed floats from an Philox4_32_10 generator.
192
+ *
193
+ * Return four log-normally distributed floats derived from a normal
194
+ * distribution with mean \p mean and standard deviation \p stddev
195
+ * from the Philox4_32_10 generator in \p state,
196
+ * increment position of generator by four.
197
+ *
198
+ * The implementation uses a Box-Muller transform to generate two
199
+ * normally distributed results, then transforms them to log-normal.
200
+ *
201
+ * \param state - Pointer to state to update
202
+ * \param mean - Mean of the related normal distribution
203
+ * \param stddev - Standard deviation of the related normal distribution
204
+ *
205
+ * \return Log-normally distributed float4 where each element is from a
206
+ * distribution with mean \p mean and standard deviation \p stddev
207
+ */
208
+ QUALIFIERS float4 curand_log_normal4(curandStatePhilox4_32_10_t *state, float mean, float stddev)
209
+ {
210
+ float4 v = curand_box_muller4(state);
211
+ v.x = expf(mean + (stddev * v.x));
212
+ v.y = expf(mean + (stddev * v.y));
213
+ v.z = expf(mean + (stddev * v.z));
214
+ v.w = expf(mean + (stddev * v.w));
215
+ return v;
216
+ }
217
+
218
+ /**
219
+ * \brief Return a log-normally distributed float from an MRG32k3a generator.
220
+ *
221
+ * Return a single log-normally distributed float derived from a normal
222
+ * distribution with mean \p mean and standard deviation \p stddev
223
+ * from the MRG32k3a generator in \p state,
224
+ * increment position of generator by one.
225
+ *
226
+ * The implementation uses a Box-Muller transform to generate two
227
+ * normally distributed results, transforms them to log-normal distribution,
228
+ * then returns them one at a time.
229
+ * See ::curand_log_normal2() for a more efficient version that returns
230
+ * both results at once.
231
+ *
232
+ * \param state - Pointer to state to update
233
+ * \param mean - Mean of the related normal distribution
234
+ * \param stddev - Standard deviation of the related normal distribution
235
+ *
236
+ * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
237
+ */
238
+ QUALIFIERS float curand_log_normal(curandStateMRG32k3a_t *state, float mean, float stddev)
239
+ {
240
+ if(state->boxmuller_flag != EXTRA_FLAG_LOG_NORMAL) {
241
+ float2 v = curand_box_muller_mrg(state);
242
+ state->boxmuller_extra = expf(mean + (stddev * v.y));
243
+ state->boxmuller_flag = EXTRA_FLAG_LOG_NORMAL;
244
+ return expf(mean + (stddev * v.x));
245
+ }
246
+ state->boxmuller_flag = 0;
247
+ return state->boxmuller_extra;
248
+ }
249
+
250
+ /**
251
+ * \brief Return two normally distributed floats from an MRG32k3a generator.
252
+ *
253
+ * Return two log-normally distributed floats derived from a normal
254
+ * distribution with mean \p mean and standard deviation \p stddev
255
+ * from the MRG32k3a generator in \p state,
256
+ * increment position of generator by two.
257
+ *
258
+ * The implementation uses a Box-Muller transform to generate two
259
+ * normally distributed results, then transforms them to log-normal.
260
+ *
261
+ * \param state - Pointer to state to update
262
+ * \param mean - Mean of the related normal distribution
263
+ * \param stddev - Standard deviation of the related normal distribution
264
+ *
265
+ * \return Log-normally distributed float2 where each element is from a
266
+ * distribution with mean \p mean and standard deviation \p stddev
267
+ */
268
+ QUALIFIERS float2 curand_log_normal2(curandStateMRG32k3a_t *state, float mean, float stddev)
269
+ {
270
+ float2 v = curand_box_muller_mrg(state);
271
+ v.x = expf(mean + (stddev * v.x));
272
+ v.y = expf(mean + (stddev * v.y));
273
+ return v;
274
+ }
275
+
276
+ /**
277
+ * \brief Return a log-normally distributed float from an MTGP32 generator.
278
+ *
279
+ * Return a single log-normally distributed float derived from a normal
280
+ * distribution with mean \p mean and standard deviation \p stddev
281
+ * from the MTGP32 generator in \p state,
282
+ * increment position of generator.
283
+ *
284
+ * The implementation uses the inverse cumulative distribution function
285
+ * to generate a normally distributed result, then transforms the result
286
+ * to log-normal.
287
+ *
288
+ * \param state - Pointer to state to update
289
+ * \param mean - Mean of the related normal distribution
290
+ * \param stddev - Standard deviation of the related normal distribution
291
+ *
292
+ * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
293
+ */
294
+ QUALIFIERS float curand_log_normal(curandStateMtgp32_t *state, float mean, float stddev)
295
+ {
296
+ return expf(mean + (stddev * _curand_normal_icdf(curand(state))));
297
+ }
298
+
299
+ /**
300
+ * \brief Return a log-normally distributed float from a Sobol32 generator.
301
+ *
302
+ * Return a single log-normally distributed float derived from a normal
303
+ * distribution with mean \p mean and standard deviation \p stddev
304
+ * from the Sobol32 generator in \p state,
305
+ * increment position of generator by one.
306
+ *
307
+ * The implementation uses the inverse cumulative distribution function
308
+ * to generate a normally distributed result, then transforms the result
309
+ * to log-normal.
310
+ *
311
+ * \param state - Pointer to state to update
312
+ * \param mean - Mean of the related normal distribution
313
+ * \param stddev - Standard deviation of the related normal distribution
314
+ *
315
+ * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
316
+ */
317
+ QUALIFIERS float curand_log_normal(curandStateSobol32_t *state, float mean, float stddev)
318
+ {
319
+ return expf(mean + (stddev * _curand_normal_icdf(curand(state))));
320
+ }
321
+ /**
322
+ * \brief Return a log-normally distributed float from a scrambled Sobol32 generator.
323
+ *
324
+ * Return a single log-normally distributed float derived from a normal
325
+ * distribution with mean \p mean and standard deviation \p stddev
326
+ * from the scrambled Sobol32 generator in \p state,
327
+ * increment position of generator by one.
328
+ *
329
+ * The implementation uses the inverse cumulative distribution function
330
+ * to generate a normally distributed result, then transforms the result
331
+ * to log-normal.
332
+ *
333
+ * \param state - Pointer to state to update
334
+ * \param mean - Mean of the related normal distribution
335
+ * \param stddev - Standard deviation of the related normal distribution
336
+ *
337
+ * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
338
+ */
339
+ QUALIFIERS float curand_log_normal(curandStateScrambledSobol32_t *state, float mean, float stddev)
340
+ {
341
+ return expf(mean + (stddev * _curand_normal_icdf(curand(state))));
342
+ }
343
+
344
+ /**
345
+ * \brief Return a log-normally distributed float from a Sobol64 generator.
346
+ *
347
+ * Return a single log-normally distributed float derived from a normal
348
+ * distribution with mean \p mean and standard deviation \p stddev
349
+ * from the Sobol64 generator in \p state,
350
+ * increment position of generator by one.
351
+ *
352
+ * The implementation uses the inverse cumulative distribution function
353
+ * to generate normally distributed results, then converts to log-normal
354
+ * distribution.
355
+ *
356
+ * \param state - Pointer to state to update
357
+ * \param mean - Mean of the related normal distribution
358
+ * \param stddev - Standard deviation of the related normal distribution
359
+ *
360
+ * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
361
+ */
362
+ QUALIFIERS float curand_log_normal(curandStateSobol64_t *state, float mean, float stddev)
363
+ {
364
+ return expf(mean + (stddev * _curand_normal_icdf(curand(state))));
365
+ }
366
+
367
+ /**
368
+ * \brief Return a log-normally distributed float from a scrambled Sobol64 generator.
369
+ *
370
+ * Return a single log-normally distributed float derived from a normal
371
+ * distribution with mean \p mean and standard deviation \p stddev
372
+ * from the scrambled Sobol64 generator in \p state,
373
+ * increment position of generator by one.
374
+ *
375
+ * The implementation uses the inverse cumulative distribution function
376
+ * to generate normally distributed results, then converts to log-normal
377
+ * distribution.
378
+ *
379
+ * \param state - Pointer to state to update
380
+ * \param mean - Mean of the related normal distribution
381
+ * \param stddev - Standard deviation of the related normal distribution
382
+ *
383
+ * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
384
+ */
385
+ QUALIFIERS float curand_log_normal(curandStateScrambledSobol64_t *state, float mean, float stddev)
386
+ {
387
+ return expf(mean + (stddev * _curand_normal_icdf(curand(state))));
388
+ }
389
+
390
+ /**
391
+ * \brief Return a log-normally distributed double from an XORWOW generator.
392
+ *
393
+ * Return a single normally distributed double derived from a normal
394
+ * distribution with mean \p mean and standard deviation \p stddev
395
+ * from the XORWOW generator in \p state,
396
+ * increment position of generator.
397
+ *
398
+ * The implementation uses a Box-Muller transform to generate two
399
+ * normally distributed results, transforms them to log-normal distribution,
400
+ * then returns them one at a time.
401
+ * See ::curand_log_normal2_double() for a more efficient version that returns
402
+ * both results at once.
403
+ *
404
+ * \param state - Pointer to state to update
405
+ * \param mean - Mean of the related normal distribution
406
+ * \param stddev - Standard deviation of the related normal distribution
407
+ *
408
+ * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
409
+ */
410
+
411
+ QUALIFIERS double curand_log_normal_double(curandStateXORWOW_t *state, double mean, double stddev)
412
+ {
413
+ if(state->boxmuller_flag_double != EXTRA_FLAG_LOG_NORMAL) {
414
+ unsigned int x0, x1, y0, y1;
415
+ x0 = curand(state);
416
+ x1 = curand(state);
417
+ y0 = curand(state);
418
+ y1 = curand(state);
419
+ double2 v = _curand_box_muller_double(x0, x1, y0, y1);
420
+ state->boxmuller_extra_double = exp(mean + (stddev * v.y));
421
+ state->boxmuller_flag_double = EXTRA_FLAG_LOG_NORMAL;
422
+ return exp(mean + (stddev * v.x));
423
+ }
424
+ state->boxmuller_flag_double = 0;
425
+ return state->boxmuller_extra_double;
426
+ }
427
+
428
+ /**
429
+ * \brief Return a log-normally distributed double from an Philox4_32_10 generator.
430
+ *
431
+ * Return a single normally distributed double derived from a normal
432
+ * distribution with mean \p mean and standard deviation \p stddev
433
+ * from the Philox4_32_10 generator in \p state,
434
+ * increment position of generator.
435
+ *
436
+ * The implementation uses a Box-Muller transform to generate two
437
+ * normally distributed results, transforms them to log-normal distribution,
438
+ * then returns them one at a time.
439
+ * See ::curand_log_normal2_double() for a more efficient version that returns
440
+ * both results at once.
441
+ *
442
+ * \param state - Pointer to state to update
443
+ * \param mean - Mean of the related normal distribution
444
+ * \param stddev - Standard deviation of the related normal distribution
445
+ *
446
+ * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
447
+ */
448
+
449
+ QUALIFIERS double curand_log_normal_double(curandStatePhilox4_32_10_t *state, double mean, double stddev)
450
+ {
451
+ if(state->boxmuller_flag_double != EXTRA_FLAG_LOG_NORMAL) {
452
+ uint4 _x;
453
+ _x = curand4(state);
454
+ double2 v = _curand_box_muller_double(_x.x, _x.y, _x.z, _x.w);
455
+ state->boxmuller_extra_double = exp(mean + (stddev * v.y));
456
+ state->boxmuller_flag_double = EXTRA_FLAG_LOG_NORMAL;
457
+ return exp(mean + (stddev * v.x));
458
+ }
459
+ state->boxmuller_flag_double = 0;
460
+ return state->boxmuller_extra_double;
461
+ }
462
+
463
+
464
+ /**
465
+ * \brief Return two log-normally distributed doubles from an XORWOW generator.
466
+ *
467
+ * Return two log-normally distributed doubles derived from a normal
468
+ * distribution with mean \p mean and standard deviation \p stddev
469
+ * from the XORWOW generator in \p state,
470
+ * increment position of generator by two.
471
+ *
472
+ * The implementation uses a Box-Muller transform to generate two
473
+ * normally distributed results, and transforms them to log-normal distribution,.
474
+ *
475
+ * \param state - Pointer to state to update
476
+ * \param mean - Mean of the related normal distribution
477
+ * \param stddev - Standard deviation of the related normal distribution
478
+ *
479
+ * \return Log-normally distributed double2 where each element is from a
480
+ * distribution with mean \p mean and standard deviation \p stddev
481
+ */
482
+ QUALIFIERS double2 curand_log_normal2_double(curandStateXORWOW_t *state, double mean, double stddev)
483
+ {
484
+ double2 v = curand_box_muller_double(state);
485
+ v.x = exp(mean + (stddev * v.x));
486
+ v.y = exp(mean + (stddev * v.y));
487
+ return v;
488
+ }
489
+
490
+ /**
491
+ * \brief Return two log-normally distributed doubles from an Philox4_32_10 generator.
492
+ *
493
+ * Return two log-normally distributed doubles derived from a normal
494
+ * distribution with mean \p mean and standard deviation \p stddev
495
+ * from the Philox4_32_10 generator in \p state,
496
+ * increment position of generator by four.
497
+ *
498
+ * The implementation uses a Box-Muller transform to generate two
499
+ * normally distributed results, and transforms them to log-normal distribution,.
500
+ *
501
+ * \param state - Pointer to state to update
502
+ * \param mean - Mean of the related normal distribution
503
+ * \param stddev - Standard deviation of the related normal distribution
504
+ *
505
+ * \return Log-normally distributed double4 where each element is from a
506
+ * distribution with mean \p mean and standard deviation \p stddev
507
+ */
508
+ QUALIFIERS double2 curand_log_normal2_double(curandStatePhilox4_32_10_t *state, double mean, double stddev)
509
+ {
510
+ double2 v = curand_box_muller2_double(state);
511
+ v.x = exp(mean + (stddev * v.x));
512
+ v.y = exp(mean + (stddev * v.y));
513
+ return v;
514
+ }
515
+ // nor part of API
516
+ QUALIFIERS double4 curand_log_normal4_double(curandStatePhilox4_32_10_t *state, double mean, double stddev)
517
+ {
518
+ double4 v = curand_box_muller4_double(state);
519
+ v.x = exp(mean + (stddev * v.x));
520
+ v.y = exp(mean + (stddev * v.y));
521
+ v.z = exp(mean + (stddev * v.z));
522
+ v.w = exp(mean + (stddev * v.w));
523
+ return v;
524
+ }
525
+
526
+ /**
527
+ * \brief Return a log-normally distributed double from an MRG32k3a generator.
528
+ *
529
+ * Return a single normally distributed double derived from a normal
530
+ * distribution with mean \p mean and standard deviation \p stddev
531
+ * from the MRG32k3a generator in \p state,
532
+ * increment position of generator.
533
+ *
534
+ * The implementation uses a Box-Muller transform to generate two
535
+ * normally distributed results, transforms them to log-normal distribution,
536
+ * then returns them one at a time.
537
+ * See ::curand_log_normal2_double() for a more efficient version that returns
538
+ * both results at once.
539
+ *
540
+ * \param state - Pointer to state to update
541
+ * \param mean - Mean of the related normal distribution
542
+ * \param stddev - Standard deviation of the related normal distribution
543
+ *
544
+ * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
545
+ */
546
+ QUALIFIERS double curand_log_normal_double(curandStateMRG32k3a_t *state, double mean, double stddev)
547
+ {
548
+ if(state->boxmuller_flag_double != EXTRA_FLAG_LOG_NORMAL) {
549
+ double2 v = curand_box_muller_mrg_double(state);
550
+ state->boxmuller_extra_double = exp(mean + (stddev * v.y));
551
+ state->boxmuller_flag_double = EXTRA_FLAG_LOG_NORMAL;
552
+ return exp(mean + (stddev * v.x));
553
+ }
554
+ state->boxmuller_flag_double = 0;
555
+ return state->boxmuller_extra_double;
556
+ }
557
+
558
+ /**
559
+ * \brief Return two log-normally distributed doubles from an MRG32k3a generator.
560
+ *
561
+ * Return two log-normally distributed doubles derived from a normal
562
+ * distribution with mean \p mean and standard deviation \p stddev
563
+ * from the MRG32k3a generator in \p state,
564
+ * increment position of generator by two.
565
+ *
566
+ * The implementation uses a Box-Muller transform to generate two
567
+ * normally distributed results, and transforms them to log-normal distribution,.
568
+ *
569
+ * \param state - Pointer to state to update
570
+ * \param mean - Mean of the related normal distribution
571
+ * \param stddev - Standard deviation of the related normal distribution
572
+ *
573
+ * \return Log-normally distributed double2 where each element is from a
574
+ * distribution with mean \p mean and standard deviation \p stddev
575
+ */
576
+ QUALIFIERS double2 curand_log_normal2_double(curandStateMRG32k3a_t *state, double mean, double stddev)
577
+ {
578
+ double2 v = curand_box_muller_mrg_double(state);
579
+ v.x = exp(mean + (stddev * v.x));
580
+ v.y = exp(mean + (stddev * v.y));
581
+ return v;
582
+ }
583
+
584
+ /**
585
+ * \brief Return a log-normally distributed double from an MTGP32 generator.
586
+ *
587
+ * Return a single log-normally distributed double derived from a normal
588
+ * distribution with mean \p mean and standard deviation \p stddev
589
+ * from the MTGP32 generator in \p state,
590
+ * increment position of generator.
591
+ *
592
+ * The implementation uses the inverse cumulative distribution function
593
+ * to generate normally distributed results, and transforms them into
594
+ * log-normal distribution.
595
+ *
596
+ * \param state - Pointer to state to update
597
+ * \param mean - Mean of the related normal distribution
598
+ * \param stddev - Standard deviation of the related normal distribution
599
+ *
600
+ * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
601
+ */
602
+ QUALIFIERS double curand_log_normal_double(curandStateMtgp32_t *state, double mean, double stddev)
603
+ {
604
+ return exp(mean + (stddev * _curand_normal_icdf_double(curand(state))));
605
+ }
606
+
607
+ /**
608
+ * \brief Return a log-normally distributed double from a Sobol32 generator.
609
+ *
610
+ * Return a single log-normally distributed double derived from a normal
611
+ * distribution with mean \p mean and standard deviation \p stddev
612
+ * from the Sobol32 generator in \p state,
613
+ * increment position of generator by one.
614
+ *
615
+ * The implementation uses the inverse cumulative distribution function
616
+ * to generate normally distributed results, and transforms them into
617
+ * log-normal distribution.
618
+ *
619
+ * \param state - Pointer to state to update
620
+ * \param mean - Mean of the related normal distribution
621
+ * \param stddev - Standard deviation of the related normal distribution
622
+ *
623
+ * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
624
+ */
625
+ QUALIFIERS double curand_log_normal_double(curandStateSobol32_t *state, double mean, double stddev)
626
+ {
627
+ return exp(mean + (stddev * _curand_normal_icdf_double(curand(state))));
628
+ }
629
+
630
+ /**
631
+ * \brief Return a log-normally distributed double from a scrambled Sobol32 generator.
632
+ *
633
+ * Return a single log-normally distributed double derived from a normal
634
+ * distribution with mean \p mean and standard deviation \p stddev
635
+ * from the scrambled Sobol32 generator in \p state,
636
+ * increment position of generator by one.
637
+ *
638
+ * The implementation uses the inverse cumulative distribution function
639
+ * to generate normally distributed results, and transforms them into
640
+ * log-normal distribution.
641
+ *
642
+ * \param state - Pointer to state to update
643
+ * \param mean - Mean of the related normal distribution
644
+ * \param stddev - Standard deviation of the related normal distribution
645
+ *
646
+ * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
647
+ */
648
+ QUALIFIERS double curand_log_normal_double(curandStateScrambledSobol32_t *state, double mean, double stddev)
649
+ {
650
+ return exp(mean + (stddev * _curand_normal_icdf_double(curand(state))));
651
+ }
652
+
653
+ /**
654
+ * \brief Return a log-normally distributed double from a Sobol64 generator.
655
+ *
656
+ * Return a single normally distributed double derived from a normal
657
+ * distribution with mean \p mean and standard deviation \p stddev
658
+ * from the Sobol64 generator in \p state,
659
+ * increment position of generator by one.
660
+ *
661
+ * The implementation uses the inverse cumulative distribution function
662
+ * to generate normally distributed results.
663
+ *
664
+ * \param state - Pointer to state to update
665
+ * \param mean - Mean of the related normal distribution
666
+ * \param stddev - Standard deviation of the related normal distribution
667
+ *
668
+ * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
669
+ */
670
+ QUALIFIERS double curand_log_normal_double(curandStateSobol64_t *state, double mean, double stddev)
671
+ {
672
+ return exp(mean + (stddev * _curand_normal_icdf_double(curand(state))));
673
+ }
674
+
675
+ /**
676
+ * \brief Return a log-normally distributed double from a scrambled Sobol64 generator.
677
+ *
678
+ * Return a single normally distributed double derived from a normal
679
+ * distribution with mean \p mean and standard deviation \p stddev
680
+ * from the scrambled Sobol64 generator in \p state,
681
+ * increment position of generator by one.
682
+ *
683
+ * The implementation uses the inverse cumulative distribution function
684
+ * to generate normally distributed results.
685
+ *
686
+ * \param state - Pointer to state to update
687
+ * \param mean - Mean of the related normal distribution
688
+ * \param stddev - Standard deviation of the related normal distribution
689
+ *
690
+ * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
691
+ */
692
+ QUALIFIERS double curand_log_normal_double(curandStateScrambledSobol64_t *state, double mean, double stddev)
693
+ {
694
+ return exp(mean + (stddev * _curand_normal_icdf_double(curand(state))));
695
+ }
696
+
697
+ #endif // !defined(CURAND_LOGNORMAL_H_)
mgm/lib/python3.10/site-packages/nvidia/curand/include/curand_mrg32k3a.h ADDED
The diff for this file is too large to render. See raw diff