ZTWHHH commited on
Commit
a1a1c79
·
verified ·
1 Parent(s): 7502102

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/pywrap_calibration.so +3 -0
  3. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/dnnl.hpp +22 -0
  4. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/dnnl_config.h +22 -0
  5. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/dnnl_debug.h +22 -0
  6. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/dnnl_ocl.h +22 -0
  7. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/dnnl_ocl.hpp +22 -0
  8. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/dnnl_sycl.h +22 -0
  9. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/dnnl_sycl_types.h +22 -0
  10. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/dnnl_threadpool.h +22 -0
  11. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/dnnl_threadpool_iface.hpp +22 -0
  12. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/dnnl_types.h +22 -0
  13. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/dnnl_version.h +22 -0
  14. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/oneapi/dnnl/dnnl.hpp +0 -0
  15. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/oneapi/dnnl/dnnl_common.h +175 -0
  16. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/oneapi/dnnl/dnnl_common_types.h +210 -0
  17. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/oneapi/dnnl/dnnl_config.h +200 -0
  18. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/oneapi/dnnl/dnnl_graph.hpp +1510 -0
  19. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/oneapi/dnnl/dnnl_ocl.h +249 -0
  20. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/oneapi/dnnl/dnnl_ocl_types.h +51 -0
  21. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/oneapi/dnnl/dnnl_sycl.h +169 -0
  22. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/oneapi/dnnl/dnnl_sycl_types.h +51 -0
  23. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/oneapi/dnnl/dnnl_version.h.in +36 -0
  24. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/batch_normalization_pd.hpp +366 -0
  25. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/bfloat16.hpp +96 -0
  26. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/binary_pd.hpp +174 -0
  27. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/bit_cast.hpp +57 -0
  28. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/c_types_map.hpp +1966 -0
  29. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/cache_blob.hpp +114 -0
  30. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/cache_blob_id.hpp +60 -0
  31. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/cache_utils.hpp +412 -0
  32. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/compiler_workarounds.hpp +84 -0
  33. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/convolution_pd.hpp +473 -0
  34. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/cpp_compat.hpp +54 -0
  35. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/dnnl_thread.hpp +678 -0
  36. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/dnnl_traits.hpp +151 -0
  37. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/eltwise_pd.hpp +298 -0
  38. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/experimental.hpp +29 -0
  39. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/gemm_pd.hpp +119 -0
  40. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/gemm_types.hpp +161 -0
  41. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/impl_registration.hpp +235 -0
  42. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/internal_defs.hpp +36 -0
  43. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/ittnotify/disable_warnings.h +29 -0
  44. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/ittnotify/ittnotify.h +0 -0
  45. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/ittnotify/ittnotify_config.h +667 -0
  46. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/ittnotify/ittnotify_static.h +355 -0
  47. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/ittnotify/ittnotify_types.h +65 -0
  48. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/ittnotify/jitprofiling.h +642 -0
  49. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/ittnotify/legacy/ittnotify.h +992 -0
  50. videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/kernel_cache.hpp +139 -0
.gitattributes CHANGED
@@ -902,3 +902,4 @@ videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2tensorrt/_pywrap_
902
  videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_function_lib.so filter=lfs diff=lfs merge=lfs -text
903
  videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_quantize_model.so filter=lfs diff=lfs merge=lfs -text
904
  videochat2/lib/python3.10/site-packages/torchvision/_C.so filter=lfs diff=lfs merge=lfs -text
 
 
902
  videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_function_lib.so filter=lfs diff=lfs merge=lfs -text
903
  videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_quantize_model.so filter=lfs diff=lfs merge=lfs -text
904
  videochat2/lib/python3.10/site-packages/torchvision/_C.so filter=lfs diff=lfs merge=lfs -text
905
+ videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/pywrap_calibration.so filter=lfs diff=lfs merge=lfs -text
videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/pywrap_calibration.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da88dc5c45d96c2d1f088cc4d0eba821029954006f09ee74c8aa343caebf28c3
3
+ size 1825945
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/dnnl.hpp ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_HPP
18
+ #define DNNL_HPP
19
+
20
+ #include "oneapi/dnnl/dnnl.hpp"
21
+
22
+ #endif /* DNNL_HPP */
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/dnnl_config.h ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_CONFIG_H
18
+ #define DNNL_CONFIG_H
19
+
20
+ #include "oneapi/dnnl/dnnl_config.h"
21
+
22
+ #endif /* DNNL_CONFIG_H */
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/dnnl_debug.h ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_DEBUG_H
18
+ #define DNNL_DEBUG_H
19
+
20
+ #include "oneapi/dnnl/dnnl_debug.h"
21
+
22
+ #endif /* DNNL_DEBUG_H */
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/dnnl_ocl.h ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_OCL_H
18
+ #define DNNL_OCL_H
19
+
20
+ #include "oneapi/dnnl/dnnl_ocl.h"
21
+
22
+ #endif /* DNNL_OCL_H */
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/dnnl_ocl.hpp ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_OCL_HPP
18
+ #define DNNL_OCL_HPP
19
+
20
+ #include "oneapi/dnnl/dnnl_ocl.hpp"
21
+
22
+ #endif /* DNNL_OCL_HPP */
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/dnnl_sycl.h ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_SYCL_H
18
+ #define DNNL_SYCL_H
19
+
20
+ #include "oneapi/dnnl/dnnl_sycl.h"
21
+
22
+ #endif /* DNNL_SYCL_H */
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/dnnl_sycl_types.h ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_SYCL_TYPES_H
18
+ #define DNNL_SYCL_TYPES_H
19
+
20
+ #include "oneapi/dnnl/dnnl_sycl_types.h"
21
+
22
+ #endif /* DNNL_SYCL_TYPES_H */
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/dnnl_threadpool.h ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_THREADPOOL_H
18
+ #define DNNL_THREADPOOL_H
19
+
20
+ #include "oneapi/dnnl/dnnl_threadpool.h"
21
+
22
+ #endif /* DNNL_THREADPOOL_H */
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/dnnl_threadpool_iface.hpp ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_THREADPOOL_IFACE_HPP
18
+ #define DNNL_THREADPOOL_IFACE_HPP
19
+
20
+ #include "oneapi/dnnl/dnnl_threadpool_iface.hpp"
21
+
22
+ #endif /* DNNL_THREADPOOL_IFACE_HPP */
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/dnnl_types.h ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_TYPES_H
18
+ #define DNNL_TYPES_H
19
+
20
+ #include "oneapi/dnnl/dnnl_types.h"
21
+
22
+ #endif /* DNNL_TYPES_H */
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/dnnl_version.h ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_VERSION_H
18
+ #define DNNL_VERSION_H
19
+
20
+ #include "oneapi/dnnl/dnnl_version.h"
21
+
22
+ #endif /* DNNL_VERSION_H */
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/oneapi/dnnl/dnnl.hpp ADDED
The diff for this file is too large to render. See raw diff
 
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/oneapi/dnnl/dnnl_common.h ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2022-2023 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ /// @file
18
+ /// C common API
19
+
20
+ #ifndef ONEAPI_DNNL_DNNL_COMMON_H
21
+ #define ONEAPI_DNNL_DNNL_COMMON_H
22
+
23
+ #include "oneapi/dnnl/dnnl_common_types.h"
24
+ #include "oneapi/dnnl/dnnl_config.h"
25
+ #include "oneapi/dnnl/dnnl_version.h"
26
+
27
+ #ifdef __cplusplus
28
+ extern "C" {
29
+ #endif
30
+
31
+ /// @addtogroup dnnl_api oneDNN API
32
+ /// @{
33
+
34
+ /// @addtogroup dnnl_api_common Common API
35
+ /// @{
36
+
37
+ /// @addtogroup dnnl_api_engine Engine
38
+ /// @{
39
+
40
+ /// Returns the number of engines of a particular kind.
41
+ ///
42
+ /// @param kind Kind of engines to count.
43
+ /// @returns Count of the engines.
44
+ size_t DNNL_API dnnl_engine_get_count(dnnl_engine_kind_t kind);
45
+
46
+ /// Creates an engine.
47
+ ///
48
+ /// @param engine Output engine.
49
+ /// @param kind Engine kind.
50
+ /// @param index Engine index that should be between 0 and the count of
51
+ /// engines of the requested kind.
52
+ /// @returns #dnnl_success on success and a status describing the error
53
+ /// otherwise.
54
+ dnnl_status_t DNNL_API dnnl_engine_create(
55
+ dnnl_engine_t *engine, dnnl_engine_kind_t kind, size_t index);
56
+
57
+ /// Returns the kind of an engine.
58
+ ///
59
+ /// @param engine Engine to query.
60
+ /// @param kind Output engine kind.
61
+ /// @returns #dnnl_success on success and a status describing the error
62
+ /// otherwise.
63
+ dnnl_status_t DNNL_API dnnl_engine_get_kind(
64
+ dnnl_engine_t engine, dnnl_engine_kind_t *kind);
65
+
66
+ /// Destroys an engine.
67
+ ///
68
+ /// @param engine Engine to destroy.
69
+ /// @returns #dnnl_success on success and a status describing the error
70
+ /// otherwise.
71
+ dnnl_status_t DNNL_API dnnl_engine_destroy(dnnl_engine_t engine);
72
+
73
+ /// @} dnnl_api_engine
74
+
75
+ /// @addtogroup dnnl_api_stream Stream
76
+ /// @{
77
+
78
+ /// Creates an execution stream.
79
+ ///
80
+ /// @param stream Output execution stream.
81
+ /// @param engine Engine to create the execution stream on.
82
+ /// @param flags Stream behavior flags (@sa dnnl_stream_flags_t).
83
+ /// @returns #dnnl_success on success and a status describing the error
84
+ /// otherwise.
85
+ dnnl_status_t DNNL_API dnnl_stream_create(
86
+ dnnl_stream_t *stream, dnnl_engine_t engine, unsigned flags);
87
+
88
+ /// Returns the engine of a stream object.
89
+ ///
90
+ /// @param stream Stream object.
91
+ /// @param engine Output engine on which the stream is created.
92
+ /// @returns #dnnl_success on success and a status describing the error
93
+ /// otherwise.
94
+ dnnl_status_t DNNL_API dnnl_stream_get_engine(
95
+ const_dnnl_stream_t stream, dnnl_engine_t *engine);
96
+
97
+ /// Waits for all primitives in the execution stream to finish computations.
98
+ ///
99
+ /// @param stream Execution stream.
100
+ /// @returns #dnnl_success on success and a status describing the error
101
+ /// otherwise.
102
+ dnnl_status_t DNNL_API dnnl_stream_wait(dnnl_stream_t stream);
103
+
104
+ /// Destroys an execution stream.
105
+ ///
106
+ /// @param stream Execution stream to destroy.
107
+ /// @returns #dnnl_success on success and a status describing the error
108
+ /// otherwise.
109
+ dnnl_status_t DNNL_API dnnl_stream_destroy(dnnl_stream_t stream);
110
+
111
+ /// @} dnnl_api_stream
112
+
113
+ /// @addtogroup dnnl_api_fpmath_mode Floating-point Math Mode
114
+ /// @{
115
+
116
+ /// Returns the floating-point math mode that will be used by default
117
+ /// for all subsequently created primitives.
118
+ ///
119
+ /// @param mode Output FP math mode.
120
+ /// @returns #dnnl_success on success and a status describing the error
121
+ /// otherwise.
122
+ dnnl_status_t DNNL_API dnnl_get_default_fpmath_mode(dnnl_fpmath_mode_t *mode);
123
+
124
+ /// Sets the floating-point math mode that will be used by default
125
+ /// for all subsequently created primitives.
126
+ ///
127
+ /// @param mode FP math mode. The possible values are:
128
+ /// #dnnl_fpmath_mode_strict,
129
+ /// #dnnl_fpmath_mode_bf16,
130
+ /// #dnnl_fpmath_mode_f16,
131
+ /// #dnnl_fpmath_mode_tf32,
132
+ /// #dnnl_fpmath_mode_any.
133
+ /// @returns #dnnl_success on success and a status describing the error
134
+ /// otherwise.
135
+ dnnl_status_t DNNL_API dnnl_set_default_fpmath_mode(dnnl_fpmath_mode_t mode);
136
+
137
+ /// @} dnnl_api_fpmath_mode
138
+
139
+ /// @addtogroup dnnl_api_service
140
+ /// @{
141
+
142
+ /// Configures verbose output to stdout.
143
+ ///
144
+ /// @note
145
+ /// Enabling verbose output affects performance.
146
+ /// This setting overrides the ONEDNN_VERBOSE environment variable.
147
+ ///
148
+ /// @param level Verbosity level:
149
+ /// - 0: no verbose output (default),
150
+ /// - 1: primitive and graph information at execution,
151
+ /// - 2: primitive and graph information at creation/compilation and execution.
152
+ /// @returns #dnnl_invalid_arguments/#dnnl::status::invalid_arguments if the
153
+ /// @p level value is invalid, and #dnnl_success/#dnnl::status::success on
154
+ /// success.
155
+ dnnl_status_t DNNL_API dnnl_set_verbose(int level);
156
+
157
+ /// Returns library version information.
158
+ /// @returns Pointer to a constant structure containing
159
+ /// - major: major version number,
160
+ /// - minor: minor version number,
161
+ /// - patch: patch release number,
162
+ /// - hash: git commit hash.
163
+ const dnnl_version_t DNNL_API *dnnl_version(void);
164
+
165
+ /// @} dnnl_api_service
166
+
167
+ /// @} dnnl_api_common
168
+
169
+ /// @} dnnl_api
170
+
171
+ #ifdef __cplusplus
172
+ }
173
+ #endif
174
+
175
+ #endif /* ONEAPI_DNNL_DNNL_COMMON_H */
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/oneapi/dnnl/dnnl_common_types.h ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2022-2023 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ /// @file
18
+ /// C API common types definitions
19
+
20
+ #ifndef ONEAPI_DNNL_DNNL_COMMON_TYPES_H
21
+ #define ONEAPI_DNNL_DNNL_COMMON_TYPES_H
22
+
23
+ #ifdef __cplusplus
24
+ extern "C" {
25
+ #endif
26
+
27
+ /// @cond DO_NOT_DOCUMENT_THIS
28
+ #include <stddef.h>
29
+ #include <stdint.h>
30
+
31
+ #include "oneapi/dnnl/dnnl_config.h"
32
+
33
+ /// @endcond
34
+
35
+ /// @addtogroup dnnl_api oneDNN API
36
+ /// @{
37
+
38
+ /// @addtogroup dnnl_api_common Common API
39
+ /// @{
40
+
41
+ /// @addtogroup dnnl_api_utils
42
+ /// @{
43
+
44
+ /// Status values returned by the library functions.
45
+ typedef enum {
46
+ /// The operation was successful
47
+ dnnl_success = 0,
48
+ /// The operation failed due to an out-of-memory condition
49
+ dnnl_out_of_memory = 1,
50
+ /// The operation failed because of incorrect function arguments
51
+ dnnl_invalid_arguments = 2,
52
+ /// The operation failed because requested functionality is not implemented
53
+ dnnl_unimplemented = 3,
54
+ /// The last available implementation is reached
55
+ dnnl_last_impl_reached = 4,
56
+ /// Primitive or engine failed on execution
57
+ dnnl_runtime_error = 5,
58
+ /// Queried element is not required for given primitive
59
+ dnnl_not_required = 6,
60
+ /// The graph is not legitimate
61
+ dnnl_invalid_graph = 7,
62
+ /// The operation is not legitimate according to op schema
63
+ dnnl_invalid_graph_op = 8,
64
+ /// The shape cannot be inferred or compiled
65
+ dnnl_invalid_shape = 9,
66
+ /// The data type cannot be inferred or compiled
67
+ dnnl_invalid_data_type = 10,
68
+ } dnnl_status_t;
69
+
70
+ /// @} dnnl_api_utils
71
+
72
+ /// @addtogroup dnnl_api_data_types Data types
73
+ /// @{
74
+
75
+ /// Data type specification
76
+ typedef enum {
77
+ /// Undefined data type, used for empty memory descriptors.
78
+ dnnl_data_type_undef = 0,
79
+ /// 16-bit/half-precision floating point.
80
+ dnnl_f16 = 1,
81
+ /// non-standard 16-bit (bfloat16 w/ 7 bit mantissa) floating point.
82
+ dnnl_bf16 = 2,
83
+ /// 32-bit/single-precision floating point.
84
+ dnnl_f32 = 3,
85
+ /// 32-bit signed integer.
86
+ dnnl_s32 = 4,
87
+ /// 8-bit signed integer.
88
+ dnnl_s8 = 5,
89
+ /// 8-bit unsigned integer.
90
+ dnnl_u8 = 6,
91
+ /// 64-bit/double-precision floating point.
92
+ dnnl_f64 = 7,
93
+ /// Boolean data type. Size is C++ implementation defined.
94
+ dnnl_boolean = 8,
95
+
96
+ /// Parameter to allow internal only data_types without undefined behavior.
97
+ /// This parameter is chosen to be valid for so long as sizeof(int) >= 2.
98
+ dnnl_data_type_max = 0x7fff,
99
+ } dnnl_data_type_t;
100
+
101
+ /// Maximum number of dimensions a tensor can have. Only restricts the amount
102
+ /// of space used for the tensor description. Individual computational
103
+ /// primitives may support only tensors of certain dimensions.
104
+ #define DNNL_MAX_NDIMS 12
105
+
106
+ /// A type to describe tensor dimension.
107
+ typedef int64_t dnnl_dim_t;
108
+
109
+ /// A type to describe tensor dimensions.
110
+ typedef dnnl_dim_t dnnl_dims_t[DNNL_MAX_NDIMS];
111
+
112
+ /// @} dnnl_api_data_types
113
+
114
+ /// @addtogroup dnnl_api_fpmath_mode Floating-point Math Mode
115
+ /// @{
116
+
117
+ /// Floating-point math mode
118
+ typedef enum {
119
+ /// Default behavior, no downconversions allowed
120
+ dnnl_fpmath_mode_strict,
121
+ /// Implicit f32->bf16 conversions allowed
122
+ dnnl_fpmath_mode_bf16,
123
+ /// Implicit f32->f16 conversions allowed
124
+ dnnl_fpmath_mode_f16,
125
+ /// Implicit f32->f16, f32->tf32 or f32->bf16 conversions allowed
126
+ dnnl_fpmath_mode_any,
127
+ /// Implicit f32->tf32 conversions allowed
128
+ dnnl_fpmath_mode_tf32,
129
+ } dnnl_fpmath_mode_t;
130
+
131
+ /// @} dnnl_api_fpmath_mode
132
+
133
+ /// @addtogroup dnnl_api_engine Engine
134
+ /// @{
135
+
136
+ /// @brief Kinds of engines.
137
+ typedef enum {
138
+ /// An unspecified engine.
139
+ dnnl_any_engine,
140
+ /// CPU engine.
141
+ dnnl_cpu,
142
+ /// GPU engine.
143
+ dnnl_gpu,
144
+ } dnnl_engine_kind_t;
145
+
146
+ /// @struct dnnl_engine
147
+ /// @brief An opaque structure to describe an engine.
148
+ struct dnnl_engine;
149
+ /// @brief An engine handle.
150
+ typedef struct dnnl_engine *dnnl_engine_t;
151
+ #if 0
152
+ // FIXME: looks like this never happens
153
+ /// @brief A constant engine handle.
154
+ typedef const struct dnnl_engine *const_dnnl_engine_t;
155
+ #endif
156
+
157
+ /// @} dnnl_api_engine
158
+
159
+ /// @addtogroup dnnl_api_stream Stream
160
+ /// @{
161
+
162
+ /// @brief Stream flags.
163
+ typedef enum {
164
+ // In-order execution.
165
+ dnnl_stream_in_order = 0x1U,
166
+ /// Out-of-order execution.
167
+ dnnl_stream_out_of_order = 0x2U,
168
+ /// Default stream configuration.
169
+ dnnl_stream_default_flags = dnnl_stream_in_order,
170
+ #ifdef DNNL_EXPERIMENTAL_PROFILING
171
+ /// Enables profiling capabilities.
172
+ dnnl_stream_profiling = 0x4U,
173
+ #endif
174
+ } dnnl_stream_flags_t;
175
+
176
+ /// @struct dnnl_stream
177
+ /// An opaque structure to describe an execution stream.
178
+ struct dnnl_stream;
179
+ /// An execution stream handle.
180
+ typedef struct dnnl_stream *dnnl_stream_t;
181
+ /// A constant execution stream handle.
182
+ typedef const struct dnnl_stream *const_dnnl_stream_t;
183
+
184
+ /// @} dnnl_api_stream
185
+
186
+ /// @addtogroup dnnl_api_service
187
+ /// @{
188
+
189
+ /// Structure containing version information as per [Semantic
190
+ /// Versioning](https://semver.org)
191
+ typedef struct {
192
+ int major; ///< Major version
193
+ int minor; ///< Minor version
194
+ int patch; ///< Patch version
195
+ const char *hash; ///< Git hash of the sources (may be absent)
196
+ unsigned cpu_runtime; ///< CPU runtime
197
+ unsigned gpu_runtime; ///< GPU runtime
198
+ } dnnl_version_t;
199
+
200
+ /// @} dnnl_api_service
201
+
202
+ /// @} dnnl_api_common
203
+
204
+ /// @} dnnl_api
205
+
206
+ #ifdef __cplusplus
207
+ }
208
+ #endif
209
+
210
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/oneapi/dnnl/dnnl_config.h ADDED
@@ -0,0 +1,200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2019-2023 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef ONEAPI_DNNL_DNNL_CONFIG_H
18
+ #define ONEAPI_DNNL_DNNL_CONFIG_H
19
+
20
+ /// @cond DO_NOT_DOCUMENT_THIS
21
+
22
+ // All symbols shall be internal unless marked as DNNL_API
23
+ #if defined _WIN32 || defined __CYGWIN__
24
+ #define DNNL_HELPER_DLL_IMPORT __declspec(dllimport)
25
+ #define DNNL_HELPER_DLL_EXPORT __declspec(dllexport)
26
+ #else
27
+ #if __GNUC__ >= 4
28
+ #define DNNL_HELPER_DLL_IMPORT __attribute__((visibility("default")))
29
+ #define DNNL_HELPER_DLL_EXPORT __attribute__((visibility("default")))
30
+ #else
31
+ #define DNNL_HELPER_DLL_IMPORT
32
+ #define DNNL_HELPER_DLL_EXPORT
33
+ #endif
34
+ #endif
35
+
36
+ #ifdef DNNL_DLL
37
+ #ifdef DNNL_DLL_EXPORTS
38
+ #define DNNL_API DNNL_HELPER_DLL_EXPORT
39
+ #else
40
+ #define DNNL_API DNNL_HELPER_DLL_IMPORT
41
+ #endif
42
+ #else
43
+ #define DNNL_API
44
+ #endif
45
+
46
+ #if defined(__GNUC__)
47
+ #define DNNL_DEPRECATED __attribute__((deprecated))
48
+ #elif defined(_MSC_VER)
49
+ #define DNNL_DEPRECATED __declspec(deprecated)
50
+ #else
51
+ #define DNNL_DEPRECATED
52
+ #endif
53
+
54
+ /// @endcond
55
+
56
+ // clang-format off
57
+
58
+ /// @addtogroup dnnl_api_service
59
+ /// @{
60
+
61
+ /// No runtime (disabled)
62
+ #define DNNL_RUNTIME_NONE 0u
63
+
64
+ /// Sequential runtime (CPU only)
65
+ #define DNNL_RUNTIME_SEQ 1u
66
+
67
+ /// OpenMP runtime (CPU only)
68
+ #define DNNL_RUNTIME_OMP 2u
69
+
70
+ /// TBB runtime (CPU only)
71
+ #define DNNL_RUNTIME_TBB 4u
72
+
73
+ /// Threadpool runtime (CPU only)
74
+ #define DNNL_RUNTIME_THREADPOOL 8u
75
+
76
+ /// OpenCL runtime
77
+ #define DNNL_RUNTIME_OCL 256u
78
+
79
+ /// SYCL runtime
80
+ #define DNNL_RUNTIME_SYCL 512u
81
+
82
+ /// DPC++ runtime
83
+ #define DNNL_RUNTIME_DPCPP DNNL_RUNTIME_SYCL
84
+
85
+ /// @} dnnl_api_service
86
+
87
+ // oneDNN CPU threading runtime
88
+ #define DNNL_CPU_THREADING_RUNTIME DNNL_RUNTIME_THREADPOOL
89
+
90
+ // oneDNN CPU engine runtime
91
+ #define DNNL_CPU_RUNTIME DNNL_RUNTIME_THREADPOOL
92
+
93
+ // oneDNN GPU engine runtime
94
+ #define DNNL_GPU_RUNTIME DNNL_RUNTIME_NONE
95
+
96
+ // clang-format on
97
+
98
+ #if defined(DNNL_CPU_RUNTIME) && defined(DNNL_GPU_RUNTIME)
99
+ #if (DNNL_CPU_RUNTIME == DNNL_RUNTIME_OCL)
100
+ #error "Unexpected DNNL_CPU_RUNTIME"
101
+ #endif
102
+ #if (DNNL_GPU_RUNTIME != DNNL_RUNTIME_NONE) \
103
+ && (DNNL_GPU_RUNTIME != DNNL_RUNTIME_OCL) \
104
+ && (DNNL_GPU_RUNTIME != DNNL_RUNTIME_SYCL)
105
+ #error "Unexpected DNNL_GPU_RUNTIME"
106
+ #endif
107
+ #if (DNNL_CPU_RUNTIME == DNNL_RUNTIME_NONE \
108
+ && DNNL_GPU_RUNTIME == DNNL_RUNTIME_NONE)
109
+ #error "At least one runtime must be specified"
110
+ #endif
111
+ #else
112
+ #error "BOTH DNNL_CPU_RUNTIME and DNNL_GPU_RUNTIME must be defined"
113
+ #endif
114
+
115
+ // For SYCL CPU, a primitive may be created and executed in different threads
116
+ // hence the global scratchpad does not work. This enables concurrent execution
117
+ // when CPU runtime is SYCL to avoid the issue.
118
+ #if DNNL_CPU_RUNTIME == DNNL_RUNTIME_SYCL
119
+ #ifndef DNNL_ENABLE_CONCURRENT_EXEC
120
+ #define DNNL_ENABLE_CONCURRENT_EXEC
121
+ #endif
122
+ #endif
123
+
124
+ // When defined, primitive cache stores runtime objects.
125
+ #undef DNNL_USE_RT_OBJECTS_IN_PRIMITIVE_CACHE
126
+
127
+ // When defined, DPCPP is supported.
128
+ #undef DNNL_WITH_SYCL
129
+
130
+ // When defined, Level Zero is supported.
131
+ #undef DNNL_WITH_LEVEL_ZERO
132
+
133
+ // When defined, SYCL CUDA backend is used.
134
+ #undef DNNL_SYCL_CUDA
135
+
136
+ // When defined, SYCL HIP backend is used.
137
+ #undef DNNL_SYCL_HIP
138
+
139
+ // When defined, stack checker is enabled.
140
+ #undef DNNL_ENABLE_STACK_CHECKER
141
+
142
+ // When defined, experimental features are enabled.
143
+ #undef DNNL_EXPERIMENTAL
144
+
145
+ // When defined, experimental functionality for sparse domain is enabled.
146
+ #undef DNNL_EXPERIMENTAL_SPARSE
147
+
148
+ // When defined, graph component is enabled.
149
+ #undef ONEDNN_BUILD_GRAPH
150
+
151
+ // When defined, experimental profiling capabilities are enabled.
152
+ #undef DNNL_EXPERIMENTAL_PROFILING
153
+
154
+ // List of configurating build controls
155
+ // Workload controls
156
+ #define BUILD_TRAINING 1
157
+ #define BUILD_INFERENCE 0
158
+ // Primitive controls
159
+ #define BUILD_PRIMITIVE_ALL 1
160
+ #define BUILD_BATCH_NORMALIZATION 0
161
+ #define BUILD_BINARY 0
162
+ #define BUILD_CONCAT 0
163
+ #define BUILD_CONVOLUTION 0
164
+ #define BUILD_DECONVOLUTION 0
165
+ #define BUILD_ELTWISE 0
166
+ #define BUILD_GROUP_NORMALIZATION 1
167
+ #define BUILD_INNER_PRODUCT 0
168
+ #define BUILD_LAYER_NORMALIZATION 0
169
+ #define BUILD_LRN 0
170
+ #define BUILD_MATMUL 0
171
+ #define BUILD_POOLING 0
172
+ #define BUILD_PRELU 0
173
+ #define BUILD_REDUCTION 0
174
+ #define BUILD_REORDER 0
175
+ #define BUILD_RESAMPLING 0
176
+ #define BUILD_RNN 0
177
+ #define BUILD_SHUFFLE 0
178
+ #define BUILD_SOFTMAX 0
179
+ #define BUILD_SUM 0
180
+ // Primitives CPU ISA controls
181
+ #define BUILD_PRIMITIVE_CPU_ISA_ALL 1
182
+ #define BUILD_SSE41 0
183
+ #define BUILD_AVX2 0
184
+ #define BUILD_AVX512 0
185
+ #define BUILD_AMX 0
186
+ // Primitives GPU ISA controls
187
+ #define BUILD_PRIMITIVE_GPU_ISA_ALL 0
188
+ #define BUILD_GEN9 0
189
+ #define BUILD_GEN11 0
190
+ #define BUILD_XELP 0
191
+ #define BUILD_XEHP 0
192
+ #define BUILD_XEHPG 0
193
+ #define BUILD_XEHPC 0
194
+ // GeMM kernels ISA controls
195
+ #define BUILD_GEMM_KERNELS_ALL 1
196
+ #define BUILD_GEMM_KERNELS_NONE 0
197
+ #define BUILD_GEMM_SSE41 1
198
+ #define BUILD_GEMM_AVX2 1
199
+ #define BUILD_GEMM_AVX512 1
200
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/oneapi/dnnl/dnnl_graph.hpp ADDED
@@ -0,0 +1,1510 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2020-2023 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef ONEAPI_DNNL_DNNL_GRAPH_HPP
18
+ #define ONEAPI_DNNL_DNNL_GRAPH_HPP
19
+
20
+ #include "oneapi/dnnl/dnnl_common.hpp"
21
+ #include "oneapi/dnnl/dnnl_graph.h"
22
+
23
+ #include <limits>
24
+ #include <memory>
25
+ #include <string>
26
+ #include <utility>
27
+ #include <vector>
28
+
29
+ /// @addtogroup dnnl_api
30
+ /// @{
31
+
32
+ /// @addtogroup dnnl_graph_api Graph API
33
+ /// @{
34
+
35
+ namespace dnnl {
36
+ namespace graph {
37
+
38
+ /// @cond DO_NOT_DOCUMENT_THIS
39
+
40
+ // Alias for common engine and stream API.
41
+ using engine = dnnl::engine;
42
+ using stream = dnnl::stream;
43
+ using fpmath_mode = dnnl::fpmath_mode;
44
+
45
+ /// @endcond
46
+
47
+ /// @addtogroup dnnl_graph_api_utils Utilities
48
+ /// Utility types and definitions
49
+ /// @{
50
+
51
+ /// @cond DO_NOT_DOCUMENT_THIS
52
+
53
+ /// A class that provides the destructor for a oneDNN graph C API handle.
54
+ template <typename T>
55
+ struct graph_handle_traits : public dnnl::handle_traits<T> {};
56
+
57
+ template <>
58
+ struct graph_handle_traits<dnnl_graph_op_t> {
59
+ static dnnl_status_t destructor(dnnl_graph_op_t p) {
60
+ return dnnl_graph_op_destroy(p);
61
+ }
62
+ };
63
+
64
+ template <>
65
+ struct graph_handle_traits<dnnl_graph_graph_t> {
66
+ static dnnl_status_t destructor(dnnl_graph_graph_t p) {
67
+ return dnnl_graph_graph_destroy(p);
68
+ }
69
+ };
70
+
71
+ template <>
72
+ struct graph_handle_traits<dnnl_graph_tensor_t> {
73
+ static dnnl_status_t destructor(dnnl_graph_tensor_t p) {
74
+ return dnnl_graph_tensor_destroy(p);
75
+ }
76
+ };
77
+
78
+ template <>
79
+ struct graph_handle_traits<dnnl_graph_partition_t> {
80
+ static dnnl_status_t destructor(dnnl_graph_partition_t p) {
81
+ return dnnl_graph_partition_destroy(p);
82
+ }
83
+ };
84
+
85
+ template <>
86
+ struct graph_handle_traits<dnnl_graph_compiled_partition_t> {
87
+ static dnnl_status_t destructor(dnnl_graph_compiled_partition_t p) {
88
+ return dnnl_graph_compiled_partition_destroy(p);
89
+ }
90
+ };
91
+
92
+ template <>
93
+ struct graph_handle_traits<dnnl_graph_allocator_t> {
94
+ static dnnl_status_t destructor(dnnl_graph_allocator_t p) {
95
+ return dnnl_graph_allocator_destroy(p);
96
+ }
97
+ };
98
+
99
+ #define DNNL_GRAPH_HANDLE_ALIAS(type) \
100
+ using type##_handle = dnnl::handle<dnnl_graph_##type##_t, \
101
+ graph_handle_traits<dnnl_graph_##type##_t>>
102
+
103
+ DNNL_GRAPH_HANDLE_ALIAS(allocator);
104
+ DNNL_GRAPH_HANDLE_ALIAS(graph);
105
+ DNNL_GRAPH_HANDLE_ALIAS(op);
106
+ DNNL_GRAPH_HANDLE_ALIAS(tensor);
107
+ DNNL_GRAPH_HANDLE_ALIAS(compiled_partition);
108
+ DNNL_GRAPH_HANDLE_ALIAS(partition);
109
+
110
+ #undef DNNL_GRAPH_HANDLE_ALIAS
111
+
112
+ template <bool B>
113
+ using req = typename std::enable_if<B, bool>::type;
114
+
115
+ /// @endcond
116
+
117
+ /// @} dnnl_graph_api_utils
118
+
119
+ /// @addtogroup dnnl_graph_api_status Status
120
+ /// Definitions of status values returned by the library functions.
121
+ ///
122
+ /// @{
123
+
124
+ /// Status values returned by the library functions.
125
+ enum class status {
126
+ /// The operation was successful
127
+ success = dnnl_success,
128
+ /// The operation failed due to an out-of-memory condition
129
+ out_of_memory = dnnl_out_of_memory,
130
+ /// The operation failed because of incorrect function arguments
131
+ invalid_arguments = dnnl_invalid_arguments,
132
+ /// The operation failed because requested functionality is not implemented
133
+ unimplemented = dnnl_unimplemented,
134
+ /// The last available implementation is reached
135
+ last_impl_reached = dnnl_last_impl_reached,
136
+ /// Primitive or engine failed on execution
137
+ runtime_error = dnnl_runtime_error,
138
+ /// Queried element is not required for given primitive
139
+ not_required = dnnl_not_required,
140
+ /// The graph is not legitimate
141
+ invalid_graph = dnnl_invalid_graph,
142
+ /// The operation is not legitimate according to op schema
143
+ invalid_graph_op = dnnl_invalid_graph_op,
144
+ /// The shape cannot be inferred or compiled
145
+ invalid_shape = dnnl_invalid_shape,
146
+ /// The data type cannot be inferred or compiled
147
+ invalid_data_type = dnnl_invalid_data_type,
148
+ };
149
+
150
+ /// @} dnnl_api_status
151
+
152
+ /// @addtogroup dnnl_graph_api_allocator Allocator
153
+ ///
154
+ /// Definitions of allocator which is used to acquire memory resources in
155
+ /// partition compilation and execution. SYCL allocator
156
+ /// (#dnnl::graph::sycl_interop::make_allocator) should be used for SYCL runtime
157
+ /// and host allocator should be used for non-SYCL.
158
+ ///
159
+ /// @{
160
+
161
+ /// Allocator
162
+ class allocator : public allocator_handle {
163
+ public:
164
+ using allocator_handle::handle;
165
+
166
+ /// Constructs an allocator according to given function pointers
167
+ ///
168
+ /// @param host_malloc A pointer to malloc function for CPU
169
+ /// @param host_free A pointer to free function for CPU
170
+ allocator(dnnl_graph_host_allocate_f host_malloc,
171
+ dnnl_graph_host_deallocate_f host_free) {
172
+ dnnl_graph_allocator_t a = nullptr;
173
+ error::wrap_c_api(
174
+ dnnl_graph_allocator_create(&a, host_malloc, host_free),
175
+ "could not create allocator for cpu");
176
+ reset(a);
177
+ }
178
+
179
+ /// Default constructor
180
+ allocator() {
181
+ dnnl_graph_allocator_t a = nullptr;
182
+ error::wrap_c_api(dnnl_graph_allocator_create(&a, nullptr, nullptr),
183
+ "could not create allocator");
184
+ reset(a);
185
+ }
186
+ };
187
+
188
+ /// @} dnnl_graph_api_allocator
189
+
190
+ /// @addtogroup dnnl_graph_api_engine Engine
191
+ /// @{
192
+
193
+ /// This API is a supplement for existing onednn engine API.
194
+ inline engine make_engine_with_allocator(
195
+ engine::kind kind, size_t index, const allocator &alloc) {
196
+ dnnl_engine_t c_engine;
197
+ error::wrap_c_api(
198
+ dnnl_graph_make_engine_with_allocator(&c_engine,
199
+ static_cast<dnnl_engine_kind_t>(kind), index, alloc.get()),
200
+ "could not make an engine with allocator");
201
+ return engine(c_engine);
202
+ }
203
+
204
+ /// @} dnnl_graph_api_engine
205
+
206
+ /// @addtogroup dnnl_graph_api_logical_tensor Logical Tensor
207
+ ///
208
+ /// Logical tensor describes the meta-data of the input or output tensor, like
209
+ /// elements data type, number of dimensions, size for each dimension (shape),
210
+ /// layout, and the property of the tensor.
211
+ ///
212
+ /// Each logical tensor has an unique ID. The library uses logical tensor IDs to
213
+ /// build up the connections between operations if the output of one operation
214
+ /// has the same ID as the input of another operation. The meta-data in a
215
+ /// logical tensor may be enriched in the framework graph as it progresses
216
+ /// toward final execution. For example, the library doesn't require detailed
217
+ /// shape information at the operation and graph creation stage. But shape
218
+ /// information of input logical tensor will be required at partition
219
+ /// compilation stage. Logical tensor is not mutable. Users must create a new
220
+ /// logical tensor with the same ID to pass any new additional information to
221
+ /// oneDNN Graph API. Please note that the library also has unique IDs for
222
+ /// operations. The ID should be unique among different logical tensors, but it
223
+ /// can have the same value between a logical tensor and an operation.
224
+ ///
225
+ /// @{
226
+
227
+ /// Logical tensor object
228
+ class logical_tensor {
229
+ friend class op;
230
+ friend class tensor;
231
+ friend class partition;
232
+ friend class compiled_partition;
233
+
234
+ dnnl_graph_logical_tensor_t data;
235
+
236
+ public:
237
+ /// Integer type for representing dimension sizes and indices.
238
+ using dim = dnnl_dim_t;
239
+ /// Vector of dimensions. Implementations are free to force a limit on the
240
+ /// vector's length.
241
+ using dims = std::vector<dim>;
242
+
243
+ /// Data Type
244
+ enum class data_type {
245
+ undef = dnnl_data_type_undef,
246
+ /// 16-bit/half-precision floating point.
247
+ f16 = dnnl_f16,
248
+ /// non-standard 16-bit (bfloat16 w/ 7 bit mantissa) floating point.
249
+ bf16 = dnnl_bf16,
250
+ /// 32-bit/single-precision floating point.
251
+ f32 = dnnl_f32,
252
+ /// 32-bit signed integer.
253
+ s32 = dnnl_s32,
254
+ /// 8-bit signed integer.
255
+ s8 = dnnl_s8,
256
+ /// 8-bit unsigned integer.
257
+ u8 = dnnl_u8,
258
+ /// Boolean data type. Size is C++ implementation defined.
259
+ boolean = dnnl_boolean,
260
+ };
261
+
262
+ /// Layout type
263
+ enum class layout_type {
264
+ /// Undefined layout type.
265
+ undef = dnnl_graph_layout_type_undef,
266
+ /// Any means to let the library to decide the layout for a tensor
267
+ /// during partition compilation.
268
+ any = dnnl_graph_layout_type_any,
269
+ /// Strided means that the layout of a tensor is determined by the
270
+ /// strides field in the logical tensor.
271
+ strided = dnnl_graph_layout_type_strided,
272
+ /// Opaque means that the layout of a tensor is the library specific.
273
+ /// Usually, an opaque layout is generated by a partition which is
274
+ /// compiled with layout type any.
275
+ opaque = dnnl_graph_layout_type_opaque,
276
+ };
277
+
278
+ /// Tensor property
279
+ enum class property_type {
280
+ /// Undefined tensor property.
281
+ undef = dnnl_graph_tensor_property_undef,
282
+ /// Variable means the tensor may be changed during computation or
283
+ /// between different iterations.
284
+ variable = dnnl_graph_tensor_property_variable,
285
+ /// Constant means the tensor will keep unchanged during computation and
286
+ /// between different iterations. It's useful for the library to apply
287
+ /// optimizations for constant tensors or cache constant tensors inside
288
+ /// the library. For example, constant weight tensors in inference
289
+ /// scenarios.
290
+ constant = dnnl_graph_tensor_property_constant,
291
+ };
292
+
293
+ /// default constructor
294
+ /// construct an empty object
295
+ logical_tensor() = default;
296
+
297
+ /// Constructs a logical tensor object
298
+ explicit logical_tensor(const dnnl_graph_logical_tensor_t &c_data)
299
+ : data(c_data) {}
300
+
301
+ /// Copy
302
+ logical_tensor(const logical_tensor &other) = default;
303
+
304
+ /// Assign
305
+ logical_tensor &operator=(const logical_tensor &other) = default;
306
+
307
+ /// Constructs a logical tensor object with ID, data type, ndims, layout
308
+ /// type, and property type.
309
+ ///
310
+ /// @param tid Logical tensor ID.
311
+ /// @param dtype Elements data type.
312
+ /// @param ndims Number of dimensions. -1 means unknown (see
313
+ /// #DNNL_GRAPH_UNKNOWN_NDIMS) and 0 means a scalar tensor.
314
+ /// @param ltype Layout type.
315
+ /// @param ptype Property type.
316
+ logical_tensor(size_t tid, data_type dtype, int32_t ndims,
317
+ layout_type ltype, property_type ptype = property_type::undef) {
318
+ dnnl_graph_logical_tensor_t val;
319
+ error::wrap_c_api(
320
+ dnnl_graph_logical_tensor_init(&val, tid, convert_to_c(dtype),
321
+ ndims, convert_to_c(ltype), convert_to_c(ptype)),
322
+ "could not create logical_tensor with property");
323
+ data = val;
324
+ }
325
+
326
+ /// Delegated constructor.
327
+ ///
328
+ /// @param tid Logical tensor ID.
329
+ /// @param dtype Elements data type.
330
+ /// @param ltype Layout type.
331
+ logical_tensor(
332
+ size_t tid, data_type dtype, layout_type ltype = layout_type::undef)
333
+ : logical_tensor(tid, dtype, DNNL_GRAPH_UNKNOWN_NDIMS, ltype) {}
334
+
335
+ /// Constructs a logical tensor object with basic information and detailed
336
+ /// dims.
337
+ ///
338
+ /// @param tid Logical tensor ID.
339
+ /// @param dtype Elements data type.
340
+ /// @param adims Logical tensor dimensions. #DNNL_GRAPH_UNKNOWN_DIM means
341
+ /// the size of that dimension is unknown. 0 is used to define
342
+ /// zero-dimension tensor.
343
+ /// @param ltype Layout type. If it's strided, the strides field in the
344
+ /// output logical tensor will be deduced accordingly.
345
+ /// @param ptype Property type.
346
+ logical_tensor(size_t tid, data_type dtype, const dims &adims,
347
+ layout_type ltype, property_type ptype = property_type::undef) {
348
+ dnnl_graph_logical_tensor_t val;
349
+ // if dimension size equals to 0, it's a scalar
350
+ if (adims.size() == 0)
351
+ error::wrap_c_api(dnnl_graph_logical_tensor_init(&val, tid,
352
+ convert_to_c(dtype), 0,
353
+ convert_to_c(ltype), convert_to_c(ptype)),
354
+ "could not create logical_tensor with property");
355
+ else
356
+ error::wrap_c_api(
357
+ dnnl_graph_logical_tensor_init_with_dims(&val, tid,
358
+ convert_to_c(dtype),
359
+ static_cast<int32_t>(adims.size()), adims.data(),
360
+ convert_to_c(ltype), convert_to_c(ptype)),
361
+ "could not create logical_tensor with dims and property");
362
+ data = val;
363
+ }
364
+
365
+ /// Constructs a logical tensor object with detailed dims and strides. The
366
+ /// layout_type of the output logical tensor object will always be strided.
367
+ ///
368
+ /// @param tid Logical tensor ID.
369
+ /// @param dtype Elements data type.
370
+ /// @param adims Logical tensor dimensions. #DNNL_GRAPH_UNKNOWN_DIM means
371
+ /// the size of that dimension is unknown. 0 is used to define
372
+ /// zero-dimension tensor.
373
+ /// @param strides Logical tensor strides. #DNNL_GRAPH_UNKNOWN_DIM means
374
+ /// the stride of the dimension is unknown. The library currently
375
+ /// doesn't support other negative stride values.
376
+ /// @param ptype Property type.
377
+ logical_tensor(size_t tid, data_type dtype, const dims &adims,
378
+ const dims &strides, property_type ptype = property_type::undef) {
379
+ dnnl_graph_logical_tensor_t val;
380
+ // TODO(lvtao): check the size of adims and strides.
381
+ // They should be same.
382
+ error::wrap_c_api(
383
+ dnnl_graph_logical_tensor_init_with_strides(&val, tid,
384
+ convert_to_c(dtype), static_cast<int32_t>(adims.size()),
385
+ adims.data(), strides.data(), convert_to_c(ptype)),
386
+ "could not create logical_tensor with strides and property");
387
+ data = val;
388
+ }
389
+
390
+ /// Constructs a logical tensor object with detailed dims and an opaque
391
+ /// layout ID. layout_type of the output logical tensor object will always
392
+ /// be opaque.
393
+ ///
394
+ /// @param tid Logical tensor ID.
395
+ /// @param dtype Elements data type.
396
+ /// @param adims Logical tensor dimensions. #DNNL_GRAPH_UNKNOWN_DIM means
397
+ /// the size of that dimension is unknown. 0 is used to define
398
+ /// zero-dimension tensor.
399
+ /// @param lid Opaque layout id.
400
+ /// @param ptype Property type
401
+ logical_tensor(size_t tid, data_type dtype, const dims &adims, size_t lid,
402
+ property_type ptype = property_type::undef) {
403
+ dnnl_graph_logical_tensor_t val;
404
+
405
+ if (adims.size() == 0) {
406
+ error::wrap_c_api(dnnl_graph_logical_tensor_init(&val, tid,
407
+ convert_to_c(dtype), 0,
408
+ convert_to_c(layout_type::opaque),
409
+ convert_to_c(ptype)),
410
+ "could not create logical_tensor");
411
+ } else {
412
+ error::wrap_c_api(
413
+ dnnl_graph_logical_tensor_init_with_dims(&val, tid,
414
+ convert_to_c(dtype),
415
+ static_cast<int32_t>(adims.size()), adims.data(),
416
+ convert_to_c(layout_type::opaque),
417
+ convert_to_c(ptype)),
418
+ "could not create logical_tensor with dims");
419
+ }
420
+
421
+ val.layout.layout_id = lid;
422
+ data = val;
423
+ }
424
+
425
+ /// Returns dimensions of a logical tensor.
426
+ ///
427
+ /// @returns A vector describing the size of each dimension.
428
+ dims get_dims() const {
429
+ if (data.ndims < 0) {
430
+ error::wrap_c_api(dnnl_invalid_arguments,
431
+ "cannot return dims when ndims < 0");
432
+ }
433
+
434
+ return {data.dims, data.dims + data.ndims};
435
+ }
436
+
437
+ /// Returns the unique id of a logical tensor.
438
+ ///
439
+ /// @returns An integer value describing the ID.
440
+ size_t get_id() const { return data.id; }
441
+
442
+ /// Returns the data type of a logical tensor.
443
+ ///
444
+ /// @returns The data type.
445
+ data_type get_data_type() const {
446
+ return static_cast<data_type>(data.data_type);
447
+ }
448
+
449
+ /// Returns the property type of a logical tensor.
450
+ ///
451
+ /// @returns The property type.
452
+ property_type get_property_type() const {
453
+ return static_cast<property_type>(data.property);
454
+ }
455
+
456
+ /// Returns the layout type of a logical tensor.
457
+ ///
458
+ /// @returns The layout type.
459
+ layout_type get_layout_type() const {
460
+ return static_cast<layout_type>(data.layout_type);
461
+ }
462
+
463
+ /// Returns the layout ID of a logical tensor. The API should be called on a
464
+ /// logical tensor with opaque layout type. Otherwise, an exception will be
465
+ /// raised.
466
+ ///
467
+ /// @returns Layout ID.
468
+ size_t get_layout_id() const {
469
+ if (get_layout_type() != layout_type::opaque) {
470
+ error::wrap_c_api(
471
+ dnnl_invalid_arguments, "layout type should be opaque");
472
+ }
473
+
474
+ return data.layout.layout_id;
475
+ }
476
+
477
+ /// Returns the strides of a logical tensor. The API should be called on a
478
+ /// logical tensor with strided layout type. Otherwise, an exception will be
479
+ /// raised.
480
+ ///
481
+ /// @returns A vector describing the stride size of each dimension.
482
+ dims get_strides() const {
483
+ if (get_layout_type() != layout_type::strided) {
484
+ error::wrap_c_api(
485
+ dnnl_invalid_arguments, "layout type should be strided");
486
+ }
487
+
488
+ if (data.ndims < 0) {
489
+ error::wrap_c_api(dnnl_invalid_arguments,
490
+ "cannot return strides when ndims < 0");
491
+ }
492
+
493
+ return {data.layout.strides, data.layout.strides + data.ndims};
494
+ }
495
+
496
+ /// Returns memory size in bytes required by this logical tensor.
497
+ ///
498
+ /// @returns The memory size in bytes.
499
+ size_t get_mem_size() const {
500
+ size_t size = 0;
501
+ error::wrap_c_api(dnnl_graph_logical_tensor_get_mem_size(&data, &size),
502
+ "could not get memory size from the logical_tensor");
503
+ return size;
504
+ }
505
+
506
+ /// Compares if two logical tenors are equal. Users can decide accordingly
507
+ /// if layout reordering is needed for two logical tensors. The method will
508
+ /// return true for below two circumstances:
509
+ ///
510
+ /// 1. the two logical tensors are equal regarding each field in the struct,
511
+ /// eg. id, ndims, dims, layout type, property, etc.
512
+ /// 2. If all other fields are equal but the layout types in two logical
513
+ /// tensors are different, the method will return true when the underlying
514
+ /// memory layout is the same. For example, one logical tensor has strided
515
+ /// layout type while the other one has opaque layout type, but underneath,
516
+ /// both layouts are NHWC, the method will still return true for this case.
517
+ ///
518
+ /// @param lt The input logical tensor to be compared.
519
+ /// @returns @c true if the two logical tensors are equal. @c false otherwise
520
+ bool is_equal(const logical_tensor &lt) const {
521
+ uint8_t equal = 0;
522
+ error::wrap_c_api(
523
+ dnnl_graph_logical_tensor_is_equal(&data, &lt.data, &equal),
524
+ "could not compare between the two logical tensors");
525
+ return equal != 0;
526
+ }
527
+
528
+ private:
529
+ static dnnl_data_type_t convert_to_c(data_type dtype) {
530
+ return static_cast<dnnl_data_type_t>(dtype);
531
+ }
532
+
533
+ static dnnl_graph_layout_type_t convert_to_c(layout_type ltype) {
534
+ return static_cast<dnnl_graph_layout_type_t>(ltype);
535
+ }
536
+
537
+ static dnnl_graph_tensor_property_t convert_to_c(property_type ptype) {
538
+ return static_cast<dnnl_graph_tensor_property_t>(ptype);
539
+ }
540
+ };
541
+
542
+ /// @} dnnl_graph_api_logical_tensor
543
+
544
+ /// @addtogroup dnnl_graph_api_tensor Tensor
545
+ ///
546
+ /// Tensor is an abstraction for multi-dimensional input and output data needed
547
+ /// in the execution of a compiled partition. A tensor object encapsulates a
548
+ /// handle to a memory buffer allocated on a specific engine and a logical
549
+ /// tensor which describes the dimensions, elements data type, and memory
550
+ /// layout.
551
+ ///
552
+ /// @{
553
+
554
+ /// A tensor object
555
+ class tensor : public tensor_handle {
556
+ public:
557
+ /// Default constructor. Constructs an empty object.
558
+ tensor() = default;
559
+
560
+ /// Constructs a tensor object according to a given logical tensor, an
561
+ /// engine, and a memory handle.
562
+ ///
563
+ /// @param lt The given logical tensor
564
+ /// @param aengine Engine to store the data on.
565
+ /// @param handle Handle of memory buffer to use as an underlying storage.
566
+ tensor(const logical_tensor &lt, const engine &aengine, void *handle) {
567
+ dnnl_graph_tensor_t t = nullptr;
568
+ error::wrap_c_api(
569
+ dnnl_graph_tensor_create(&t, &(lt.data), aengine.get(), handle),
570
+ "could not create tensor object with the logical_tensor, "
571
+ "engine, and handle");
572
+ reset(t);
573
+ }
574
+
575
+ /// Returns the underlying memory buffer.
576
+ ///
577
+ /// On the CPU engine, or when using USM, this is a pointer to the
578
+ /// allocated memory.
579
+ void *get_data_handle() const {
580
+ void *handle = nullptr;
581
+ error::wrap_c_api(dnnl_graph_tensor_get_data_handle(get(), &handle),
582
+ "could not get data handle from the tensor");
583
+ return handle;
584
+ }
585
+
586
+ /// Sets the underlying memory handle.
587
+ ///
588
+ /// @param handle Memory handle.
589
+ void set_data_handle(void *handle) {
590
+ error::wrap_c_api(dnnl_graph_tensor_set_data_handle(get(), handle),
591
+ "setting data handle to the tensor failed");
592
+ }
593
+
594
+ /// Returns the associated engine.
595
+ ///
596
+ /// @returns An engine object
597
+ engine get_engine() const {
598
+ dnnl_engine_t c_engine = nullptr;
599
+ error::wrap_c_api(dnnl_graph_tensor_get_engine(get(), &c_engine),
600
+ "could not get an engine from a tensor object");
601
+ return engine(c_engine, true);
602
+ }
603
+ };
604
+
605
+ /// @} dnnl_graph_api_tensor
606
+
607
+ /// @addtogroup dnnl_graph_api_compiled_partition Compiled Partition
608
+ ///
609
+ /// A compiled partition represents the generated kernels specialized for a
610
+ /// partition on a target hardware (engine) with input and output information
611
+ /// specified by the logical tensors.
612
+ ///
613
+ /// @{
614
+
615
+ /// A compiled partition object.
616
+ class compiled_partition : public compiled_partition_handle {
617
+ public:
618
+ /// Default constructor. Constructs an empty object.
619
+ compiled_partition() = default;
620
+
621
+ /// Constructs a compiled partition object
622
+ compiled_partition(dnnl_graph_compiled_partition_t compiled_partition) {
623
+ reset(compiled_partition, false);
624
+ }
625
+
626
+ /// Queries an input or output logical tensor according to tensor ID. If the
627
+ /// tensor ID doesn't belong to any input or output of the compiled
628
+ /// partition, an exception will be raised by the API.
629
+ ///
630
+ /// @param tid The unique id of required tensor.
631
+ /// @returns The logical tensor.
632
+ logical_tensor query_logical_tensor(size_t tid) const {
633
+ dnnl_graph_logical_tensor_t lt;
634
+ error::wrap_c_api(dnnl_graph_compiled_partition_query_logical_tensor(
635
+ get(), tid, &lt),
636
+ "query logical tensor from compiled_partition failed");
637
+ return logical_tensor {lt};
638
+ }
639
+
640
+ /// Returns the hint of in-place pairs from a compiled partition. It
641
+ /// indicates that an input and an output of the partition can share the
642
+ /// same memory buffer for computation. In-place computation helps to reduce
643
+ /// the memory footprint and improves cache locality. But since the library
644
+ /// may not have a global view of user's application, it's possible that the
645
+ /// input tensor is used at other places in user's computation graph. In
646
+ /// this case, the user should take the in-place pair as a hint and pass a
647
+ /// different memory buffer for output tensor to avoid overwriting the input
648
+ /// memory buffer which will probably cause unexpected incorrect results.
649
+ ///
650
+ /// @returns A list of pairs of input and output IDs.
651
+ std::vector<std::pair<size_t, size_t>> get_inplace_ports() const {
652
+ size_t num = 0;
653
+ const dnnl_graph_inplace_pair_t *inplace_pairs;
654
+
655
+ error::wrap_c_api(dnnl_graph_compiled_partition_get_inplace_ports(
656
+ get(), &num, &inplace_pairs),
657
+ "could not get the in-place pairs from a compiled partition");
658
+ if (num == 0) return {};
659
+
660
+ std::vector<std::pair<size_t, size_t>> inplace_options;
661
+ inplace_options.reserve(num);
662
+ for (size_t i = 0; i < num; ++i) {
663
+ const dnnl_graph_inplace_pair_t *inplace_pair = inplace_pairs + i;
664
+ inplace_options.emplace_back(
665
+ inplace_pair->input_id, inplace_pair->output_id);
666
+ }
667
+ return inplace_options;
668
+ }
669
+
670
+ /// Execute a compiled partition.
671
+ ///
672
+ /// @param astream Stream object to run over.
673
+ /// @param inputs A list of input tensors.
674
+ /// @param outputs A list of output tensors.
675
+ void execute(stream &astream, const std::vector<tensor> &inputs,
676
+ const std::vector<tensor> &outputs) const {
677
+ std::vector<const_dnnl_graph_tensor_t> c_inputs;
678
+ c_inputs.reserve(inputs.size());
679
+ for (auto &in : inputs) {
680
+ c_inputs.push_back(in.get());
681
+ }
682
+ std::vector<const_dnnl_graph_tensor_t> c_outputs;
683
+ c_outputs.reserve(outputs.size());
684
+ for (auto &out : outputs) {
685
+ c_outputs.push_back(out.get());
686
+ }
687
+
688
+ error::wrap_c_api(
689
+ dnnl_graph_compiled_partition_execute(get(), astream.get(),
690
+ c_inputs.size(), c_inputs.data(), c_outputs.size(),
691
+ c_outputs.data()),
692
+ "could not execute the compiled_partition");
693
+ }
694
+ };
695
+
696
+ /// @} dnnl_graph_api_compiled_partition
697
+
698
+ /// @addtogroup dnnl_graph_api_op Op
699
+ ///
700
+ /// OP is an abstraction of computation logic for deep neural network
701
+ /// operations. An op object encapsulates an operation kind which describes the
702
+ /// computation logic, a unique ID which differentiates operations with the
703
+ /// same kind, and logical tensors which describe the input and output of the
704
+ /// operation and its connections to other operations in the graph.
705
+ ///
706
+ /// @{
707
+
708
+ /// An op object.
709
+ class op : public op_handle {
710
+ public:
711
+ /// Kinds of operations
712
+ enum class kind {
713
+ Abs = dnnl_graph_op_abs,
714
+ AbsBackward = dnnl_graph_op_abs_backward,
715
+ Add = dnnl_graph_op_add,
716
+ AvgPool = dnnl_graph_op_avg_pool,
717
+ AvgPoolBackward = dnnl_graph_op_avg_pool_backward,
718
+ BatchNormForwardTraining = dnnl_graph_op_batch_norm_forward_training,
719
+ BatchNormInference = dnnl_graph_op_batch_norm_inference,
720
+ BatchNormTrainingBackward = dnnl_graph_op_batch_norm_backward,
721
+ BiasAdd = dnnl_graph_op_bias_add,
722
+ BiasAddBackward = dnnl_graph_op_bias_add_backward,
723
+ Clamp = dnnl_graph_op_clamp,
724
+ ClampBackward = dnnl_graph_op_clamp_backward,
725
+ Concat = dnnl_graph_op_concat,
726
+ Convolution = dnnl_graph_op_convolution,
727
+ ConvolutionBackwardData = dnnl_graph_op_convolution_backward_data,
728
+ ConvolutionBackwardWeights = dnnl_graph_op_convolution_backward_weights,
729
+ ConvTranspose = dnnl_graph_op_conv_transpose,
730
+ ConvTransposeBackwardData = dnnl_graph_op_conv_transpose_backward_data,
731
+ ConvTransposeBackwardWeights
732
+ = dnnl_graph_op_conv_transpose_backward_weights,
733
+ Dequantize = dnnl_graph_op_dequantize,
734
+ Divide = dnnl_graph_op_divide,
735
+ DynamicDequantize = dnnl_graph_op_dynamic_dequantize,
736
+ DynamicQuantize = dnnl_graph_op_dynamic_quantize,
737
+ Elu = dnnl_graph_op_elu,
738
+ EluBackward = dnnl_graph_op_elu_backward,
739
+ End = dnnl_graph_op_end,
740
+ Exp = dnnl_graph_op_exp,
741
+ GELU = dnnl_graph_op_gelu,
742
+ GELUBackward = dnnl_graph_op_gelu_backward,
743
+ HardSigmoid = dnnl_graph_op_hard_sigmoid,
744
+ HardSigmoidBackward = dnnl_graph_op_hard_sigmoid_backward,
745
+ HardSwish = dnnl_graph_op_hard_swish,
746
+ HardSwishBackward = dnnl_graph_op_hard_swish_backward,
747
+ Interpolate = dnnl_graph_op_interpolate,
748
+ InterpolateBackward = dnnl_graph_op_interpolate_backward,
749
+ LayerNorm = dnnl_graph_op_layer_norm,
750
+ LayerNormBackward = dnnl_graph_op_layer_norm_backward,
751
+ LeakyReLU = dnnl_graph_op_leaky_relu,
752
+ Log = dnnl_graph_op_log,
753
+ LogSoftmax = dnnl_graph_op_log_softmax,
754
+ LogSoftmaxBackward = dnnl_graph_op_log_softmax_backward,
755
+ MatMul = dnnl_graph_op_matmul,
756
+ Maximum = dnnl_graph_op_maximum,
757
+ MaxPool = dnnl_graph_op_max_pool,
758
+ MaxPoolBackward = dnnl_graph_op_max_pool_backward,
759
+ Minimum = dnnl_graph_op_minimum,
760
+ Mish = dnnl_graph_op_mish,
761
+ MishBackward = dnnl_graph_op_mish_backward,
762
+ Multiply = dnnl_graph_op_multiply,
763
+ Pow = dnnl_graph_op_pow,
764
+ PReLU = dnnl_graph_op_prelu,
765
+ PReLUBackward = dnnl_graph_op_prelu_backward,
766
+ Quantize = dnnl_graph_op_quantize,
767
+ Reciprocal = dnnl_graph_op_reciprocal,
768
+ ReduceL1 = dnnl_graph_op_reduce_l1,
769
+ ReduceL2 = dnnl_graph_op_reduce_l2,
770
+ ReduceMax = dnnl_graph_op_reduce_max,
771
+ ReduceMean = dnnl_graph_op_reduce_mean,
772
+ ReduceMin = dnnl_graph_op_reduce_min,
773
+ ReduceProd = dnnl_graph_op_reduce_prod,
774
+ ReduceSum = dnnl_graph_op_reduce_sum,
775
+ ReLU = dnnl_graph_op_relu,
776
+ ReLUBackward = dnnl_graph_op_relu_backward,
777
+ Reorder = dnnl_graph_op_reorder,
778
+ Round = dnnl_graph_op_round,
779
+ Select = dnnl_graph_op_select,
780
+ Sigmoid = dnnl_graph_op_sigmoid,
781
+ SigmoidBackward = dnnl_graph_op_sigmoid_backward,
782
+ SoftMax = dnnl_graph_op_softmax,
783
+ SoftMaxBackward = dnnl_graph_op_softmax_backward,
784
+ SoftPlus = dnnl_graph_op_softplus,
785
+ SoftPlusBackward = dnnl_graph_op_softplus_backward,
786
+ Sqrt = dnnl_graph_op_sqrt,
787
+ SqrtBackward = dnnl_graph_op_sqrt_backward,
788
+ Square = dnnl_graph_op_square,
789
+ SquaredDifference = dnnl_graph_op_squared_difference,
790
+ StaticReshape = dnnl_graph_op_static_reshape,
791
+ StaticTranspose = dnnl_graph_op_static_transpose,
792
+ Subtract = dnnl_graph_op_subtract,
793
+ Tanh = dnnl_graph_op_tanh,
794
+ TanhBackward = dnnl_graph_op_tanh_backward,
795
+ TypeCast = dnnl_graph_op_type_cast,
796
+ Wildcard = dnnl_graph_op_wildcard,
797
+ // Sentinel
798
+ LastSymbol = dnnl_graph_op_last_symbol,
799
+ };
800
+
801
+ /// Attributes of operations. Different operations support different
802
+ /// attributes. Check the document of each operation for what attributes are
803
+ /// supported and what are the potential values for them. Missing required
804
+ /// attribute or illegal attribute value may lead to failure when adding the
805
+ /// operation to a graph.
806
+ enum class attr {
807
+ /// Undefined op attribute.
808
+ undef = dnnl_graph_op_attr_undef,
809
+
810
+ // float32 attributes. The value of these attributes can be any single
811
+ // float32 number.
812
+
813
+ /// Specifies an alpha attribute to an op.
814
+ alpha = dnnl_graph_op_attr_alpha,
815
+ /// Specifies a beta attribute to an op.
816
+ beta = dnnl_graph_op_attr_beta,
817
+ /// Specifies an epsilon attribute to an op.
818
+ epsilon = dnnl_graph_op_attr_epsilon,
819
+ /// Specifies a max attribute to an op.
820
+ max = dnnl_graph_op_attr_max,
821
+ /// Specifies a min attribute to an op.
822
+ min = dnnl_graph_op_attr_min,
823
+ /// Specifies a momentum attribute to an op.
824
+ momentum = dnnl_graph_op_attr_momentum,
825
+
826
+ // float32 vector attributes. The value of these attributes can be a
827
+ // vector of float32 numbers.
828
+
829
+ /// Specifies a scales attribute to an op.
830
+ scales = dnnl_graph_op_attr_scales,
831
+
832
+ // int64_t attributes. The value of these attributes can be any single
833
+ // int64 number.
834
+
835
+ /// Specifies an axis attribute to an op.
836
+ axis = dnnl_graph_op_attr_axis,
837
+ /// Specifies a begin_norm_axis attribute to an op.
838
+ begin_norm_axis = dnnl_graph_op_attr_begin_norm_axis,
839
+ /// Specifies a groups attribute to an op.
840
+ groups = dnnl_graph_op_attr_groups,
841
+
842
+ // int64_t vector attributes. The value of these attributes can be a
843
+ // vector of int64 numbers.
844
+
845
+ /// Specifies an axes attribute to an op.
846
+ axes = dnnl_graph_op_attr_axes,
847
+ /// Specifies a dilations attribute to an op.
848
+ dilations = dnnl_graph_op_attr_dilations,
849
+ /// Specifies a dst_shape attribute to an op.
850
+ dst_shape = dnnl_graph_op_attr_dst_shape,
851
+ /// Specifies a kernel attribute to an op.
852
+ kernel = dnnl_graph_op_attr_kernel,
853
+ /// Specifies an order attribute to an op.
854
+ order = dnnl_graph_op_attr_order,
855
+ /// Specifies an output_padding attribute to an op.
856
+ output_padding = dnnl_graph_op_attr_output_padding,
857
+ /// Specifies a pads_begin attribute to an op.
858
+ pads_begin = dnnl_graph_op_attr_pads_begin,
859
+ /// Specifies a pads_end attribute to an op.
860
+ pads_end = dnnl_graph_op_attr_pads_end,
861
+ /// Specifies a shape attribute to an op.
862
+ shape = dnnl_graph_op_attr_shape,
863
+ /// Specifies a sizes attribute to an op.
864
+ sizes = dnnl_graph_op_attr_sizes,
865
+ /// Specifies a src_shape attribute to an op.
866
+ src_shape = dnnl_graph_op_attr_src_shape,
867
+ /// Specifies a strides attribute to an op.
868
+ strides = dnnl_graph_op_attr_strides,
869
+ /// Specifies a weight_shape attribute to an op.
870
+ weights_shape = dnnl_graph_op_attr_weights_shape,
871
+ /// Specifies a zps attribute to an op.
872
+ zps = dnnl_graph_op_attr_zps,
873
+
874
+ // bool attributes. The value of these attributes can be any single bool
875
+ // value.
876
+
877
+ /// Specifies an exclude_pad attribute to an op.
878
+ exclude_pad = dnnl_graph_op_attr_exclude_pad,
879
+ /// Specifies a keep_dims attribute to an op.
880
+ keep_dims = dnnl_graph_op_attr_keep_dims,
881
+ /// Specifies a keep_stats attribute to an op.
882
+ keep_stats = dnnl_graph_op_attr_keep_stats,
883
+ /// Specifies a per_channel_broadcast attribute to an op.
884
+ per_channel_broadcast = dnnl_graph_op_attr_per_channel_broadcast,
885
+ /// Specifies a special_zero attribute to an op.
886
+ special_zero = dnnl_graph_op_attr_special_zero,
887
+ /// Specifies a transpose_a attribute to an op.
888
+ transpose_a = dnnl_graph_op_attr_transpose_a,
889
+ /// Specifies a transpose_b attribute to an op.
890
+ transpose_b = dnnl_graph_op_attr_transpose_b,
891
+ /// Specifies a use_affine attribute to an op.
892
+ use_affine = dnnl_graph_op_attr_use_affine,
893
+ /// Specifies a use_dst attribute to an op.
894
+ use_dst = dnnl_graph_op_attr_use_dst,
895
+
896
+ // string attributes. The value of these attributes can be a string.
897
+
898
+ /// Specifies an auto_broadcast attribute to an op. The value can be
899
+ /// "none" or "numpy".
900
+ auto_broadcast = dnnl_graph_op_attr_auto_broadcast,
901
+ /// Specifies an auto_pad attribute to an op. The value can be "none",
902
+ /// "same_upper", "same_lower", or "valid".
903
+ auto_pad = dnnl_graph_op_attr_auto_pad,
904
+ /// Specifies an coordinate_transformation_mode attribute to an op. The
905
+ /// value can be "half_pixel" or "align_corners". The attribute is
906
+ /// defined for Interpolate operations.
907
+ coordinate_transformation_mode
908
+ = dnnl_graph_op_attr_coordinate_transformation_mode,
909
+ /// Specifies a data_format of an op. The value can be "NCX" or "NXC".
910
+ data_format = dnnl_graph_op_attr_data_format,
911
+ /// Specifies a mode attribute of an op. The value can be "nearest",
912
+ /// "linear", "bilinear", or "trilinear". The attribute is defined for
913
+ /// Interpolate operations.
914
+ mode = dnnl_graph_op_attr_mode,
915
+ /// Specifies a qtype attribute to an op. The value can be "per_channel"
916
+ /// or "per_tensor". The attribute is defined for quantization
917
+ /// operations.
918
+ qtype = dnnl_graph_op_attr_qtype,
919
+ /// Specifies a rounding_type attribute to an op. The value can be
920
+ /// "ceil" or "floor".
921
+ rounding_type = dnnl_graph_op_attr_rounding_type,
922
+ /// Specifies a weights_format of an op. The value can be "OIX", "XIO",
923
+ /// "IOX", or "XOI". Different operations may support different values.
924
+ weights_format = dnnl_graph_op_attr_weights_format,
925
+ };
926
+
927
+ /// Constructs an op object with a unique ID, an operation kind, and a name
928
+ /// string.
929
+ ///
930
+ /// @param id The unique ID of the op.
931
+ /// @param akind The op kind specifies which computation is represented by
932
+ /// the op, such as Convolution or ReLU.
933
+ /// @param verbose_name The string added as the op name.
934
+ op(size_t id, kind akind, const std::string &verbose_name = "") {
935
+ dnnl_graph_op_t op = nullptr;
936
+ error::wrap_c_api(dnnl_graph_op_create(&op, id, convert_to_c(akind),
937
+ verbose_name.c_str()),
938
+ "could not create op with id and op kind");
939
+ reset(op);
940
+ }
941
+
942
+ /// Constructs an op object with a unique ID, an operation kind, and
943
+ /// input/output logical tensors.
944
+ ///
945
+ /// @param id The unique ID of this op.
946
+ /// @param akind The op kind specifies which computation is represented by
947
+ /// this op, such as Convolution or ReLU.
948
+ /// @param inputs Input logical tensor to be bound to this op.
949
+ /// @param outputs Output logical tensor to be bound to this op.
950
+ /// @param verbose_name The string added as the op name.
951
+ op(size_t id, kind akind, const std::vector<logical_tensor> &inputs,
952
+ const std::vector<logical_tensor> &outputs,
953
+ const std::string &verbose_name = "")
954
+ : op(id, akind, verbose_name) {
955
+ for (const auto &input : inputs) {
956
+ error::wrap_c_api(dnnl_graph_op_add_input(get(), &(input.data)),
957
+ "adding input to the op failed");
958
+ }
959
+ for (const auto &output : outputs) {
960
+ error::wrap_c_api(dnnl_graph_op_add_output(get(), &(output.data)),
961
+ "adding output to the op failed");
962
+ }
963
+ }
964
+
965
+ /// Adds an input logical tensor to the op.
966
+ ///
967
+ /// @param t Input logical tensor.
968
+ void add_input(const logical_tensor &t) {
969
+ error::wrap_c_api(dnnl_graph_op_add_input(get(), &(t.data)),
970
+ "adding input to the op failed");
971
+ }
972
+
973
+ /// Adds a vector of input logical tensors to the op.
974
+ ///
975
+ /// @param ts The list of input logical tensors.
976
+ void add_inputs(const std::vector<logical_tensor> &ts) {
977
+ for (const auto &t : ts) {
978
+ error::wrap_c_api(dnnl_graph_op_add_input(get(), &(t.data)),
979
+ "adding input to the op failed");
980
+ }
981
+ }
982
+
983
+ /// Adds an output logical tensor to the op.
984
+ ///
985
+ /// @param t Output logical tensor.
986
+ void add_output(const logical_tensor &t) {
987
+ error::wrap_c_api(dnnl_graph_op_add_output(get(), &(t.data)),
988
+ "adding output to the op failed");
989
+ }
990
+
991
+ /// Adds a vector of output logical tensors to the op.
992
+ ///
993
+ /// @param ts The list of output logical tensors.
994
+ void add_outputs(const std::vector<logical_tensor> &ts) {
995
+ for (const auto &t : ts) {
996
+ error::wrap_c_api(dnnl_graph_op_add_output(get(), &(t.data)),
997
+ "adding output to the op failed");
998
+ }
999
+ }
1000
+
1001
+ /// Sets the attribute according to the name and type (int64_t).
1002
+ ///
1003
+ /// @tparam Type Attribute's type.
1004
+ /// @param name Attribute's name.
1005
+ /// @param value The attribute's value.
1006
+ /// @returns The Op self.
1007
+ template <typename Type, req<std::is_same<Type, int64_t>::value> = true>
1008
+ op &set_attr(attr name, const Type &value) {
1009
+ dnnl_graph_op_attr_t attr = convert_to_c(name);
1010
+ error::wrap_c_api(dnnl_graph_op_set_attr_s64(get(), attr, &value, 1),
1011
+ "could not set attribute to the op");
1012
+ return *this;
1013
+ }
1014
+
1015
+ /// Sets the attribute according to the name and type (float).
1016
+ ///
1017
+ /// @tparam Type Attribute's type.
1018
+ /// @param name Attribute's name.
1019
+ /// @param value The attribute's value.
1020
+ /// @returns The Op self.
1021
+ template <typename Type, req<std::is_same<Type, float>::value> = true>
1022
+ op &set_attr(attr name, const Type &value) {
1023
+ dnnl_graph_op_attr_t attr = convert_to_c(name);
1024
+ error::wrap_c_api(dnnl_graph_op_set_attr_f32(get(), attr, &value, 1),
1025
+ "could not set attribute to the op");
1026
+ return *this;
1027
+ }
1028
+
1029
+ /// Sets the attribute according to the name and type (bool).
1030
+ ///
1031
+ /// @tparam Type Attribute's type.
1032
+ /// @param name Attribute's name.
1033
+ /// @param value The attribute's value.
1034
+ /// @returns The Op self.
1035
+ template <typename Type, req<std::is_same<Type, bool>::value> = true>
1036
+ op &set_attr(attr name, const Type &value) {
1037
+ dnnl_graph_op_attr_t attr = convert_to_c(name);
1038
+ const uint8_t val = value;
1039
+ error::wrap_c_api(dnnl_graph_op_set_attr_bool(get(), attr, &val, 1),
1040
+ "could not set attribute to the op");
1041
+ return *this;
1042
+ }
1043
+
1044
+ /// Sets the attribute according to the name and type (string).
1045
+ ///
1046
+ /// @tparam Type Attribute's type.
1047
+ /// @param name Attribute's name.
1048
+ /// @param value The attribute's value.
1049
+ /// @returns The Op self.
1050
+ template <typename Type, req<std::is_same<Type, std::string>::value> = true>
1051
+ op &set_attr(attr name, const Type &value) {
1052
+ dnnl_graph_op_attr_t attr = convert_to_c(name);
1053
+ error::wrap_c_api(dnnl_graph_op_set_attr_str(
1054
+ get(), attr, value.c_str(), value.size()),
1055
+ "could not set attribute to the op");
1056
+ return *this;
1057
+ }
1058
+
1059
+ /// Sets the attribute according to the name and type
1060
+ /// (std::vector<int64_t>).
1061
+ ///
1062
+ /// @tparam Type Attribute's type.
1063
+ /// @param name Attribute's name.
1064
+ /// @param value The attribute's value.
1065
+ /// @returns The Op self.
1066
+ template <typename Type,
1067
+ req<std::is_same<Type, std::vector<int64_t>>::value> = true>
1068
+ op &set_attr(attr name, const Type &value) {
1069
+ dnnl_graph_op_attr_t attr = convert_to_c(name);
1070
+ error::wrap_c_api(dnnl_graph_op_set_attr_s64(
1071
+ get(), attr, value.data(), value.size()),
1072
+ "could not set attribute to the op");
1073
+ return *this;
1074
+ }
1075
+
1076
+ /// Sets the attribute according to the name and type (std::vector<float>).
1077
+ ///
1078
+ /// @tparam Type Attribute's type.
1079
+ /// @param name Attribute's name.
1080
+ /// @param value The attribute's value.
1081
+ /// @returns The Op self.
1082
+ template <typename Type,
1083
+ req<std::is_same<Type, std::vector<float>>::value> = true>
1084
+ op &set_attr(attr name, const Type &value) {
1085
+ dnnl_graph_op_attr_t attr = convert_to_c(name);
1086
+ error::wrap_c_api(dnnl_graph_op_set_attr_f32(
1087
+ get(), attr, value.data(), value.size()),
1088
+ "could not set attribute to the op");
1089
+ return *this;
1090
+ }
1091
+
1092
+ private:
1093
+ dnnl_graph_op_kind_t convert_to_c(kind akind) {
1094
+ return static_cast<dnnl_graph_op_kind_t>(akind);
1095
+ }
1096
+
1097
+ dnnl_graph_op_attr_t convert_to_c(attr aattr) {
1098
+ return static_cast<dnnl_graph_op_attr_t>(aattr);
1099
+ }
1100
+ };
1101
+
1102
+ /// @} dnnl_graph_api_op
1103
+
1104
+ /// @addtogroup dnnl_graph_api_partition Partition
1105
+ ///
1106
+ /// Partition represents a collection of operations and their input and output
1107
+ /// logical tensors identified by library as the basic unit for compilation and
1108
+ /// execution.
1109
+ ///
1110
+ /// @{
1111
+
1112
+ /// A partition object.
1113
+ class partition : public partition_handle {
1114
+ public:
1115
+ /// Policy specifications for partitioning.
1116
+ enum class policy {
1117
+ /// Fusion policy returns partitions with typical post-op fusions, e.g.
1118
+ /// Convolution + ReLU or other element-wise operations or a chain of
1119
+ /// post-ops.
1120
+ fusion = dnnl_graph_partition_policy_fusion,
1121
+ /// Debug policy does not apply any fusions. It returns partitions
1122
+ /// with single operations in each partition. The policy is useful when
1123
+ /// users notice any bug or correctness issue in fusion policy.
1124
+ debug = dnnl_graph_partition_policy_debug,
1125
+ };
1126
+
1127
+ partition() = default;
1128
+
1129
+ /// Constructs a partition object
1130
+ ///
1131
+ /// @param p A raw pointer to the C API handle
1132
+ partition(dnnl_graph_partition_t p) { reset(p, false); }
1133
+
1134
+ /// Creates a new partition with a given operator and engine kind. The API
1135
+ /// is used to create a partition from an operation directly without
1136
+ /// creating the graph and calling `get_partitions()`. The output partition
1137
+ /// contains only one operation.
1138
+ ///
1139
+ /// @param aop An operation used to create the partition.
1140
+ /// @param ekind Engine kind.
1141
+ partition(const op &aop, engine::kind ekind) {
1142
+ dnnl_graph_partition_t p = nullptr;
1143
+ error::wrap_c_api(dnnl_graph_partition_create_with_op(&p, aop.get(),
1144
+ static_cast<dnnl_engine_kind_t>(ekind)),
1145
+ "could not create a partition with the op and engine kind");
1146
+ reset(p);
1147
+ }
1148
+
1149
+ /// Returns the number of operations contained in the partition.
1150
+ ///
1151
+ /// @returns Number of operations.
1152
+ size_t get_ops_num() const {
1153
+ size_t num {0};
1154
+ error::wrap_c_api(dnnl_graph_partition_get_op_num(get(), &num),
1155
+ "could not get number of ops from the partition");
1156
+ return num;
1157
+ }
1158
+
1159
+ /// Returns all operation IDs contained in the partition.
1160
+ ///
1161
+ /// @returns A vector of operation IDs.
1162
+ std::vector<size_t> get_ops() const {
1163
+ auto num = get_ops_num();
1164
+ std::vector<size_t> ops(num);
1165
+
1166
+ error::wrap_c_api(dnnl_graph_partition_get_ops(get(), num, ops.data()),
1167
+ "could not get op ids from the partition");
1168
+ return ops;
1169
+ }
1170
+
1171
+ /// Returns the unique ID of the partition. Partition ID is generated by the
1172
+ /// library internally. The ID can be used for debugging or verbose output.
1173
+ ///
1174
+ /// @returns ID of the partition.
1175
+ size_t get_id() const {
1176
+ size_t id {};
1177
+ error::wrap_c_api(dnnl_graph_partition_get_id(get(), &id),
1178
+ "could not get id of the partition");
1179
+ return id;
1180
+ }
1181
+
1182
+ /// Compiles a partition with given input and output logical tensors. The
1183
+ /// output logical tensors can contain unknown dimensions. For this case,
1184
+ /// the compilation will deduce the output shapes according to input shapes.
1185
+ /// The output logical tensors can also have layout type `any`. The
1186
+ /// compilation will choose the optimal layout for output tensors. The
1187
+ /// optimal layout will be represented as an opaque layout ID saved in the
1188
+ /// output logical tensor.
1189
+ ///
1190
+ /// @param inputs A list of input logical tensors.
1191
+ /// @param outputs A list of output logical tensors.
1192
+ /// @param e The engine used to compile the partition.
1193
+ /// @returns A compiled partition.
1194
+ compiled_partition compile(const std::vector<logical_tensor> &inputs,
1195
+ const std::vector<logical_tensor> &outputs, const engine &e) const {
1196
+ if (!is_supported()) {
1197
+ error::wrap_c_api(dnnl_invalid_arguments,
1198
+ "could not compile an unsupported partition");
1199
+ }
1200
+
1201
+ return compile_(inputs, outputs, e);
1202
+ }
1203
+
1204
+ /// Returns the supporting status of a partition. Some operations may not be
1205
+ /// supported by the library under certain circumstances. During
1206
+ /// partitioning stage, unsupported partitions will be returned to users
1207
+ /// with each containing an unsupported operation. Users should check the
1208
+ /// supporting status of a partition before transforming the computation
1209
+ /// graph or compiling the partition.
1210
+ ///
1211
+ /// @returns @c true if this partition is supported or @c false if this
1212
+ /// partition isn't supported by the library
1213
+ bool is_supported() const {
1214
+ uint8_t supported {0};
1215
+ error::wrap_c_api(dnnl_graph_partition_is_supported(get(), &supported),
1216
+ "could not get supporting status of the partition");
1217
+ return supported != 0;
1218
+ }
1219
+
1220
+ /// Returns a list of input logical tensors from the partition.
1221
+ ///
1222
+ /// @returns A list of input logical tensors.
1223
+ std::vector<logical_tensor> get_input_ports() const {
1224
+ size_t num = 0;
1225
+ error::wrap_c_api(dnnl_graph_partition_get_input_ports_num(get(), &num),
1226
+ "could not get number of inputs of the partition");
1227
+ if (num == 0) return {};
1228
+
1229
+ std::vector<dnnl_graph_logical_tensor_t> c_inputs(num);
1230
+ error::wrap_c_api(dnnl_graph_partition_get_input_ports(
1231
+ get(), num, c_inputs.data()),
1232
+ "could not get input logical tensors of the partition");
1233
+
1234
+ std::vector<logical_tensor> inputs;
1235
+ inputs.reserve(num);
1236
+ for (auto &c_lt : c_inputs)
1237
+ inputs.emplace_back(c_lt);
1238
+ return inputs;
1239
+ }
1240
+
1241
+ /// Returns a list of output logical tensors from the partition.
1242
+ ///
1243
+ /// @returns A list of output logical tensor.
1244
+ std::vector<logical_tensor> get_output_ports() const {
1245
+ size_t num = 0;
1246
+ error::wrap_c_api(
1247
+ dnnl_graph_partition_get_output_ports_num(get(), &num),
1248
+ "cannot get number of outputs of the partition");
1249
+ if (num == 0) return {};
1250
+
1251
+ std::vector<dnnl_graph_logical_tensor_t> c_outputs(num);
1252
+ error::wrap_c_api(dnnl_graph_partition_get_output_ports(
1253
+ get(), num, c_outputs.data()),
1254
+ "could not get output logical tensors of the partition");
1255
+
1256
+ std::vector<logical_tensor> outputs;
1257
+ outputs.reserve(num);
1258
+ for (auto &c_lt : c_outputs)
1259
+ outputs.emplace_back(c_lt);
1260
+ return outputs;
1261
+ }
1262
+
1263
+ /// Returns the engine kind of the partition
1264
+ ///
1265
+ /// @returns The engine kind
1266
+ engine::kind get_engine_kind() const {
1267
+ dnnl_engine_kind_t akind;
1268
+ error::wrap_c_api(dnnl_graph_partition_get_engine_kind(get(), &akind),
1269
+ "cannot get the engine kind from the partition");
1270
+
1271
+ return static_cast<engine::kind>(akind);
1272
+ }
1273
+
1274
+ private:
1275
+ compiled_partition compile_(const std::vector<logical_tensor> &inputs,
1276
+ const std::vector<logical_tensor> &outputs, const engine &e) const {
1277
+ std::vector<const dnnl_graph_logical_tensor_t *> c_inputs;
1278
+ std::vector<const dnnl_graph_logical_tensor_t *> c_outputs;
1279
+
1280
+ c_inputs.reserve(inputs.size());
1281
+ for (const auto &in : inputs) {
1282
+ c_inputs.push_back(&(in.data));
1283
+ }
1284
+
1285
+ c_outputs.reserve(outputs.size());
1286
+ for (const auto &out : outputs) {
1287
+ c_outputs.push_back(&(out.data));
1288
+ }
1289
+
1290
+ dnnl_graph_compiled_partition_t cpartitions = nullptr;
1291
+ error::wrap_c_api(
1292
+ dnnl_graph_compiled_partition_create(&cpartitions, get()),
1293
+ "could not create compiled_partition");
1294
+ error::wrap_c_api(dnnl_graph_partition_compile(get(), cpartitions,
1295
+ c_inputs.size(), c_inputs.data(),
1296
+ c_outputs.size(), c_outputs.data(), e.get()),
1297
+ "partition compile failed");
1298
+
1299
+ return compiled_partition(cpartitions);
1300
+ }
1301
+ };
1302
+
1303
+ /// @} dnnl_graph_api_partition
1304
+
1305
+ /// @addtogroup dnnl_graph_api_graph Graph
1306
+ ///
1307
+ /// Graph represents a computational DAG with a set of operations.
1308
+ /// #dnnl::graph::graph::add_op() adds an operation and its input and output
1309
+ /// logical tensors into a graph. The library accumulates the operations and
1310
+ /// logical tensors and constructs and validates the graph as an internal state.
1311
+ /// A graph object is associated to a specific engine kind. The partitions
1312
+ /// returned from the graph will inherit the engine kind of the graph.
1313
+ ///
1314
+ /// @{
1315
+
1316
+ /// A graph object.
1317
+ class graph : public graph_handle {
1318
+ public:
1319
+ /// Constructs a graph with an engine kind.
1320
+ ///
1321
+ /// @param engine_kind Engine kind.
1322
+ graph(engine::kind engine_kind) {
1323
+ dnnl_graph_graph_t g = nullptr;
1324
+ error::wrap_c_api(
1325
+ dnnl_graph_graph_create(&g, convert_to_c(engine_kind)),
1326
+ "could not create graph with engine kind");
1327
+ reset(g);
1328
+ }
1329
+
1330
+ /// Creates a new empty graph with an engine kind and a floating-point math
1331
+ /// mode. All partitions returned from the graph will inherit the engine
1332
+ /// kind and floating-point math mode.
1333
+ ///
1334
+ /// @param engine_kind Engine kind.
1335
+ /// @param mode Floating-point math mode.
1336
+ graph(engine::kind engine_kind, fpmath_mode mode) {
1337
+ dnnl_graph_graph_t g = nullptr;
1338
+ error::wrap_c_api(
1339
+ dnnl_graph_graph_create_with_fpmath_mode(
1340
+ &g, convert_to_c(engine_kind), convert_to_c(mode)),
1341
+ "could not create graph with engine kind and math mode");
1342
+ reset(g);
1343
+ }
1344
+
1345
+ /// Adds an op into the graph to construct a computational DAG. The API will
1346
+ /// return failure if the operator has already been added to the graph or
1347
+ /// the operation cannot pass the schema check in the library (eg. input and
1348
+ /// output numbers and data types, the attributes of the operation, etc.).
1349
+ ///
1350
+ /// @param op An operation to be added.
1351
+ /// @param allow_exception A flag indicating whether the method is allowed
1352
+ /// to throw an exception if it fails to add the op to the graph.
1353
+ /// @returns #status::success or a status describing the error otherwise.
1354
+ status add_op(const op &op, bool allow_exception = true) {
1355
+ dnnl_status_t ret = dnnl_graph_add_op(get(), op.get());
1356
+
1357
+ if (allow_exception) {
1358
+ error::wrap_c_api(ret, "could not add op to the graph");
1359
+ }
1360
+
1361
+ return static_cast<status>(ret);
1362
+ }
1363
+
1364
+ /// Finalizes a graph. It means users have finished adding operations into
1365
+ /// the graph and the graph is ready for partitioning. Adding a new
1366
+ /// operation into a finalized graph will return failures. Similarly,
1367
+ /// partitioning on a un-finalized graph will also return failures.
1368
+ void finalize() {
1369
+ error::wrap_c_api(dnnl_graph_graph_finalize(get()),
1370
+ "could not finalize the graph");
1371
+ }
1372
+
1373
+ /// Checks if a graph is finalized.
1374
+ ///
1375
+ /// @return True if the graph is finalized or false if the graph is not
1376
+ /// finalized.
1377
+ bool is_finalized() const {
1378
+ uint8_t ret = 0;
1379
+ error::wrap_c_api(dnnl_graph_graph_is_finalized(get(), &ret),
1380
+ "could not get the finalization status of the graph");
1381
+
1382
+ return ret != 0;
1383
+ }
1384
+
1385
+ /// Gets filtered partitions from a graph. Partitions will be claimed
1386
+ /// internally according to the capability of the library, the engine kind
1387
+ /// of the graph, and the policy.
1388
+ ///
1389
+ /// @param policy Partition policy, defaults to policy
1390
+ /// #dnnl::graph::partition::policy::fusion.
1391
+ /// @return A vector storing the partitions.
1392
+ std::vector<partition> get_partitions(
1393
+ partition::policy policy = partition::policy::fusion) {
1394
+ if (!is_finalized()) {
1395
+ error::wrap_c_api(
1396
+ dnnl_invalid_graph, "the graph is not finalized yet");
1397
+ }
1398
+
1399
+ error::wrap_c_api(
1400
+ dnnl_graph_graph_filter(get(),
1401
+ static_cast<dnnl_graph_partition_policy_t>(policy)),
1402
+ "could not filter the graph");
1403
+
1404
+ size_t num = 0;
1405
+ error::wrap_c_api(dnnl_graph_graph_get_partition_num(get(), &num),
1406
+ "could not get number of partitions from the graph");
1407
+
1408
+ // return early if there is no partitions in the graph.
1409
+ if (num == 0) return {};
1410
+
1411
+ std::vector<partition> out_list;
1412
+ out_list.reserve(num);
1413
+
1414
+ std::vector<dnnl_graph_partition_t> partitions(num);
1415
+ error::wrap_c_api(
1416
+ dnnl_graph_graph_get_partitions(get(), num, partitions.data()),
1417
+ "could not get partitions from the graph");
1418
+
1419
+ for (auto p : partitions) {
1420
+ out_list.emplace_back(p);
1421
+ }
1422
+
1423
+ return out_list;
1424
+ }
1425
+
1426
+ private:
1427
+ static dnnl_fpmath_mode_t convert_to_c(fpmath_mode mode) {
1428
+ return static_cast<dnnl_fpmath_mode_t>(mode);
1429
+ }
1430
+
1431
+ static dnnl_engine_kind_t convert_to_c(engine::kind akind) {
1432
+ return static_cast<dnnl_engine_kind_t>(akind);
1433
+ }
1434
+ };
1435
+
1436
+ /// @} dnnl_graph_api_graph
1437
+
1438
+ /// @addtogroup dnnl_graph_api_compiled_partition_cache Compiled Partition Cache
1439
+ ///
1440
+ /// A set of functions that provide compiled partition cache control.
1441
+ ///
1442
+ /// @{
1443
+
1444
+ /// Returns the number of compiled partition that can be held in the compiled
1445
+ /// partition cache at the same time.
1446
+ inline int get_compiled_partition_cache_capacity() {
1447
+ int result = 0;
1448
+ error::wrap_c_api(dnnl_graph_get_compiled_partition_cache_capacity(&result),
1449
+ "could not get compiled partition cache capacity");
1450
+ return result;
1451
+ }
1452
+
1453
+ /// @copydoc dnnl_graph_set_compiled_partition_cache_capacity(int capacity)
1454
+ inline void set_compiled_partition_cache_capacity(int capacity) {
1455
+ error::wrap_c_api(
1456
+ dnnl_graph_set_compiled_partition_cache_capacity(capacity),
1457
+ "could not set compiled partition cache capacity");
1458
+ }
1459
+
1460
+ /// @} dnnl_graph_api_compiled_partition_cache
1461
+
1462
+ /// @addtogroup dnnl_graph_api_constant_tensor_cache Constant Tensor Cache
1463
+ ///
1464
+ /// A set of functions that provide constant tensor cache control
1465
+ ///
1466
+ /// @{
1467
+
1468
+ /// Control the enabling or disabling of constant tensor cache. This API must be
1469
+ /// called once before compilation stage. By default, constant tensor cache is
1470
+ /// disabled in the library.
1471
+ ///
1472
+ /// @param flag Set to positive value to enable the cache and set to 0 to
1473
+ /// disable the cache. Negative values are invalid.
1474
+ inline void set_constant_tensor_cache(int flag) {
1475
+ error::wrap_c_api(dnnl_graph_set_constant_tensor_cache(flag),
1476
+ "fail to set constant tensor cache");
1477
+ }
1478
+
1479
+ /// Return the enabling status of constant tensor cache.
1480
+ inline int get_constant_tensor_cache() {
1481
+ int result = 0;
1482
+ error::wrap_c_api(dnnl_graph_get_constant_tensor_cache(&result),
1483
+ "fail to get constant tensor cache");
1484
+ return result;
1485
+ }
1486
+
1487
+ /// @} dnnl_graph_constant_tensor_cache
1488
+
1489
+ } // namespace graph
1490
+ } // namespace dnnl
1491
+
1492
+ /// @cond DO_NOT_DOCUMENT_THIS
1493
+
1494
+ /// oneAPI namespace
1495
+ // Contains the oneapi::dnnl namespace as an alias to the ::dnnl namespace.
1496
+ namespace oneapi {
1497
+ // Note: without this guard, doxygen warns of potentially recursive namespace
1498
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
1499
+ /// oneDNN alias namespace
1500
+ namespace dnnl = ::dnnl;
1501
+ #endif
1502
+ } // namespace oneapi
1503
+
1504
+ /// @endcond
1505
+
1506
+ /// @} dnnl_graph_api
1507
+
1508
+ /// @} dnnl_api
1509
+
1510
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/oneapi/dnnl/dnnl_ocl.h ADDED
@@ -0,0 +1,249 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2020-2022 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef ONEAPI_DNNL_DNNL_OCL_H
18
+ #define ONEAPI_DNNL_DNNL_OCL_H
19
+
20
+ #include "oneapi/dnnl/dnnl.h"
21
+
22
+ #include "oneapi/dnnl/dnnl_ocl_types.h"
23
+
24
+ /// @cond DO_NOT_DOCUMENT_THIS
25
+ // Set target version for OpenCL explicitly to suppress a compiler warning.
26
+ #ifndef CL_TARGET_OPENCL_VERSION
27
+ #define CL_TARGET_OPENCL_VERSION 120
28
+ #endif
29
+
30
+ #include <CL/cl.h>
31
+ /// @endcond
32
+
33
+ #ifdef __cplusplus
34
+ extern "C" {
35
+ #endif
36
+
37
+ /// @addtogroup dnnl_api
38
+ /// @{
39
+
40
+ /// @addtogroup dnnl_api_interop
41
+ /// @{
42
+
43
+ /// @addtogroup dnnl_api_ocl_interop
44
+ /// @{
45
+
46
+ /// Creates a memory object.
47
+ ///
48
+ /// Unless @p handle is equal to DNNL_MEMORY_NONE or DNNL_MEMORY_ALLOCATE, the
49
+ /// constructed memory object will have the underlying buffer set. In this
50
+ /// case, the buffer will be initialized as if:
51
+ /// - dnnl_memory_set_data_handle() has been called, if @p memory_kind is equal
52
+ /// to dnnl_ocl_interop_usm, or
53
+ /// - dnnl_ocl_interop_memory_set_mem_object() has been called, if @p memory_kind
54
+ /// is equal to dnnl_ocl_interop_buffer.
55
+ ///
56
+ /// @param memory Output memory object.
57
+ /// @param memory_desc Memory descriptor.
58
+ /// @param engine Engine to use.
59
+ /// @param memory_kind Memory allocation kind to specify the type of handle.
60
+ /// @param handle Handle of the memory buffer to use as an underlying storage.
61
+ /// - A USM pointer to the user-allocated buffer. In this case the library
62
+ /// doesn't own the buffer. Requires @p memory_kind to be equal to
63
+ /// dnnl_ocl_interop_usm.
64
+ /// - An OpenCL buffer. In this case the library doesn't own the buffer.
65
+ /// Requires @p memory_kind be equal to be equal to dnnl_ocl_interop_buffer.
66
+ /// - The DNNL_MEMORY_ALLOCATE special value. Instructs the library to
67
+ /// allocate the buffer that corresponds to the memory allocation kind
68
+ /// @p memory_kind for the memory object. In this case the library
69
+ /// owns the buffer.
70
+ /// - The DNNL_MEMORY_NONE specific value. Instructs the library to
71
+ /// create memory object without an underlying buffer.
72
+ /// @returns #dnnl_success on success and a status describing the error
73
+ /// otherwise.
74
+ dnnl_status_t DNNL_API dnnl_ocl_interop_memory_create(dnnl_memory_t *memory,
75
+ const_dnnl_memory_desc_t memory_desc, dnnl_engine_t engine,
76
+ dnnl_ocl_interop_memory_kind_t memory_kind, void *handle);
77
+
78
+ /// Returns the memory allocation kind associated with a memory object.
79
+ ///
80
+ /// @param memory Memory to query.
81
+ /// @param memory_kind Output underlying memory allocation kind of the memory
82
+ /// object.
83
+ /// @returns #dnnl_success on success and a status describing the error
84
+ /// otherwise.
85
+ dnnl_status_t DNNL_API dnnl_ocl_interop_memory_get_memory_kind(
86
+ const_dnnl_memory_t memory,
87
+ dnnl_ocl_interop_memory_kind_t *memory_kind);
88
+
89
+ /// Returns an OpenCL memory object associated with a memory object.
90
+ ///
91
+ /// @param memory Memory object.
92
+ /// @param mem_object Output OpenCL memory object.
93
+ /// @returns #dnnl_success on success and a status describing the error
94
+ /// otherwise.
95
+ dnnl_status_t DNNL_API dnnl_ocl_interop_memory_get_mem_object(
96
+ const_dnnl_memory_t memory, cl_mem *mem_object);
97
+
98
+ /// Sets OpenCL memory object associated with a memory object.
99
+ ///
100
+ /// For behavioral details, see dnnl_memory_set_data_handle().
101
+ ///
102
+ /// @param memory Memory object.
103
+ /// @param mem_object OpenCL memory object.
104
+ /// @returns #dnnl_success on success and a status describing the error
105
+ /// otherwise.
106
+ dnnl_status_t DNNL_API dnnl_ocl_interop_memory_set_mem_object(
107
+ dnnl_memory_t memory, cl_mem mem_object);
108
+
109
+ /// Retrieves a cache blob ID for the OpenCL device.
110
+ ///
111
+ /// @warning
112
+ /// This API is intended to be used with
113
+ /// #dnnl_ocl_interop_engine_get_cache_blob() and
114
+ /// #dnnl_ocl_interop_engine_create_from_cache_blob(). The returned cache
115
+ /// blob ID can only be used as an ID of the cache blob returned by
116
+ /// #dnnl_ocl_interop_engine_get_cache_blob().
117
+ ///
118
+ /// @note The cache blob ID can be empty (@p size will be 0 and
119
+ /// @p cache_blob_id will be nullptr) if oneDNN doesn't have anything to
120
+ /// put in the cache blob. (#dnnl_ocl_interop_engine_get_cache_blob will
121
+ /// return an empty cache blob).
122
+ ///
123
+ /// @param device An OpenCL device.
124
+ /// @param size Size of the cache blob ID in bytes.
125
+ /// @param cache_blob_id Cache blob id of size @p size. If
126
+ /// the @p cache_blob_id is nullptr then the size of the cache blob ID is
127
+ /// returned in @p size.
128
+ /// @returns #dnnl_success on success and a status describing the error
129
+ /// otherwise.
130
+ dnnl_status_t DNNL_API dnnl_ocl_interop_engine_get_cache_blob_id(
131
+ cl_device_id device, size_t *size, uint8_t *cache_blob_id);
132
+
133
+ /// Retrieves a cache blob associated with the given engine.
134
+ ///
135
+ /// @note The cache blob can be empty (@p size will be 0 and @p cache_blob
136
+ /// will be nullptr) if oneDNN doesn't have anything to put in the cache
137
+ /// blob. It's the user's responsibility to check whether it's empty
138
+ /// prior to passing it to
139
+ /// #dnnl_ocl_interop_engine_create_from_cache_blob().
140
+ ///
141
+ /// @param engine Engine to query for the cache blob.
142
+ /// @param size Size of the cache blob in bytes.
143
+ /// @param cache_blob Cache blob of size @p size. If the @p cache_blob is
144
+ /// nullptr then the size of the cache blob is returned in @p size.
145
+ /// @returns #dnnl_success on success and a status describing the error
146
+ /// otherwise.
147
+ dnnl_status_t DNNL_API dnnl_ocl_interop_engine_get_cache_blob(
148
+ dnnl_engine_t engine, size_t *size, uint8_t *cache_blob);
149
+
150
+ /// Creates an engine from the given cache blob.
151
+ ///
152
+ /// @param engine Output engine.
153
+ /// @param device The OpenCL device that this engine will encapsulate.
154
+ /// @param context The OpenCL context (containing the device) that this
155
+ /// engine will use for all operations.
156
+ /// @returns #dnnl_success on success and a status describing the error
157
+ /// otherwise.
158
+ /// @param size Size of the cache blob in bytes.
159
+ /// @param cache_blob Cache blob of size @p size.
160
+ /// @returns #dnnl_success on success and a status describing the error
161
+ /// otherwise.
162
+ dnnl_status_t DNNL_API dnnl_ocl_interop_engine_create_from_cache_blob(
163
+ dnnl_engine_t *engine, cl_device_id device, cl_context context,
164
+ size_t size, const uint8_t *cache_blob);
165
+
166
+ /// Creates an engine associated with an OpenCL device and an OpenCL context.
167
+ ///
168
+ /// @param engine Output engine.
169
+ /// @param device Underlying OpenCL device to use for the engine.
170
+ /// @param context Underlying OpenCL context to use for the engine.
171
+ /// @returns #dnnl_success on success and a status describing the error
172
+ /// otherwise.
173
+ dnnl_status_t DNNL_API dnnl_ocl_interop_engine_create(
174
+ dnnl_engine_t *engine, cl_device_id device, cl_context context);
175
+
176
+ /// Returns the OpenCL context associated with an engine.
177
+ ///
178
+ /// @param engine Engine to query.
179
+ /// @param context Output underlying OpenCL context of the engine.
180
+ /// @returns #dnnl_success on success and a status describing the error
181
+ /// otherwise.
182
+ dnnl_status_t DNNL_API dnnl_ocl_interop_engine_get_context(
183
+ dnnl_engine_t engine, cl_context *context);
184
+
185
+ /// Returns the OpenCL device associated with an engine.
186
+ ///
187
+ /// @param engine Engine to query.
188
+ /// @param device Output underlying OpenCL device of the engine.
189
+ /// @returns #dnnl_success on success and a status describing the error
190
+ /// otherwise.
191
+ dnnl_status_t DNNL_API dnnl_ocl_interop_get_device(
192
+ dnnl_engine_t engine, cl_device_id *device);
193
+
194
+ /// Creates an execution stream for a given engine associated with
195
+ /// an OpenCL command queue.
196
+ ///
197
+ /// @param stream Output execution stream.
198
+ /// @param engine Engine to create the execution stream on.
199
+ /// @param queue OpenCL command queue to use.
200
+ /// @returns #dnnl_success on success and a status describing the error
201
+ /// otherwise.
202
+ dnnl_status_t DNNL_API dnnl_ocl_interop_stream_create(
203
+ dnnl_stream_t *stream, dnnl_engine_t engine, cl_command_queue queue);
204
+
205
+ /// Returns the OpenCL command queue associated with an execution stream.
206
+ ///
207
+ /// @param stream Execution stream to query.
208
+ /// @param queue Output OpenCL command queue.
209
+ /// @returns #dnnl_success on success and a status describing the error
210
+ /// otherwise.
211
+ dnnl_status_t DNNL_API dnnl_ocl_interop_stream_get_command_queue(
212
+ dnnl_stream_t stream, cl_command_queue *queue);
213
+
214
+ /// Executes computations specified by the primitive in a specified stream and
215
+ /// returns an OpenCL event.
216
+ ///
217
+ /// @param primitive Primitive to execute.
218
+ /// @param stream Stream to use.
219
+ /// @param nargs Number of arguments.
220
+ /// @param args Array of arguments. Each argument is an
221
+ /// <index, #dnnl_memory_t> pair. The index is one of the `DNNL_ARG_*`
222
+ /// values such as `DNNL_ARG_SRC`. Unless runtime shapes are used (see
223
+ /// #DNNL_RUNTIME_DIM_VAL), the memory object must have the same memory
224
+ /// descriptor as that returned by
225
+ /// #dnnl_primitive_desc_query_md(#dnnl_query_exec_arg_md, index).
226
+ /// @param deps A pointer to a vector of size @p ndeps that contains
227
+ /// dependencies.
228
+ /// @param ndeps Number of dependencies.
229
+ /// @param return_event Output event. It's the user's responsibility to
230
+ /// manage lifetime of the event. Can be NULL. When @p stream is in-order
231
+ /// NULL will be returned.
232
+ /// @returns #dnnl_success on success and a status describing the error
233
+ /// otherwise.
234
+ dnnl_status_t DNNL_API dnnl_ocl_interop_primitive_execute(
235
+ const_dnnl_primitive_t primitive, dnnl_stream_t stream, int nargs,
236
+ const dnnl_exec_arg_t *args, const cl_event *deps, int ndeps,
237
+ cl_event *return_event);
238
+
239
+ /// @} dnnl_api_ocl_interop
240
+
241
+ /// @} dnnl_api_interop
242
+
243
+ /// @} dnnl_api
244
+
245
+ #ifdef __cplusplus
246
+ }
247
+ #endif
248
+
249
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/oneapi/dnnl/dnnl_ocl_types.h ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2021 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef ONEAPI_DNNL_DNNL_OCL_TYPES_H
18
+ #define ONEAPI_DNNL_DNNL_OCL_TYPES_H
19
+
20
+ #ifdef __cplusplus
21
+ extern "C" {
22
+ #endif
23
+
24
+ /// @addtogroup dnnl_api
25
+ /// @{
26
+
27
+ /// @addtogroup dnnl_api_interop
28
+ /// @{
29
+
30
+ /// @addtogroup dnnl_api_ocl_interop
31
+ /// @{
32
+
33
+ /// Memory allocation kind.
34
+ typedef enum {
35
+ /// USM (device, shared, host, or unknown) memory allocation kind.
36
+ dnnl_ocl_interop_usm,
37
+ /// Buffer memory allocation kind - default.
38
+ dnnl_ocl_interop_buffer,
39
+ } dnnl_ocl_interop_memory_kind_t;
40
+
41
+ /// @} dnnl_api_ocl_interop
42
+
43
+ /// @} dnnl_api_interop
44
+
45
+ /// @} dnnl_api
46
+
47
+ #ifdef __cplusplus
48
+ }
49
+ #endif
50
+
51
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/oneapi/dnnl/dnnl_sycl.h ADDED
@@ -0,0 +1,169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2020-2022 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef ONEAPI_DNNL_DNNL_SYCL_H
18
+ #define ONEAPI_DNNL_DNNL_SYCL_H
19
+
20
+ #include "oneapi/dnnl/dnnl.h"
21
+
22
+ #include "oneapi/dnnl/dnnl_sycl_types.h"
23
+
24
+ #ifdef __cplusplus
25
+ extern "C" {
26
+ #endif
27
+
28
+ /// @addtogroup dnnl_api
29
+ /// @{
30
+
31
+ /// @addtogroup dnnl_api_interop
32
+ /// @{
33
+
34
+ /// @addtogroup dnnl_api_sycl_interop
35
+ /// @{
36
+
37
+ /// Creates an engine associated with a SYCL device and a SYCL context.
38
+ ///
39
+ /// @param engine Output engine.
40
+ /// @param device Pointer to the SYCL device to use for the engine.
41
+ /// @param context Pointer to the SYCL context to use for the engine.
42
+ /// @returns #dnnl_success on success and a status describing the error
43
+ /// otherwise.
44
+ dnnl_status_t DNNL_API dnnl_sycl_interop_engine_create(
45
+ dnnl_engine_t *engine, const void *device, const void *context);
46
+
47
+ /// Returns the SYCL context associated with an engine.
48
+ ///
49
+ /// @param engine Engine to query.
50
+ /// @param context Pointer to the underlying SYCL context of the engine.
51
+ /// @returns #dnnl_success on success and a status describing the error
52
+ /// otherwise.
53
+ dnnl_status_t DNNL_API dnnl_sycl_interop_engine_get_context(
54
+ dnnl_engine_t engine, void **context);
55
+
56
+ /// Returns the SYCL device associated with an engine.
57
+ ///
58
+ /// @param engine Engine to query.
59
+ /// @param device Pointer to the underlying SYCL device of the engine.
60
+ /// @returns #dnnl_success on success and a status describing the error
61
+ /// otherwise.
62
+ dnnl_status_t DNNL_API dnnl_sycl_interop_engine_get_device(
63
+ dnnl_engine_t engine, void **device);
64
+
65
+ /// Creates a memory object.
66
+ ///
67
+ /// Unless @p handle is equal to DNNL_MEMORY_NONE or DNNL_MEMORY_ALLOCATE, the
68
+ /// constructed memory object will have the underlying buffer set. In this
69
+ /// case, the buffer will be initialized as if:
70
+ /// - dnnl_memory_set_data_handle() had been called, if @p memory_kind is equal
71
+ /// to dnnl_sycl_interop_usm, or
72
+ /// - dnnl_sycl_interop_memory_set_buffer() has been called, if @p memory_kind
73
+ /// is equal to dnnl_sycl_interop_buffer.
74
+ ///
75
+ /// @param memory Output memory object.
76
+ /// @param memory_desc Memory descriptor.
77
+ /// @param engine Engine to use.
78
+ /// @param memory_kind Memory allocation kind to specify the type of handle.
79
+ /// @param handle Handle of the memory buffer to use as an underlying storage.
80
+ /// - A USM pointer to the user-allocated buffer. In this case the library
81
+ /// doesn't own the buffer. Requires @p memory_kind to be equal to
82
+ /// dnnl_sycl_interop_usm.
83
+ /// - A pointer to SYCL buffer. In this case the library doesn't own the
84
+ /// buffer. Requires @p memory_kind be equal to be equal to
85
+ /// dnnl_sycl_interop_buffer.
86
+ /// - The DNNL_MEMORY_ALLOCATE special value. Instructs the library to
87
+ /// allocate the buffer that corresponds to the memory allocation kind
88
+ /// @p memory_kind for the memory object. In this case the library
89
+ /// owns the buffer.
90
+ /// - The DNNL_MEMORY_NONE specific value. Instructs the library to
91
+ /// create memory object without an underlying buffer.
92
+ /// @returns #dnnl_success on success and a status describing the error
93
+ /// otherwise.
94
+ dnnl_status_t DNNL_API dnnl_sycl_interop_memory_create(dnnl_memory_t *memory,
95
+ const_dnnl_memory_desc_t memory_desc, dnnl_engine_t engine,
96
+ dnnl_sycl_interop_memory_kind_t memory_kind, void *handle);
97
+
98
+ /// Returns the memory allocation kind associated with a memory object.
99
+ ///
100
+ /// @param memory Memory to query.
101
+ /// @param memory_kind Output underlying memory allocation kind of the memory
102
+ /// object.
103
+ /// @returns #dnnl_success on success and a status describing the error
104
+ /// otherwise.
105
+ dnnl_status_t DNNL_API dnnl_sycl_interop_memory_get_memory_kind(
106
+ const_dnnl_memory_t memory,
107
+ dnnl_sycl_interop_memory_kind_t *memory_kind);
108
+
109
+ /// Sets a SYCL buffer for a memory object.
110
+ ///
111
+ /// @param memory Memory object.
112
+ /// @param buffer SYCL buffer to be set in the memory object.
113
+ /// @returns #dnnl_success on success and a status describing the error
114
+ /// otherwise.
115
+ dnnl_status_t DNNL_API dnnl_sycl_interop_memory_set_buffer(
116
+ dnnl_memory_t memory, void *buffer);
117
+
118
+ /// Creates an execution stream for a given engine associated with a SYCL
119
+ /// queue.
120
+ ///
121
+ /// @param stream Output execution stream.
122
+ /// @param engine Engine to create the execution stream on.
123
+ /// @param queue SYCL queue to use.
124
+ /// @returns #dnnl_success on success and a status describing the error
125
+ /// otherwise.
126
+ dnnl_status_t DNNL_API dnnl_sycl_interop_stream_create(
127
+ dnnl_stream_t *stream, dnnl_engine_t engine, void *queue);
128
+
129
+ /// Returns the SYCL queue associated with an execution stream.
130
+ ///
131
+ /// @param stream Execution stream to query.
132
+ /// @param queue Output SYCL command queue.
133
+ /// @returns #dnnl_success on success and a status describing the error
134
+ /// otherwise.
135
+ dnnl_status_t DNNL_API dnnl_sycl_interop_stream_get_queue(
136
+ dnnl_stream_t stream, void **queue);
137
+
138
+ /// Executes computations specified by the primitive in a specified stream and
139
+ /// returns a SYCL event.
140
+ ///
141
+ /// @param primitive Primitive to execute.
142
+ /// @param stream Stream to use.
143
+ /// @param nargs Number of arguments.
144
+ /// @param args Array of arguments. Each argument is an
145
+ /// <index, #dnnl_memory_t> pair. The index is one of the `DNNL_ARG_*`
146
+ /// values such as `DNNL_ARG_SRC`. Unless runtime shapes are used (see
147
+ /// #DNNL_RUNTIME_DIM_VAL), the memory object must have the same memory
148
+ /// descriptor as that returned by
149
+ /// #dnnl_primitive_desc_query_md(#dnnl_query_exec_arg_md, index).
150
+ /// @param deps A pointer to std::vector<sycl::event> that contains
151
+ /// dependencies.
152
+ /// @param return_event Output event.
153
+ /// @returns #dnnl_success on success and a status describing the error
154
+ /// otherwise.
155
+ dnnl_status_t DNNL_API dnnl_sycl_interop_primitive_execute(
156
+ const_dnnl_primitive_t primitive, dnnl_stream_t stream, int nargs,
157
+ const dnnl_exec_arg_t *args, const void *deps, void *return_event);
158
+
159
+ /// @} dnnl_api_sycl_interop
160
+
161
+ /// @} dnnl_api_interop
162
+
163
+ /// @} dnnl_api
164
+
165
+ #ifdef __cplusplus
166
+ }
167
+ #endif
168
+
169
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/oneapi/dnnl/dnnl_sycl_types.h ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2020-2021 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef ONEAPI_DNNL_DNNL_SYCL_TYPES_H
18
+ #define ONEAPI_DNNL_DNNL_SYCL_TYPES_H
19
+
20
+ #ifdef __cplusplus
21
+ extern "C" {
22
+ #endif
23
+
24
+ /// @addtogroup dnnl_api
25
+ /// @{
26
+
27
+ /// @addtogroup dnnl_api_interop
28
+ /// @{
29
+
30
+ /// @addtogroup dnnl_api_sycl_interop
31
+ /// @{
32
+
33
+ /// Memory allocation kind.
34
+ typedef enum {
35
+ /// USM (device, shared, host, or unknown) memory allocation kind - default.
36
+ dnnl_sycl_interop_usm,
37
+ /// Buffer memory allocation kind.
38
+ dnnl_sycl_interop_buffer,
39
+ } dnnl_sycl_interop_memory_kind_t;
40
+
41
+ /// @} dnnl_api_sycl_interop
42
+
43
+ /// @} dnnl_api_interop
44
+
45
+ /// @} dnnl_api
46
+
47
+ #ifdef __cplusplus
48
+ }
49
+ #endif
50
+
51
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/include/oneapi/dnnl/dnnl_version.h.in ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2019-2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef ONEAPI_DNNL_DNNL_VERSION_H
18
+ #define ONEAPI_DNNL_DNNL_VERSION_H
19
+
20
+ // clang-format off
21
+
22
+ /// Major version
23
+ #define DNNL_VERSION_MAJOR @DNNL_VERSION_MAJOR@
24
+
25
+ /// Minor version
26
+ #define DNNL_VERSION_MINOR @DNNL_VERSION_MINOR@
27
+
28
+ /// Patch version
29
+ #define DNNL_VERSION_PATCH @DNNL_VERSION_PATCH@
30
+
31
+ /// Git commit hash
32
+ #define DNNL_VERSION_HASH "@DNNL_VERSION_HASH@"
33
+
34
+ // clang-format on
35
+
36
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/batch_normalization_pd.hpp ADDED
@@ -0,0 +1,366 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2016-2023 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef COMMON_BATCH_NORMALIZATION_PD_HPP
18
+ #define COMMON_BATCH_NORMALIZATION_PD_HPP
19
+
20
+ #include "oneapi/dnnl/dnnl.h"
21
+
22
+ #include "c_types_map.hpp"
23
+ #include "primitive_desc.hpp"
24
+ #include "utils.hpp"
25
+
26
+ namespace dnnl {
27
+ namespace impl {
28
+
29
+ struct batch_normalization_fwd_pd_t;
30
+
31
+ struct batch_normalization_pd_t : public primitive_desc_t {
32
+ static constexpr auto base_pkind = primitive_kind::batch_normalization;
33
+
34
+ const batch_normalization_desc_t *desc() const { return &desc_; }
35
+ const op_desc_t *op_desc() const override {
36
+ return reinterpret_cast<const op_desc_t *>(this->desc());
37
+ }
38
+
39
+ status_t query(query_t what, int idx, void *result) const override {
40
+ switch (what) {
41
+ case query::prop_kind:
42
+ *(prop_kind_t *)result = desc()->prop_kind;
43
+ break;
44
+ case query::epsilon_f32:
45
+ *(float *)result = desc()->batch_norm_epsilon;
46
+ break;
47
+ case query::flags: *(uint32_t *)result = desc()->flags; break;
48
+ default: return primitive_desc_t::query(what, idx, result);
49
+ }
50
+ return status::success;
51
+ }
52
+
53
+ /* common batch_normalization aux functions */
54
+
55
+ dim_t MB() const { return src_md()->dims[0]; }
56
+ dim_t C() const { return src_md()->dims[1]; }
57
+ dim_t D() const { return ndims() >= 5 ? src_md()->dims[ndims() - 3] : 1; }
58
+ dim_t H() const { return ndims() >= 4 ? src_md()->dims[ndims() - 2] : 1; }
59
+ dim_t W() const { return ndims() >= 3 ? src_md()->dims[ndims() - 1] : 1; }
60
+
61
+ int ndims() const { return src_md()->ndims; }
62
+
63
+ bool stats_is_src() const {
64
+ return desc_.flags & normalization_flags::use_global_stats;
65
+ }
66
+ bool use_scale() const {
67
+ return desc_.flags & normalization_flags::use_scale;
68
+ }
69
+ bool use_shift() const {
70
+ return desc_.flags & normalization_flags::use_shift;
71
+ }
72
+ bool use_global_stats() const {
73
+ return desc_.flags & normalization_flags::use_global_stats;
74
+ }
75
+ bool fuse_norm_relu() const {
76
+ return desc_.flags & normalization_flags::fuse_norm_relu;
77
+ }
78
+ bool fuse_norm_add_relu() const {
79
+ return desc_.flags & normalization_flags::fuse_norm_add_relu;
80
+ }
81
+ bool with_relu_post_op(bool require_nslope_zero = true) const {
82
+ const auto &p = this->attr()->post_ops_;
83
+ const bool nslope_zero_ok
84
+ = IMPLICATION(is_training(), require_nslope_zero);
85
+ return p.len() == 1 && p.entry_[0].is_relu(true, require_nslope_zero)
86
+ && nslope_zero_ok;
87
+ }
88
+
89
+ float alpha() const {
90
+ const auto &p = attr()->post_ops_;
91
+ const bool entry_size_ok = p.entry_.size() > 0;
92
+ assert(entry_size_ok || fuse_norm_relu() || fuse_norm_add_relu());
93
+ if (entry_size_ok) return p.entry_[0].eltwise.alpha;
94
+ return 0.f;
95
+ }
96
+
97
+ bool is_fwd() const {
98
+ return utils::one_of(desc_.prop_kind, prop_kind::forward_training,
99
+ prop_kind::forward_inference);
100
+ }
101
+
102
+ bool is_training() const {
103
+ return desc_.prop_kind == prop_kind::forward_training;
104
+ }
105
+
106
+ bool has_zero_dim_memory() const {
107
+ return memory_desc_wrapper(src_md()).has_zero_dim();
108
+ }
109
+
110
+ protected:
111
+ batch_normalization_desc_t desc_;
112
+ const batch_normalization_fwd_pd_t *hint_fwd_pd_;
113
+
114
+ memory_desc_t src_md_;
115
+ memory_desc_t stat_md_;
116
+ memory_desc_t scaleshift_md_;
117
+
118
+ memory_desc_t ws_md_;
119
+
120
+ batch_normalization_pd_t(const batch_normalization_desc_t *adesc,
121
+ const primitive_attr_t *attr,
122
+ const batch_normalization_fwd_pd_t *hint_fwd_pd)
123
+ : primitive_desc_t(attr, base_pkind)
124
+ , desc_(*adesc)
125
+ , hint_fwd_pd_(hint_fwd_pd)
126
+ , src_md_(desc_.src_desc)
127
+ , stat_md_(desc_.stat_desc)
128
+ , scaleshift_md_(desc_.scaleshift_desc)
129
+ , ws_md_() {}
130
+
131
+ virtual status_t init_default_ws(size_t bits_per_element) {
132
+ const auto src_mdw = memory_desc_wrapper(src_md_);
133
+
134
+ const dim_t nelems = src_mdw.nelems(true);
135
+ const dim_t bits_per_byte = 8;
136
+ const dims_t ws_sz = {
137
+ (dim_t)utils::div_up(nelems * bits_per_element, bits_per_byte)};
138
+ return memory_desc_init_by_tag(
139
+ ws_md_, 1, ws_sz, data_type::u8, format_tag::x);
140
+ }
141
+ };
142
+
143
+ struct batch_normalization_fwd_pd_t : public batch_normalization_pd_t {
144
+ typedef batch_normalization_fwd_pd_t base_class;
145
+ typedef batch_normalization_fwd_pd_t hint_class;
146
+
147
+ arg_usage_t arg_usage(int arg) const override {
148
+ if (arg == DNNL_ARG_SRC) return arg_usage_t::input;
149
+ if (arg == DNNL_ARG_SRC_1 && fuse_norm_add_relu())
150
+ return arg_usage_t::input;
151
+ if (arg == DNNL_ARG_DST) return arg_usage_t::output;
152
+
153
+ if (utils::one_of(arg, DNNL_ARG_MEAN, DNNL_ARG_VARIANCE)) {
154
+ if (stats_is_src()) return arg_usage_t::input;
155
+ if (!stats_is_src() && is_training()) return arg_usage_t::output;
156
+ return arg_usage_t::unused;
157
+ }
158
+
159
+ if (arg == DNNL_ARG_SCALE && use_scale()) return arg_usage_t::input;
160
+ if (arg == DNNL_ARG_SHIFT && use_shift()) return arg_usage_t::input;
161
+
162
+ if (arg == DNNL_ARG_WORKSPACE && !types::is_zero_md(workspace_md()))
163
+ return arg_usage_t::output;
164
+
165
+ return primitive_desc_t::arg_usage(arg);
166
+ }
167
+
168
+ const memory_desc_t *arg_md(
169
+ int arg, bool user_input = false) const override {
170
+ switch (arg) {
171
+ case DNNL_ARG_SRC_1: return dst_md(3);
172
+ case DNNL_ARG_SRC: return src_md(0);
173
+ case DNNL_ARG_DST: return dst_md(0, user_input);
174
+ case DNNL_ARG_MEAN: return stats_is_src() ? src_md(1) : dst_md(1);
175
+ case DNNL_ARG_VARIANCE:
176
+ return stats_is_src() ? src_md(2) : dst_md(2);
177
+ case DNNL_ARG_SCALE:
178
+ case DNNL_ARG_SHIFT: return weights_md(0);
179
+ default: return batch_normalization_pd_t::arg_md(arg);
180
+ }
181
+ }
182
+
183
+ const memory_desc_t *src_md(
184
+ int index = 0, bool user_input = false) const override {
185
+ if (index == 0) return user_input ? &desc()->src_desc : &src_md_;
186
+ if (stats_is_src() && (index == 1 || index == 2)) return &stat_md_;
187
+ return &glob_zero_md;
188
+ }
189
+
190
+ const memory_desc_t *dst_md(
191
+ int index = 0, bool user_input = false) const override {
192
+ if (index == 0) return user_input ? &desc()->dst_desc : &dst_md_;
193
+ if (!stats_is_src() && is_training() && (index == 1 || index == 2))
194
+ return &stat_md_;
195
+ if (fuse_norm_add_relu() && index == 3) return &dst_md_;
196
+ return &glob_zero_md;
197
+ }
198
+
199
+ const memory_desc_t *weights_md(
200
+ int index = 0, bool user_input = false) const override {
201
+ return index == 0 ? &scaleshift_md_ : &glob_zero_md;
202
+ }
203
+
204
+ const memory_desc_t *workspace_md(int index = 0) const override {
205
+ return index == 0 ? &ws_md_ : &glob_zero_md;
206
+ }
207
+
208
+ const memory_desc_t *stat_md() const {
209
+ return stats_is_src() ? src_md(1) : dst_md(1);
210
+ }
211
+
212
+ int n_inputs() const override {
213
+ return 1 + 2 * stats_is_src() + use_scale() + use_shift()
214
+ + fuse_norm_add_relu();
215
+ }
216
+ int n_outputs() const override {
217
+ return 1 + !types::is_zero_md(workspace_md())
218
+ + (2 * (!stats_is_src())) * is_training();
219
+ }
220
+
221
+ protected:
222
+ memory_desc_t dst_md_;
223
+
224
+ batch_normalization_fwd_pd_t(const batch_normalization_desc_t *adesc,
225
+ const primitive_attr_t *attr,
226
+ const batch_normalization_fwd_pd_t *hint_fwd_pd)
227
+ : batch_normalization_pd_t(adesc, attr, hint_fwd_pd)
228
+ , dst_md_(desc_.dst_desc) {}
229
+
230
+ bool set_default_formats_common() {
231
+ return IMPLICATION(dst_md_.format_kind == format_kind::any,
232
+ memory_desc_init_by_md_and_dt(
233
+ dst_md_, src_md_, dst_md_.data_type)
234
+ == status::success);
235
+ }
236
+ bool check_scale_shift_data_type() const {
237
+ return IMPLICATION(use_scale() || use_shift(),
238
+ weights_md()->data_type == data_type::f32);
239
+ }
240
+ };
241
+
242
+ struct batch_normalization_bwd_pd_t : public batch_normalization_pd_t {
243
+ typedef batch_normalization_bwd_pd_t base_class;
244
+ typedef batch_normalization_fwd_pd_t hint_class;
245
+
246
+ arg_usage_t arg_usage(int arg) const override {
247
+ if (utils::one_of(arg, DNNL_ARG_SRC, DNNL_ARG_MEAN, DNNL_ARG_VARIANCE,
248
+ DNNL_ARG_DIFF_DST))
249
+ return arg_usage_t::input;
250
+
251
+ if (arg == DNNL_ARG_SCALE && use_scale()) return arg_usage_t::input;
252
+ if (arg == DNNL_ARG_SHIFT && use_shift()) return arg_usage_t::input;
253
+
254
+ if (arg == DNNL_ARG_WORKSPACE && !types::is_zero_md(workspace_md()))
255
+ return arg_usage_t::input;
256
+
257
+ if (arg == DNNL_ARG_DIFF_SRC) return arg_usage_t::output;
258
+ if (arg == DNNL_ARG_DIFF_SRC_1 && fuse_norm_add_relu())
259
+ return arg_usage_t::output;
260
+
261
+ if (arg == DNNL_ARG_DIFF_SCALE && use_scale())
262
+ return arg_usage_t::output;
263
+ if (arg == DNNL_ARG_DIFF_SHIFT && use_shift())
264
+ return arg_usage_t::output;
265
+ return primitive_desc_t::arg_usage(arg);
266
+ }
267
+
268
+ const memory_desc_t *arg_md(
269
+ int arg, bool user_input = false) const override {
270
+ switch (arg) {
271
+ case DNNL_ARG_SRC: return src_md(0);
272
+ case DNNL_ARG_MEAN: return src_md(1);
273
+ case DNNL_ARG_VARIANCE: return src_md(2);
274
+ case DNNL_ARG_SCALE:
275
+ case DNNL_ARG_SHIFT: return weights_md(0);
276
+ case DNNL_ARG_DIFF_SRC_1: return diff_dst_md(1);
277
+ case DNNL_ARG_DIFF_SRC: return diff_src_md(0);
278
+ case DNNL_ARG_DIFF_DST: return diff_dst_md(0, user_input);
279
+ case DNNL_ARG_DIFF_SCALE:
280
+ case DNNL_ARG_DIFF_SHIFT: return diff_weights_md(0);
281
+ default: return batch_normalization_pd_t::arg_md(arg);
282
+ }
283
+ }
284
+
285
+ const memory_desc_t *src_md(
286
+ int index = 0, bool user_input = false) const override {
287
+ if (index == 0) return user_input ? &desc()->src_desc : &src_md_;
288
+ if (index == 1 || index == 2) return &stat_md_;
289
+ return &glob_zero_md;
290
+ }
291
+ const memory_desc_t *diff_dst_md(
292
+ int index = 0, bool user_input = false) const override {
293
+ if (index == 0)
294
+ return user_input ? &desc()->diff_dst_desc : &diff_dst_md_;
295
+ if (fuse_norm_add_relu() && index == 1)
296
+ return user_input ? &desc()->diff_dst_desc : &diff_dst_md_;
297
+ return &glob_zero_md;
298
+ }
299
+ const memory_desc_t *diff_src_md(
300
+ int index = 0, bool user_input = false) const override {
301
+ if (index == 0)
302
+ return user_input ? &desc()->diff_src_desc : &diff_src_md_;
303
+ return &glob_zero_md;
304
+ }
305
+
306
+ const memory_desc_t *weights_md(
307
+ int index = 0, bool user_input = false) const override {
308
+ return index == 0 ? &scaleshift_md_ : &glob_zero_md;
309
+ }
310
+ const memory_desc_t *diff_weights_md(
311
+ int index = 0, bool user_input = false) const override {
312
+ return index == 0 ? &diff_scaleshift_md_ : &glob_zero_md;
313
+ }
314
+
315
+ const memory_desc_t *workspace_md(int index = 0) const override {
316
+ return index == 0 ? &ws_md_ : &glob_zero_md;
317
+ }
318
+
319
+ const memory_desc_t *stat_md() const { return src_md(1); }
320
+
321
+ int n_inputs() const override {
322
+ return 4 + (!types::is_zero_md(workspace_md())) + use_scale();
323
+ }
324
+ int n_outputs() const override {
325
+ return 1 + fuse_norm_add_relu()
326
+ + (!types::is_zero_md(diff_weights_md()))
327
+ * (use_scale() + use_shift());
328
+ }
329
+
330
+ protected:
331
+ memory_desc_t diff_src_md_;
332
+ memory_desc_t diff_dst_md_;
333
+ memory_desc_t diff_scaleshift_md_;
334
+
335
+ batch_normalization_bwd_pd_t(const batch_normalization_desc_t *adesc,
336
+ const primitive_attr_t *attr,
337
+ const batch_normalization_fwd_pd_t *hint_fwd_pd)
338
+ : batch_normalization_pd_t(adesc, attr, hint_fwd_pd)
339
+ , diff_src_md_(desc_.diff_src_desc)
340
+ , diff_dst_md_(desc_.diff_dst_desc)
341
+ , diff_scaleshift_md_(desc_.diff_scaleshift_desc) {}
342
+
343
+ bool set_default_formats_common() {
344
+ return IMPLICATION(diff_dst_md_.format_kind == format_kind::any,
345
+ memory_desc_init_by_md_and_dt(
346
+ diff_dst_md_, src_md_, diff_dst_md_.data_type)
347
+ == status::success)
348
+ && IMPLICATION(diff_src_md_.format_kind == format_kind::any,
349
+ memory_desc_init_by_md_and_dt(
350
+ diff_src_md_, src_md_, diff_src_md_.data_type)
351
+ == status::success);
352
+ }
353
+
354
+ bool check_scale_shift_data_type() const {
355
+ return IMPLICATION(use_scale() || use_shift(),
356
+ utils::everyone_is(data_type::f32, weights_md()->data_type,
357
+ diff_weights_md()->data_type));
358
+ }
359
+ };
360
+
361
+ } // namespace impl
362
+ } // namespace dnnl
363
+
364
+ #endif
365
+
366
+ // vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/bfloat16.hpp ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2019-2023 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef COMMON_BFLOAT16_HPP
18
+ #define COMMON_BFLOAT16_HPP
19
+
20
+ #include <array>
21
+ #include <cmath>
22
+ #include <cstddef>
23
+ #include <cstdint>
24
+ #include <cstdlib>
25
+ #include <limits>
26
+ #include <type_traits>
27
+
28
+ #include "common/bit_cast.hpp"
29
+
30
+ #include "oneapi/dnnl/dnnl.h"
31
+
32
+ namespace dnnl {
33
+ namespace impl {
34
+
35
+ #if DNNL_CPU_RUNTIME != DNNL_RUNTIME_NONE
36
+ struct bfloat16_t;
37
+ bool try_cvt_float_to_bfloat16(bfloat16_t *out, const float *inp);
38
+ #endif
39
+
40
+ struct bfloat16_t {
41
+ uint16_t raw_bits_;
42
+ bfloat16_t() = default;
43
+ constexpr bfloat16_t(uint16_t r, bool) : raw_bits_(r) {}
44
+ bfloat16_t(float f) { (*this) = f; }
45
+
46
+ template <typename IntegerType,
47
+ typename SFINAE = typename std::enable_if<
48
+ std::is_integral<IntegerType>::value>::type>
49
+ bfloat16_t(const IntegerType i)
50
+ : raw_bits_ {convert_bits_of_normal_or_zero(
51
+ utils::bit_cast<uint32_t>(static_cast<float>(i)))} {}
52
+
53
+ bfloat16_t DNNL_API &operator=(float f);
54
+
55
+ template <typename IntegerType,
56
+ typename SFINAE = typename std::enable_if<
57
+ std::is_integral<IntegerType>::value>::type>
58
+ bfloat16_t &operator=(const IntegerType i) {
59
+ // Call the converting constructor that is optimized for integer types,
60
+ // followed by the fast defaulted move-assignment operator.
61
+ return (*this) = bfloat16_t {i};
62
+ }
63
+
64
+ DNNL_API operator float() const;
65
+
66
+ bfloat16_t &operator+=(const float a) {
67
+ (*this) = float {*this} + a;
68
+ return *this;
69
+ }
70
+
71
+ private:
72
+ // Converts the 32 bits of a normal float or zero to the bits of a bfloat16.
73
+ static constexpr uint16_t convert_bits_of_normal_or_zero(
74
+ const uint32_t bits) {
75
+ return static_cast<uint16_t>(
76
+ uint32_t {bits
77
+ + uint32_t {0x7FFFU + (uint32_t {bits >> 16} & 1U)}}
78
+ >> 16);
79
+ }
80
+ };
81
+
82
+ static_assert(sizeof(bfloat16_t) == 2, "bfloat16_t must be 2 bytes");
83
+
84
+ void cvt_float_to_bfloat16(bfloat16_t *out, const float *inp, size_t nelems);
85
+ void cvt_bfloat16_to_float(float *out, const bfloat16_t *inp, size_t nelems);
86
+
87
+ // performs element-by-element sum of inp and add float arrays and stores
88
+ // result to bfloat16 out array with downconversion
89
+ // out[:] = (bfloat16_t)(inp0[:] + inp1[:])
90
+ void add_floats_and_cvt_to_bfloat16(
91
+ bfloat16_t *out, const float *inp0, const float *inp1, size_t nelems);
92
+
93
+ } // namespace impl
94
+ } // namespace dnnl
95
+
96
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/binary_pd.hpp ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2019-2023 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef COMMON_BINARY_PD_HPP
18
+ #define COMMON_BINARY_PD_HPP
19
+
20
+ #include <assert.h>
21
+
22
+ #include "oneapi/dnnl/dnnl.h"
23
+
24
+ #include "c_types_map.hpp"
25
+ #include "primitive_desc.hpp"
26
+ #include "utils.hpp"
27
+
28
+ namespace dnnl {
29
+ namespace impl {
30
+
31
+ struct binary_pd_t : public primitive_desc_t {
32
+ static constexpr auto base_pkind = primitive_kind::binary;
33
+
34
+ typedef binary_pd_t base_class;
35
+ typedef binary_pd_t hint_class;
36
+
37
+ const binary_desc_t *desc() const { return &desc_; }
38
+ const op_desc_t *op_desc() const override {
39
+ return reinterpret_cast<const op_desc_t *>(this->desc());
40
+ }
41
+
42
+ status_t query(query_t what, int idx, void *result) const override {
43
+ switch (what) {
44
+ case query::alg_kind:
45
+ *(alg_kind_t *)result = desc()->alg_kind;
46
+ break;
47
+ default: return primitive_desc_t::query(what, idx, result);
48
+ }
49
+ return status::success;
50
+ }
51
+
52
+ arg_usage_t arg_usage(int arg) const override {
53
+ if (arg == DNNL_ARG_SRC_0 || arg == DNNL_ARG_SRC_1)
54
+ return arg_usage_t::input;
55
+
56
+ if (arg == DNNL_ARG_DST) return arg_usage_t::output;
57
+
58
+ return primitive_desc_t::arg_usage(arg);
59
+ }
60
+
61
+ const memory_desc_t *arg_md(
62
+ int arg, bool user_input = false) const override {
63
+ switch (arg) {
64
+ case DNNL_ARG_SRC_0: return src_md(0);
65
+ case DNNL_ARG_SRC_1: return src_md(1);
66
+ case DNNL_ARG_DST: return dst_md(0, user_input);
67
+ default: return primitive_desc_t::arg_md(arg);
68
+ }
69
+ }
70
+
71
+ const memory_desc_t *src_md(
72
+ int index = 0, bool user_input = false) const override {
73
+ if (index == 0) return user_input ? &desc()->src_desc[0] : &src0_md_;
74
+ if (index == 1) return user_input ? &desc()->src_desc[1] : &src1_md_;
75
+ return &glob_zero_md;
76
+ }
77
+ const memory_desc_t *dst_md(
78
+ int index = 0, bool user_input = false) const override {
79
+ if (index == 0) return user_input ? &desc()->dst_desc : &dst_md_;
80
+ return &glob_zero_md;
81
+ }
82
+
83
+ int n_inputs() const override { return 2 + n_binary_po_inputs(); }
84
+ int n_outputs() const override { return 1; }
85
+
86
+ const dims_t &broadcast_dims() const { return broadcast_dims_; }
87
+
88
+ bool has_zero_dim_memory() const {
89
+ return memory_desc_wrapper(src_md(0)).has_zero_dim();
90
+ }
91
+
92
+ int ndims() const { return memory_desc_wrapper(src_md(0)).ndims(); }
93
+
94
+ bool is_tensor_op() const {
95
+ const memory_desc_wrapper src0_d(src_md(0));
96
+ const memory_desc_wrapper src1_d(src_md(1));
97
+ return src0_d.consistent_with(src1_d);
98
+ }
99
+
100
+ protected:
101
+ binary_desc_t desc_;
102
+
103
+ memory_desc_t src0_md_;
104
+ memory_desc_t src1_md_;
105
+ memory_desc_t dst_md_;
106
+
107
+ dims_t broadcast_dims_;
108
+
109
+ binary_pd_t(const binary_desc_t *adesc, const primitive_attr_t *attr,
110
+ const binary_pd_t *hint_fwd_pd)
111
+ : primitive_desc_t(attr, base_pkind)
112
+ , desc_(*adesc)
113
+ , src0_md_(desc_.src_desc[0])
114
+ , src1_md_(desc_.src_desc[1])
115
+ , dst_md_(desc_.dst_desc) {
116
+ init_broadcast_dims();
117
+ }
118
+
119
+ status_t set_default_params() {
120
+ if (src1_md_.format_kind == format_kind::any) {
121
+ const memory_desc_wrapper src_d(src_md(0));
122
+ if (src_d.is_blocking_desc()) {
123
+ CHECK(memory_desc_init_by_blocking_desc(
124
+ src1_md_, src_d.blocking_desc()));
125
+ }
126
+ }
127
+
128
+ if (dst_md_.format_kind == format_kind::any) {
129
+ const memory_desc_wrapper src_d(src_md(0));
130
+ if (src_d.is_blocking_desc()) {
131
+ CHECK(memory_desc_init_by_blocking_desc(
132
+ dst_md_, src_d.blocking_desc()));
133
+ }
134
+ }
135
+
136
+ return status::success;
137
+ }
138
+
139
+ bool attr_post_ops_ok() const {
140
+ using namespace primitive_kind;
141
+ const auto &p = attr()->post_ops_;
142
+ switch (p.len()) {
143
+ case 0: return true;
144
+ case 1: return p.contain(sum, 0) || p.contain(eltwise, 0);
145
+ case 2: return p.contain(sum, 0) && p.contain(eltwise, 1);
146
+ default: return false;
147
+ }
148
+ }
149
+
150
+ bool attr_scales_ok(const std::vector<int> &supported_args
151
+ = {DNNL_ARG_SRC_0, DNNL_ARG_SRC_1, DNNL_ARG_DST}) const {
152
+ bool ok = attr()->scales_.has_default_values(supported_args);
153
+ for (int arg : supported_args) {
154
+ const auto &mask = attr()->scales_.get(arg).mask_;
155
+ ok = ok && (mask == 0);
156
+ }
157
+ return ok;
158
+ }
159
+
160
+ private:
161
+ void init_broadcast_dims() {
162
+ const dims_t &dims_A = src_md(0)->dims;
163
+ const dims_t &dims_B = src_md(1)->dims;
164
+
165
+ for (int d = 0; d < ndims(); ++d)
166
+ broadcast_dims_[d]
167
+ = (dims_A[d] == dims_B[d] && dims_A[d] != 1) ? 0 : 1;
168
+ }
169
+ };
170
+
171
+ } // namespace impl
172
+ } // namespace dnnl
173
+
174
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/bit_cast.hpp ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2020-2022 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef COMMON_BIT_CAST_HPP
18
+ #define COMMON_BIT_CAST_HPP
19
+
20
+ #include <cstdint>
21
+ #include <cstring>
22
+ #include <type_traits>
23
+
24
+ namespace dnnl {
25
+ namespace impl {
26
+ namespace utils {
27
+
28
+ // Returns a value of type T by reinterpretting the representation of the input
29
+ // value (part of C++20).
30
+ //
31
+ // Provides a safe implementation of type punning.
32
+ //
33
+ // Constraints:
34
+ // - U and T must have the same size
35
+ // - U and T must be trivially copyable
36
+ template <typename T, typename U>
37
+ inline T bit_cast(const U &u) {
38
+ static_assert(sizeof(T) == sizeof(U), "Bit-casting must preserve size.");
39
+ static_assert(std::is_trivial<T>::value, "T must be trivially copyable.");
40
+ static_assert(std::is_trivial<U>::value, "U must be trivially copyable.");
41
+
42
+ T t;
43
+ // Since bit_cast is used in SYCL kernels it cannot use std::memcpy as it
44
+ // can be implemented as @llvm.objectsize.* + __memcpy_chk for Release
45
+ // builds which cannot be translated to SPIR-V.
46
+ uint8_t *t_ptr = reinterpret_cast<uint8_t *>(&t);
47
+ const uint8_t *u_ptr = reinterpret_cast<const uint8_t *>(&u);
48
+ for (size_t i = 0; i < sizeof(U); i++)
49
+ t_ptr[i] = u_ptr[i];
50
+ return t;
51
+ }
52
+
53
+ } // namespace utils
54
+ } // namespace impl
55
+ } // namespace dnnl
56
+
57
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/c_types_map.hpp ADDED
@@ -0,0 +1,1966 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2016-2023 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef COMMON_C_TYPES_MAP_HPP
18
+ #define COMMON_C_TYPES_MAP_HPP
19
+
20
+ #include "oneapi/dnnl/dnnl_types.h"
21
+
22
+ // These aliases should be in the global namespace as they are intended
23
+ // to give names that better reflects the meaning of the entities
24
+ using primitive_iface_t = dnnl_primitive;
25
+ using primitive_desc_iface_t = dnnl_primitive_desc;
26
+
27
+ namespace dnnl {
28
+ namespace impl {
29
+
30
+ // TODO: autogenerate this
31
+
32
+ using dim_t = dnnl_dim_t;
33
+ using dims_t = dnnl_dims_t;
34
+ using stride_t = dnnl_dim_t;
35
+ using strides_t = dnnl_dims_t;
36
+
37
+ #ifdef DNNL_STATUS_NODISCARD
38
+ // nodiscard is not allowed in type aliases
39
+ using status_t __attribute__((warn_unused_result)) = dnnl_status_t;
40
+ #else
41
+ using status_t = dnnl_status_t;
42
+ #endif
43
+ namespace status {
44
+ const status_t success = dnnl_success;
45
+ const status_t out_of_memory = dnnl_out_of_memory;
46
+ const status_t invalid_arguments = dnnl_invalid_arguments;
47
+ const status_t unimplemented = dnnl_unimplemented;
48
+ const status_t last_impl_reached = dnnl_last_impl_reached;
49
+ const status_t runtime_error = dnnl_runtime_error;
50
+ const status_t not_required = dnnl_not_required;
51
+ } // namespace status
52
+
53
+ using prop_kind_t = dnnl_prop_kind_t;
54
+ namespace prop_kind {
55
+ const prop_kind_t undef = dnnl_prop_kind_undef;
56
+ const prop_kind_t forward_training = dnnl_forward_training;
57
+ const prop_kind_t forward_inference = dnnl_forward_inference;
58
+ const prop_kind_t forward = dnnl_forward;
59
+ const prop_kind_t backward = dnnl_backward;
60
+ const prop_kind_t backward_data = dnnl_backward_data;
61
+ const prop_kind_t backward_weights = dnnl_backward_weights;
62
+ const prop_kind_t backward_bias = dnnl_backward_bias;
63
+ } // namespace prop_kind
64
+
65
+ using alg_kind_t = dnnl_alg_kind_t;
66
+ namespace alg_kind {
67
+ const alg_kind_t undef = dnnl_alg_kind_undef;
68
+ const alg_kind_t convolution_auto = dnnl_convolution_auto;
69
+ const alg_kind_t convolution_direct = dnnl_convolution_direct;
70
+ const alg_kind_t convolution_winograd = dnnl_convolution_winograd;
71
+ const alg_kind_t deconvolution_direct = dnnl_deconvolution_direct;
72
+ const alg_kind_t deconvolution_winograd = dnnl_deconvolution_winograd;
73
+ const alg_kind_t eltwise_relu = dnnl_eltwise_relu;
74
+ const alg_kind_t eltwise_tanh = dnnl_eltwise_tanh;
75
+ const alg_kind_t eltwise_elu = dnnl_eltwise_elu;
76
+ const alg_kind_t eltwise_square = dnnl_eltwise_square;
77
+ const alg_kind_t eltwise_abs = dnnl_eltwise_abs;
78
+ const alg_kind_t eltwise_sqrt = dnnl_eltwise_sqrt;
79
+ const alg_kind_t eltwise_swish = dnnl_eltwise_swish;
80
+ const alg_kind_t eltwise_linear = dnnl_eltwise_linear;
81
+ const alg_kind_t eltwise_soft_relu = dnnl_eltwise_soft_relu;
82
+ const alg_kind_t eltwise_logistic = dnnl_eltwise_logistic;
83
+ const alg_kind_t eltwise_mish = dnnl_eltwise_mish;
84
+ const alg_kind_t eltwise_exp = dnnl_eltwise_exp;
85
+ const alg_kind_t eltwise_log = dnnl_eltwise_log;
86
+ const alg_kind_t eltwise_clip = dnnl_eltwise_clip;
87
+ const alg_kind_t eltwise_clip_v2 = dnnl_eltwise_clip_v2;
88
+ const alg_kind_t eltwise_pow = dnnl_eltwise_pow;
89
+ const alg_kind_t eltwise_gelu_tanh = dnnl_eltwise_gelu_tanh;
90
+ const alg_kind_t eltwise_gelu_erf = dnnl_eltwise_gelu_erf;
91
+ const alg_kind_t eltwise_hardswish = dnnl_eltwise_hardswish;
92
+ const alg_kind_t eltwise_hardsigmoid = dnnl_eltwise_hardsigmoid;
93
+ const alg_kind_t eltwise_relu_use_dst_for_bwd
94
+ = dnnl_eltwise_relu_use_dst_for_bwd;
95
+ const alg_kind_t eltwise_tanh_use_dst_for_bwd
96
+ = dnnl_eltwise_tanh_use_dst_for_bwd;
97
+ const alg_kind_t eltwise_elu_use_dst_for_bwd = dnnl_eltwise_elu_use_dst_for_bwd;
98
+ const alg_kind_t eltwise_sqrt_use_dst_for_bwd
99
+ = dnnl_eltwise_sqrt_use_dst_for_bwd;
100
+ const alg_kind_t eltwise_logistic_use_dst_for_bwd
101
+ = dnnl_eltwise_logistic_use_dst_for_bwd;
102
+ const alg_kind_t eltwise_exp_use_dst_for_bwd = dnnl_eltwise_exp_use_dst_for_bwd;
103
+ const alg_kind_t eltwise_clip_v2_use_dst_for_bwd
104
+ = dnnl_eltwise_clip_v2_use_dst_for_bwd;
105
+ const alg_kind_t eltwise_round = dnnl_eltwise_round;
106
+ const alg_kind_t pooling_max = dnnl_pooling_max;
107
+ const alg_kind_t pooling_avg_include_padding = dnnl_pooling_avg_include_padding;
108
+ const alg_kind_t pooling_avg_exclude_padding = dnnl_pooling_avg_exclude_padding;
109
+ const alg_kind_t lrn_across_channels = dnnl_lrn_across_channels;
110
+ const alg_kind_t lrn_within_channel = dnnl_lrn_within_channel;
111
+ const alg_kind_t vanilla_rnn = dnnl_vanilla_rnn;
112
+ const alg_kind_t vanilla_lstm = dnnl_vanilla_lstm;
113
+ const alg_kind_t vanilla_gru = dnnl_vanilla_gru;
114
+ const alg_kind_t lbr_gru = dnnl_lbr_gru;
115
+ const alg_kind_t vanilla_augru = dnnl_vanilla_augru;
116
+ const alg_kind_t lbr_augru = dnnl_lbr_augru;
117
+ const alg_kind_t binary_add = dnnl_binary_add;
118
+ const alg_kind_t binary_mul = dnnl_binary_mul;
119
+ const alg_kind_t binary_max = dnnl_binary_max;
120
+ const alg_kind_t binary_min = dnnl_binary_min;
121
+ const alg_kind_t binary_div = dnnl_binary_div;
122
+ const alg_kind_t binary_sub = dnnl_binary_sub;
123
+ const alg_kind_t binary_ge = dnnl_binary_ge;
124
+ const alg_kind_t binary_gt = dnnl_binary_gt;
125
+ const alg_kind_t binary_le = dnnl_binary_le;
126
+ const alg_kind_t binary_lt = dnnl_binary_lt;
127
+ const alg_kind_t binary_eq = dnnl_binary_eq;
128
+ const alg_kind_t binary_ne = dnnl_binary_ne;
129
+ const alg_kind_t resampling_nearest = dnnl_resampling_nearest;
130
+ const alg_kind_t resampling_linear = dnnl_resampling_linear;
131
+ const alg_kind_t reduction_max = dnnl_reduction_max;
132
+ const alg_kind_t reduction_min = dnnl_reduction_min;
133
+ const alg_kind_t reduction_sum = dnnl_reduction_sum;
134
+ const alg_kind_t reduction_mul = dnnl_reduction_mul;
135
+ const alg_kind_t reduction_mean = dnnl_reduction_mean;
136
+ const alg_kind_t reduction_norm_lp_max = dnnl_reduction_norm_lp_max;
137
+ const alg_kind_t reduction_norm_lp_sum = dnnl_reduction_norm_lp_sum;
138
+ const alg_kind_t reduction_norm_lp_power_p_max
139
+ = dnnl_reduction_norm_lp_power_p_max;
140
+ const alg_kind_t reduction_norm_lp_power_p_sum
141
+ = dnnl_reduction_norm_lp_power_p_sum;
142
+ const alg_kind_t softmax_accurate = dnnl_softmax_accurate;
143
+ const alg_kind_t softmax_log = dnnl_softmax_log;
144
+ } // namespace alg_kind
145
+
146
+ using data_type_t = dnnl_data_type_t;
147
+ namespace data_type {
148
+ const data_type_t undef = dnnl_data_type_undef;
149
+ const data_type_t f16 = dnnl_f16;
150
+ const data_type_t bf16 = dnnl_bf16;
151
+ const data_type_t f32 = dnnl_f32;
152
+ const data_type_t f64 = dnnl_f64;
153
+ const data_type_t s32 = dnnl_s32;
154
+ const data_type_t s8 = dnnl_s8;
155
+ const data_type_t u8 = dnnl_u8;
156
+ const data_type_t boolean = dnnl_boolean;
157
+ const data_type_t data_type_max = dnnl_data_type_max;
158
+
159
+ // Not exposed through API as all current uses are internal only
160
+ const data_type_t tf32 = static_cast<data_type_t>(1 << 8);
161
+
162
+ } // namespace data_type
163
+
164
+ using fpmath_mode_t = dnnl_fpmath_mode_t;
165
+ namespace fpmath_mode {
166
+ const fpmath_mode_t strict = dnnl_fpmath_mode_strict;
167
+ const fpmath_mode_t bf16 = dnnl_fpmath_mode_bf16;
168
+ const fpmath_mode_t f16 = dnnl_fpmath_mode_f16;
169
+ const fpmath_mode_t tf32 = dnnl_fpmath_mode_tf32;
170
+ const fpmath_mode_t any = dnnl_fpmath_mode_any;
171
+ } // namespace fpmath_mode
172
+
173
+ using scratchpad_mode_t = dnnl_scratchpad_mode_t;
174
+ namespace scratchpad_mode {
175
+ const scratchpad_mode_t library = dnnl_scratchpad_mode_library;
176
+ const scratchpad_mode_t user = dnnl_scratchpad_mode_user;
177
+ } // namespace scratchpad_mode
178
+
179
+ #ifdef DNNL_EXPERIMENTAL_SPARSE
180
+ using sparse_encoding_t = dnnl_sparse_encoding_t;
181
+ namespace sparse_encoding {
182
+ const sparse_encoding_t undef = dnnl_sparse_encoding_undef;
183
+ const sparse_encoding_t csr = dnnl_csr;
184
+ } // namespace sparse_encoding
185
+ #else
186
+ // Declare dummy values to avoid guarding internal implementation.
187
+ using sparse_encoding_t = int;
188
+ namespace sparse_encoding {
189
+ const sparse_encoding_t undef = 0;
190
+ const sparse_encoding_t csr = 1;
191
+ } // namespace sparse_encoding
192
+ #endif
193
+
194
+ using format_kind_t = dnnl_format_kind_t;
195
+ namespace format_kind {
196
+ const format_kind_t undef = dnnl_format_kind_undef;
197
+ const format_kind_t any = dnnl_format_kind_any;
198
+ const format_kind_t blocked = dnnl_blocked;
199
+ const format_kind_t opaque = dnnl_format_kind_opaque;
200
+ #ifdef DNNL_EXPERIMENTAL_SPARSE
201
+ const format_kind_t sparse = dnnl_format_kind_sparse;
202
+ #else
203
+ const format_kind_t sparse = dnnl_format_kind_undef;
204
+ #endif
205
+
206
+ // Internal only format kinds.
207
+ const format_kind_t internal_only_start = (format_kind_t)(1 << 8);
208
+ const format_kind_t wino = internal_only_start;
209
+ const format_kind_t rnn_packed = (format_kind_t)(internal_only_start + 1);
210
+ } // namespace format_kind
211
+
212
+ #ifdef DNNL_EXPERIMENTAL_PROFILING
213
+ using profiling_data_kind_t = dnnl_profiling_data_kind_t;
214
+ namespace profiling_data_kind {
215
+ const profiling_data_kind_t undef = dnnl_profiling_data_kind_undef;
216
+ const profiling_data_kind_t time = dnnl_profiling_data_kind_time;
217
+ #else
218
+ using profiling_data_kind_t = int;
219
+ namespace profiling_data_kind {
220
+ const profiling_data_kind_t undef = 0;
221
+ const profiling_data_kind_t time = 1;
222
+ #endif
223
+ // Internal only data kinds.
224
+ const profiling_data_kind_t internal_only_start
225
+ = (profiling_data_kind_t)(1 << 8);
226
+ const profiling_data_kind_t cycles
227
+ = (profiling_data_kind_t)(internal_only_start + 1);
228
+ } // namespace profiling_data_kind
229
+
230
+ using format_tag_t = dnnl_format_tag_t;
231
+ namespace format_tag {
232
+ const format_tag_t undef = dnnl_format_tag_undef;
233
+ const format_tag_t any = dnnl_format_tag_any;
234
+ const format_tag_t a = dnnl_a;
235
+ const format_tag_t ab = dnnl_ab;
236
+ const format_tag_t abc = dnnl_abc;
237
+ const format_tag_t abcd = dnnl_abcd;
238
+ const format_tag_t abcde = dnnl_abcde;
239
+ const format_tag_t abcdef = dnnl_abcdef;
240
+ const format_tag_t abcdefg = dnnl_abcdefg;
241
+ const format_tag_t abcdefgh = dnnl_abcdefgh;
242
+ const format_tag_t abcdefghi = dnnl_abcdefghi;
243
+ const format_tag_t abcdefghij = dnnl_abcdefghij;
244
+ const format_tag_t abcdefghijk = dnnl_abcdefghijk;
245
+ const format_tag_t abcdefghijkl = dnnl_abcdefghijkl;
246
+ const format_tag_t abcdefghijlk = dnnl_abcdefghijlk;
247
+ const format_tag_t abcdefghikj = dnnl_abcdefghikj;
248
+ const format_tag_t abcdefghji = dnnl_abcdefghji;
249
+ const format_tag_t abcdefgih = dnnl_abcdefgih;
250
+ const format_tag_t abcdefhg = dnnl_abcdefhg;
251
+ const format_tag_t abcdegf = dnnl_abcdegf;
252
+ const format_tag_t abcdfe = dnnl_abcdfe;
253
+ const format_tag_t abced = dnnl_abced;
254
+ const format_tag_t abdc = dnnl_abdc;
255
+ const format_tag_t acbd = dnnl_acbd;
256
+ const format_tag_t abdec = dnnl_abdec;
257
+ const format_tag_t abdfce = dnnl_abdfce;
258
+ const format_tag_t acb = dnnl_acb;
259
+ const format_tag_t acbde = dnnl_acbde;
260
+ const format_tag_t acbdef = dnnl_acbdef;
261
+ const format_tag_t abdefc = dnnl_abdefc;
262
+ const format_tag_t acdb = dnnl_acdb;
263
+ const format_tag_t acdeb = dnnl_acdeb;
264
+ const format_tag_t adbc = dnnl_adbc;
265
+ const format_tag_t ba = dnnl_ba;
266
+ const format_tag_t bac = dnnl_bac;
267
+ const format_tag_t bacd = dnnl_bacd;
268
+ const format_tag_t bca = dnnl_bca;
269
+ const format_tag_t bcda = dnnl_bcda;
270
+ const format_tag_t bcdea = dnnl_bcdea;
271
+ const format_tag_t bacde = dnnl_bacde;
272
+ const format_tag_t cab = dnnl_cab;
273
+ const format_tag_t cba = dnnl_cba;
274
+ const format_tag_t cdab = dnnl_cdab;
275
+ const format_tag_t cdba = dnnl_cdba;
276
+ const format_tag_t dcab = dnnl_dcab;
277
+ const format_tag_t cdeab = dnnl_cdeab;
278
+ const format_tag_t cdeba = dnnl_cdeba;
279
+ const format_tag_t decab = dnnl_decab;
280
+ const format_tag_t defcab = dnnl_defcab;
281
+ const format_tag_t AB16b16a = dnnl_AB16b16a;
282
+ const format_tag_t AB16b32a = dnnl_AB16b32a;
283
+ const format_tag_t AB16b48a = dnnl_AB16b48a;
284
+ const format_tag_t AB16b64a = dnnl_AB16b64a;
285
+ const format_tag_t AB8b16a2b = dnnl_AB8b16a2b;
286
+ const format_tag_t AB8b32a2b = dnnl_AB8b32a2b;
287
+ const format_tag_t AB8b64a2b = dnnl_AB8b64a2b;
288
+ const format_tag_t AB4b8a4b = dnnl_AB4b8a4b;
289
+ const format_tag_t AB4b16a4b = dnnl_AB4b16a4b;
290
+ const format_tag_t AB4b24a4b = dnnl_AB4b24a4b;
291
+ const format_tag_t AB4b32a4b = dnnl_AB4b32a4b;
292
+ const format_tag_t AB4b64a4b = dnnl_AB4b64a4b;
293
+ const format_tag_t AB32a16b = dnnl_AB32a16b;
294
+ const format_tag_t AB32a32b = dnnl_AB32a32b;
295
+ const format_tag_t AB48a16b = dnnl_AB48a16b;
296
+ const format_tag_t AB48a32b = dnnl_AB48a32b;
297
+ const format_tag_t BA4b8a8b2a = dnnl_BA4b8a8b2a;
298
+ const format_tag_t BA4b8a8b4a = dnnl_BA4b8a8b4a;
299
+ const format_tag_t BA4b8a16b2a = dnnl_BA4b8a16b2a;
300
+ const format_tag_t BA4b8a16b4a = dnnl_BA4b8a16b4a;
301
+ const format_tag_t aBC32b16c = dnnl_aBC32b16c;
302
+ const format_tag_t aBC32b32c = dnnl_aBC32b32c;
303
+ const format_tag_t aBC48b16c = dnnl_aBC48b16c;
304
+ const format_tag_t aBC48b32c = dnnl_aBC48b32c;
305
+ const format_tag_t aCB4c8b8c2b = dnnl_aCB4c8b8c2b;
306
+ const format_tag_t aCB4c8b8c4b = dnnl_aCB4c8b8c4b;
307
+ const format_tag_t aCB4c8b16c2b = dnnl_aCB4c8b16c2b;
308
+ const format_tag_t aCB4c8b16c4b = dnnl_aCB4c8b16c4b;
309
+ const format_tag_t AB16b16a4b = dnnl_AB16b16a4b;
310
+ const format_tag_t AB16b32a4b = dnnl_AB16b32a4b;
311
+ const format_tag_t AB16b48a4b = dnnl_AB16b48a4b;
312
+ const format_tag_t AB16b64a4b = dnnl_AB16b64a4b;
313
+ const format_tag_t AB16b16a2b = dnnl_AB16b16a2b;
314
+ const format_tag_t AB16b32a2b = dnnl_AB16b32a2b;
315
+ const format_tag_t AB16b48a2b = dnnl_AB16b48a2b;
316
+ const format_tag_t AB16b64a2b = dnnl_AB16b64a2b;
317
+ const format_tag_t BA16a16b = dnnl_BA16a16b;
318
+ const format_tag_t BA16a32b = dnnl_BA16a32b;
319
+ const format_tag_t BA16a48b = dnnl_BA16a48b;
320
+ const format_tag_t BA16a64b = dnnl_BA16a64b;
321
+ const format_tag_t BA16a16b2a = dnnl_BA16a16b2a;
322
+ const format_tag_t BA16a32b2a = dnnl_BA16a32b2a;
323
+ const format_tag_t BA16a48b2a = dnnl_BA16a48b2a;
324
+ const format_tag_t BA16a64b2a = dnnl_BA16a64b2a;
325
+ const format_tag_t BA16a16b4a = dnnl_BA16a16b4a;
326
+ const format_tag_t BA16a32b4a = dnnl_BA16a32b4a;
327
+ const format_tag_t BA16a48b4a = dnnl_BA16a48b4a;
328
+ const format_tag_t BA16a64b4a = dnnl_BA16a64b4a;
329
+ const format_tag_t aCB16b16c = dnnl_aCB16b16c;
330
+ const format_tag_t aCB16b32c = dnnl_aCB16b32c;
331
+ const format_tag_t aCB16b48c = dnnl_aCB16b48c;
332
+ const format_tag_t aCB16b64c = dnnl_aCB16b64c;
333
+ const format_tag_t aCB16b16c2b = dnnl_aCB16b16c2b;
334
+ const format_tag_t aCB16b32c2b = dnnl_aCB16b32c2b;
335
+ const format_tag_t aCB16b48c2b = dnnl_aCB16b48c2b;
336
+ const format_tag_t aCB16b64c2b = dnnl_aCB16b64c2b;
337
+ const format_tag_t aCB16b16c4b = dnnl_aCB16b16c4b;
338
+ const format_tag_t aCB16b32c4b = dnnl_aCB16b32c4b;
339
+ const format_tag_t aCB16b48c4b = dnnl_aCB16b48c4b;
340
+ const format_tag_t aCB16b64c4b = dnnl_aCB16b64c4b;
341
+
342
+ const format_tag_t Abc16a = dnnl_Abc16a;
343
+ const format_tag_t ABc16a16b = dnnl_ABc16a16b;
344
+ const format_tag_t ABc4a2b = dnnl_ABc4a2b;
345
+ const format_tag_t ABc4a4b = dnnl_ABc4a4b;
346
+ const format_tag_t aBc16b = dnnl_aBc16b;
347
+ const format_tag_t aBc32b = dnnl_aBc32b;
348
+ const format_tag_t ABc16b16a = dnnl_ABc16b16a;
349
+ const format_tag_t ABc16b32a = dnnl_ABc16b32a;
350
+ const format_tag_t ABc16b48a = dnnl_ABc16b48a;
351
+ const format_tag_t ABc16b64a = dnnl_ABc16b64a;
352
+ const format_tag_t Abc4a = dnnl_Abc4a;
353
+ const format_tag_t aBc4b = dnnl_aBc4b;
354
+ const format_tag_t ABc4b8a4b = dnnl_ABc4b8a4b;
355
+ const format_tag_t ABc4b16a4b = dnnl_ABc4b16a4b;
356
+ const format_tag_t ABc4b24a4b = dnnl_ABc4b24a4b;
357
+ const format_tag_t ABc4b32a4b = dnnl_ABc4b32a4b;
358
+ const format_tag_t ABc4b64a4b = dnnl_ABc4b64a4b;
359
+ const format_tag_t ABc2b8a4b = dnnl_ABc2b8a4b;
360
+ const format_tag_t ABc16b16a4b = dnnl_ABc16b16a4b;
361
+ const format_tag_t ABc16b32a4b = dnnl_ABc16b32a4b;
362
+ const format_tag_t ABc16b48a4b = dnnl_ABc16b48a4b;
363
+ const format_tag_t ABc16b64a4b = dnnl_ABc16b64a4b;
364
+ const format_tag_t ABc16b16a2b = dnnl_ABc16b16a2b;
365
+ const format_tag_t ABc16b32a2b = dnnl_ABc16b32a2b;
366
+ const format_tag_t ABc16b48a2b = dnnl_ABc16b48a2b;
367
+ const format_tag_t ABc16b64a2b = dnnl_ABc16b64a2b;
368
+ const format_tag_t ABc16a16b2a = dnnl_ABc16a16b2a;
369
+ const format_tag_t ABc4b4a = dnnl_ABc4b4a;
370
+ const format_tag_t ABc8a16b2a = dnnl_ABc8a16b2a;
371
+ const format_tag_t BAc8a16b2a = dnnl_BAc8a16b2a;
372
+ const format_tag_t ABc8a8b = dnnl_ABc8a8b;
373
+ const format_tag_t ABc8a2b = dnnl_ABc8a2b;
374
+ const format_tag_t ABc8a4b = dnnl_ABc8a4b;
375
+ const format_tag_t aBc8b = dnnl_aBc8b;
376
+ const format_tag_t ABc8b16a2b = dnnl_ABc8b16a2b;
377
+ const format_tag_t ABc8b32a2b = dnnl_ABc8b32a2b;
378
+ const format_tag_t ABc8b64a2b = dnnl_ABc8b64a2b;
379
+ const format_tag_t ABc8b8a = dnnl_ABc8b8a;
380
+ const format_tag_t Abcd16a = dnnl_Abcd16a;
381
+ const format_tag_t Abcd8a = dnnl_Abcd8a;
382
+ const format_tag_t Abcd32a = dnnl_Abcd32a;
383
+ const format_tag_t ABcd16a16b = dnnl_ABcd16a16b;
384
+ const format_tag_t aBcd16b = dnnl_aBcd16b;
385
+ const format_tag_t aBcd32b = dnnl_aBcd32b;
386
+ const format_tag_t ABcd16b16a = dnnl_ABcd16b16a;
387
+ const format_tag_t ABcd16b32a = dnnl_ABcd16b32a;
388
+ const format_tag_t ABcd16b48a = dnnl_ABcd16b48a;
389
+ const format_tag_t ABcd16b64a = dnnl_ABcd16b64a;
390
+ const format_tag_t aBCd16b16c = dnnl_aBCd16b16c;
391
+ const format_tag_t aBCd16c16b = dnnl_aBCd16c16b;
392
+ const format_tag_t Abcd4a = dnnl_Abcd4a;
393
+ const format_tag_t aBcd4b = dnnl_aBcd4b;
394
+ const format_tag_t ABcd4b8a4b = dnnl_ABcd4b8a4b;
395
+ const format_tag_t ABcd4b16a4b = dnnl_ABcd4b16a4b;
396
+ const format_tag_t ABcd4b24a4b = dnnl_ABcd4b24a4b;
397
+ const format_tag_t ABcd4b32a4b = dnnl_ABcd4b32a4b;
398
+ const format_tag_t ABcd4b64a4b = dnnl_ABcd4b64a4b;
399
+ const format_tag_t ABcd16b16a4b = dnnl_ABcd16b16a4b;
400
+ const format_tag_t ABcd16b32a4b = dnnl_ABcd16b32a4b;
401
+ const format_tag_t ABcd16b48a4b = dnnl_ABcd16b48a4b;
402
+ const format_tag_t ABcd16b64a4b = dnnl_ABcd16b64a4b;
403
+ const format_tag_t ABcd16b16a2b = dnnl_ABcd16b16a2b;
404
+ const format_tag_t ABcd16b32a2b = dnnl_ABcd16b32a2b;
405
+ const format_tag_t ABcd16b48a2b = dnnl_ABcd16b48a2b;
406
+ const format_tag_t ABcd16b64a2b = dnnl_ABcd16b64a2b;
407
+ const format_tag_t ABcd16a16b2a = dnnl_ABcd16a16b2a;
408
+ const format_tag_t ABcde16a16b2a = dnnl_ABcde16a16b2a;
409
+ const format_tag_t ABcd4b4a = dnnl_ABcd4b4a;
410
+ const format_tag_t ABcd4a2b = dnnl_ABcd4a2b;
411
+ const format_tag_t ABcd4a4b = dnnl_ABcd4a4b;
412
+ const format_tag_t aBCd4c16b4c = dnnl_aBCd4c16b4c;
413
+ const format_tag_t aBCd2c8b4c = dnnl_aBCd2c8b4c;
414
+ const format_tag_t aBCd16c16b4c = dnnl_aBCd16c16b4c;
415
+ const format_tag_t aBCd16c16b2c = dnnl_aBCd16c16b2c;
416
+ const format_tag_t aBCd16b16c2b = dnnl_aBCd16b16c2b;
417
+ const format_tag_t aBCd4c4b = dnnl_aBCd4c4b;
418
+ const format_tag_t aBCd4b4c = dnnl_aBCd4b4c;
419
+ const format_tag_t ABcd8a16b2a = dnnl_ABcd8a16b2a;
420
+ const format_tag_t BAcd8a16b2a = dnnl_BAcd8a16b2a;
421
+ const format_tag_t ABcd8a8b = dnnl_ABcd8a8b;
422
+ const format_tag_t ABcd8a4b = dnnl_ABcd8a4b;
423
+ const format_tag_t ABcd8a2b = dnnl_ABcd8a2b;
424
+ const format_tag_t aBcd8b = dnnl_aBcd8b;
425
+ const format_tag_t ABcd8b16a2b = dnnl_ABcd8b16a2b;
426
+ const format_tag_t ABcd8b32a2b = dnnl_ABcd8b32a2b;
427
+ const format_tag_t ABcd8b64a2b = dnnl_ABcd8b64a2b;
428
+ const format_tag_t ABcd2b8a4b = dnnl_ABcd2b8a4b;
429
+ const format_tag_t aBCd8b16c2b = dnnl_aBCd8b16c2b;
430
+ const format_tag_t aCBd8b16c2b = dnnl_aCBd8b16c2b;
431
+ const format_tag_t aBCd2c8b16c2b = dnnl_aBCd2c8b16c2b;
432
+ const format_tag_t ABcd8b8a = dnnl_ABcd8b8a;
433
+ const format_tag_t aBCd8b8c = dnnl_aBCd8b8c;
434
+ const format_tag_t aBCd8b2c = dnnl_aBCd8b2c;
435
+ const format_tag_t aBCd8b4c = dnnl_aBCd8b4c;
436
+ const format_tag_t aBCd8c16b2c = dnnl_aBCd8c16b2c;
437
+ const format_tag_t aBCd8c8b = dnnl_aBCd8c8b;
438
+ const format_tag_t Abcde16a = dnnl_Abcde16a;
439
+ const format_tag_t Abcde32a = dnnl_Abcde32a;
440
+ const format_tag_t ABcde16a16b = dnnl_ABcde16a16b;
441
+ const format_tag_t aBcde16b = dnnl_aBcde16b;
442
+ const format_tag_t aBcde32b = dnnl_aBcde32b;
443
+ const format_tag_t ABcde16b16a = dnnl_ABcde16b16a;
444
+ const format_tag_t ABcde16b32a = dnnl_ABcde16b32a;
445
+ const format_tag_t ABcde16b48a = dnnl_ABcde16b48a;
446
+ const format_tag_t ABcde16b64a = dnnl_ABcde16b64a;
447
+ const format_tag_t aBCde16b16c = dnnl_aBCde16b16c;
448
+ const format_tag_t aBCde16c16b = dnnl_aBCde16c16b;
449
+ const format_tag_t aBCde2c8b4c = dnnl_aBCde2c8b4c;
450
+ const format_tag_t Abcde4a = dnnl_Abcde4a;
451
+ const format_tag_t aBcde4b = dnnl_aBcde4b;
452
+ const format_tag_t ABcde4b4a = dnnl_ABcde4b4a;
453
+ const format_tag_t ABcde4a2b = dnnl_ABcde4a2b;
454
+ const format_tag_t ABcde4a4b = dnnl_ABcde4a4b;
455
+ const format_tag_t aBCde4b4c = dnnl_aBCde4b4c;
456
+ const format_tag_t aBCde4c16b4c = dnnl_aBCde4c16b4c;
457
+ const format_tag_t aBCde16c16b4c = dnnl_aBCde16c16b4c;
458
+ const format_tag_t aBCde16c16b2c = dnnl_aBCde16c16b2c;
459
+ const format_tag_t aBCde16b16c2b = dnnl_aBCde16b16c2b;
460
+ const format_tag_t aBCde4c4b = dnnl_aBCde4c4b;
461
+ const format_tag_t Abcde8a = dnnl_Abcde8a;
462
+ const format_tag_t ABcde8a8b = dnnl_ABcde8a8b;
463
+ const format_tag_t ABcde8a2b = dnnl_ABcde8a2b;
464
+ const format_tag_t ABcde8a4b = dnnl_ABcde8a4b;
465
+ const format_tag_t aBcde8b = dnnl_aBcde8b;
466
+ const format_tag_t ABcde8b16a2b = dnnl_ABcde8b16a2b;
467
+ const format_tag_t ABcde8b32a2b = dnnl_ABcde8b32a2b;
468
+ const format_tag_t ABcde8b64a2b = dnnl_ABcde8b64a2b;
469
+ const format_tag_t ABcde8a16b2a = dnnl_ABcde8a16b2a;
470
+ const format_tag_t BAcde8a16b2a = dnnl_BAcde8a16b2a;
471
+ const format_tag_t ABcde4b8a4b = dnnl_ABcde4b8a4b;
472
+ const format_tag_t ABcde4b16a4b = dnnl_ABcde4b16a4b;
473
+ const format_tag_t ABcde4b24a4b = dnnl_ABcde4b24a4b;
474
+ const format_tag_t ABcde4b32a4b = dnnl_ABcde4b32a4b;
475
+ const format_tag_t ABcde4b64a4b = dnnl_ABcde4b64a4b;
476
+ const format_tag_t ABcde16b16a4b = dnnl_ABcde16b16a4b;
477
+ const format_tag_t ABcde16b32a4b = dnnl_ABcde16b32a4b;
478
+ const format_tag_t ABcde16b48a4b = dnnl_ABcde16b48a4b;
479
+ const format_tag_t ABcde16b64a4b = dnnl_ABcde16b64a4b;
480
+ const format_tag_t ABcde2b8a4b = dnnl_ABcde2b8a4b;
481
+ const format_tag_t aBCde8b16c2b = dnnl_aBCde8b16c2b;
482
+ const format_tag_t aCBde8b16c2b = dnnl_aCBde8b16c2b;
483
+ const format_tag_t ABcde8b8a = dnnl_ABcde8b8a;
484
+ const format_tag_t aBCde8b8c = dnnl_aBCde8b8c;
485
+ const format_tag_t aBCde8b2c = dnnl_aBCde8b2c;
486
+ const format_tag_t aBCde8b4c = dnnl_aBCde8b4c;
487
+ const format_tag_t ABc4a8b8a4b = dnnl_ABc4a8b8a4b;
488
+ const format_tag_t ABcd4a8b8a4b = dnnl_ABcd4a8b8a4b;
489
+ const format_tag_t ABcde4a8b8a4b = dnnl_ABcde4a8b8a4b;
490
+ const format_tag_t ABcd2a8b8a2b = dnnl_ABcd2a8b8a2b;
491
+ const format_tag_t ABcde4a8b8a2b = dnnl_ABcde4a8b8a2b;
492
+ const format_tag_t ABcd4a8b8a2b = dnnl_ABcd4a8b8a2b;
493
+ const format_tag_t ABc4a8b8a2b = dnnl_ABc4a8b8a2b;
494
+ const format_tag_t aBCdef4b8c8b2c = dnnl_aBCdef4b8c8b2c;
495
+ const format_tag_t aBCde4b8c8b2c = dnnl_aBCde4b8c8b2c;
496
+ const format_tag_t aBCd4b8c8b2c = dnnl_aBCd4b8c8b2c;
497
+ const format_tag_t BAcde4b8a8b2a = dnnl_BAcde4b8a8b2a;
498
+ const format_tag_t BAcd4b8a8b2a = dnnl_BAcd4b8a8b2a;
499
+ const format_tag_t BAc4b8a8b2a = dnnl_BAc4b8a8b2a;
500
+ const format_tag_t aCBdef4c8b8c2b = dnnl_aCBdef4c8b8c2b;
501
+ const format_tag_t aCBde4c8b8c2b = dnnl_aCBde4c8b8c2b;
502
+ const format_tag_t aCBd4c8b8c2b = dnnl_aCBd4c8b8c2b;
503
+ const format_tag_t aBCd4b8c8b4c = dnnl_aBCd4b8c8b4c;
504
+ const format_tag_t aBCde4b8c8b4c = dnnl_aBCde4b8c8b4c;
505
+ const format_tag_t aBCdef4b8c8b4c = dnnl_aBCdef4b8c8b4c;
506
+ const format_tag_t BAc4b8a8b4a = dnnl_BAc4b8a8b4a;
507
+ const format_tag_t BAcd4b8a8b4a = dnnl_BAcd4b8a8b4a;
508
+ const format_tag_t BAcde4b8a8b4a = dnnl_BAcde4b8a8b4a;
509
+ const format_tag_t aCBd4c8b8c4b = dnnl_aCBd4c8b8c4b;
510
+ const format_tag_t aCBde4c8b8c4b = dnnl_aCBde4c8b8c4b;
511
+ const format_tag_t aCBdef4c8b8c4b = dnnl_aCBdef4c8b8c4b;
512
+ const format_tag_t aBCde2b8c8b2c = dnnl_aBCde2b8c8b2c;
513
+ const format_tag_t aBCde8c16b2c = dnnl_aBCde8c16b2c;
514
+ const format_tag_t aBCde8c8b = dnnl_aBCde8c8b;
515
+ const format_tag_t aBcdef16b = dnnl_aBcdef16b;
516
+ const format_tag_t aBCdef16b16c = dnnl_aBCdef16b16c;
517
+ const format_tag_t aBCdef16b16c2b = dnnl_aBCdef16b16c2b;
518
+ const format_tag_t aBCdef16c16b = dnnl_aBCdef16c16b;
519
+ const format_tag_t aBCdef4c16b4c = dnnl_aBCdef4c16b4c;
520
+ const format_tag_t aBCdef2c8b4c = dnnl_aBCdef2c8b4c;
521
+ const format_tag_t aBcdef4b = dnnl_aBcdef4b;
522
+ const format_tag_t aBCdef4c4b = dnnl_aBCdef4c4b;
523
+ const format_tag_t aBCdef4b4c = dnnl_aBCdef4b4c;
524
+ const format_tag_t aBCdef8b8c = dnnl_aBCdef8b8c;
525
+ const format_tag_t aBCdef8b2c = dnnl_aBCdef8b2c;
526
+ const format_tag_t aBCdef8b4c = dnnl_aBCdef8b4c;
527
+ const format_tag_t aBCdef8c16b2c = dnnl_aBCdef8c16b2c;
528
+ const format_tag_t aBCdef8b16c2b = dnnl_aBCdef8b16c2b;
529
+ const format_tag_t aCBdef8b16c2b = dnnl_aCBdef8b16c2b;
530
+ const format_tag_t aBCdef8c8b = dnnl_aBCdef8c8b;
531
+ const format_tag_t aBdc16b = dnnl_aBdc16b;
532
+ const format_tag_t aBdC16b2c = dnnl_aBdC16b2c;
533
+ const format_tag_t aBdC16b4c = dnnl_aBdC16b4c;
534
+ const format_tag_t aBdc4b = dnnl_aBdc4b;
535
+ const format_tag_t aBdc8b = dnnl_aBdc8b;
536
+ const format_tag_t aBdC8b2c = dnnl_aBdC8b2c;
537
+ const format_tag_t aBdC8b4c = dnnl_aBdC8b4c;
538
+ const format_tag_t aBdec16b = dnnl_aBdec16b;
539
+ const format_tag_t aBdeC16b2c = dnnl_aBdeC16b2c;
540
+ const format_tag_t aBdeC16b4c = dnnl_aBdeC16b4c;
541
+ const format_tag_t aBdec4b = dnnl_aBdec4b;
542
+ const format_tag_t aBdec8b = dnnl_aBdec8b;
543
+ const format_tag_t aBdeC8b2c = dnnl_aBdeC8b2c;
544
+ const format_tag_t aBdeC8b4c = dnnl_aBdeC8b4c;
545
+ const format_tag_t aBdefc16b = dnnl_aBdefc16b;
546
+ const format_tag_t aBdefC16b2c = dnnl_aBdefC16b2c;
547
+ const format_tag_t aBdefC16b4c = dnnl_aBdefC16b4c;
548
+ const format_tag_t aCBdef16c16b = dnnl_aCBdef16c16b;
549
+ const format_tag_t aCBdef16b16c = dnnl_aCBdef16b16c;
550
+ const format_tag_t aBdefc4b = dnnl_aBdefc4b;
551
+ const format_tag_t aBdefc8b = dnnl_aBdefc8b;
552
+ const format_tag_t aBdefC8b2c = dnnl_aBdefC8b2c;
553
+ const format_tag_t aBdefC8b4c = dnnl_aBdefC8b4c;
554
+ const format_tag_t aBdfec16b = dnnl_aBdfec16b;
555
+ const format_tag_t aBedc16b = dnnl_aBedc16b;
556
+ const format_tag_t Acb16a = dnnl_Acb16a;
557
+ const format_tag_t AcB16a2b = dnnl_AcB16a2b;
558
+ const format_tag_t AcB16a4b = dnnl_AcB16a4b;
559
+ const format_tag_t Acb4a = dnnl_Acb4a;
560
+ const format_tag_t Acb8a = dnnl_Acb8a;
561
+ const format_tag_t AcB8a2b = dnnl_AcB8a2b;
562
+ const format_tag_t AcB8a4b = dnnl_AcB8a4b;
563
+ const format_tag_t aCBd16b16c = dnnl_aCBd16b16c;
564
+ const format_tag_t aCBd16c16b = dnnl_aCBd16c16b;
565
+ const format_tag_t aCBde16b16c = dnnl_aCBde16b16c;
566
+ const format_tag_t aCBde16c16b = dnnl_aCBde16c16b;
567
+ const format_tag_t Acdb16a = dnnl_Acdb16a;
568
+ const format_tag_t AcdB16a2b = dnnl_AcdB16a2b;
569
+ const format_tag_t AcdB16a4b = dnnl_AcdB16a4b;
570
+ const format_tag_t Acdb4a = dnnl_Acdb4a;
571
+ const format_tag_t Acdb8a = dnnl_Acdb8a;
572
+ const format_tag_t AcdB8a2b = dnnl_AcdB8a2b;
573
+ const format_tag_t AcdB8a4b = dnnl_AcdB8a4b;
574
+ const format_tag_t Acdeb16a = dnnl_Acdeb16a;
575
+ const format_tag_t AcdeB16a2b = dnnl_AcdeB16a2b;
576
+ const format_tag_t AcdeB16a4b = dnnl_AcdeB16a4b;
577
+ const format_tag_t Acdeb4a = dnnl_Acdeb4a;
578
+ const format_tag_t Acdeb8a = dnnl_Acdeb8a;
579
+ const format_tag_t AcdeB8a2b = dnnl_AcdeB8a2b;
580
+ const format_tag_t AcdeB8a4b = dnnl_AcdeB8a4b;
581
+ const format_tag_t Acedb16a = dnnl_Acedb16a;
582
+ const format_tag_t Adcb16a = dnnl_Adcb16a;
583
+ const format_tag_t BAc16a16b = dnnl_BAc16a16b;
584
+ const format_tag_t BAcd16a16b = dnnl_BAcd16a16b;
585
+ const format_tag_t ABc32a16b = dnnl_ABc32a16b;
586
+ const format_tag_t ABcd32a16b = dnnl_ABcd32a16b;
587
+ const format_tag_t ABcde32a16b = dnnl_ABcde32a16b;
588
+ const format_tag_t ABc40a16b = dnnl_ABc40a16b;
589
+ const format_tag_t ABcd40a16b = dnnl_ABcd40a16b;
590
+ const format_tag_t ABcde40a16b = dnnl_ABcde40a16b;
591
+ const format_tag_t ABc32a32b = dnnl_ABc32a32b;
592
+ const format_tag_t BAcde16a16b = dnnl_BAcde16a16b;
593
+ const format_tag_t ABcd32a32b = dnnl_ABcd32a32b;
594
+ const format_tag_t ABcde32a32b = dnnl_ABcde32a32b;
595
+ const format_tag_t ABc40a32b = dnnl_ABc40a32b;
596
+ const format_tag_t ABcd40a32b = dnnl_ABcd40a32b;
597
+ const format_tag_t ABcde40a32b = dnnl_ABcde40a32b;
598
+ const format_tag_t BAcde16b16a = dnnl_BAcde16b16a;
599
+ const format_tag_t aBdec32b = dnnl_aBdec32b;
600
+ const format_tag_t Abcdef16a = dnnl_Abcdef16a;
601
+ const format_tag_t Abcdef32a = dnnl_Abcdef32a;
602
+ const format_tag_t Acdb32a = dnnl_Acdb32a;
603
+ const format_tag_t BAc16b16a = dnnl_BAc16b16a;
604
+ const format_tag_t BAcd16b16a = dnnl_BAcd16b16a;
605
+ const format_tag_t aBCd2b4c2b = dnnl_aBCd2b4c2b;
606
+ const format_tag_t aBCde2b4c2b = dnnl_aBCde2b4c2b;
607
+ const format_tag_t aBCdef2b4c2b = dnnl_aBCdef2b4c2b;
608
+ const format_tag_t aBCd2c4b2c = dnnl_aBCd2c4b2c;
609
+ const format_tag_t aBCde2c4b2c = dnnl_aBCde2c4b2c;
610
+ const format_tag_t aBCdef2c4b2c = dnnl_aBCdef2c4b2c;
611
+ const format_tag_t aBCd4b8c2b = dnnl_aBCd4b8c2b;
612
+ const format_tag_t aBCde4b8c2b = dnnl_aBCde4b8c2b;
613
+ const format_tag_t aBCdef4b8c2b = dnnl_aBCdef4b8c2b;
614
+ const format_tag_t aBCd4c8b2c = dnnl_aBCd4c8b2c;
615
+ const format_tag_t aBCde4c8b2c = dnnl_aBCde4c8b2c;
616
+ const format_tag_t aBCdef4c8b2c = dnnl_aBCdef4c8b2c;
617
+ const format_tag_t AB32a32b8a4b = dnnl_AB32a32b8a4b;
618
+ const format_tag_t AB8a4b = dnnl_AB8a4b;
619
+ const format_tag_t AB32a32b8a2b = dnnl_AB32a32b8a2b;
620
+ const format_tag_t AB8a2b = dnnl_AB8a2b;
621
+ const format_tag_t abDc16d = dnnl_abDc16d;
622
+ const format_tag_t abDc32d = dnnl_abDc32d;
623
+ const format_tag_t abDC32d4c = dnnl_abDC32d4c;
624
+ const format_tag_t abCd4c = dnnl_abCd4c;
625
+ const format_tag_t abCde4c = dnnl_abCde4c;
626
+ const format_tag_t abCdef4c = dnnl_abCdef4c;
627
+ const format_tag_t abCd32c = dnnl_abCd32c;
628
+ const format_tag_t abCde32c = dnnl_abCde32c;
629
+ const format_tag_t abCdef32c = dnnl_abCdef32c;
630
+ const format_tag_t abdEc16e = dnnl_abdEc16e;
631
+ const format_tag_t abdEc32e = dnnl_abdEc32e;
632
+ const format_tag_t abdEC32e2c = dnnl_abdEC32e2c;
633
+ const format_tag_t abdEC32e4c = dnnl_abdEC32e4c;
634
+ const format_tag_t abdEC64e2c = dnnl_abdEC64e2c;
635
+ const format_tag_t abdEC64e4c = dnnl_abdEC64e4c;
636
+ const format_tag_t abdCe16c = dnnl_abdCe16c;
637
+ const format_tag_t abdCe32c = dnnl_abdCe32c;
638
+ const format_tag_t abdCE32c2e = dnnl_abdCE32c2e;
639
+ const format_tag_t aBCdef16c16b4c = dnnl_aBCdef16c16b4c;
640
+ const format_tag_t ABcde16b16a2b = dnnl_ABcde16b16a2b;
641
+ const format_tag_t ABcde16b32a2b = dnnl_ABcde16b32a2b;
642
+ const format_tag_t ABcde16b48a2b = dnnl_ABcde16b48a2b;
643
+ const format_tag_t ABcde16b64a2b = dnnl_ABcde16b64a2b;
644
+ const format_tag_t aBCdef16c16b2c = dnnl_aBCdef16c16b2c;
645
+ const format_tag_t cBa2b = dnnl_cBa2b;
646
+ const format_tag_t cBa4b = dnnl_cBa4b;
647
+ const format_tag_t adcb = dnnl_adcb;
648
+ const format_tag_t adCb2c = dnnl_adCb2c;
649
+ const format_tag_t adCb4c = dnnl_adCb4c;
650
+ const format_tag_t cdBa2b = dnnl_cdBa2b;
651
+ const format_tag_t cdBa4b = dnnl_cdBa4b;
652
+ const format_tag_t adecb = dnnl_adecb;
653
+ const format_tag_t adeCb2c = dnnl_adeCb2c;
654
+ const format_tag_t adeCb4c = dnnl_adeCb4c;
655
+ const format_tag_t cdeBa2b = dnnl_cdeBa2b;
656
+ const format_tag_t cdeBa4b = dnnl_cdeBa4b;
657
+ const format_tag_t adefcb = dnnl_adefcb;
658
+ const format_tag_t adefCb2c = dnnl_adefCb2c;
659
+ const format_tag_t adefCb4c = dnnl_adefCb4c;
660
+ const format_tag_t Acb32a = dnnl_Acb32a;
661
+ const format_tag_t AcB32a2b = dnnl_AcB32a2b;
662
+ const format_tag_t AcB32a4b = dnnl_AcB32a4b;
663
+ const format_tag_t Acb48a = dnnl_Acb48a;
664
+ const format_tag_t AcB48a2b = dnnl_AcB48a2b;
665
+ const format_tag_t AcB48a4b = dnnl_AcB48a4b;
666
+ const format_tag_t Acb64a = dnnl_Acb64a;
667
+ const format_tag_t AcB64a2b = dnnl_AcB64a2b;
668
+ const format_tag_t AcB64a4b = dnnl_AcB64a4b;
669
+ const format_tag_t aBdc32b = dnnl_aBdc32b;
670
+ const format_tag_t aBdC32b2c = dnnl_aBdC32b2c;
671
+ const format_tag_t aBdC32b4c = dnnl_aBdC32b4c;
672
+ const format_tag_t aBdc48b = dnnl_aBdc48b;
673
+ const format_tag_t aBdC48b2c = dnnl_aBdC48b2c;
674
+ const format_tag_t aBdC48b4c = dnnl_aBdC48b4c;
675
+ const format_tag_t aBdc64b = dnnl_aBdc64b;
676
+ const format_tag_t aBdC64b2c = dnnl_aBdC64b2c;
677
+ const format_tag_t aBdC64b4c = dnnl_aBdC64b4c;
678
+ const format_tag_t AcdB32a2b = dnnl_AcdB32a2b;
679
+ const format_tag_t AcdB32a4b = dnnl_AcdB32a4b;
680
+ const format_tag_t Acdb48a = dnnl_Acdb48a;
681
+ const format_tag_t AcdB48a2b = dnnl_AcdB48a2b;
682
+ const format_tag_t AcdB48a4b = dnnl_AcdB48a4b;
683
+ const format_tag_t Acdb64a = dnnl_Acdb64a;
684
+ const format_tag_t AcdB64a2b = dnnl_AcdB64a2b;
685
+ const format_tag_t AcdB64a4b = dnnl_AcdB64a4b;
686
+ const format_tag_t aBdeC32b2c = dnnl_aBdeC32b2c;
687
+ const format_tag_t aBdeC32b4c = dnnl_aBdeC32b4c;
688
+ const format_tag_t aBdec48b = dnnl_aBdec48b;
689
+ const format_tag_t aBdeC48b2c = dnnl_aBdeC48b2c;
690
+ const format_tag_t aBdeC48b4c = dnnl_aBdeC48b4c;
691
+ const format_tag_t aBdec64b = dnnl_aBdec64b;
692
+ const format_tag_t aBdeC64b2c = dnnl_aBdeC64b2c;
693
+ const format_tag_t aBdeC64b4c = dnnl_aBdeC64b4c;
694
+ const format_tag_t Acdeb32a = dnnl_Acdeb32a;
695
+ const format_tag_t AcdeB32a2b = dnnl_AcdeB32a2b;
696
+ const format_tag_t AcdeB32a4b = dnnl_AcdeB32a4b;
697
+ const format_tag_t Acdeb48a = dnnl_Acdeb48a;
698
+ const format_tag_t AcdeB48a2b = dnnl_AcdeB48a2b;
699
+ const format_tag_t AcdeB48a4b = dnnl_AcdeB48a4b;
700
+ const format_tag_t Acdeb64a = dnnl_Acdeb64a;
701
+ const format_tag_t AcdeB64a2b = dnnl_AcdeB64a2b;
702
+ const format_tag_t AcdeB64a4b = dnnl_AcdeB64a4b;
703
+ const format_tag_t aBdefc32b = dnnl_aBdefc32b;
704
+ const format_tag_t aBdefC32b2c = dnnl_aBdefC32b2c;
705
+ const format_tag_t aBdefC32b4c = dnnl_aBdefC32b4c;
706
+ const format_tag_t aBdefc48b = dnnl_aBdefc48b;
707
+ const format_tag_t aBdefC48b2c = dnnl_aBdefC48b2c;
708
+ const format_tag_t aBdefC48b4c = dnnl_aBdefC48b4c;
709
+ const format_tag_t aBdefc64b = dnnl_aBdefc64b;
710
+ const format_tag_t aBdefC64b2c = dnnl_aBdefC64b2c;
711
+ const format_tag_t aBdefC64b4c = dnnl_aBdefC64b4c;
712
+ const format_tag_t aBdeC16c16b2c = dnnl_aBdeC16c16b2c;
713
+ const format_tag_t aBdeC16c16b4c = dnnl_aBdeC16c16b4c;
714
+ const format_tag_t aBdefC16c16b2c = dnnl_aBdefC16c16b2c;
715
+ const format_tag_t aBdefC16c16b4c = dnnl_aBdefC16c16b4c;
716
+ const format_tag_t AcB16b16a2b = dnnl_AcB16b16a2b;
717
+ const format_tag_t AcB16b16a4b = dnnl_AcB16b16a4b;
718
+ const format_tag_t aBdC16c16b2c = dnnl_aBdC16c16b2c;
719
+ const format_tag_t aBdC16c16b4c = dnnl_aBdC16c16b4c;
720
+ const format_tag_t AcdB16b16a2b = dnnl_AcdB16b16a2b;
721
+ const format_tag_t AcdB16b16a4b = dnnl_AcdB16b16a4b;
722
+ const format_tag_t AcdeB16b16a2b = dnnl_AcdeB16b16a2b;
723
+ const format_tag_t AcdeB16b16a4b = dnnl_AcdeB16b16a4b;
724
+ const format_tag_t AcB16b32a2b = dnnl_AcB16b32a2b;
725
+ const format_tag_t AcB16b32a4b = dnnl_AcB16b32a4b;
726
+ const format_tag_t AcB16b48a2b = dnnl_AcB16b48a2b;
727
+ const format_tag_t AcB16b48a4b = dnnl_AcB16b48a4b;
728
+ const format_tag_t AcB16b64a2b = dnnl_AcB16b64a2b;
729
+ const format_tag_t AcB16b64a4b = dnnl_AcB16b64a4b;
730
+ const format_tag_t aBdC16c32b2c = dnnl_aBdC16c32b2c;
731
+ const format_tag_t aBdC16c32b4c = dnnl_aBdC16c32b4c;
732
+ const format_tag_t aBdC16c48b2c = dnnl_aBdC16c48b2c;
733
+ const format_tag_t aBdC16c48b4c = dnnl_aBdC16c48b4c;
734
+ const format_tag_t aBdC16c64b2c = dnnl_aBdC16c64b2c;
735
+ const format_tag_t aBdC16c64b4c = dnnl_aBdC16c64b4c;
736
+ const format_tag_t AcdB16b32a2b = dnnl_AcdB16b32a2b;
737
+ const format_tag_t AcdB16b32a4b = dnnl_AcdB16b32a4b;
738
+ const format_tag_t AcdB16b48a2b = dnnl_AcdB16b48a2b;
739
+ const format_tag_t AcdB16b48a4b = dnnl_AcdB16b48a4b;
740
+ const format_tag_t AcdB16b64a2b = dnnl_AcdB16b64a2b;
741
+ const format_tag_t AcdB16b64a4b = dnnl_AcdB16b64a4b;
742
+ const format_tag_t aBdeC16c32b2c = dnnl_aBdeC16c32b2c;
743
+ const format_tag_t aBdeC16c32b4c = dnnl_aBdeC16c32b4c;
744
+ const format_tag_t aBdeC16c48b2c = dnnl_aBdeC16c48b2c;
745
+ const format_tag_t aBdeC16c48b4c = dnnl_aBdeC16c48b4c;
746
+ const format_tag_t aBdeC16c64b2c = dnnl_aBdeC16c64b2c;
747
+ const format_tag_t aBdeC16c64b4c = dnnl_aBdeC16c64b4c;
748
+ const format_tag_t AcdeB16b32a2b = dnnl_AcdeB16b32a2b;
749
+ const format_tag_t AcdeB16b32a4b = dnnl_AcdeB16b32a4b;
750
+ const format_tag_t AcdeB16b48a2b = dnnl_AcdeB16b48a2b;
751
+ const format_tag_t AcdeB16b48a4b = dnnl_AcdeB16b48a4b;
752
+ const format_tag_t AcdeB16b64a2b = dnnl_AcdeB16b64a2b;
753
+ const format_tag_t AcdeB16b64a4b = dnnl_AcdeB16b64a4b;
754
+ const format_tag_t aBdefC16c32b2c = dnnl_aBdefC16c32b2c;
755
+ const format_tag_t aBdefC16c32b4c = dnnl_aBdefC16c32b4c;
756
+ const format_tag_t aBdefC16c48b2c = dnnl_aBdefC16c48b2c;
757
+ const format_tag_t aBdefC16c48b4c = dnnl_aBdefC16c48b4c;
758
+ const format_tag_t aBdefC16c64b2c = dnnl_aBdefC16c64b2c;
759
+ const format_tag_t aBdefC16c64b4c = dnnl_aBdefC16c64b4c;
760
+ const format_tag_t decbA16a = dnnl_decbA16a;
761
+ const format_tag_t decbA8a = dnnl_decbA8a;
762
+ const format_tag_t defcbA16a = dnnl_defcbA16a;
763
+ const format_tag_t defcbA8a = dnnl_defcbA8a;
764
+ const format_tag_t aCB16c2b = dnnl_aCB16c2b;
765
+ const format_tag_t aCB16c4b = dnnl_aCB16c4b;
766
+ const format_tag_t BA16b2a = dnnl_BA16b2a;
767
+ const format_tag_t BA16b4a = dnnl_BA16b4a;
768
+ const format_tag_t aBC16b16c = dnnl_aBC16b16c;
769
+ const format_tag_t aBC16b32c = dnnl_aBC16b32c;
770
+ const format_tag_t AB16a16b = dnnl_AB16a16b;
771
+ const format_tag_t AB16a32b = dnnl_AB16a32b;
772
+ const format_tag_t ABcd16a32b = dnnl_ABcd16a32b;
773
+ const format_tag_t aCdefB16b32c2b = dnnl_aCdefB16b32c2b;
774
+ const format_tag_t aCdefB16b32c4b = dnnl_aCdefB16b32c4b;
775
+ const format_tag_t aCdefB16b48c2b = dnnl_aCdefB16b48c2b;
776
+ const format_tag_t aCdefB16b48c4b = dnnl_aCdefB16b48c4b;
777
+ const format_tag_t aCdefB16b64c2b = dnnl_aCdefB16b64c2b;
778
+ const format_tag_t aCdefB16b64c4b = dnnl_aCdefB16b64c4b;
779
+ const format_tag_t BcdeA16a32b2a = dnnl_BcdeA16a32b2a;
780
+ const format_tag_t BcdeA16a32b4a = dnnl_BcdeA16a32b4a;
781
+ const format_tag_t BcdeA16a48b2a = dnnl_BcdeA16a48b2a;
782
+ const format_tag_t BcdeA16a48b4a = dnnl_BcdeA16a48b4a;
783
+ const format_tag_t BcdeA16a64b2a = dnnl_BcdeA16a64b2a;
784
+ const format_tag_t BcdeA16a64b4a = dnnl_BcdeA16a64b4a;
785
+ const format_tag_t aCdefb32c = dnnl_aCdefb32c;
786
+ const format_tag_t aCdefB32c2b = dnnl_aCdefB32c2b;
787
+ const format_tag_t aCdefB32c4b = dnnl_aCdefB32c4b;
788
+ const format_tag_t aCdefb48c = dnnl_aCdefb48c;
789
+ const format_tag_t aCdefB48c2b = dnnl_aCdefB48c2b;
790
+ const format_tag_t aCdefB48c4b = dnnl_aCdefB48c4b;
791
+ const format_tag_t aCdefb64c = dnnl_aCdefb64c;
792
+ const format_tag_t aCdefB64c2b = dnnl_aCdefB64c2b;
793
+ const format_tag_t aCdefB64c4b = dnnl_aCdefB64c4b;
794
+ const format_tag_t Bcdea32b = dnnl_Bcdea32b;
795
+ const format_tag_t BcdeA32b2a = dnnl_BcdeA32b2a;
796
+ const format_tag_t BcdeA32b4a = dnnl_BcdeA32b4a;
797
+ const format_tag_t Bcdea48b = dnnl_Bcdea48b;
798
+ const format_tag_t BcdeA48b2a = dnnl_BcdeA48b2a;
799
+ const format_tag_t BcdeA48b4a = dnnl_BcdeA48b4a;
800
+ const format_tag_t Bcdea64b = dnnl_Bcdea64b;
801
+ const format_tag_t BcdeA64b2a = dnnl_BcdeA64b2a;
802
+ const format_tag_t BcdeA64b4a = dnnl_BcdeA64b4a;
803
+ const format_tag_t Bca32b = dnnl_Bca32b;
804
+ const format_tag_t BcA32b2a = dnnl_BcA32b2a;
805
+ const format_tag_t BcA32b4a = dnnl_BcA32b4a;
806
+ const format_tag_t Bca48b = dnnl_Bca48b;
807
+ const format_tag_t BcA48b2a = dnnl_BcA48b2a;
808
+ const format_tag_t BcA48b4a = dnnl_BcA48b4a;
809
+ const format_tag_t Bca64b = dnnl_Bca64b;
810
+ const format_tag_t BcA64b2a = dnnl_BcA64b2a;
811
+ const format_tag_t BcA64b4a = dnnl_BcA64b4a;
812
+ const format_tag_t aCdb32c = dnnl_aCdb32c;
813
+ const format_tag_t aCdB32c2b = dnnl_aCdB32c2b;
814
+ const format_tag_t aCdB32c4b = dnnl_aCdB32c4b;
815
+ const format_tag_t aCdb48c = dnnl_aCdb48c;
816
+ const format_tag_t aCdB48c2b = dnnl_aCdB48c2b;
817
+ const format_tag_t aCdB48c4b = dnnl_aCdB48c4b;
818
+ const format_tag_t aCdb64c = dnnl_aCdb64c;
819
+ const format_tag_t aCdB64c2b = dnnl_aCdB64c2b;
820
+ const format_tag_t aCdB64c4b = dnnl_aCdB64c4b;
821
+ const format_tag_t BcA16a16b2a = dnnl_BcA16a16b2a;
822
+ const format_tag_t BcA16a16b4a = dnnl_BcA16a16b4a;
823
+ const format_tag_t BcdA16a16b2a = dnnl_BcdA16a16b2a;
824
+ const format_tag_t BcdA16a16b4a = dnnl_BcdA16a16b4a;
825
+ const format_tag_t BcdeA16a16b2a = dnnl_BcdeA16a16b2a;
826
+ const format_tag_t BcdeA16a16b4a = dnnl_BcdeA16a16b4a;
827
+ const format_tag_t aCdB16b16c2b = dnnl_aCdB16b16c2b;
828
+ const format_tag_t aCdB16b16c4b = dnnl_aCdB16b16c4b;
829
+ const format_tag_t aCdeB16b16c2b = dnnl_aCdeB16b16c2b;
830
+ const format_tag_t aCdeB16b16c4b = dnnl_aCdeB16b16c4b;
831
+ const format_tag_t aCdefB16b16c2b = dnnl_aCdefB16b16c2b;
832
+ const format_tag_t aCdefB16b16c4b = dnnl_aCdefB16b16c4b;
833
+ const format_tag_t BcA16a32b2a = dnnl_BcA16a32b2a;
834
+ const format_tag_t BcA16a32b4a = dnnl_BcA16a32b4a;
835
+ const format_tag_t BcA16a48b2a = dnnl_BcA16a48b2a;
836
+ const format_tag_t BcA16a48b4a = dnnl_BcA16a48b4a;
837
+ const format_tag_t BcA16a64b2a = dnnl_BcA16a64b2a;
838
+ const format_tag_t BcA16a64b4a = dnnl_BcA16a64b4a;
839
+ const format_tag_t aCdB16b32c2b = dnnl_aCdB16b32c2b;
840
+ const format_tag_t aCdB16b32c4b = dnnl_aCdB16b32c4b;
841
+ const format_tag_t aCdB16b48c2b = dnnl_aCdB16b48c2b;
842
+ const format_tag_t aCdB16b48c4b = dnnl_aCdB16b48c4b;
843
+ const format_tag_t aCdB16b64c2b = dnnl_aCdB16b64c2b;
844
+ const format_tag_t aCdB16b64c4b = dnnl_aCdB16b64c4b;
845
+ const format_tag_t BcdA16a32b2a = dnnl_BcdA16a32b2a;
846
+ const format_tag_t BcdA16a32b4a = dnnl_BcdA16a32b4a;
847
+ const format_tag_t BcdA16a48b2a = dnnl_BcdA16a48b2a;
848
+ const format_tag_t BcdA16a48b4a = dnnl_BcdA16a48b4a;
849
+ const format_tag_t BcdA16a64b2a = dnnl_BcdA16a64b2a;
850
+ const format_tag_t BcdA16a64b4a = dnnl_BcdA16a64b4a;
851
+ const format_tag_t aCdeB16b32c2b = dnnl_aCdeB16b32c2b;
852
+ const format_tag_t aCdeB16b32c4b = dnnl_aCdeB16b32c4b;
853
+ const format_tag_t aCdeB16b48c2b = dnnl_aCdeB16b48c2b;
854
+ const format_tag_t aCdeB16b48c4b = dnnl_aCdeB16b48c4b;
855
+ const format_tag_t aCdeB16b64c2b = dnnl_aCdeB16b64c2b;
856
+ const format_tag_t aCdeB16b64c4b = dnnl_aCdeB16b64c4b;
857
+ const format_tag_t Bca8b = dnnl_Bca8b;
858
+ const format_tag_t BcA8b2a = dnnl_BcA8b2a;
859
+ const format_tag_t BcA8b4a = dnnl_BcA8b4a;
860
+ const format_tag_t Bcda8b = dnnl_Bcda8b;
861
+ const format_tag_t BcdA8b2a = dnnl_BcdA8b2a;
862
+ const format_tag_t BcdA8b4a = dnnl_BcdA8b4a;
863
+ const format_tag_t Bcdea8b = dnnl_Bcdea8b;
864
+ const format_tag_t BcdeA8b2a = dnnl_BcdeA8b2a;
865
+ const format_tag_t BcdeA8b4a = dnnl_BcdeA8b4a;
866
+ const format_tag_t aCdb8c = dnnl_aCdb8c;
867
+ const format_tag_t aCdB8c2b = dnnl_aCdB8c2b;
868
+ const format_tag_t aCdB8c4b = dnnl_aCdB8c4b;
869
+ const format_tag_t aCdeb8c = dnnl_aCdeb8c;
870
+ const format_tag_t aCdeB8c2b = dnnl_aCdeB8c2b;
871
+ const format_tag_t aCdeB8c4b = dnnl_aCdeB8c4b;
872
+ const format_tag_t aCdefb8c = dnnl_aCdefb8c;
873
+ const format_tag_t aCdefB8c2b = dnnl_aCdefB8c2b;
874
+ const format_tag_t aCdefB8c4b = dnnl_aCdefB8c4b;
875
+ const format_tag_t Bca16b = dnnl_Bca16b;
876
+ const format_tag_t BcA16b2a = dnnl_BcA16b2a;
877
+ const format_tag_t BcA16b4a = dnnl_BcA16b4a;
878
+ const format_tag_t Bcda16b = dnnl_Bcda16b;
879
+ const format_tag_t BcdA16b2a = dnnl_BcdA16b2a;
880
+ const format_tag_t BcdA16b4a = dnnl_BcdA16b4a;
881
+ const format_tag_t Bcdea16b = dnnl_Bcdea16b;
882
+ const format_tag_t BcdeA16b2a = dnnl_BcdeA16b2a;
883
+ const format_tag_t BcdeA16b4a = dnnl_BcdeA16b4a;
884
+ const format_tag_t aCdb16c = dnnl_aCdb16c;
885
+ const format_tag_t aCdB16c2b = dnnl_aCdB16c2b;
886
+ const format_tag_t aCdB16c4b = dnnl_aCdB16c4b;
887
+ const format_tag_t aCdeb16c = dnnl_aCdeb16c;
888
+ const format_tag_t aCdeB16c2b = dnnl_aCdeB16c2b;
889
+ const format_tag_t aCdeB16c4b = dnnl_aCdeB16c4b;
890
+ const format_tag_t aCdefb16c = dnnl_aCdefb16c;
891
+ const format_tag_t aCdefB16c2b = dnnl_aCdefB16c2b;
892
+ const format_tag_t aCdefB16c4b = dnnl_aCdefB16c4b;
893
+ const format_tag_t Bca24b = dnnl_Bca24b;
894
+ const format_tag_t BcA24b2a = dnnl_BcA24b2a;
895
+ const format_tag_t BcA24b4a = dnnl_BcA24b4a;
896
+ const format_tag_t Bcda24b = dnnl_Bcda24b;
897
+ const format_tag_t BcdA24b2a = dnnl_BcdA24b2a;
898
+ const format_tag_t BcdA24b4a = dnnl_BcdA24b4a;
899
+ const format_tag_t Bcdea24b = dnnl_Bcdea24b;
900
+ const format_tag_t BcdeA24b2a = dnnl_BcdeA24b2a;
901
+ const format_tag_t BcdeA24b4a = dnnl_BcdeA24b4a;
902
+ const format_tag_t aCdb24c = dnnl_aCdb24c;
903
+ const format_tag_t aCdB24c2b = dnnl_aCdB24c2b;
904
+ const format_tag_t aCdB24c4b = dnnl_aCdB24c4b;
905
+ const format_tag_t aCdeb24c = dnnl_aCdeb24c;
906
+ const format_tag_t aCdeB24c2b = dnnl_aCdeB24c2b;
907
+ const format_tag_t aCdeB24c4b = dnnl_aCdeB24c4b;
908
+ const format_tag_t aCdefb24c = dnnl_aCdefb24c;
909
+ const format_tag_t aCdefB24c2b = dnnl_aCdefB24c2b;
910
+ const format_tag_t aCdefB24c4b = dnnl_aCdefB24c4b;
911
+ const format_tag_t Bcda32b = dnnl_Bcda32b;
912
+ const format_tag_t BcdA32b2a = dnnl_BcdA32b2a;
913
+ const format_tag_t BcdA32b4a = dnnl_BcdA32b4a;
914
+ const format_tag_t Bcda48b = dnnl_Bcda48b;
915
+ const format_tag_t BcdA48b2a = dnnl_BcdA48b2a;
916
+ const format_tag_t BcdA48b4a = dnnl_BcdA48b4a;
917
+ const format_tag_t Bcda64b = dnnl_Bcda64b;
918
+ const format_tag_t BcdA64b2a = dnnl_BcdA64b2a;
919
+ const format_tag_t BcdA64b4a = dnnl_BcdA64b4a;
920
+ const format_tag_t aCdeb32c = dnnl_aCdeb32c;
921
+ const format_tag_t aCdeB32c2b = dnnl_aCdeB32c2b;
922
+ const format_tag_t aCdeB32c4b = dnnl_aCdeB32c4b;
923
+ const format_tag_t aCdeb48c = dnnl_aCdeb48c;
924
+ const format_tag_t aCdeB48c2b = dnnl_aCdeB48c2b;
925
+ const format_tag_t aCdeB48c4b = dnnl_aCdeB48c4b;
926
+ const format_tag_t aCdeb64c = dnnl_aCdeb64c;
927
+ const format_tag_t aCdeB64c2b = dnnl_aCdeB64c2b;
928
+ const format_tag_t aCdeB64c4b = dnnl_aCdeB64c4b;
929
+ const format_tag_t Acb24a = dnnl_Acb24a;
930
+ const format_tag_t Acdb24a = dnnl_Acdb24a;
931
+ const format_tag_t Acdeb24a = dnnl_Acdeb24a;
932
+ const format_tag_t aBdc24b = dnnl_aBdc24b;
933
+ const format_tag_t aBdec24b = dnnl_aBdec24b;
934
+ const format_tag_t aBdefc24b = dnnl_aBdefc24b;
935
+ const format_tag_t AcB24a2b = dnnl_AcB24a2b;
936
+ const format_tag_t AcdB24a2b = dnnl_AcdB24a2b;
937
+ const format_tag_t AcdeB24a2b = dnnl_AcdeB24a2b;
938
+ const format_tag_t aBdC24b2c = dnnl_aBdC24b2c;
939
+ const format_tag_t aBdeC24b2c = dnnl_aBdeC24b2c;
940
+ const format_tag_t aBdefC24b2c = dnnl_aBdefC24b2c;
941
+ const format_tag_t AB8b32a = dnnl_AB8b32a;
942
+ const format_tag_t ABc8b32a = dnnl_ABc8b32a;
943
+ const format_tag_t ABcd8b32a = dnnl_ABcd8b32a;
944
+ const format_tag_t ABcde8b32a = dnnl_ABcde8b32a;
945
+ const format_tag_t AB8b24a = dnnl_AB8b24a;
946
+ const format_tag_t ABc8b24a = dnnl_ABc8b24a;
947
+ const format_tag_t ABcd8b24a = dnnl_ABcd8b24a;
948
+ const format_tag_t ABcde8b24a = dnnl_ABcde8b24a;
949
+ const format_tag_t AB8b16a = dnnl_AB8b16a;
950
+ const format_tag_t ABc8b16a = dnnl_ABc8b16a;
951
+ const format_tag_t ABcd8b16a = dnnl_ABcd8b16a;
952
+ const format_tag_t ABcde8b16a = dnnl_ABcde8b16a;
953
+ const format_tag_t AB8b8a = dnnl_AB8b8a;
954
+ const format_tag_t AcB24a4b = dnnl_AcB24a4b;
955
+ const format_tag_t AcdB24a4b = dnnl_AcdB24a4b;
956
+ const format_tag_t AcdeB24a4b = dnnl_AcdeB24a4b;
957
+ const format_tag_t aBdC24b4c = dnnl_aBdC24b4c;
958
+ const format_tag_t aBdeC24b4c = dnnl_aBdeC24b4c;
959
+ const format_tag_t aBdefC24b4c = dnnl_aBdefC24b4c;
960
+ const format_tag_t AB8b8a2b = dnnl_AB8b8a2b;
961
+ const format_tag_t ABc8b8a2b = dnnl_ABc8b8a2b;
962
+ const format_tag_t ABcd8b8a2b = dnnl_ABcd8b8a2b;
963
+ const format_tag_t ABcde8b8a2b = dnnl_ABcde8b8a2b;
964
+ const format_tag_t AB8b24a2b = dnnl_AB8b24a2b;
965
+ const format_tag_t ABc8b24a2b = dnnl_ABc8b24a2b;
966
+ const format_tag_t ABcd8b24a2b = dnnl_ABcd8b24a2b;
967
+ const format_tag_t ABcde8b24a2b = dnnl_ABcde8b24a2b;
968
+
969
+ const format_tag_t last = dnnl_format_tag_last;
970
+
971
+ const format_tag_t x = dnnl_x;
972
+ const format_tag_t nc = dnnl_nc;
973
+ const format_tag_t cn = dnnl_cn;
974
+ const format_tag_t ncw = dnnl_ncw;
975
+ const format_tag_t nwc = dnnl_nwc;
976
+ const format_tag_t nchw = dnnl_nchw;
977
+ const format_tag_t nhwc = dnnl_nhwc;
978
+ const format_tag_t chwn = dnnl_chwn;
979
+ const format_tag_t ncdhw = dnnl_ncdhw;
980
+ const format_tag_t ndhwc = dnnl_ndhwc;
981
+ const format_tag_t oi = dnnl_oi;
982
+ const format_tag_t io = dnnl_io;
983
+ const format_tag_t oiw = dnnl_oiw;
984
+ const format_tag_t wio = dnnl_wio;
985
+ const format_tag_t woi = dnnl_woi;
986
+ const format_tag_t owi = dnnl_owi;
987
+ const format_tag_t iwo = dnnl_iwo;
988
+ const format_tag_t oihw = dnnl_oihw;
989
+ const format_tag_t hwio = dnnl_hwio;
990
+ const format_tag_t hwoi = dnnl_hwoi;
991
+ const format_tag_t ohwi = dnnl_ohwi;
992
+ const format_tag_t ihwo = dnnl_ihwo;
993
+ const format_tag_t iohw = dnnl_iohw;
994
+ const format_tag_t oidhw = dnnl_oidhw;
995
+ const format_tag_t dhwio = dnnl_dhwio;
996
+ const format_tag_t dhwoi = dnnl_dhwoi;
997
+ const format_tag_t odhwi = dnnl_odhwi;
998
+ const format_tag_t idhwo = dnnl_idhwo;
999
+
1000
+ const format_tag_t iodhw = dnnl_iodhw;
1001
+ const format_tag_t goiw = dnnl_goiw;
1002
+ const format_tag_t goihw = dnnl_goihw;
1003
+ const format_tag_t wigo = dnnl_wigo;
1004
+ const format_tag_t hwigo = dnnl_hwigo;
1005
+ const format_tag_t dhwigo = dnnl_dhwigo;
1006
+ const format_tag_t giohw = dnnl_giohw;
1007
+ const format_tag_t goidhw = dnnl_goidhw;
1008
+ const format_tag_t giodhw = dnnl_giodhw;
1009
+ const format_tag_t gowi = dnnl_gowi;
1010
+ const format_tag_t gohwi = dnnl_gohwi;
1011
+ const format_tag_t godhwi = dnnl_godhwi;
1012
+ const format_tag_t tnc = dnnl_tnc;
1013
+ const format_tag_t ntc = dnnl_ntc;
1014
+ const format_tag_t ldnc = dnnl_ldnc;
1015
+ const format_tag_t ldigo = dnnl_ldigo;
1016
+ const format_tag_t ldgoi = dnnl_ldgoi;
1017
+ const format_tag_t ldio = dnnl_ldio;
1018
+ const format_tag_t ldoi = dnnl_ldoi;
1019
+ const format_tag_t ldgo = dnnl_ldgo;
1020
+ const format_tag_t nCdhw32c = dnnl_nCdhw32c;
1021
+ const format_tag_t nCdhw16c = dnnl_nCdhw16c;
1022
+ const format_tag_t nCdhw4c = dnnl_nCdhw4c;
1023
+ const format_tag_t nCdhw8c = dnnl_nCdhw8c;
1024
+ const format_tag_t nChw32c = dnnl_nChw32c;
1025
+ const format_tag_t nChw16c = dnnl_nChw16c;
1026
+ const format_tag_t nChw4c = dnnl_nChw4c;
1027
+ const format_tag_t nChw8c = dnnl_nChw8c;
1028
+ const format_tag_t nCw32c = dnnl_nCw32c;
1029
+ const format_tag_t nCw16c = dnnl_nCw16c;
1030
+ const format_tag_t nCw4c = dnnl_nCw4c;
1031
+ const format_tag_t nCw8c = dnnl_nCw8c;
1032
+ const format_tag_t NCw16n16c = dnnl_NCw16n16c;
1033
+ const format_tag_t NChw16n16c = dnnl_NChw16n16c;
1034
+ const format_tag_t NCdhw16n16c = dnnl_NCdhw16n16c;
1035
+ const format_tag_t NCw32n16c = dnnl_NCw32n16c;
1036
+ const format_tag_t NChw32n16c = dnnl_NChw32n16c;
1037
+ const format_tag_t NCdhw32n16c = dnnl_NCdhw32n16c;
1038
+ const format_tag_t NCw40n16c = dnnl_NCw40n16c;
1039
+ const format_tag_t NChw40n16c = dnnl_NChw40n16c;
1040
+ const format_tag_t NCdhw40n16c = dnnl_NCdhw40n16c;
1041
+ const format_tag_t NCw32n32c = dnnl_NCw32n32c;
1042
+ const format_tag_t NChw32n32c = dnnl_NChw32n32c;
1043
+ const format_tag_t NCdhw32n32c = dnnl_NCdhw32n32c;
1044
+ const format_tag_t NCw40n32c = dnnl_NCw40n32c;
1045
+ const format_tag_t NChw40n32c = dnnl_NChw40n32c;
1046
+ const format_tag_t NCdhw40n32c = dnnl_NCdhw40n32c;
1047
+ const format_tag_t OI16i16o = dnnl_OI16i16o;
1048
+ const format_tag_t OI16i32o = dnnl_OI16i32o;
1049
+ const format_tag_t OI16i48o = dnnl_OI16i48o;
1050
+ const format_tag_t OI16i64o = dnnl_OI16i64o;
1051
+ const format_tag_t OI8i16o2i = dnnl_OI8i16o2i;
1052
+ const format_tag_t OI8i32o2i = dnnl_OI8i32o2i;
1053
+ const format_tag_t OI8i64o2i = dnnl_OI8i64o2i;
1054
+ const format_tag_t OI4i8o4i = dnnl_OI4i8o4i;
1055
+ const format_tag_t OI4i16o4i = dnnl_OI4i16o4i;
1056
+ const format_tag_t OI4i24o4i = dnnl_OI4i24o4i;
1057
+ const format_tag_t OI4i32o4i = dnnl_OI4i32o4i;
1058
+ const format_tag_t OI4i64o4i = dnnl_OI4i64o4i;
1059
+ const format_tag_t OI16i16o4i = dnnl_OI16i16o4i;
1060
+ const format_tag_t OI16i32o4i = dnnl_OI16i32o4i;
1061
+ const format_tag_t OI16i48o4i = dnnl_OI16i48o4i;
1062
+ const format_tag_t OI16i64o4i = dnnl_OI16i64o4i;
1063
+ const format_tag_t OI16i16o2i = dnnl_OI16i16o2i;
1064
+ const format_tag_t OI16i32o2i = dnnl_OI16i32o2i;
1065
+ const format_tag_t OI16i48o2i = dnnl_OI16i48o2i;
1066
+ const format_tag_t OI16i64o2i = dnnl_OI16i64o2i;
1067
+ const format_tag_t IOdhw16i16o = dnnl_IOdhw16i16o;
1068
+ const format_tag_t IOhw16i16o = dnnl_IOhw16i16o;
1069
+ const format_tag_t Ohwi32o = dnnl_Ohwi32o;
1070
+ const format_tag_t gIOhw16i16o = dnnl_gIOhw16i16o;
1071
+ const format_tag_t gOhwi32o = dnnl_gOhwi32o;
1072
+ const format_tag_t Goidhw16g = dnnl_Goidhw16g;
1073
+ const format_tag_t IOw16o16i = dnnl_IOw16o16i;
1074
+ const format_tag_t IOw16i16o = dnnl_IOw16i16o;
1075
+ const format_tag_t gIOw16i16o = dnnl_gIOw16i16o;
1076
+ const format_tag_t OIw16i16o = dnnl_OIw16i16o;
1077
+ const format_tag_t OIw16i32o = dnnl_OIw16i32o;
1078
+ const format_tag_t OIw16i48o = dnnl_OIw16i48o;
1079
+ const format_tag_t OIw16i64o = dnnl_OIw16i64o;
1080
+ const format_tag_t OIw16o16i = dnnl_OIw16o16i;
1081
+ const format_tag_t Oiw16o = dnnl_Oiw16o;
1082
+ const format_tag_t OIw4i8o4i = dnnl_OIw4i8o4i;
1083
+ const format_tag_t OIw4i16o4i = dnnl_OIw4i16o4i;
1084
+ const format_tag_t OIw4i24o4i = dnnl_OIw4i24o4i;
1085
+ const format_tag_t OIw4i32o4i = dnnl_OIw4i32o4i;
1086
+ const format_tag_t OIw4i64o4i = dnnl_OIw4i64o4i;
1087
+ const format_tag_t OIw2i8o4i = dnnl_OIw2i8o4i;
1088
+ const format_tag_t OIw16i16o4i = dnnl_OIw16i16o4i;
1089
+ const format_tag_t OIw16i32o4i = dnnl_OIw16i32o4i;
1090
+ const format_tag_t OIw16i48o4i = dnnl_OIw16i48o4i;
1091
+ const format_tag_t OIw16i64o4i = dnnl_OIw16i64o4i;
1092
+ const format_tag_t OIw16i16o2i = dnnl_OIw16i16o2i;
1093
+ const format_tag_t OIw16i32o2i = dnnl_OIw16i32o2i;
1094
+ const format_tag_t OIw16i48o2i = dnnl_OIw16i48o2i;
1095
+ const format_tag_t OIw16i64o2i = dnnl_OIw16i64o2i;
1096
+ const format_tag_t OIw16o16i2o = dnnl_OIw16o16i2o;
1097
+ const format_tag_t OIw4i4o = dnnl_OIw4i4o;
1098
+ const format_tag_t OIw4o4i = dnnl_OIw4o4i;
1099
+ const format_tag_t Oiw4o = dnnl_Oiw4o;
1100
+ const format_tag_t OIw8i16o2i = dnnl_OIw8i16o2i;
1101
+ const format_tag_t OIw8i32o2i = dnnl_OIw8i32o2i;
1102
+ const format_tag_t OIw8i64o2i = dnnl_OIw8i64o2i;
1103
+ const format_tag_t OIw8i8o = dnnl_OIw8i8o;
1104
+ const format_tag_t OIw8o16i2o = dnnl_OIw8o16i2o;
1105
+ const format_tag_t IOw8o16i2o = dnnl_IOw8o16i2o;
1106
+ const format_tag_t OIw8o8i = dnnl_OIw8o8i;
1107
+ const format_tag_t OIw8o4i = dnnl_OIw8o4i;
1108
+ const format_tag_t Owi16o = dnnl_Owi16o;
1109
+ const format_tag_t OwI16o2i = dnnl_OwI16o2i;
1110
+ const format_tag_t OwI16o4i = dnnl_OwI16o4i;
1111
+ const format_tag_t Owi4o = dnnl_Owi4o;
1112
+ const format_tag_t Owi8o = dnnl_Owi8o;
1113
+ const format_tag_t OwI8o2i = dnnl_OwI8o2i;
1114
+ const format_tag_t OwI8o4i = dnnl_OwI8o4i;
1115
+ const format_tag_t IOdhw16o16i = dnnl_IOdhw16o16i;
1116
+ const format_tag_t IOhw16o16i = dnnl_IOhw16o16i;
1117
+ const format_tag_t Ohwi16o = dnnl_Ohwi16o;
1118
+ const format_tag_t OhwI16o2i = dnnl_OhwI16o2i;
1119
+ const format_tag_t OhwI16o4i = dnnl_OhwI16o4i;
1120
+ const format_tag_t Ohwi4o = dnnl_Ohwi4o;
1121
+ const format_tag_t Ohwi8o = dnnl_Ohwi8o;
1122
+ const format_tag_t OhwI8o2i = dnnl_OhwI8o2i;
1123
+ const format_tag_t OhwI8o4i = dnnl_OhwI8o4i;
1124
+ const format_tag_t OIhw16i16o = dnnl_OIhw16i16o;
1125
+ const format_tag_t OIhw16i32o = dnnl_OIhw16i32o;
1126
+ const format_tag_t OIhw16i48o = dnnl_OIhw16i48o;
1127
+ const format_tag_t OIhw16i64o = dnnl_OIhw16i64o;
1128
+ const format_tag_t OIhw16o16i = dnnl_OIhw16o16i;
1129
+ const format_tag_t Oihw16o = dnnl_Oihw16o;
1130
+ const format_tag_t OIhw4i8o4i = dnnl_OIhw4i8o4i;
1131
+ const format_tag_t OIhw4i16o4i = dnnl_OIhw4i16o4i;
1132
+ const format_tag_t OIhw4i24o4i = dnnl_OIhw4i24o4i;
1133
+ const format_tag_t OIhw4i32o4i = dnnl_OIhw4i32o4i;
1134
+ const format_tag_t OIhw4i64o4i = dnnl_OIhw4i64o4i;
1135
+ const format_tag_t OIhw16i16o4i = dnnl_OIhw16i16o4i;
1136
+ const format_tag_t OIhw16i32o4i = dnnl_OIhw16i32o4i;
1137
+ const format_tag_t OIhw16i48o4i = dnnl_OIhw16i48o4i;
1138
+ const format_tag_t OIhw16i64o4i = dnnl_OIhw16i64o4i;
1139
+ const format_tag_t OIhw16i16o2i = dnnl_OIhw16i16o2i;
1140
+ const format_tag_t OIhw16i32o2i = dnnl_OIhw16i32o2i;
1141
+ const format_tag_t OIhw16i48o2i = dnnl_OIhw16i48o2i;
1142
+ const format_tag_t OIhw16i64o2i = dnnl_OIhw16i64o2i;
1143
+ const format_tag_t OIhw16o16i2o = dnnl_OIhw16o16i2o;
1144
+ const format_tag_t OIhw4i4o = dnnl_OIhw4i4o;
1145
+ const format_tag_t OIhw4o4i = dnnl_OIhw4o4i;
1146
+ const format_tag_t Oihw4o = dnnl_Oihw4o;
1147
+ const format_tag_t OIhw8i16o2i = dnnl_OIhw8i16o2i;
1148
+ const format_tag_t OIhw8i32o2i = dnnl_OIhw8i32o2i;
1149
+ const format_tag_t OIhw8i64o2i = dnnl_OIhw8i64o2i;
1150
+ const format_tag_t OIhw2i8o4i = dnnl_OIhw2i8o4i;
1151
+ const format_tag_t OIhw8i8o = dnnl_OIhw8i8o;
1152
+ const format_tag_t OIhw8o16i2o = dnnl_OIhw8o16i2o;
1153
+ const format_tag_t IOhw8o16i2o = dnnl_IOhw8o16i2o;
1154
+ const format_tag_t OIhw8o8i = dnnl_OIhw8o8i;
1155
+ const format_tag_t OIhw8o4i = dnnl_OIhw8o4i;
1156
+ const format_tag_t Owhi16o = dnnl_Owhi16o;
1157
+ const format_tag_t Odwhi16o = dnnl_Odwhi16o;
1158
+ const format_tag_t Odhwi16o = dnnl_Odhwi16o;
1159
+ const format_tag_t OdhwI16o2i = dnnl_OdhwI16o2i;
1160
+ const format_tag_t OdhwI16o4i = dnnl_OdhwI16o4i;
1161
+ const format_tag_t Odhwi4o = dnnl_Odhwi4o;
1162
+ const format_tag_t Odhwi8o = dnnl_Odhwi8o;
1163
+ const format_tag_t OdhwI8o2i = dnnl_OdhwI8o2i;
1164
+ const format_tag_t OdhwI8o4i = dnnl_OdhwI8o4i;
1165
+ const format_tag_t OIdhw16i16o = dnnl_OIdhw16i16o;
1166
+ const format_tag_t OIdhw16i32o = dnnl_OIdhw16i32o;
1167
+ const format_tag_t OIdhw16i48o = dnnl_OIdhw16i48o;
1168
+ const format_tag_t OIdhw16i64o = dnnl_OIdhw16i64o;
1169
+ const format_tag_t OIdhw16o16i = dnnl_OIdhw16o16i;
1170
+ const format_tag_t OIdhw16o16i2o = dnnl_OIdhw16o16i2o;
1171
+ const format_tag_t Oidhw16o = dnnl_Oidhw16o;
1172
+ const format_tag_t OIdhw4i4o = dnnl_OIdhw4i4o;
1173
+ const format_tag_t OIdhw4o4i = dnnl_OIdhw4o4i;
1174
+ const format_tag_t Oidhw4o = dnnl_Oidhw4o;
1175
+ const format_tag_t OIdhw8i16o2i = dnnl_OIdhw8i16o2i;
1176
+ const format_tag_t OIdhw8i32o2i = dnnl_OIdhw8i32o2i;
1177
+ const format_tag_t OIdhw8i64o2i = dnnl_OIdhw8i64o2i;
1178
+ const format_tag_t OIdhw4i8o4i = dnnl_OIdhw4i8o4i;
1179
+ const format_tag_t OIdhw4i16o4i = dnnl_OIdhw4i16o4i;
1180
+ const format_tag_t OIdhw4i24o4i = dnnl_OIdhw4i24o4i;
1181
+ const format_tag_t OIdhw4i32o4i = dnnl_OIdhw4i32o4i;
1182
+ const format_tag_t OIdhw4i64o4i = dnnl_OIdhw4i64o4i;
1183
+ const format_tag_t OIdhw16i16o4i = dnnl_OIdhw16i16o4i;
1184
+ const format_tag_t OIdhw16i32o4i = dnnl_OIdhw16i32o4i;
1185
+ const format_tag_t OIdhw16i48o4i = dnnl_OIdhw16i48o4i;
1186
+ const format_tag_t OIdhw16i64o4i = dnnl_OIdhw16i64o4i;
1187
+ const format_tag_t OIdhw16i16o2i = dnnl_OIdhw16i16o2i;
1188
+ const format_tag_t OIdhw16i32o2i = dnnl_OIdhw16i32o2i;
1189
+ const format_tag_t OIdhw16i48o2i = dnnl_OIdhw16i48o2i;
1190
+ const format_tag_t OIdhw16i64o2i = dnnl_OIdhw16i64o2i;
1191
+ const format_tag_t OIdhw2i8o4i = dnnl_OIdhw2i8o4i;
1192
+ const format_tag_t OIdhw8o16i2o = dnnl_OIdhw8o16i2o;
1193
+ const format_tag_t IOdhw8o16i2o = dnnl_IOdhw8o16i2o;
1194
+ const format_tag_t OIdhw8i8o = dnnl_OIdhw8i8o;
1195
+ const format_tag_t OIdhw8o8i = dnnl_OIdhw8o8i;
1196
+ const format_tag_t OIdhw8o4i = dnnl_OIdhw8o4i;
1197
+ const format_tag_t gIOw16o16i = dnnl_gIOw16o16i;
1198
+ const format_tag_t Goiw16g = dnnl_Goiw16g;
1199
+ const format_tag_t Goiw8g = dnnl_Goiw8g;
1200
+ const format_tag_t Goiw4g = dnnl_Goiw4g;
1201
+ const format_tag_t gOIw16i16o = dnnl_gOIw16i16o;
1202
+ const format_tag_t gOIw16o16i = dnnl_gOIw16o16i;
1203
+ const format_tag_t gOiw16o = dnnl_gOiw16o;
1204
+ const format_tag_t gOIw4i16o4i = dnnl_gOIw4i16o4i;
1205
+ const format_tag_t gOIw2i8o4i = dnnl_gOIw2i8o4i;
1206
+ const format_tag_t gOIw16i16o4i = dnnl_gOIw16i16o4i;
1207
+ const format_tag_t gOIw16i16o2i = dnnl_gOIw16i16o2i;
1208
+ const format_tag_t gOIw16o16i2o = dnnl_gOIw16o16i2o;
1209
+ const format_tag_t gOIw4i4o = dnnl_gOIw4i4o;
1210
+ const format_tag_t gOIw4o4i = dnnl_gOIw4o4i;
1211
+ const format_tag_t gOiw4o = dnnl_gOiw4o;
1212
+ const format_tag_t gOIw8i16o2i = dnnl_gOIw8i16o2i;
1213
+ const format_tag_t gOIw8i8o = dnnl_gOIw8i8o;
1214
+ const format_tag_t gOIw8o16i2o = dnnl_gOIw8o16i2o;
1215
+ const format_tag_t gIOw8o16i2o = dnnl_gIOw8o16i2o;
1216
+ const format_tag_t gOIw8o8i = dnnl_gOIw8o8i;
1217
+ const format_tag_t gOIw8o4i = dnnl_gOIw8o4i;
1218
+ const format_tag_t gOwi16o = dnnl_gOwi16o;
1219
+ const format_tag_t gOwI16o2i = dnnl_gOwI16o2i;
1220
+ const format_tag_t gOwI16o4i = dnnl_gOwI16o4i;
1221
+ const format_tag_t gOwi4o = dnnl_gOwi4o;
1222
+ const format_tag_t gOwi8o = dnnl_gOwi8o;
1223
+ const format_tag_t gOwI8o2i = dnnl_gOwI8o2i;
1224
+ const format_tag_t gOwI8o4i = dnnl_gOwI8o4i;
1225
+ const format_tag_t gIOdhw16o16i = dnnl_gIOdhw16o16i;
1226
+ const format_tag_t gIOhw16o16i = dnnl_gIOhw16o16i;
1227
+ const format_tag_t gOhwi16o = dnnl_gOhwi16o;
1228
+ const format_tag_t gOhwI16o2i = dnnl_gOhwI16o2i;
1229
+ const format_tag_t gOhwI16o4i = dnnl_gOhwI16o4i;
1230
+ const format_tag_t gOhwi4o = dnnl_gOhwi4o;
1231
+ const format_tag_t gOhwi8o = dnnl_gOhwi8o;
1232
+ const format_tag_t gOhwI8o2i = dnnl_gOhwI8o2i;
1233
+ const format_tag_t gOhwI8o4i = dnnl_gOhwI8o4i;
1234
+ const format_tag_t Goihw16g = dnnl_Goihw16g;
1235
+ const format_tag_t gOIhw16i16o = dnnl_gOIhw16i16o;
1236
+ const format_tag_t gOIhw16o16i = dnnl_gOIhw16o16i;
1237
+ const format_tag_t gOihw16o = dnnl_gOihw16o;
1238
+ const format_tag_t gOIhw2i8o4i = dnnl_gOIhw2i8o4i;
1239
+ const format_tag_t gOIhw4i16o4i = dnnl_gOIhw4i16o4i;
1240
+ const format_tag_t gOIhw16i16o4i = dnnl_gOIhw16i16o4i;
1241
+ const format_tag_t gOIhw16i16o2i = dnnl_gOIhw16i16o2i;
1242
+ const format_tag_t gOIhw16o16i2o = dnnl_gOIhw16o16i2o;
1243
+ const format_tag_t gOIhw4i4o = dnnl_gOIhw4i4o;
1244
+ const format_tag_t gOIhw4o4i = dnnl_gOIhw4o4i;
1245
+ const format_tag_t gOihw4o = dnnl_gOihw4o;
1246
+ const format_tag_t Goihw8g = dnnl_Goihw8g;
1247
+ const format_tag_t Goihw4g = dnnl_Goihw4g;
1248
+ const format_tag_t gOIhw8i16o2i = dnnl_gOIhw8i16o2i;
1249
+ const format_tag_t gOIhw8i8o = dnnl_gOIhw8i8o;
1250
+ const format_tag_t gOIhw8o16i2o = dnnl_gOIhw8o16i2o;
1251
+ const format_tag_t OIw4o8i8o4i = dnnl_OIw4o8i8o4i;
1252
+ const format_tag_t gIOhw8o16i2o = dnnl_gIOhw8o16i2o;
1253
+ const format_tag_t OIhw4o8i8o4i = dnnl_OIhw4o8i8o4i;
1254
+ const format_tag_t OIdhw4o8i8o4i = dnnl_OIdhw4o8i8o4i;
1255
+ const format_tag_t IOw4i8o8i4o = dnnl_IOw4i8o8i4o;
1256
+ const format_tag_t IOhw4i8o8i4o = dnnl_IOhw4i8o8i4o;
1257
+ const format_tag_t IOdhw4i8o8i4o = dnnl_IOdhw4i8o8i4o;
1258
+ const format_tag_t gIOw4i8o8i4o = dnnl_gIOw4i8o8i4o;
1259
+ const format_tag_t gIOhw4i8o8i4o = dnnl_gIOhw4i8o8i4o;
1260
+ const format_tag_t gIOdhw4i8o8i4o = dnnl_gIOdhw4i8o8i4o;
1261
+ const format_tag_t OIhw2o8i8o2i = dnnl_OIhw2o8i8o2i;
1262
+ const format_tag_t gOIw4o8i8o4i = dnnl_gOIw4o8i8o4i;
1263
+ const format_tag_t gOIhw4o8i8o4i = dnnl_gOIhw4o8i8o4i;
1264
+ const format_tag_t gOIdhw4o8i8o4i = dnnl_gOIdhw4o8i8o4i;
1265
+ const format_tag_t gOIhw2o8i8o2i = dnnl_gOIhw2o8i8o2i;
1266
+ const format_tag_t gOIhw8o8i = dnnl_gOIhw8o8i;
1267
+ const format_tag_t gOIhw8o4i = dnnl_gOIhw8o4i;
1268
+ const format_tag_t gOwhi16o = dnnl_gOwhi16o;
1269
+ const format_tag_t gOdwhi16o = dnnl_gOdwhi16o;
1270
+ const format_tag_t gIOdhw16i16o = dnnl_gIOdhw16i16o;
1271
+ const format_tag_t gOdhwi16o = dnnl_gOdhwi16o;
1272
+ const format_tag_t gOdhwI16o2i = dnnl_gOdhwI16o2i;
1273
+ const format_tag_t gOdhwI16o4i = dnnl_gOdhwI16o4i;
1274
+ const format_tag_t gOdhwi4o = dnnl_gOdhwi4o;
1275
+ const format_tag_t gOdhwi8o = dnnl_gOdhwi8o;
1276
+ const format_tag_t gOdhwI8o2i = dnnl_gOdhwI8o2i;
1277
+ const format_tag_t gOdhwI8o4i = dnnl_gOdhwI8o4i;
1278
+ const format_tag_t gOIdhw16i16o = dnnl_gOIdhw16i16o;
1279
+ const format_tag_t gOIdhw16o16i = dnnl_gOIdhw16o16i;
1280
+ const format_tag_t gOIdhw16o16i2o = dnnl_gOIdhw16o16i2o;
1281
+ const format_tag_t gOidhw16o = dnnl_gOidhw16o;
1282
+ const format_tag_t gOIdhw4i4o = dnnl_gOIdhw4i4o;
1283
+ const format_tag_t gOIdhw4o4i = dnnl_gOIdhw4o4i;
1284
+ const format_tag_t gOidhw4o = dnnl_gOidhw4o;
1285
+ const format_tag_t gOIdhw8i16o2i = dnnl_gOIdhw8i16o2i;
1286
+ const format_tag_t gOIdhw4i16o4i = dnnl_gOIdhw4i16o4i;
1287
+ const format_tag_t gOIdhw16i16o4i = dnnl_gOIdhw16i16o4i;
1288
+ const format_tag_t gOIdhw2i8o4i = dnnl_gOIdhw2i8o4i;
1289
+ const format_tag_t gOIdhw16i16o2i = dnnl_gOIdhw16i16o2i;
1290
+ const format_tag_t gOIdhw8o16i2o = dnnl_gOIdhw8o16i2o;
1291
+ const format_tag_t gIOdhw8o16i2o = dnnl_gIOdhw8o16i2o;
1292
+ const format_tag_t gOIdhw8i8o = dnnl_gOIdhw8i8o;
1293
+ const format_tag_t gOIdhw8o8i = dnnl_gOIdhw8o8i;
1294
+ const format_tag_t gOIdhw8o4i = dnnl_gOIdhw8o4i;
1295
+ const format_tag_t Goiw32g = dnnl_Goiw32g;
1296
+ const format_tag_t Goihw32g = dnnl_Goihw32g;
1297
+ const format_tag_t Goidhw32g = dnnl_Goidhw32g;
1298
+ const format_tag_t OIdhw4o8i8o2i = dnnl_OIdhw4o8i8o2i;
1299
+ const format_tag_t OIhw4o8i8o2i = dnnl_OIhw4o8i8o2i;
1300
+ const format_tag_t OIw4o8i8o2i = dnnl_OIw4o8i8o2i;
1301
+ const format_tag_t gOIdhw4o8i8o2i = dnnl_gOIdhw4o8i8o2i;
1302
+ const format_tag_t gOIhw4o8i8o2i = dnnl_gOIhw4o8i8o2i;
1303
+ const format_tag_t gOIw4o8i8o2i = dnnl_gOIw4o8i8o2i;
1304
+ const format_tag_t IOdhw4i8o8i2o = dnnl_IOdhw4i8o8i2o;
1305
+ const format_tag_t IOhw4i8o8i2o = dnnl_IOhw4i8o8i2o;
1306
+ const format_tag_t IOw4i8o8i2o = dnnl_IOw4i8o8i2o;
1307
+ const format_tag_t gIOdhw4i8o8i2o = dnnl_gIOdhw4i8o8i2o;
1308
+ const format_tag_t gIOhw4i8o8i2o = dnnl_gIOhw4i8o8i2o;
1309
+ const format_tag_t gIOw4i8o8i2o = dnnl_gIOw4i8o8i2o;
1310
+ const format_tag_t gOIw2i4o2i = dnnl_gOIw2i4o2i;
1311
+ const format_tag_t gOIhw2i4o2i = dnnl_gOIhw2i4o2i;
1312
+ const format_tag_t gOIdhw2i4o2i = dnnl_gOIdhw2i4o2i;
1313
+ const format_tag_t gOIw2o4i2o = dnnl_gOIw2o4i2o;
1314
+ const format_tag_t gOIhw2o4i2o = dnnl_gOIhw2o4i2o;
1315
+ const format_tag_t gOIdhw2o4i2o = dnnl_gOIdhw2o4i2o;
1316
+ const format_tag_t gOIw4i8o2i = dnnl_gOIw4i8o2i;
1317
+ const format_tag_t gOIhw4i8o2i = dnnl_gOIhw4i8o2i;
1318
+ const format_tag_t gOIdhw4i8o2i = dnnl_gOIdhw4i8o2i;
1319
+ const format_tag_t gOIw4o8i2o = dnnl_gOIw4o8i2o;
1320
+ const format_tag_t gOIhw4o8i2o = dnnl_gOIhw4o8i2o;
1321
+ const format_tag_t gOIdhw4o8i2o = dnnl_gOIdhw4o8i2o;
1322
+ const format_tag_t ldOi16o = dnnl_ldOi16o;
1323
+ const format_tag_t ldOi32o = dnnl_ldOi32o;
1324
+ const format_tag_t ldOI32o4i = dnnl_ldOI32o4i;
1325
+ const format_tag_t ldIo32i = dnnl_ldIo32i;
1326
+ const format_tag_t ldgOi16o = dnnl_ldgOi16o;
1327
+ const format_tag_t ldgOi32o = dnnl_ldgOi32o;
1328
+ const format_tag_t ldgOI32o2i = dnnl_ldgOI32o2i;
1329
+ const format_tag_t ldgOI32o4i = dnnl_ldgOI32o4i;
1330
+ const format_tag_t ldgOI64o2i = dnnl_ldgOI64o2i;
1331
+ const format_tag_t ldgOI64o4i = dnnl_ldgOI64o4i;
1332
+ const format_tag_t ldgIo16i = dnnl_ldgIo16i;
1333
+ const format_tag_t ldgIo32i = dnnl_ldgIo32i;
1334
+ const format_tag_t ldgIO32i2o = dnnl_ldgIO32i2o;
1335
+
1336
+ const format_tag_t wIo2i = dnnl_wIo2i;
1337
+ const format_tag_t wIo4i = dnnl_wIo4i;
1338
+ const format_tag_t gwio = dnnl_gwio;
1339
+ const format_tag_t gwIo2i = dnnl_gwIo2i;
1340
+ const format_tag_t gwIo4i = dnnl_gwIo4i;
1341
+ const format_tag_t hwIo2i = dnnl_hwIo2i;
1342
+ const format_tag_t hwIo4i = dnnl_hwIo4i;
1343
+ const format_tag_t ghwio = dnnl_ghwio;
1344
+ const format_tag_t ghwIo2i = dnnl_ghwIo2i;
1345
+ const format_tag_t ghwIo4i = dnnl_ghwIo4i;
1346
+ const format_tag_t dhwIo2i = dnnl_dhwIo2i;
1347
+ const format_tag_t dhwIo4i = dnnl_dhwIo4i;
1348
+ const format_tag_t gdhwio = dnnl_gdhwio;
1349
+ const format_tag_t gdhwIo2i = dnnl_gdhwIo2i;
1350
+ const format_tag_t gdhwIo4i = dnnl_gdhwIo4i;
1351
+ const format_tag_t Owi32o = dnnl_Owi32o;
1352
+ const format_tag_t OwI32o2i = dnnl_OwI32o2i;
1353
+ const format_tag_t OwI32o4i = dnnl_OwI32o4i;
1354
+ const format_tag_t Owi48o = dnnl_Owi48o;
1355
+ const format_tag_t OwI48o2i = dnnl_OwI48o2i;
1356
+ const format_tag_t OwI48o4i = dnnl_OwI48o4i;
1357
+ const format_tag_t Owi64o = dnnl_Owi64o;
1358
+ const format_tag_t OwI64o2i = dnnl_OwI64o2i;
1359
+ const format_tag_t OwI64o4i = dnnl_OwI64o4i;
1360
+ const format_tag_t OhwI32o2i = dnnl_OhwI32o2i;
1361
+ const format_tag_t OhwI32o4i = dnnl_OhwI32o4i;
1362
+ const format_tag_t Ohwi48o = dnnl_Ohwi48o;
1363
+ const format_tag_t OhwI48o2i = dnnl_OhwI48o2i;
1364
+ const format_tag_t OhwI48o4i = dnnl_OhwI48o4i;
1365
+ const format_tag_t Ohwi64o = dnnl_Ohwi64o;
1366
+ const format_tag_t OhwI64o2i = dnnl_OhwI64o2i;
1367
+ const format_tag_t OhwI64o4i = dnnl_OhwI64o4i;
1368
+ const format_tag_t Odhwi32o = dnnl_Odhwi32o;
1369
+ const format_tag_t OdhwI32o2i = dnnl_OdhwI32o2i;
1370
+ const format_tag_t OdhwI32o4i = dnnl_OdhwI32o4i;
1371
+ const format_tag_t Odhwi48o = dnnl_Odhwi48o;
1372
+ const format_tag_t OdhwI48o2i = dnnl_OdhwI48o2i;
1373
+ const format_tag_t OdhwI48o4i = dnnl_OdhwI48o4i;
1374
+ const format_tag_t Odhwi64o = dnnl_Odhwi64o;
1375
+ const format_tag_t OdhwI64o2i = dnnl_OdhwI64o2i;
1376
+ const format_tag_t OdhwI64o4i = dnnl_OdhwI64o4i;
1377
+ const format_tag_t gOwi32o = dnnl_gOwi32o;
1378
+ const format_tag_t gOwI32o2i = dnnl_gOwI32o2i;
1379
+ const format_tag_t gOwI32o4i = dnnl_gOwI32o4i;
1380
+ const format_tag_t gOwi48o = dnnl_gOwi48o;
1381
+ const format_tag_t gOwI48o2i = dnnl_gOwI48o2i;
1382
+ const format_tag_t gOwI48o4i = dnnl_gOwI48o4i;
1383
+ const format_tag_t gOwi64o = dnnl_gOwi64o;
1384
+ const format_tag_t gOwI64o2i = dnnl_gOwI64o2i;
1385
+ const format_tag_t gOwI64o4i = dnnl_gOwI64o4i;
1386
+ const format_tag_t gOhwI32o2i = dnnl_gOhwI32o2i;
1387
+ const format_tag_t gOhwI32o4i = dnnl_gOhwI32o4i;
1388
+ const format_tag_t gOhwi48o = dnnl_gOhwi48o;
1389
+ const format_tag_t gOhwI48o2i = dnnl_gOhwI48o2i;
1390
+ const format_tag_t gOhwI48o4i = dnnl_gOhwI48o4i;
1391
+ const format_tag_t gOhwi64o = dnnl_gOhwi64o;
1392
+ const format_tag_t gOhwI64o2i = dnnl_gOhwI64o2i;
1393
+ const format_tag_t gOhwI64o4i = dnnl_gOhwI64o4i;
1394
+ const format_tag_t gOdhwi32o = dnnl_gOdhwi32o;
1395
+ const format_tag_t gOdhwI32o2i = dnnl_gOdhwI32o2i;
1396
+ const format_tag_t gOdhwI32o4i = dnnl_gOdhwI32o4i;
1397
+ const format_tag_t gOdhwi48o = dnnl_gOdhwi48o;
1398
+ const format_tag_t gOdhwI48o2i = dnnl_gOdhwI48o2i;
1399
+ const format_tag_t gOdhwI48o4i = dnnl_gOdhwI48o4i;
1400
+ const format_tag_t gOdhwi64o = dnnl_gOdhwi64o;
1401
+ const format_tag_t gOdhwI64o2i = dnnl_gOdhwI64o2i;
1402
+ const format_tag_t gOdhwI64o4i = dnnl_gOdhwI64o4i;
1403
+ const format_tag_t ABc2b8a16b4a = dnnl_ABc2b8a16b4a;
1404
+ const format_tag_t ABcd2b8a16b4a = dnnl_ABcd2b8a16b4a;
1405
+ const format_tag_t ABcde2b8a16b4a = dnnl_ABcde2b8a16b4a;
1406
+ const format_tag_t ABc2a8b16a4b = dnnl_ABc2a8b16a4b;
1407
+ const format_tag_t ABcd2a8b16a4b = dnnl_ABcd2a8b16a4b;
1408
+ const format_tag_t ABcde2a8b16a4b = dnnl_ABcde2a8b16a4b;
1409
+ const format_tag_t ABc2a8b16a2b = dnnl_ABc2a8b16a2b;
1410
+ const format_tag_t ABcd2a8b16a2b = dnnl_ABcd2a8b16a2b;
1411
+ const format_tag_t ABcde2a8b16a2b = dnnl_ABcde2a8b16a2b;
1412
+ const format_tag_t aBCd2b8c16b2c = dnnl_aBCd2b8c16b2c;
1413
+ const format_tag_t aBCde2b8c16b2c = dnnl_aBCde2b8c16b2c;
1414
+ const format_tag_t aBCdef2b8c16b2c = dnnl_aBCdef2b8c16b2c;
1415
+ const format_tag_t aBCd2b8c16b4c = dnnl_aBCd2b8c16b4c;
1416
+ const format_tag_t aBCde2b8c16b4c = dnnl_aBCde2b8c16b4c;
1417
+ const format_tag_t BAc2b8a16b2a = dnnl_BAc2b8a16b2a;
1418
+ const format_tag_t aBCde2c8b16c2b = dnnl_aBCde2c8b16c2b;
1419
+ const format_tag_t aBCdef2c8b16c2b = dnnl_aBCdef2c8b16c2b;
1420
+ const format_tag_t BAcd2b8a16b2a = dnnl_BAcd2b8a16b2a;
1421
+ const format_tag_t BAcde2b8a16b2a = dnnl_BAcde2b8a16b2a;
1422
+ const format_tag_t aCBd2c8b16c2b = dnnl_aCBd2c8b16c2b;
1423
+ const format_tag_t aCBde2c8b16c2b = dnnl_aCBde2c8b16c2b;
1424
+ const format_tag_t aCBdef2c8b16c2b = dnnl_aCBdef2c8b16c2b;
1425
+ const format_tag_t BAc2b8a16b4a = dnnl_BAc2b8a16b4a;
1426
+ const format_tag_t BAcd2b8a16b4a = dnnl_BAcd2b8a16b4a;
1427
+ const format_tag_t BAcde2b8a16b4a = dnnl_BAcde2b8a16b4a;
1428
+ const format_tag_t ABc2b32a8b = dnnl_ABc2b32a8b;
1429
+ const format_tag_t ABcd2b32a8b = dnnl_ABcd2b32a8b;
1430
+ const format_tag_t ABcde2b32a8b = dnnl_ABcde2b32a8b;
1431
+ const format_tag_t aBC2b8c16b2c = dnnl_aBC2b8c16b2c;
1432
+ const format_tag_t ABc16a4b = dnnl_ABc16a4b;
1433
+ const format_tag_t ABcd16a4b = dnnl_ABcd16a4b;
1434
+ const format_tag_t ABcde16a4b = dnnl_ABcde16a4b;
1435
+ const format_tag_t NCw2c32n8c = dnnl_NCw2c32n8c;
1436
+ const format_tag_t NChw2c32n8c = dnnl_NChw2c32n8c;
1437
+ const format_tag_t NCdhw2c32n8c = dnnl_NCdhw2c32n8c;
1438
+ const format_tag_t OIw2i8o16i4o = dnnl_OIw2i8o16i4o;
1439
+ const format_tag_t OIhw2i8o16i4o = dnnl_OIhw2i8o16i4o;
1440
+ const format_tag_t OIdhw2i8o16i4o = dnnl_OIdhw2i8o16i4o;
1441
+ const format_tag_t OIw2o8i16o4i = dnnl_OIw2o8i16o4i;
1442
+ const format_tag_t OIhw2o8i16o4i = dnnl_OIhw2o8i16o4i;
1443
+ const format_tag_t OIdhw2o8i16o4i = dnnl_OIdhw2o8i16o4i;
1444
+ const format_tag_t OIw2o8i16o2i = dnnl_OIw2o8i16o2i;
1445
+ const format_tag_t OIhw2o8i16o2i = dnnl_OIhw2o8i16o2i;
1446
+ const format_tag_t OIdhw2o8i16o2i = dnnl_OIdhw2o8i16o2i;
1447
+ const format_tag_t IOw2i8o16i4o = dnnl_IOw2i8o16i4o;
1448
+ const format_tag_t IOhw2i8o16i4o = dnnl_IOhw2i8o16i4o;
1449
+ const format_tag_t IOdhw2i8o16i4o = dnnl_IOdhw2i8o16i4o;
1450
+ const format_tag_t IOw2i8o16i2o = dnnl_IOw2i8o16i2o;
1451
+ const format_tag_t IOhw2i8o16i2o = dnnl_IOhw2i8o16i2o;
1452
+ const format_tag_t IOdhw2i8o16i2o = dnnl_IOdhw2i8o16i2o;
1453
+ const format_tag_t gOIw2o8i16o2i = dnnl_gOIw2o8i16o2i;
1454
+ const format_tag_t gOIhw2o8i16o2i = dnnl_gOIhw2o8i16o2i;
1455
+ const format_tag_t gOIdhw2o8i16o2i = dnnl_gOIdhw2o8i16o2i;
1456
+ const format_tag_t gOIw2o8i16o4i = dnnl_gOIw2o8i16o4i;
1457
+ const format_tag_t gOIhw2o8i16o4i = dnnl_gOIhw2o8i16o4i;
1458
+ const format_tag_t gIOw2i8o16i2o = dnnl_gIOw2i8o16i2o;
1459
+ const format_tag_t gIOhw2i8o16i2o = dnnl_gIOhw2i8o16i2o;
1460
+ const format_tag_t gIOdhw2i8o16i2o = dnnl_gIOdhw2i8o16i2o;
1461
+ const format_tag_t OwI16i16o2i = dnnl_OwI16i16o2i;
1462
+ const format_tag_t OwI16i16o4i = dnnl_OwI16i16o4i;
1463
+ const format_tag_t OhwI16i16o2i = dnnl_OhwI16i16o2i;
1464
+ const format_tag_t OhwI16i16o4i = dnnl_OhwI16i16o4i;
1465
+ const format_tag_t OdhwI16i16o2i = dnnl_OdhwI16i16o2i;
1466
+ const format_tag_t OdhwI16i16o4i = dnnl_OdhwI16i16o4i;
1467
+ const format_tag_t gOwI16i16o2i = dnnl_gOwI16i16o2i;
1468
+ const format_tag_t gOwI16i16o4i = dnnl_gOwI16i16o4i;
1469
+ const format_tag_t gOhwI16i16o2i = dnnl_gOhwI16i16o2i;
1470
+ const format_tag_t gOhwI16i16o4i = dnnl_gOhwI16i16o4i;
1471
+ const format_tag_t gOdhwI16i16o2i = dnnl_gOdhwI16i16o2i;
1472
+ const format_tag_t gOdhwI16i16o4i = dnnl_gOdhwI16i16o4i;
1473
+ const format_tag_t OwI16i32o2i = dnnl_OwI16i32o2i;
1474
+ const format_tag_t OwI16i32o4i = dnnl_OwI16i32o4i;
1475
+ const format_tag_t OwI16i48o2i = dnnl_OwI16i48o2i;
1476
+ const format_tag_t OwI16i48o4i = dnnl_OwI16i48o4i;
1477
+ const format_tag_t OwI16i64o2i = dnnl_OwI16i64o2i;
1478
+ const format_tag_t OwI16i64o4i = dnnl_OwI16i64o4i;
1479
+ const format_tag_t OhwI16i32o2i = dnnl_OhwI16i32o2i;
1480
+ const format_tag_t OhwI16i32o4i = dnnl_OhwI16i32o4i;
1481
+ const format_tag_t OhwI16i48o2i = dnnl_OhwI16i48o2i;
1482
+ const format_tag_t OhwI16i48o4i = dnnl_OhwI16i48o4i;
1483
+ const format_tag_t OhwI16i64o2i = dnnl_OhwI16i64o2i;
1484
+ const format_tag_t OhwI16i64o4i = dnnl_OhwI16i64o4i;
1485
+ const format_tag_t OdhwI16i32o2i = dnnl_OdhwI16i32o2i;
1486
+ const format_tag_t OdhwI16i32o4i = dnnl_OdhwI16i32o4i;
1487
+ const format_tag_t OdhwI16i48o2i = dnnl_OdhwI16i48o2i;
1488
+ const format_tag_t OdhwI16i48o4i = dnnl_OdhwI16i48o4i;
1489
+ const format_tag_t OdhwI16i64o2i = dnnl_OdhwI16i64o2i;
1490
+ const format_tag_t OdhwI16i64o4i = dnnl_OdhwI16i64o4i;
1491
+ const format_tag_t IdhwO16o32i2o = dnnl_IdhwO16o32i2o;
1492
+ const format_tag_t IdhwO16o32i4o = dnnl_IdhwO16o32i4o;
1493
+ const format_tag_t IdhwO16o48i2o = dnnl_IdhwO16o48i2o;
1494
+ const format_tag_t IdhwO16o48i4o = dnnl_IdhwO16o48i4o;
1495
+ const format_tag_t IdhwO16o64i2o = dnnl_IdhwO16o64i2o;
1496
+ const format_tag_t IdhwO16o64i4o = dnnl_IdhwO16o64i4o;
1497
+ const format_tag_t gOwI16i32o2i = dnnl_gOwI16i32o2i;
1498
+ const format_tag_t gOwI16i32o4i = dnnl_gOwI16i32o4i;
1499
+ const format_tag_t gOwI16i48o2i = dnnl_gOwI16i48o2i;
1500
+ const format_tag_t gOwI16i48o4i = dnnl_gOwI16i48o4i;
1501
+ const format_tag_t gOwI16i64o2i = dnnl_gOwI16i64o2i;
1502
+ const format_tag_t gOwI16i64o4i = dnnl_gOwI16i64o4i;
1503
+ const format_tag_t gOhwI16i32o2i = dnnl_gOhwI16i32o2i;
1504
+ const format_tag_t gOhwI16i32o4i = dnnl_gOhwI16i32o4i;
1505
+ const format_tag_t gOhwI16i48o2i = dnnl_gOhwI16i48o2i;
1506
+ const format_tag_t gOhwI16i48o4i = dnnl_gOhwI16i48o4i;
1507
+ const format_tag_t gOhwI16i64o2i = dnnl_gOhwI16i64o2i;
1508
+ const format_tag_t gOhwI16i64o4i = dnnl_gOhwI16i64o4i;
1509
+ const format_tag_t gOdhwI16i32o2i = dnnl_gOdhwI16i32o2i;
1510
+ const format_tag_t gOdhwI16i32o4i = dnnl_gOdhwI16i32o4i;
1511
+ const format_tag_t gOdhwI16i48o2i = dnnl_gOdhwI16i48o2i;
1512
+ const format_tag_t gOdhwI16i48o4i = dnnl_gOdhwI16i48o4i;
1513
+ const format_tag_t gOdhwI16i64o2i = dnnl_gOdhwI16i64o2i;
1514
+ const format_tag_t gOdhwI16i64o4i = dnnl_gOdhwI16i64o4i;
1515
+ const format_tag_t gIdhwO16o32i2o = dnnl_gIdhwO16o32i2o;
1516
+ const format_tag_t gIdhwO16o32i4o = dnnl_gIdhwO16o32i4o;
1517
+ const format_tag_t gIdhwO16o48i2o = dnnl_gIdhwO16o48i2o;
1518
+ const format_tag_t gIdhwO16o48i4o = dnnl_gIdhwO16o48i4o;
1519
+ const format_tag_t gIdhwO16o64i2o = dnnl_gIdhwO16o64i2o;
1520
+ const format_tag_t gIdhwO16o64i4o = dnnl_gIdhwO16o64i4o;
1521
+
1522
+ const format_tag_t Idhwo32i = dnnl_Idhwo32i;
1523
+ const format_tag_t IdhwO32i2o = dnnl_IdhwO32i2o;
1524
+ const format_tag_t IdhwO32i4o = dnnl_IdhwO32i4o;
1525
+ const format_tag_t Idhwo48i = dnnl_Idhwo48i;
1526
+ const format_tag_t IdhwO48i2o = dnnl_IdhwO48i2o;
1527
+ const format_tag_t IdhwO48i4o = dnnl_IdhwO48i4o;
1528
+ const format_tag_t Idhwo64i = dnnl_Idhwo64i;
1529
+ const format_tag_t IdhwO64i2o = dnnl_IdhwO64i2o;
1530
+ const format_tag_t IdhwO64i4o = dnnl_IdhwO64i4o;
1531
+
1532
+ const format_tag_t gIdhwo32i = dnnl_gIdhwo32i;
1533
+ const format_tag_t gIdhwO32i2o = dnnl_gIdhwO32i2o;
1534
+ const format_tag_t gIdhwO32i4o = dnnl_gIdhwO32i4o;
1535
+ const format_tag_t gIdhwo48i = dnnl_gIdhwo48i;
1536
+ const format_tag_t gIdhwO48i2o = dnnl_gIdhwO48i2o;
1537
+ const format_tag_t gIdhwO48i4o = dnnl_gIdhwO48i4o;
1538
+ const format_tag_t gIdhwo64i = dnnl_gIdhwo64i;
1539
+ const format_tag_t gIdhwO64i2o = dnnl_gIdhwO64i2o;
1540
+ const format_tag_t gIdhwO64i4o = dnnl_gIdhwO64i4o;
1541
+
1542
+ const format_tag_t Iwo32i = dnnl_Iwo32i;
1543
+ const format_tag_t IwO32i2o = dnnl_IwO32i2o;
1544
+ const format_tag_t IwO32i4o = dnnl_IwO32i4o;
1545
+ const format_tag_t Iwo48i = dnnl_Iwo48i;
1546
+ const format_tag_t IwO48i2o = dnnl_IwO48i2o;
1547
+ const format_tag_t IwO48i4o = dnnl_IwO48i4o;
1548
+ const format_tag_t Iwo64i = dnnl_Iwo64i;
1549
+ const format_tag_t IwO64i2o = dnnl_IwO64i2o;
1550
+ const format_tag_t IwO64i4o = dnnl_IwO64i4o;
1551
+
1552
+ const format_tag_t gIwo32i = dnnl_gIwo32i;
1553
+ const format_tag_t gIwO32i2o = dnnl_gIwO32i2o;
1554
+ const format_tag_t gIwO32i4o = dnnl_gIwO32i4o;
1555
+ const format_tag_t gIwo48i = dnnl_gIwo48i;
1556
+ const format_tag_t gIwO48i2o = dnnl_gIwO48i2o;
1557
+ const format_tag_t gIwO48i4o = dnnl_gIwO48i4o;
1558
+ const format_tag_t gIwo64i = dnnl_gIwo64i;
1559
+ const format_tag_t gIwO64i2o = dnnl_gIwO64i2o;
1560
+ const format_tag_t gIwO64i4o = dnnl_gIwO64i4o;
1561
+
1562
+ const format_tag_t IwO16o16i2o = dnnl_IwO16o16i2o;
1563
+ const format_tag_t IwO16o16i4o = dnnl_IwO16o16i4o;
1564
+ const format_tag_t IhwO16o16i2o = dnnl_IhwO16o16i2o;
1565
+ const format_tag_t IhwO16o16i4o = dnnl_IhwO16o16i4o;
1566
+ const format_tag_t IdhwO16o16i2o = dnnl_IdhwO16o16i2o;
1567
+ const format_tag_t IdhwO16o16i4o = dnnl_IdhwO16o16i4o;
1568
+
1569
+ const format_tag_t gIwO16o16i2o = dnnl_gIwO16o16i2o;
1570
+ const format_tag_t gIwO16o16i4o = dnnl_gIwO16o16i4o;
1571
+ const format_tag_t gIhwO16o16i2o = dnnl_gIhwO16o16i2o;
1572
+ const format_tag_t gIhwO16o16i4o = dnnl_gIhwO16o16i4o;
1573
+ const format_tag_t gIdhwO16o16i2o = dnnl_gIdhwO16o16i2o;
1574
+ const format_tag_t gIdhwO16o16i4o = dnnl_gIdhwO16o16i4o;
1575
+
1576
+ const format_tag_t IwO16o32i2o = dnnl_IwO16o32i2o;
1577
+ const format_tag_t IwO16o32i4o = dnnl_IwO16o32i4o;
1578
+ const format_tag_t IwO16o48i2o = dnnl_IwO16o48i2o;
1579
+ const format_tag_t IwO16o48i4o = dnnl_IwO16o48i4o;
1580
+ const format_tag_t IwO16o64i2o = dnnl_IwO16o64i2o;
1581
+ const format_tag_t IwO16o64i4o = dnnl_IwO16o64i4o;
1582
+
1583
+ const format_tag_t gIwO16o32i2o = dnnl_gIwO16o32i2o;
1584
+ const format_tag_t gIwO16o32i4o = dnnl_gIwO16o32i4o;
1585
+ const format_tag_t gIwO16o48i2o = dnnl_gIwO16o48i2o;
1586
+ const format_tag_t gIwO16o48i4o = dnnl_gIwO16o48i4o;
1587
+ const format_tag_t gIwO16o64i2o = dnnl_gIwO16o64i2o;
1588
+ const format_tag_t gIwO16o64i4o = dnnl_gIwO16o64i4o;
1589
+
1590
+ const format_tag_t IhwO16o32i2o = dnnl_IhwO16o32i2o;
1591
+ const format_tag_t IhwO16o32i4o = dnnl_IhwO16o32i4o;
1592
+ const format_tag_t IhwO16o48i2o = dnnl_IhwO16o48i2o;
1593
+ const format_tag_t IhwO16o48i4o = dnnl_IhwO16o48i4o;
1594
+ const format_tag_t IhwO16o64i2o = dnnl_IhwO16o64i2o;
1595
+ const format_tag_t IhwO16o64i4o = dnnl_IhwO16o64i4o;
1596
+
1597
+ const format_tag_t gIhwO16o32i2o = dnnl_gIhwO16o32i2o;
1598
+ const format_tag_t gIhwO16o32i4o = dnnl_gIhwO16o32i4o;
1599
+ const format_tag_t gIhwO16o48i2o = dnnl_gIhwO16o48i2o;
1600
+ const format_tag_t gIhwO16o48i4o = dnnl_gIhwO16o48i4o;
1601
+ const format_tag_t gIhwO16o64i2o = dnnl_gIhwO16o64i2o;
1602
+ const format_tag_t gIhwO16o64i4o = dnnl_gIhwO16o64i4o;
1603
+
1604
+ const format_tag_t Ihwo32i = dnnl_Ihwo32i;
1605
+ const format_tag_t IhwO32i2o = dnnl_IhwO32i2o;
1606
+ const format_tag_t IhwO32i4o = dnnl_IhwO32i4o;
1607
+ const format_tag_t Ihwo48i = dnnl_Ihwo48i;
1608
+ const format_tag_t IhwO48i2o = dnnl_IhwO48i2o;
1609
+ const format_tag_t IhwO48i4o = dnnl_IhwO48i4o;
1610
+ const format_tag_t Ihwo64i = dnnl_Ihwo64i;
1611
+ const format_tag_t IhwO64i2o = dnnl_IhwO64i2o;
1612
+ const format_tag_t IhwO64i4o = dnnl_IhwO64i4o;
1613
+
1614
+ const format_tag_t gIhwo32i = dnnl_gIhwo32i;
1615
+ const format_tag_t gIhwO32i2o = dnnl_gIhwO32i2o;
1616
+ const format_tag_t gIhwO32i4o = dnnl_gIhwO32i4o;
1617
+ const format_tag_t gIhwo48i = dnnl_gIhwo48i;
1618
+ const format_tag_t gIhwO48i2o = dnnl_gIhwO48i2o;
1619
+ const format_tag_t gIhwO48i4o = dnnl_gIhwO48i4o;
1620
+ const format_tag_t gIhwo64i = dnnl_gIhwo64i;
1621
+ const format_tag_t gIhwO64i2o = dnnl_gIhwO64i2o;
1622
+ const format_tag_t gIhwO64i4o = dnnl_gIhwO64i4o;
1623
+
1624
+ const format_tag_t Iwo8i = dnnl_Iwo8i;
1625
+ const format_tag_t IwO8i2o = dnnl_IwO8i2o;
1626
+ const format_tag_t IwO8i4o = dnnl_IwO8i4o;
1627
+ const format_tag_t Ihwo8i = dnnl_Ihwo8i;
1628
+ const format_tag_t IhwO8i2o = dnnl_IhwO8i2o;
1629
+ const format_tag_t IhwO8i4o = dnnl_IhwO8i4o;
1630
+ const format_tag_t Idhwo8i = dnnl_Idhwo8i;
1631
+ const format_tag_t IdhwO8i2o = dnnl_IdhwO8i2o;
1632
+ const format_tag_t IdhwO8i4o = dnnl_IdhwO8i4o;
1633
+
1634
+ const format_tag_t Iwo16i = dnnl_Iwo16i;
1635
+ const format_tag_t IwO16i2o = dnnl_IwO16i2o;
1636
+ const format_tag_t IwO16i4o = dnnl_IwO16i4o;
1637
+ const format_tag_t Ihwo16i = dnnl_Ihwo16i;
1638
+ const format_tag_t IhwO16i2o = dnnl_IhwO16i2o;
1639
+ const format_tag_t IhwO16i4o = dnnl_IhwO16i4o;
1640
+ const format_tag_t Idhwo16i = dnnl_Idhwo16i;
1641
+ const format_tag_t IdhwO16i2o = dnnl_IdhwO16i2o;
1642
+ const format_tag_t IdhwO16i4o = dnnl_IdhwO16i4o;
1643
+
1644
+ const format_tag_t Iwo24i = dnnl_Iwo24i;
1645
+ const format_tag_t IwO24i2o = dnnl_IwO24i2o;
1646
+ const format_tag_t IwO24i4o = dnnl_IwO24i4o;
1647
+ const format_tag_t Ihwo24i = dnnl_Ihwo24i;
1648
+ const format_tag_t IhwO24i2o = dnnl_IhwO24i2o;
1649
+ const format_tag_t IhwO24i4o = dnnl_IhwO24i4o;
1650
+ const format_tag_t Idhwo24i = dnnl_Idhwo24i;
1651
+ const format_tag_t IdhwO24i2o = dnnl_IdhwO24i2o;
1652
+ const format_tag_t IdhwO24i4o = dnnl_IdhwO24i4o;
1653
+
1654
+ const format_tag_t gIwo8i = dnnl_gIwo8i;
1655
+ const format_tag_t gIwO8i2o = dnnl_gIwO8i2o;
1656
+ const format_tag_t gIwO8i4o = dnnl_gIwO8i4o;
1657
+ const format_tag_t gIhwo8i = dnnl_gIhwo8i;
1658
+ const format_tag_t gIhwO8i2o = dnnl_gIhwO8i2o;
1659
+ const format_tag_t gIhwO8i4o = dnnl_gIhwO8i4o;
1660
+ const format_tag_t gIdhwo8i = dnnl_gIdhwo8i;
1661
+ const format_tag_t gIdhwO8i2o = dnnl_gIdhwO8i2o;
1662
+ const format_tag_t gIdhwO8i4o = dnnl_gIdhwO8i4o;
1663
+
1664
+ const format_tag_t gIwo16i = dnnl_gIwo16i;
1665
+ const format_tag_t gIwO16i2o = dnnl_gIwO16i2o;
1666
+ const format_tag_t gIwO16i4o = dnnl_gIwO16i4o;
1667
+
1668
+ const format_tag_t gIhwo16i = dnnl_gIhwo16i;
1669
+ const format_tag_t gIhwO16i2o = dnnl_gIhwO16i2o;
1670
+ const format_tag_t gIhwO16i4o = dnnl_gIhwO16i4o;
1671
+
1672
+ const format_tag_t gIdhwo16i = dnnl_gIdhwo16i;
1673
+ const format_tag_t gIdhwO16i2o = dnnl_gIdhwO16i2o;
1674
+ const format_tag_t gIdhwO16i4o = dnnl_gIdhwO16i4o;
1675
+
1676
+ const format_tag_t gIwo24i = dnnl_gIwo24i;
1677
+ const format_tag_t gIwO24i2o = dnnl_gIwO24i2o;
1678
+ const format_tag_t gIwO24i4o = dnnl_gIwO24i4o;
1679
+ const format_tag_t gIhwo24i = dnnl_gIhwo24i;
1680
+ const format_tag_t gIhwO24i2o = dnnl_gIhwO24i2o;
1681
+ const format_tag_t gIhwO24i4o = dnnl_gIhwO24i4o;
1682
+ const format_tag_t gIdhwo24i = dnnl_gIdhwo24i;
1683
+ const format_tag_t gIdhwO24i2o = dnnl_gIdhwO24i2o;
1684
+ const format_tag_t gIdhwO24i4o = dnnl_gIdhwO24i4o;
1685
+
1686
+ const format_tag_t hwioG16g = dnnl_hwioG16g;
1687
+ const format_tag_t hwioG8g = dnnl_hwioG8g;
1688
+ const format_tag_t dhwioG16g = dnnl_dhwioG16g;
1689
+ const format_tag_t dhwioG8g = dnnl_dhwioG8g;
1690
+ const format_tag_t Owi24o = dnnl_Owi24o;
1691
+ const format_tag_t Ohwi24o = dnnl_Ohwi24o;
1692
+ const format_tag_t Odhwi24o = dnnl_Odhwi24o;
1693
+ const format_tag_t gOwi24o = dnnl_gOwi24o;
1694
+ const format_tag_t gOhwi24o = dnnl_gOhwi24o;
1695
+ const format_tag_t gOdhwi24o = dnnl_gOdhwi24o;
1696
+ const format_tag_t OwI24o2i = dnnl_OwI24o2i;
1697
+ const format_tag_t OhwI24o2i = dnnl_OhwI24o2i;
1698
+ const format_tag_t OdhwI24o2i = dnnl_OdhwI24o2i;
1699
+ const format_tag_t gOwI24o2i = dnnl_gOwI24o2i;
1700
+ const format_tag_t gOhwI24o2i = dnnl_gOhwI24o2i;
1701
+ const format_tag_t gOdhwI24o2i = dnnl_gOdhwI24o2i;
1702
+ const format_tag_t OwI24o4i = dnnl_OwI24o4i;
1703
+ const format_tag_t OhwI24o4i = dnnl_OhwI24o4i;
1704
+ const format_tag_t OdhwI24o4i = dnnl_OdhwI24o4i;
1705
+ const format_tag_t gOwI24o4i = dnnl_gOwI24o4i;
1706
+ const format_tag_t gOhwI24o4i = dnnl_gOhwI24o4i;
1707
+ const format_tag_t gOdhwI24o4i = dnnl_gOdhwI24o4i;
1708
+
1709
+ const format_tag_t OI8i32o = dnnl_OI8i32o;
1710
+ const format_tag_t OIw8i32o = dnnl_OIw8i32o;
1711
+ const format_tag_t OIhw8i32o = dnnl_OIhw8i32o;
1712
+ const format_tag_t OIdhw8i32o = dnnl_OIdhw8i32o;
1713
+ const format_tag_t OI8i24o = dnnl_OI8i24o;
1714
+ const format_tag_t OIw8i24o = dnnl_OIw8i24o;
1715
+ const format_tag_t OIhw8i24o = dnnl_OIhw8i24o;
1716
+ const format_tag_t OIdhw8i24o = dnnl_OIdhw8i24o;
1717
+ const format_tag_t OI8i16o = dnnl_OI8i16o;
1718
+ const format_tag_t OIw8i16o = dnnl_OIw8i16o;
1719
+ const format_tag_t OIhw8i16o = dnnl_OIhw8i16o;
1720
+ const format_tag_t OIdhw8i16o = dnnl_OIdhw8i16o;
1721
+ const format_tag_t OI8i8o = dnnl_OI8i8o;
1722
+ const format_tag_t OI8i8o2i = dnnl_OI8i8o2i;
1723
+ const format_tag_t OIw8i8o2i = dnnl_OIw8i8o2i;
1724
+ const format_tag_t OIhw8i8o2i = dnnl_OIhw8i8o2i;
1725
+ const format_tag_t OIdhw8i8o2i = dnnl_OIdhw8i8o2i;
1726
+ const format_tag_t OI8i24o2i = dnnl_OI8i24o2i;
1727
+ const format_tag_t OIw8i24o2i = dnnl_OIw8i24o2i;
1728
+ const format_tag_t OIhw8i24o2i = dnnl_OIhw8i24o2i;
1729
+ const format_tag_t OIdhw8i24o2i = dnnl_OIdhw8i24o2i;
1730
+ } // namespace format_tag
1731
+
1732
// Internal alias for the C API normalization-flags type, plus short-named
// aliases of the flag constants used throughout the library internals.
using normalization_flags_t = dnnl_normalization_flags_t;
namespace normalization_flags {
const normalization_flags_t none = dnnl_normalization_flags_none;
const normalization_flags_t use_global_stats = dnnl_use_global_stats;
const normalization_flags_t use_scale = dnnl_use_scale;
const normalization_flags_t use_shift = dnnl_use_shift;
// Fused-post-op flag values (names mirror the public C API constants).
const normalization_flags_t fuse_norm_relu = dnnl_fuse_norm_relu;
const normalization_flags_t fuse_norm_add_relu = dnnl_fuse_norm_add_relu;
} // namespace normalization_flags
1741
+
1742
// Internal alias for the C API RNN-flags type and its constant values.
using rnn_flags_t = dnnl_rnn_flags_t;
namespace rnn_flags {
const rnn_flags_t undef = dnnl_rnn_flags_undef;
const rnn_flags_t diff_weights_overwrite
        = dnnl_rnn_flags_diff_weights_overwrite;
} // namespace rnn_flags
1748
+
1749
// Internal alias for the C API engine-kind type and its constant values.
using engine_kind_t = dnnl_engine_kind_t;
namespace engine_kind {
const engine_kind_t any_engine = dnnl_any_engine;
const engine_kind_t cpu = dnnl_cpu;
const engine_kind_t gpu = dnnl_gpu;
} // namespace engine_kind
1755
+
1756
// Runtime kinds are defined internally: unlike the other *_t types in this
// file there is no matching C API enumeration to alias, so the enumerators
// follow the dnnl_ naming convention by hand.
enum runtime_kind_t {
    dnnl_runtime_none,
    dnnl_runtime_seq,
    dnnl_runtime_omp,
    dnnl_runtime_tbb,
    dnnl_runtime_threadpool,
    dnnl_runtime_ocl,
    dnnl_runtime_sycl,
};

// Short-named aliases of the enumerators above, mirroring the pattern used
// for the C-API-backed types in this file.
namespace runtime_kind {
const runtime_kind_t none = dnnl_runtime_none;
const runtime_kind_t seq = dnnl_runtime_seq;
const runtime_kind_t omp = dnnl_runtime_omp;
const runtime_kind_t tbb = dnnl_runtime_tbb;
const runtime_kind_t threadpool = dnnl_runtime_threadpool;
const runtime_kind_t ocl = dnnl_runtime_ocl;
const runtime_kind_t sycl = dnnl_runtime_sycl;
} // namespace runtime_kind
1775
+
1776
+ using primitive_kind_t = dnnl_primitive_kind_t;
1777
+ namespace primitive_kind {
1778
+ const primitive_kind_t undefined = dnnl_undefined_primitive;
1779
+ const primitive_kind_t reorder = dnnl_reorder;
1780
+ const primitive_kind_t concat = dnnl_concat;
1781
+ const primitive_kind_t sum = dnnl_sum;
1782
+ const primitive_kind_t convolution = dnnl_convolution;
1783
+ const primitive_kind_t deconvolution = dnnl_deconvolution;
1784
+ const primitive_kind_t shuffle = dnnl_shuffle;
1785
+ const primitive_kind_t eltwise = dnnl_eltwise;
1786
+ const primitive_kind_t pooling = dnnl_pooling;
1787
+ const primitive_kind_t prelu = dnnl_prelu;
1788
+ const primitive_kind_t lrn = dnnl_lrn;
1789
+ const primitive_kind_t batch_normalization = dnnl_batch_normalization;
1790
+ const primitive_kind_t inner_product = dnnl_inner_product;
1791
+ const primitive_kind_t rnn = dnnl_rnn;
1792
+ const primitive_kind_t gemm = dnnl_gemm;
1793
+ const primitive_kind_t binary = dnnl_binary;
1794
+ const primitive_kind_t matmul = dnnl_matmul;
1795
+ const primitive_kind_t resampling = dnnl_resampling;
1796
+ const primitive_kind_t reduction = dnnl_reduction;
1797
+ const primitive_kind_t softmax = dnnl_softmax;
1798
+ const primitive_kind_t layer_normalization = dnnl_layer_normalization;
1799
+ const primitive_kind_t group_normalization = dnnl_group_normalization;
1800
+
1801
+ // Internal only primitive kinds.
1802
+ const primitive_kind_t internal_only_start = (primitive_kind_t)(1 << 12);
1803
+ const primitive_kind_t zero_pad = internal_only_start;
1804
+ } // namespace primitive_kind
1805
+
1806
+ using query_t = dnnl_query_t;
1807
+ namespace query {
1808
+ const query_t undef = dnnl_query_undef;
1809
+
1810
+ const query_t engine = dnnl_query_engine;
1811
+ const query_t primitive_kind = dnnl_query_primitive_kind;
1812
+
1813
+ const query_t num_of_inputs_s32 = dnnl_query_num_of_inputs_s32;
1814
+ const query_t num_of_outputs_s32 = dnnl_query_num_of_outputs_s32;
1815
+
1816
+ const query_t time_estimate_f64 = dnnl_query_time_estimate_f64;
1817
+ const query_t memory_consumption_s64 = dnnl_query_memory_consumption_s64;
1818
+
1819
+ const query_t scratchpad_engine = dnnl_query_scratchpad_engine;
1820
+
1821
+ const query_t impl_info_str = dnnl_query_impl_info_str;
1822
+
1823
+ const query_t reorder_src_engine = dnnl_query_reorder_src_engine;
1824
+ const query_t reorder_dst_engine = dnnl_query_reorder_dst_engine;
1825
+
1826
+ const query_t prop_kind = dnnl_query_prop_kind;
1827
+
1828
+ const query_t cache_blob_id_size_s64 = dnnl_query_cache_blob_id_size_s64;
1829
+ const query_t cache_blob_id = dnnl_query_cache_blob_id;
1830
+
1831
+ const query_t strides = dnnl_query_strides;
1832
+ const query_t dilations = dnnl_query_dilations;
1833
+ const query_t padding_l = dnnl_query_padding_l;
1834
+ const query_t padding_r = dnnl_query_padding_r;
1835
+ const query_t epsilon_f32 = dnnl_query_epsilon_f32;
1836
+ const query_t flags = dnnl_query_flags;
1837
+ const query_t alg_kind = dnnl_query_alg_kind;
1838
+ const query_t alpha_f32 = dnnl_query_alpha_f32;
1839
+ const query_t beta_f32 = dnnl_query_beta_f32;
1840
+ const query_t axis_s32 = dnnl_query_axis_s32;
1841
+ const query_t local_size_s64 = dnnl_query_local_size_s64;
1842
+ const query_t k_f32 = dnnl_query_k_f32;
1843
+ const query_t p_f32 = dnnl_query_p_f32;
1844
+ const query_t factors = dnnl_query_factors;
1845
+ const query_t cell_kind = dnnl_query_cell_kind;
1846
+ const query_t direction = dnnl_query_direction;
1847
+ const query_t activation_kind = dnnl_query_activation_kind;
1848
+ const query_t kernel = dnnl_query_kernel;
1849
+ const query_t group_size_s64 = dnnl_query_group_size_s64;
1850
+
1851
+ const query_t some_md = dnnl_query_some_md;
1852
+ const query_t src_md = dnnl_query_src_md;
1853
+ const query_t diff_src_md = dnnl_query_diff_src_md;
1854
+ const query_t weights_md = dnnl_query_weights_md;
1855
+ const query_t diff_weights_md = dnnl_query_diff_weights_md;
1856
+ const query_t dst_md = dnnl_query_dst_md;
1857
+ const query_t diff_dst_md = dnnl_query_diff_dst_md;
1858
+ const query_t exec_arg_md = dnnl_query_exec_arg_md;
1859
+
1860
+ const query_t workspace_md = dnnl_query_workspace_md;
1861
+ const query_t scratchpad_md = dnnl_query_scratchpad_md;
1862
+
1863
+ const query_t ndims_s32 = dnnl_query_ndims_s32;
1864
+ const query_t dims = dnnl_query_dims;
1865
+ const query_t data_type = dnnl_query_data_type;
1866
+ const query_t submemory_offset_s64 = dnnl_query_submemory_offset_s64;
1867
+ const query_t padded_dims = dnnl_query_padded_dims;
1868
+ const query_t padded_offsets = dnnl_query_padded_offsets;
1869
+ const query_t format_kind = dnnl_query_format_kind;
1870
+ const query_t inner_nblks_s32 = dnnl_query_inner_nblks_s32;
1871
+ const query_t inner_blks = dnnl_query_inner_blks;
1872
+ const query_t inner_idxs = dnnl_query_inner_idxs;
1873
+
1874
+ #ifdef DNNL_EXPERIMENTAL_SPARSE
1875
+ const query_t sparse_encoding = dnnl_query_sparse_encoding;
1876
+ const query_t nnz_s64 = dnnl_query_nnz_s64;
1877
+ const query_t num_handles_s32 = dnnl_query_num_handles_s32;
1878
+ #else
1879
+ const query_t sparse_encoding = dnnl_query_undef;
1880
+ const query_t nnz_s64 = dnnl_query_undef;
1881
+ const query_t num_handles_s32 = dnnl_query_undef;
1882
+ #endif
1883
+
1884
+ // Internal only query kinds.
1885
+ const query_t internal_only_start = (query_t)(1 << 12);
1886
+ const query_t zero_pad_d = internal_only_start;
1887
+ const query_t preferred_gpu_threads_per_eu = (query_t)(internal_only_start + 1);
1888
+ } // namespace query
1889
+
1890
+ using rnn_direction_t = dnnl_rnn_direction_t;
1891
+
1892
+ using engine_t = dnnl_engine;
1893
+ using primitive_attr_t = dnnl_primitive_attr;
1894
+ using post_ops_t = dnnl_post_ops;
1895
+ using memory_desc_t = dnnl_memory_desc;
1896
+ using memory_t = dnnl_memory;
1897
+
1898
+ using stream_flags_t = dnnl_stream_flags_t;
1899
+ namespace stream_flags {
1900
+ const stream_flags_t in_order = dnnl_stream_in_order;
1901
+ const stream_flags_t out_of_order = dnnl_stream_out_of_order;
1902
+ const stream_flags_t default_flags = dnnl_stream_default_flags;
1903
+ #ifdef DNNL_EXPERIMENTAL_PROFILING
1904
+ const stream_flags_t profiling = dnnl_stream_profiling;
1905
+ #else
1906
+ const stream_flags_t profiling = static_cast<stream_flags_t>(1 << 2);
1907
+ #endif
1908
+ } // namespace stream_flags
1909
+ using stream_t = dnnl_stream;
1910
+
1911
+ struct memory_storage_t;
1912
+
1913
+ /* forward declaration of the internal primitive_desc types */
1914
+ struct batch_normalization_bwd_pd_t;
1915
+ struct batch_normalization_fwd_pd_t;
1916
+ struct batch_normalization_pd_t;
1917
+ struct binary_pd_t;
1918
+ struct concat_pd_t;
1919
+ struct convolution_bwd_data_pd_t;
1920
+ struct convolution_bwd_weights_pd_t;
1921
+ struct convolution_fwd_pd_t;
1922
+ struct convolution_pd_t;
1923
+ struct deconvolution_bwd_data_pd_t;
1924
+ struct deconvolution_bwd_weights_pd_t;
1925
+ struct deconvolution_fwd_pd_t;
1926
+ struct deconvolution_pd_t;
1927
+ struct eltwise_bwd_pd_t;
1928
+ struct eltwise_fwd_pd_t;
1929
+ struct eltwise_pd_t;
1930
+ struct gemm_pd_t;
1931
+ struct group_normalization_bwd_pd_t;
1932
+ struct group_normalization_fwd_pd_t;
1933
+ struct group_normalization_pd_t;
1934
+ struct inner_product_bwd_data_pd_t;
1935
+ struct inner_product_bwd_weights_pd_t;
1936
+ struct inner_product_fwd_pd_t;
1937
+ struct inner_product_pd_t;
1938
+ struct layer_normalization_bwd_pd_t;
1939
+ struct layer_normalization_fwd_pd_t;
1940
+ struct layer_normalization_pd_t;
1941
+ struct lrn_bwd_pd_t;
1942
+ struct lrn_fwd_pd_t;
1943
+ struct lrn_pd_t;
1944
+ struct matmul_pd_t;
1945
+ struct pooling_bwd_pd_t;
1946
+ struct pooling_fwd_pd_t;
1947
+ struct pooling_pd_t;
1948
+ struct prelu_pd_t;
1949
+ struct reduction_pd_t;
1950
+ struct reorder_pd_t;
1951
+ struct resampling_pd_t;
1952
+ struct rnn_bwd_pd_t;
1953
+ struct rnn_fwd_pd_t;
1954
+ struct rnn_pd_t;
1955
+ struct shuffle_pd_t;
1956
+ struct softmax_bwd_pd_t;
1957
+ struct softmax_fwd_pd_t;
1958
+ struct softmax_pd_t;
1959
+ struct sum_pd_t;
1960
+
1961
+ } // namespace impl
1962
+ } // namespace dnnl
1963
+
1964
+ #endif
1965
+
1966
+ // vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/cache_blob.hpp ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2021-2023 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef COMMON_CACHE_BLOB_HPP
18
+ #define COMMON_CACHE_BLOB_HPP
19
+
20
+ #include <cstdint>
21
+ #include <cstring>
22
+ #include <memory>
23
+
24
+ #include "c_types_map.hpp"
25
+
26
+ namespace dnnl {
27
+ namespace impl {
28
+
29
+ struct cache_blob_impl_t {
30
+ cache_blob_impl_t() = delete;
31
+ cache_blob_impl_t(uint8_t *data, size_t size)
32
+ : pos_(0), data_(data), size_(size) {}
33
+
34
+ status_t add_binary(const uint8_t *binary, size_t binary_size) {
35
+ if (!binary || binary_size == 0) { return status::invalid_arguments; }
36
+ if (pos_ + sizeof(binary_size) + binary_size > size_) {
37
+ return status::invalid_arguments;
38
+ }
39
+
40
+ std::memcpy(data_ + pos_, &binary_size, sizeof(binary_size));
41
+ pos_ += sizeof(binary_size);
42
+ std::memcpy(data_ + pos_, binary, binary_size);
43
+ pos_ += binary_size;
44
+ return status::success;
45
+ }
46
+
47
+ status_t get_binary(const uint8_t **binary, size_t *binary_size) {
48
+ if (!binary || !binary_size) { return status::invalid_arguments; }
49
+ if (pos_ >= size_) { return status::invalid_arguments; }
50
+ (*binary_size) = *reinterpret_cast<size_t *>(data_ + pos_);
51
+ pos_ += sizeof(*binary_size);
52
+ (*binary) = data_ + pos_;
53
+ pos_ += *binary_size;
54
+ return status::success;
55
+ }
56
+
57
+ status_t add_value(const uint8_t *value_ptr, size_t size) {
58
+ if (!value_ptr) { return status::invalid_arguments; }
59
+ if (pos_ + size > size_) { return status::invalid_arguments; }
60
+
61
+ std::memcpy(data_ + pos_, value_ptr, size);
62
+ pos_ += size;
63
+ return status::success;
64
+ }
65
+
66
+ status_t get_value(uint8_t *value_ptr, size_t size) {
67
+ if (!value_ptr) { return status::invalid_arguments; }
68
+ if (pos_ >= size_) { return status::invalid_arguments; }
69
+ std::memcpy(value_ptr, data_ + pos_, size);
70
+ pos_ += size;
71
+ return status::success;
72
+ }
73
+
74
+ private:
75
+ size_t pos_;
76
+ uint8_t *data_;
77
+ size_t size_;
78
+ };
79
+
80
+ struct cache_blob_t {
81
+ cache_blob_t() = default;
82
+ cache_blob_t(uint8_t *data, size_t size)
83
+ : impl_(std::make_shared<cache_blob_impl_t>(data, size)) {}
84
+
85
+ status_t add_binary(const uint8_t *binary, size_t binary_size) {
86
+ if (!impl_) return status::runtime_error;
87
+ return impl_->add_binary(binary, binary_size);
88
+ }
89
+
90
+ status_t get_binary(const uint8_t **binary, size_t *binary_size) const {
91
+ if (!impl_) return status::runtime_error;
92
+ return impl_->get_binary(binary, binary_size);
93
+ }
94
+
95
+ status_t add_value(const uint8_t *value_ptr, size_t size) {
96
+ if (!impl_) return status::runtime_error;
97
+ return impl_->add_value(value_ptr, size);
98
+ }
99
+
100
+ status_t get_value(uint8_t *value_ptr, size_t size) {
101
+ if (!impl_) return status::runtime_error;
102
+ return impl_->get_value(value_ptr, size);
103
+ }
104
+
105
+ explicit operator bool() const { return bool(impl_); }
106
+
107
+ private:
108
+ std::shared_ptr<cache_blob_impl_t> impl_;
109
+ };
110
+
111
+ } // namespace impl
112
+ } // namespace dnnl
113
+
114
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/cache_blob_id.hpp ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2021 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef COMMON_CACHE_BLOB_ID_HPP
18
+ #define COMMON_CACHE_BLOB_ID_HPP
19
+
20
+ #include <atomic>
21
+ #include <cstdint>
22
+ #include <mutex>
23
+ #include <vector>
24
+ #include <type_traits>
25
+
26
+ #include "common/serialization_stream.hpp"
27
+
28
+ namespace dnnl {
29
+ namespace impl {
30
+
31
+ struct primitive_desc_t;
32
+ struct cache_blob_id_t {
33
+ cache_blob_id_t() : is_initialized_ {false} {}
34
+ cache_blob_id_t(const cache_blob_id_t &other)
35
+ : sstream_(other.is_initialized_ ? other.sstream_
36
+ : serialization_stream_t {})
37
+ , is_initialized_(!sstream_.empty()) {}
38
+
39
+ cache_blob_id_t(cache_blob_id_t &&other) = delete;
40
+ cache_blob_id_t &operator=(const cache_blob_id_t &other) = delete;
41
+ cache_blob_id_t &operator=(cache_blob_id_t &&other) = delete;
42
+
43
+ const std::vector<uint8_t> &get(
44
+ const engine_t *engine, const primitive_desc_t *pd);
45
+
46
+ private:
47
+ serialization_stream_t sstream_;
48
+ std::once_flag flag_;
49
+
50
+ // The `std::once_flag` is neither copyable nor movable therefore we
51
+ // define a copy constructor that skips copying the `flag_`. To be able
52
+ // to carry over the `flag_`'s state from the `other` object we introduce
53
+ // an atomic `is_initialized_` flag.
54
+ std::atomic<bool> is_initialized_;
55
+ };
56
+
57
+ } // namespace impl
58
+ } // namespace dnnl
59
+
60
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/cache_utils.hpp ADDED
@@ -0,0 +1,412 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2023 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef COMMON_CACHE_UTILS_HPP
18
+ #define COMMON_CACHE_UTILS_HPP
19
+
20
+ #include <algorithm>
21
+ #include <future>
22
+ #include <memory>
23
+ #include <thread>
24
+ #include <unordered_map>
25
+
26
+ #include "oneapi/dnnl/dnnl_config.h"
27
+
28
+ #if DNNL_CPU_RUNTIME != DNNL_RUNTIME_NONE
29
+ #include "cpu/platform.hpp"
30
+ #else
31
+ #include <chrono>
32
+ #endif
33
+
34
+ #ifdef _WIN32
35
+ #include <windows.h>
36
+ #endif
37
+
38
+ #include "rw_mutex.hpp"
39
+
40
+ namespace dnnl {
41
+ namespace impl {
42
+ namespace utils {
43
+
44
+ // A key k and object o may share resources. This function moves the shared
45
+ // resources from a copy of object o into the key k. This is used to deduplicate
46
+ // data stored in cached objects.
47
+ template <typename K, typename O>
48
+ using key_merge_t = void (*)(const K &, const O &);
49
+
50
+ template <typename K, typename O, typename C,
51
+ key_merge_t<K, O> key_merge = nullptr>
52
+ struct cache_t {
53
+ using key_t = K;
54
+ using object_t = O;
55
+ using cache_object_t = C;
56
+ using value_t = std::shared_future<cache_object_t>;
57
+ using create_func_t = cache_object_t (&)(void *);
58
+
59
+ virtual ~cache_t() = default;
60
+
61
+ virtual status_t set_capacity(int capacity) = 0;
62
+ virtual int get_capacity() const = 0;
63
+
64
+ virtual int get_size() const = 0;
65
+
66
+ // Returns the cached value or cache_object_t() on a miss
67
+ virtual cache_object_t get(const key_t &key) = 0;
68
+
69
+ // Returns the cached object associated with key, the object generated by
70
+ // the create(create_context) function, or an empty object in case of
71
+ // errors. The function create() is only called on a cache miss. The
72
+ // returned object is added to the cache on a cache miss.
73
+ cache_object_t get_or_create(
74
+ const key_t &key, create_func_t create, void *create_context) {
75
+ std::promise<cache_object_t> p_promise;
76
+ // Try to get the shared future from the cache, if it's missing then a
77
+ // shared future with no shared state is returned and the passed shared
78
+ // future is added, otherwise a valid shared future is returned and no
79
+ // insertion is performed.
80
+ auto p_future = get_or_add(key, p_promise.get_future());
81
+
82
+ if (p_future.valid()) {
83
+ // The requested object is present in the cache or is being created
84
+ // by another thread.
85
+ return p_future.get();
86
+ } else {
87
+ // The requested object is NOT present in the cache therefore we
88
+ // have to create it and notify the waiting threads once the
89
+ // creation is done.
90
+ cache_object_t cv = create(create_context);
91
+ if (cv.status != status::success) {
92
+ // Communicate an error.
93
+ p_promise.set_value({nullptr, cv.status});
94
+ // Remove the shared future from the cache because it's
95
+ // invalidated. An invalidated shared future is the one that
96
+ // stores a nullptr.
97
+ remove_if_invalidated(key);
98
+ return {nullptr, cv.status};
99
+ } else {
100
+ // Store the created object in the shared future and notify the
101
+ // waiting threads.
102
+ p_promise.set_value(cv);
103
+
104
+ // The key_t may contains pointers that should reside within the
105
+ // stored object. Therefore the pointers in the key may need
106
+ // updated.
107
+ update_entry(key, cv.get_value());
108
+ return cv;
109
+ }
110
+ }
111
+ }
112
+
113
+ protected:
114
+ virtual value_t get_or_add(const key_t &key, const value_t &value) = 0;
115
+ virtual void remove_if_invalidated(const key_t &key) = 0;
116
+ virtual void update_entry(const key_t &key, const object_t &p) = 0;
117
+ static utils::rw_mutex_t &rw_mutex() {
118
+ static utils::rw_mutex_t mutex;
119
+ return mutex;
120
+ }
121
+ };
122
+
123
+ // The cache uses LRU replacement policy
124
+ template <typename K, typename O, typename C,
125
+ key_merge_t<K, O> key_merge = nullptr>
126
+ struct lru_cache_t final : public cache_t<K, O, C, key_merge> {
127
+ using lru_base_t = cache_t<K, O, C, key_merge>;
128
+ using key_t = typename lru_base_t::key_t;
129
+ using object_t = typename lru_base_t::object_t;
130
+ using cache_object_t = typename lru_base_t::cache_object_t;
131
+ using value_t = typename lru_base_t::value_t;
132
+ lru_cache_t(int capacity) : capacity_(capacity) {}
133
+
134
+ ~lru_cache_t() override {
135
+ if (cache_mapper().empty()) return;
136
+
137
+ #if defined(_WIN32) \
138
+ && (defined(DNNL_WITH_SYCL) || DNNL_GPU_RUNTIME == DNNL_RUNTIME_OCL)
139
+ // The ntdll.dll library is located in system32, therefore setting
140
+ // additional environment is not required.
141
+ HMODULE handle = LoadLibraryExA(
142
+ "ntdll.dll", nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32);
143
+ if (!handle) {
144
+ release_cache();
145
+ return;
146
+ }
147
+
148
+ // RtlDllShutdownInProgress returns TRUE if the whole process terminates
149
+ // and FALSE if DLL is being unloaded dynamically or if it’s called from
150
+ // an executable.
151
+ auto f = reinterpret_cast<BOOLEAN (*)(void)>(
152
+ GetProcAddress(handle, "RtlDllShutdownInProgress"));
153
+ if (!f) {
154
+ auto ret = FreeLibrary(handle);
155
+ assert(ret);
156
+ MAYBE_UNUSED(ret);
157
+ release_cache();
158
+ return;
159
+ }
160
+
161
+ bool is_process_termination_in_progress = f();
162
+
163
+ auto ret = FreeLibrary(handle);
164
+ assert(ret);
165
+ MAYBE_UNUSED(ret);
166
+
167
+ if (is_process_termination_in_progress) {
168
+ // The whole process is being terminated hence destroying content of
169
+ // the cache cannot be done safely. However we can check all entries
170
+ // and remove those that are not affected e.g. native CPU.
171
+ for (auto it = cache_mapper().begin();
172
+ it != cache_mapper().end();) {
173
+ if (!it->first.has_runtime_dependencies()) {
174
+ it = cache_mapper().erase(it);
175
+ } else {
176
+ ++it;
177
+ }
178
+ }
179
+ release_cache();
180
+ } else {
181
+ // Three scenarios possible:
182
+ // 1. oneDNN is being dynamically unloaded
183
+ // 2. Another dynamic library that contains statically linked
184
+ // oneDNN is dynamically unloaded
185
+ // 3. oneDNN is statically linked in an executable which is done
186
+ // and now the process terminates In all these scenarios
187
+ // content of the cache can be safely destroyed.
188
+ }
189
+ #else
190
+ // Always destroy the content of the cache for non-Windows OSes, and
191
+ // non-sycl and non-ocl runtimes because there is no a problem with
192
+ // library unloading order in such cases.
193
+ #endif
194
+ }
195
+
196
+ cache_object_t get(const key_t &key) override {
197
+ value_t e;
198
+ {
199
+ utils::lock_read_t lock_r(this->rw_mutex());
200
+ if (capacity_ == 0) { return cache_object_t(); }
201
+ e = get_future(key);
202
+ }
203
+
204
+ if (e.valid()) return e.get();
205
+ return cache_object_t();
206
+ }
207
+
208
+ int get_capacity() const override {
209
+ utils::lock_read_t lock_r(this->rw_mutex());
210
+ return capacity_;
211
+ };
212
+
213
+ status_t set_capacity(int capacity) override {
214
+ utils::lock_write_t lock_w(this->rw_mutex());
215
+ capacity_ = capacity;
216
+ // Check if number of entries exceeds the new capacity
217
+ if (get_size_no_lock() > capacity_) {
218
+ // Evict excess entries
219
+ int n_excess_entries = get_size_no_lock() - capacity_;
220
+ evict(n_excess_entries);
221
+ }
222
+ return status::success;
223
+ }
224
+ void set_capacity_without_clearing(int capacity) {
225
+ utils::lock_write_t lock_w(this->rw_mutex());
226
+ capacity_ = capacity;
227
+ }
228
+
229
+ int get_size() const override {
230
+ utils::lock_read_t lock_r(this->rw_mutex());
231
+ return get_size_no_lock();
232
+ }
233
+
234
+ protected:
235
+ int get_size_no_lock() const { return (int)cache_mapper().size(); }
236
+
237
+ value_t get_or_add(const key_t &key, const value_t &value) override {
238
+ {
239
+ // 1. Section with shared access (read lock)
240
+ utils::lock_read_t lock_r(this->rw_mutex());
241
+ // Check if the cache is enabled.
242
+ if (capacity_ == 0) { return value_t(); }
243
+ // Check if the requested entry is present in the cache (likely
244
+ // cache_hit)
245
+ auto e = get_future(key);
246
+ if (e.valid()) { return e; }
247
+ }
248
+
249
+ utils::lock_write_t lock_w(this->rw_mutex());
250
+ // 2. Section with exclusive access (write lock).
251
+ // In a multithreaded scenario, in the context of one thread the cache
252
+ // may have changed by another thread between releasing the read lock
253
+ // and acquiring the write lock (a.k.a. ABA problem), therefore
254
+ // additional checks have to be performed for correctness. Double check
255
+ // the capacity due to possible race condition
256
+ if (capacity_ == 0) { return value_t(); }
257
+
258
+ // Double check if the requested entry is present in the cache (unlikely
259
+ // cache_hit).
260
+ auto e = get_future(key);
261
+ if (!e.valid()) {
262
+ // If the entry is missing in the cache then add it (cache_miss)
263
+ add(key, value);
264
+ }
265
+ return e;
266
+ }
267
+
268
+ void remove_if_invalidated(const key_t &key) override {
269
+ utils::lock_write_t lock_w(this->rw_mutex());
270
+
271
+ if (capacity_ == 0) { return; }
272
+
273
+ auto it = cache_mapper().find(key);
274
+ // The entry has been already evicted at this point
275
+ if (it == cache_mapper().end()) { return; }
276
+
277
+ const auto &value = it->second.value_;
278
+ // If the entry is not invalidated
279
+ if (!value.get().is_empty()) { return; }
280
+
281
+ // Remove the invalidated entry
282
+ cache_mapper().erase(it);
283
+ }
284
+
285
+ private:
286
+ static size_t get_timestamp() {
287
+ #if DNNL_CPU_RUNTIME != DNNL_RUNTIME_NONE
288
+ return cpu::platform::get_timestamp();
289
+ #else
290
+ return std::chrono::steady_clock::now().time_since_epoch().count();
291
+ #endif
292
+ }
293
+
294
+ void update_entry(const key_t &key, const object_t &p) override {
295
+ // Cast to void as compilers may warn about comparing compile time
296
+ // constant function pointers with nullptr, as that is often not an
297
+ // intended behavior
298
+ if ((void *)key_merge == nullptr) return;
299
+
300
+ utils::lock_write_t lock_w(this->rw_mutex());
301
+
302
+ if (capacity_ == 0) { return; }
303
+
304
+ // There is nothing to do in two cases:
305
+ // 1. The requested entry is not in the cache because it has been evicted
306
+ // by another thread
307
+ // 2. After the requested entry had been evicted it was inserted again
308
+ // by another thread
309
+ auto it = cache_mapper().find(key);
310
+ if (it == cache_mapper().end()
311
+ || it->first.thread_id() != key.thread_id()) {
312
+ return;
313
+ }
314
+
315
+ key_merge(it->first, p);
316
+ }
317
+
318
+ void evict(int n) {
319
+ using v_t =
320
+ typename std::unordered_map<key_t, timed_entry_t>::value_type;
321
+
322
+ if (n == capacity_) {
323
+ cache_mapper().clear();
324
+ return;
325
+ }
326
+
327
+ for (int e = 0; e < n; e++) {
328
+ // Find the smallest timestamp
329
+ // TODO: revisit the eviction algorithm due to O(n) complexity, E.g.
330
+ // maybe evict multiple entries at once.
331
+ auto it = std::min_element(cache_mapper().begin(),
332
+ cache_mapper().end(),
333
+ [&](const v_t &left, const v_t &right) {
334
+ // By default, load() and operator T use sequentially
335
+ // consistent memory ordering, which enforces writing
336
+ // the timestamps into registers in the same exact order
337
+ // they are read from the CPU cache line. Since eviction
338
+ // is performed under a write lock, this order is not
339
+ // important, therefore we can safely use the weakest
340
+ // memory ordering (relaxed). This brings about a few
341
+ // microseconds performance improvement for default
342
+ // cache capacity.
343
+ return left.second.timestamp_.load(
344
+ std::memory_order_relaxed)
345
+ < right.second.timestamp_.load(
346
+ std::memory_order_relaxed);
347
+ });
348
+ auto res = cache_mapper().erase(it->first);
349
+ MAYBE_UNUSED(res);
350
+ assert(res);
351
+ }
352
+ }
353
+ void add(const key_t &key, const value_t &value) {
354
+ // std::list::size() method has linear complexity. Check the cache size
355
+ // using std::unordered_map::size();
356
+ if (get_size_no_lock() == capacity_) {
357
+ // Evict the least recently used entry
358
+ evict(1);
359
+ }
360
+
361
+ size_t timestamp = get_timestamp();
362
+
363
+ auto res = cache_mapper().emplace(std::piecewise_construct,
364
+ std::forward_as_tuple(key),
365
+ std::forward_as_tuple(value, timestamp));
366
+ MAYBE_UNUSED(res);
367
+ assert(res.second);
368
+ }
369
+ value_t get_future(const key_t &key) {
370
+ auto it = cache_mapper().find(key);
371
+ if (it == cache_mapper().end()) return value_t();
372
+
373
+ size_t timestamp = get_timestamp();
374
+ it->second.timestamp_.store(timestamp);
375
+ // Return the entry
376
+ return it->second.value_;
377
+ }
378
+
379
+ int capacity_;
380
+ struct timed_entry_t {
381
+ value_t value_;
382
+ std::atomic<size_t> timestamp_;
383
+ timed_entry_t(const value_t &value, size_t timestamp)
384
+ : value_(value), timestamp_(timestamp) {}
385
+ };
386
+
387
+ std::unordered_map<key_t, timed_entry_t> &cache_mapper() {
388
+ return cache_mapper_;
389
+ }
390
+
391
+ const std::unordered_map<key_t, timed_entry_t> &cache_mapper() const {
392
+ return cache_mapper_;
393
+ }
394
+
395
+ // Leaks cached resources. Used to avoid issues with calling destructors
396
+ // allocated by an already unloaded dynamic library.
397
+ void release_cache() {
398
+ auto t = utils::make_unique<std::unordered_map<key_t, timed_entry_t>>();
399
+ std::swap(*t, cache_mapper_);
400
+ t.release();
401
+ }
402
+ // Each entry in the cache has a corresponding key and timestamp. NOTE:
403
+ // pairs that contain atomics cannot be stored in an unordered_map *as an
404
+ // element*, since it invokes the copy constructor of std::atomic, which is
405
+ // deleted.
406
+ std::unordered_map<key_t, timed_entry_t> cache_mapper_;
407
+ };
408
+
409
+ } // namespace utils
410
+ } // namespace impl
411
+ } // namespace dnnl
412
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/compiler_workarounds.hpp ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2020-2023 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef COMPILER_WORKAROUNDS_HPP
18
+ #define COMPILER_WORKAROUNDS_HPP
19
+
20
+ #if (defined __GNUC__) && (!defined(__INTEL_COMPILER)) \
21
+ && (!defined(__INTEL_LLVM_COMPILER)) && (!defined(__clang__major__))
22
+ #define NEED_GCC_WA_CHECK 1
23
+ #endif
24
+
25
+ // Workaround 01: clang.
26
+ //
27
+ // Clang has an issue [1] with `#pragma omp simd` that might lead to segfault.
28
+ // The essential conditions are:
29
+ // 1. Optimization level is O1 or O2. Surprisingly, O3 is fine.
30
+ // 2. Conditional check inside the vectorization loop.
31
+ // Since there is no reliable way to determine the first condition, we disable
32
+ // vectorization for clang altogether for now.
33
+ //
34
+ // [1] https://bugs.llvm.org/show_bug.cgi?id=48104
35
+ #if (defined __clang_major__) && (__clang_major__ >= 6)
36
+ #define CLANG_WA_01_SAFE_TO_USE_OMP_SIMD 0
37
+ #else
38
+ #define CLANG_WA_01_SAFE_TO_USE_OMP_SIMD 1
39
+ #endif
40
+
41
+ // Workaround 02: clang.
42
+ //
43
+ // Clang 6+ generates incorrect code with OMP_SIMD in some particular cases.
44
+ // Unlike CLANG_WA_01_SAFE_TO_USE_OMP_SIMD, the issue happens even with -O3.
45
+ #if (defined __clang_major__) && (__clang_major__ >= 6)
46
+ #define CLANG_WA_02_SAFE_TO_USE_OMP_SIMD 0
47
+ #else
48
+ #define CLANG_WA_02_SAFE_TO_USE_OMP_SIMD 1
49
+ #endif
50
+
51
+ // Workaround 03: GCC
52
+ //
53
+ // For very large functions with too much control flow (i.e. if, switch, goto
54
+ // statements), GCC 7 may struggle to perform optimizations based on tree
55
+ // dominator (i.e. -ftree-dominator-opts, which is enabled with O1), thereby
56
+ // producing an internal compiler error (ICE). Specifically, it seems that the
57
+ // jump threading optimization is the culprit, which cannot be disabled on its
58
+ // own. There is no reliable way to reproduce the ICE, therefore it is not clear
59
+ // which __GCC_MINOR__ version fixes issue.
60
+ #if (defined NEED_GCC_WA_CHECK) && (__GNUC__ == 7)
61
+ #define GCC_WA_NO_TREE_DOMINATOR_OPTS 1
62
+ #else
63
+ #define GCC_WA_NO_TREE_DOMINATOR_OPTS 0
64
+ #endif
65
+
66
+ // Workaround 04: GCC
67
+ //
68
+ // GCC 10 & 11 && 12 (at least versiona 10.1, 10.3 & 11.1, 12.2) report false positives
69
+ // in xbyak when -Warray-bounds build setting is on
70
+ #if (defined NEED_GCC_WA_CHECK) && (__GNUC__ >= 10)
71
+ #pragma GCC diagnostic ignored "-Warray-bounds"
72
+ #endif
73
+
74
+ // Workaround 05: GCC
75
+ //
76
+ // NOTE: inside lambda, type cast variables captured by reference using
77
+ // either c-like "(type)var" or functional "type(var)" notation in order
78
+ // to avoid gcc7 bug with c++14 standard
79
+ // (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83204).
80
+ #if (defined NEED_GCC_WA_CHECK) && (__GNUC__ <= 7)
81
+ #define GCC_WA_LAMBDA_C_CAST
82
+ #endif
83
+
84
+ #endif // COMPILER_WORKAROUNDS_HPP
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/convolution_pd.hpp ADDED
@@ -0,0 +1,473 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2016-2023 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef COMMON_CONVOLUTION_PD_HPP
18
+ #define COMMON_CONVOLUTION_PD_HPP
19
+
20
+ #include "oneapi/dnnl/dnnl.h"
21
+
22
+ #include "c_types_map.hpp"
23
+ #include "primitive_desc.hpp"
24
+ #include "utils.hpp"
25
+
26
+ namespace dnnl {
27
+ namespace impl {
28
+
29
+ status_t conv_desc_init(convolution_desc_t *conv_desc, prop_kind_t prop_kind,
30
+ alg_kind_t alg_kind, const memory_desc_t *src_desc,
31
+ const memory_desc_t *weights_desc, const memory_desc_t *bias_desc,
32
+ const memory_desc_t *dst_desc, const dims_t strides,
33
+ const dims_t dilates, const dims_t padding_l, const dims_t padding_r);
34
+
35
+ memory_desc_t *conv_prop_invariant_src_d(convolution_desc_t *desc);
36
+ memory_desc_t *conv_prop_invariant_wei_d(convolution_desc_t *desc);
37
+ memory_desc_t *conv_prop_invariant_bia_d(convolution_desc_t *desc);
38
+ memory_desc_t *conv_prop_invariant_dst_d(convolution_desc_t *desc);
39
+ const memory_desc_t *conv_prop_invariant_src_d(const convolution_desc_t *desc);
40
+ const memory_desc_t *conv_prop_invariant_wei_d(const convolution_desc_t *desc);
41
+ const memory_desc_t *conv_prop_invariant_bia_d(const convolution_desc_t *desc);
42
+ const memory_desc_t *conv_prop_invariant_dst_d(const convolution_desc_t *desc);
43
+
44
+ struct convolution_fwd_pd_t;
45
+
46
+ struct convolution_pd_t : public primitive_desc_t {
47
+ static constexpr auto base_pkind = primitive_kind::convolution;
48
+
49
+ const convolution_desc_t *desc() const { return &desc_; }
50
+ const op_desc_t *op_desc() const override {
51
+ return reinterpret_cast<const op_desc_t *>(this->desc());
52
+ }
53
+
54
+ status_t query(query_t what, int idx, void *result) const override {
55
+ switch (what) {
56
+ case query::prop_kind:
57
+ *(prop_kind_t *)result = desc()->prop_kind;
58
+ break;
59
+ case query::alg_kind:
60
+ *(alg_kind_t *)result = desc()->alg_kind;
61
+ break;
62
+ case query::strides:
63
+ *(const dims_t **)result = &desc()->strides;
64
+ break;
65
+ case query::dilations:
66
+ *(const dims_t **)result = &desc()->dilates;
67
+ break;
68
+ case query::padding_l:
69
+ *(const dims_t **)result = &desc()->padding[0];
70
+ break;
71
+ case query::padding_r:
72
+ *(const dims_t **)result = &desc()->padding[1];
73
+ break;
74
+ default: return primitive_desc_t::query(what, idx, result);
75
+ }
76
+ return status::success;
77
+ }
78
+
79
+ /* common conv aux functions */
80
+
81
+ dim_t MB() const { return invariant_src_md()->dims[0]; }
82
+
83
+ dim_t IC() const { return invariant_src_md()->dims[1]; }
84
+ dim_t OC() const { return invariant_dst_md()->dims[1]; }
85
+ dim_t G() const { return with_groups() ? invariant_wei_md()->dims[0] : 1; }
86
+
87
+ dim_t ID() const {
88
+ return ndims() >= 5 ? invariant_src_md()->dims[ndims() - 3] : 1;
89
+ }
90
+ dim_t IH() const {
91
+ return ndims() >= 4 ? invariant_src_md()->dims[ndims() - 2] : 1;
92
+ }
93
+ dim_t IW() const { return invariant_src_md()->dims[ndims() - 1]; }
94
+
95
+ dim_t OD() const {
96
+ return ndims() >= 5 ? invariant_dst_md()->dims[ndims() - 3] : 1;
97
+ }
98
+ dim_t OH() const {
99
+ return ndims() >= 4 ? invariant_dst_md()->dims[ndims() - 2] : 1;
100
+ }
101
+ dim_t OW() const { return invariant_dst_md()->dims[ndims() - 1]; }
102
+
103
+ dim_t KD() const {
104
+ return ndims() >= 5
105
+ ? invariant_wei_md()->dims[ndims() + with_groups() - 3]
106
+ : 1;
107
+ }
108
+ dim_t KH() const {
109
+ return ndims() >= 4
110
+ ? invariant_wei_md()->dims[ndims() + with_groups() - 2]
111
+ : 1;
112
+ }
113
+ dim_t KW() const {
114
+ return invariant_wei_md()->dims[ndims() + with_groups() - 1];
115
+ }
116
+
117
+ dim_t KSD() const { return ndims() >= 5 ? desc_.strides[ndims() - 5] : 1; }
118
+ dim_t KSH() const { return ndims() >= 4 ? desc_.strides[ndims() - 4] : 1; }
119
+ dim_t KSW() const { return desc_.strides[ndims() - 3]; }
120
+
121
+ dim_t KDD() const { return ndims() >= 5 ? desc_.dilates[ndims() - 5] : 0; }
122
+ dim_t KDH() const { return ndims() >= 4 ? desc_.dilates[ndims() - 4] : 1; }
123
+ dim_t KDW() const { return desc_.dilates[ndims() - 3]; }
124
+
125
+ dim_t padFront() const {
126
+ return ndims() >= 5 ? desc_.padding[0][ndims() - 5] : 0;
127
+ }
128
+ dim_t padBack() const {
129
+ return ndims() >= 5 ? desc_.padding[1][ndims() - 5] : 0;
130
+ }
131
+ dim_t padT() const {
132
+ return ndims() >= 4 ? desc_.padding[0][ndims() - 4] : 0;
133
+ }
134
+ dim_t padB() const {
135
+ return ndims() >= 4 ? desc_.padding[1][ndims() - 4] : 0;
136
+ }
137
+ dim_t padL() const { return desc_.padding[0][ndims() - 3]; }
138
+ dim_t padR() const { return desc_.padding[1][ndims() - 3]; }
139
+
140
+ int ndims() const { return invariant_src_md()->ndims; }
141
+
142
+ bool with_bias() const {
143
+ return !memory_desc_wrapper(invariant_bia_md()).is_zero();
144
+ }
145
+ bool with_groups() const {
146
+ return invariant_wei_md()->ndims == ndims() + 1;
147
+ }
148
+
149
+ bool is_fwd() const {
150
+ return utils::one_of(desc_.prop_kind, prop_kind::forward_training,
151
+ prop_kind::forward_inference);
152
+ }
153
+
154
+ bool is_bwd_d() const {
155
+ return desc_.prop_kind == prop_kind::backward_data;
156
+ }
157
+
158
+ bool is_bwd_w() const {
159
+ return desc_.prop_kind == prop_kind::backward_weights;
160
+ }
161
+
162
+ bool has_zero_dim_memory() const {
163
+ const auto s_d = memory_desc_wrapper(*invariant_src_md());
164
+ const auto d_d = memory_desc_wrapper(*invariant_dst_md());
165
+ return s_d.has_zero_dim() || d_d.has_zero_dim();
166
+ }
167
+
168
+ protected:
169
+ convolution_desc_t desc_;
170
+ const convolution_fwd_pd_t *hint_fwd_pd_;
171
+
172
+ convolution_pd_t(const convolution_desc_t *adesc,
173
+ const primitive_attr_t *attr,
174
+ const convolution_fwd_pd_t *hint_fwd_pd)
175
+ : primitive_desc_t(attr, base_pkind)
176
+ , desc_(*adesc)
177
+ , hint_fwd_pd_(hint_fwd_pd) {}
178
+
179
+ bool set_default_formats_common_template(memory_desc_t &src_md,
180
+ format_tag_t src_tag, memory_desc_t &wei_md, format_tag_t wei_tag,
181
+ memory_desc_t &dst_md, format_tag_t dst_tag,
182
+ memory_desc_t &bia_md) {
183
+ using namespace format_tag;
184
+
185
+ #define IS_OK(f) \
186
+ do { \
187
+ if ((f) != status::success) return false; \
188
+ } while (0)
189
+ if (src_md.format_kind == format_kind::any
190
+ && !utils::one_of(src_tag, any, undef))
191
+ IS_OK(memory_desc_init_by_tag(src_md, src_tag));
192
+ if (dst_md.format_kind == format_kind::any
193
+ && !utils::one_of(dst_tag, any, undef))
194
+ IS_OK(memory_desc_init_by_tag(dst_md, dst_tag));
195
+ if (wei_md.format_kind == format_kind::any
196
+ && !utils::one_of(wei_tag, any, undef))
197
+ IS_OK(memory_desc_init_by_tag(wei_md, wei_tag));
198
+ if (with_bias() && bia_md.format_kind == format_kind::any)
199
+ IS_OK(memory_desc_init_by_tag(bia_md, x));
200
+ #undef IS_OK
201
+
202
+ return true;
203
+ }
204
+
205
+ bool set_default_alg_kind(alg_kind_t alg_kind) {
206
+ assert(utils::one_of(alg_kind, alg_kind::convolution_direct,
207
+ alg_kind::convolution_winograd));
208
+ if (desc_.alg_kind == alg_kind::convolution_auto)
209
+ desc_.alg_kind = alg_kind;
210
+ return desc_.alg_kind == alg_kind;
211
+ }
212
+
213
+ bool expect_data_types(data_type_t src_dt, data_type_t wei_dt,
214
+ data_type_t bia_dt, data_type_t dst_dt, data_type_t acc_dt) const {
215
+ bool ok = true
216
+ && (src_dt == data_type::undef
217
+ || invariant_src_md()->data_type == src_dt)
218
+ && (wei_dt == data_type::undef
219
+ || invariant_wei_md()->data_type == wei_dt)
220
+ && (dst_dt == data_type::undef
221
+ || invariant_dst_md()->data_type == dst_dt)
222
+ && (acc_dt == data_type::undef
223
+ || desc_.accum_data_type == acc_dt);
224
+ if (with_bias() && bia_dt != data_type::undef)
225
+ ok = ok && invariant_bia_md()->data_type == bia_dt;
226
+ return ok;
227
+ }
228
+
229
+ bool attr_scales_ok(const std::vector<int> &supported_args
230
+ = {DNNL_ARG_SRC, DNNL_ARG_WEIGHTS, DNNL_ARG_DST}) const {
231
+ bool ok = attr()->scales_.has_default_values(supported_args);
232
+ for (int arg : supported_args) {
233
+ const auto &mask = attr()->scales_.get(arg).mask_;
234
+ if (arg == DNNL_ARG_WEIGHTS)
235
+ ok = ok && (mask == 0 || mask == (with_groups() ? 3 : 1));
236
+ else
237
+ ok = ok && (mask == 0);
238
+ }
239
+ return ok;
240
+ }
241
+ };
242
+
243
+ struct convolution_fwd_pd_t : public convolution_pd_t {
244
+ typedef convolution_fwd_pd_t base_class;
245
+ typedef convolution_fwd_pd_t hint_class;
246
+
247
+ arg_usage_t arg_usage(int arg) const override {
248
+ if (utils::one_of(arg, DNNL_ARG_SRC, DNNL_ARG_WEIGHTS))
249
+ return arg_usage_t::input;
250
+
251
+ if (arg == DNNL_ARG_BIAS && with_bias()) return arg_usage_t::input;
252
+
253
+ if (arg == DNNL_ARG_DST) return arg_usage_t::output;
254
+
255
+ return primitive_desc_t::arg_usage(arg);
256
+ }
257
+
258
+ const memory_desc_t *arg_md(
259
+ int arg, bool user_input = false) const override {
260
+ switch (arg) {
261
+ case DNNL_ARG_SRC: return src_md(0);
262
+ case DNNL_ARG_WEIGHTS: return weights_md(0);
263
+ case DNNL_ARG_BIAS: return weights_md(1);
264
+ case DNNL_ARG_DST: return dst_md(0, user_input);
265
+ default: return convolution_pd_t::arg_md(arg);
266
+ }
267
+ }
268
+
269
+ const memory_desc_t *src_md(
270
+ int index = 0, bool user_input = false) const override {
271
+ if (index == 0) return user_input ? &desc()->src_desc : &src_md_;
272
+ return &glob_zero_md;
273
+ }
274
+ const memory_desc_t *dst_md(
275
+ int index = 0, bool user_input = false) const override {
276
+ if (index == 0) return user_input ? &desc()->dst_desc : &dst_md_;
277
+ return &glob_zero_md;
278
+ }
279
+ const memory_desc_t *weights_md(
280
+ int index = 0, bool user_input = false) const override {
281
+ if (index == 0)
282
+ return user_input ? &desc()->weights_desc : &weights_md_;
283
+ if (index == 1) return user_input ? &desc()->bias_desc : &bias_md_;
284
+ return &glob_zero_md;
285
+ }
286
+
287
+ int n_inputs() const override {
288
+ return 2 + with_bias() + attr_post_op_dw_inputs() + n_binary_po_inputs()
289
+ + n_prelu_po_inputs();
290
+ }
291
+
292
+ int n_outputs() const override { return 1; }
293
+
294
+ protected:
295
+ memory_desc_t src_md_;
296
+ memory_desc_t weights_md_;
297
+ memory_desc_t bias_md_;
298
+ memory_desc_t dst_md_;
299
+
300
+ convolution_fwd_pd_t(const convolution_desc_t *adesc,
301
+ const primitive_attr_t *attr,
302
+ const convolution_fwd_pd_t *hint_fwd_pd)
303
+ : convolution_pd_t(adesc, attr, hint_fwd_pd)
304
+ , src_md_(desc_.src_desc)
305
+ , weights_md_(desc_.weights_desc)
306
+ , bias_md_(desc_.bias_desc)
307
+ , dst_md_(desc_.dst_desc) {}
308
+
309
+ bool set_default_formats_common(
310
+ format_tag_t src_tag, format_tag_t wei_tag, format_tag_t dst_tag) {
311
+ return set_default_formats_common_template(src_md_, src_tag,
312
+ weights_md_, wei_tag, dst_md_, dst_tag, bias_md_);
313
+ }
314
+
315
+ int attr_post_op_dw_inputs() const {
316
+ const auto &po = attr_.post_ops_;
317
+ int conv = po.find(primitive_kind::convolution);
318
+ if (conv == -1) return 0;
319
+ return po.entry_[conv].depthwise_conv.bias_dt == data_type::undef ? 1
320
+ : 2;
321
+ }
322
+ };
323
+
324
+ struct convolution_bwd_data_pd_t : public convolution_pd_t {
325
+ typedef convolution_bwd_data_pd_t base_class;
326
+ typedef convolution_fwd_pd_t hint_class;
327
+
328
+ arg_usage_t arg_usage(int arg) const override {
329
+ if (utils::one_of(arg, DNNL_ARG_WEIGHTS, DNNL_ARG_DIFF_DST))
330
+ return arg_usage_t::input;
331
+
332
+ if (arg == DNNL_ARG_DIFF_SRC) return arg_usage_t::output;
333
+
334
+ return primitive_desc_t::arg_usage(arg);
335
+ }
336
+
337
+ const memory_desc_t *arg_md(
338
+ int arg, bool user_input = false) const override {
339
+ switch (arg) {
340
+ case DNNL_ARG_DIFF_SRC: return diff_src_md(0);
341
+ case DNNL_ARG_WEIGHTS: return weights_md(0);
342
+ case DNNL_ARG_BIAS: return weights_md(1);
343
+ case DNNL_ARG_DIFF_DST: return diff_dst_md(0, user_input);
344
+ default: return convolution_pd_t::arg_md(arg);
345
+ }
346
+ }
347
+
348
+ const memory_desc_t *diff_src_md(
349
+ int index = 0, bool user_input = false) const override {
350
+ if (index == 0)
351
+ return user_input ? &desc()->diff_src_desc : &diff_src_md_;
352
+ return &glob_zero_md;
353
+ }
354
+ const memory_desc_t *diff_dst_md(
355
+ int index = 0, bool user_input = false) const override {
356
+ if (index == 0)
357
+ return user_input ? &desc()->diff_dst_desc : &diff_dst_md_;
358
+ return &glob_zero_md;
359
+ }
360
+ const memory_desc_t *weights_md(
361
+ int index = 0, bool user_input = false) const override {
362
+ if (index == 0)
363
+ return user_input ? &desc()->weights_desc : &weights_md_;
364
+ if (index == 1) return user_input ? &desc()->bias_desc : &bias_md_;
365
+ return &glob_zero_md;
366
+ }
367
+
368
+ int n_inputs() const override { return 2 + with_bias(); }
369
+ int n_outputs() const override { return 1; }
370
+
371
+ virtual bool support_bias() const { return false; }
372
+
373
+ protected:
374
+ memory_desc_t diff_src_md_;
375
+ memory_desc_t weights_md_;
376
+ memory_desc_t bias_md_;
377
+ memory_desc_t diff_dst_md_;
378
+
379
+ convolution_bwd_data_pd_t(const convolution_desc_t *adesc,
380
+ const primitive_attr_t *attr,
381
+ const convolution_fwd_pd_t *hint_fwd_pd)
382
+ : convolution_pd_t(adesc, attr, hint_fwd_pd)
383
+ , diff_src_md_(desc_.diff_src_desc)
384
+ , weights_md_(desc_.weights_desc)
385
+ , bias_md_(desc_.bias_desc)
386
+ , diff_dst_md_(desc_.diff_dst_desc) {}
387
+
388
+ bool set_default_formats_common(format_tag_t diff_src_tag,
389
+ format_tag_t wei_tag, format_tag_t diff_dst_tag) {
390
+ return set_default_formats_common_template(diff_src_md_, diff_src_tag,
391
+ weights_md_, wei_tag, diff_dst_md_, diff_dst_tag, bias_md_);
392
+ }
393
+ };
394
+
395
+ struct convolution_bwd_weights_pd_t : public convolution_pd_t {
396
+ typedef convolution_bwd_weights_pd_t base_class;
397
+ typedef convolution_fwd_pd_t hint_class;
398
+
399
+ convolution_bwd_weights_pd_t(const convolution_desc_t *adesc,
400
+ const primitive_attr_t *attr,
401
+ const convolution_fwd_pd_t *hint_fwd_pd)
402
+ : convolution_pd_t(adesc, attr, hint_fwd_pd)
403
+ , src_md_(desc_.src_desc)
404
+ , diff_weights_md_(desc_.diff_weights_desc)
405
+ , diff_bias_md_(desc_.diff_bias_desc)
406
+ , diff_dst_md_(desc_.diff_dst_desc) {}
407
+
408
+ arg_usage_t arg_usage(int arg) const override {
409
+ if (utils::one_of(arg, DNNL_ARG_SRC, DNNL_ARG_DIFF_DST))
410
+ return arg_usage_t::input;
411
+
412
+ if (arg == DNNL_ARG_DIFF_WEIGHTS) return arg_usage_t::output;
413
+
414
+ if (arg == DNNL_ARG_DIFF_BIAS && with_bias())
415
+ return arg_usage_t::output;
416
+
417
+ return primitive_desc_t::arg_usage(arg);
418
+ }
419
+
420
+ const memory_desc_t *arg_md(
421
+ int arg, bool user_input = false) const override {
422
+ switch (arg) {
423
+ case DNNL_ARG_SRC: return src_md(0);
424
+ case DNNL_ARG_DIFF_WEIGHTS: return diff_weights_md(0);
425
+ case DNNL_ARG_DIFF_BIAS: return diff_weights_md(1);
426
+ case DNNL_ARG_DIFF_DST: return diff_dst_md(0, user_input);
427
+ default: return convolution_pd_t::arg_md(arg);
428
+ }
429
+ }
430
+
431
+ const memory_desc_t *src_md(
432
+ int index = 0, bool user_input = false) const override {
433
+ if (index == 0) return user_input ? &desc()->src_desc : &src_md_;
434
+ return &glob_zero_md;
435
+ }
436
+ const memory_desc_t *diff_dst_md(
437
+ int index = 0, bool user_input = false) const override {
438
+ if (index == 0)
439
+ return user_input ? &desc()->diff_dst_desc : &diff_dst_md_;
440
+ return &glob_zero_md;
441
+ }
442
+ const memory_desc_t *diff_weights_md(
443
+ int index = 0, bool user_input = false) const override {
444
+ if (index == 0)
445
+ return user_input ? &desc()->diff_weights_desc : &diff_weights_md_;
446
+ if (index == 1)
447
+ return user_input ? &desc()->diff_bias_desc : &diff_bias_md_;
448
+ return &glob_zero_md;
449
+ }
450
+
451
+ int n_inputs() const override { return 2; }
452
+ int n_outputs() const override { return 1 + with_bias(); }
453
+
454
+ protected:
455
+ memory_desc_t src_md_;
456
+ memory_desc_t diff_weights_md_;
457
+ memory_desc_t diff_bias_md_;
458
+ memory_desc_t diff_dst_md_;
459
+
460
+ bool set_default_formats_common(format_tag_t src_tag,
461
+ format_tag_t diff_wei_tag, format_tag_t diff_dst_tag) {
462
+ return set_default_formats_common_template(src_md_, src_tag,
463
+ diff_weights_md_, diff_wei_tag, diff_dst_md_, diff_dst_tag,
464
+ diff_bias_md_);
465
+ }
466
+ };
467
+
468
+ } // namespace impl
469
+ } // namespace dnnl
470
+
471
+ #endif
472
+
473
+ // vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/cpp_compat.hpp ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2021 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef COMMON_CPP_COMPAT_HPP
18
+ #define COMMON_CPP_COMPAT_HPP
19
+
20
+ #include <exception>
21
+ #include <type_traits>
22
+
23
+ namespace dnnl {
24
+ namespace impl {
25
+ namespace cpp_compat {
26
+
27
+ // oneDNN relies on C++11 standard. However, for DPCPP runtime the standard we
28
+ // use to build oneDNN must be C++17 per requirements. Some C++11 features have
29
+ // been deprecated in C++17, which triggers deprecations warnings. This file
30
+ // contains a compatibility layer for such C++ features.
31
+
32
+ // Older than C++17.
33
+ #if defined(__cplusplus) && __cplusplus < 201703L
34
+ inline int uncaught_exceptions() {
35
+ return (int)std::uncaught_exception();
36
+ }
37
+
38
+ template <class F, class... ArgTypes>
39
+ using invoke_result = std::result_of<F(ArgTypes...)>;
40
+ #else
41
+
42
+ inline int uncaught_exceptions() {
43
+ return std::uncaught_exceptions();
44
+ }
45
+
46
+ template <class F, class... ArgTypes>
47
+ using invoke_result = std::invoke_result<F, ArgTypes...>;
48
+
49
+ #endif
50
+ } // namespace cpp_compat
51
+ } // namespace impl
52
+ } // namespace dnnl
53
+
54
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/dnnl_thread.hpp ADDED
@@ -0,0 +1,678 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2017-2023 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef COMMON_DNNL_THREAD_HPP
18
+ #define COMMON_DNNL_THREAD_HPP
19
+
20
+ #include <algorithm>
21
+ #include <functional>
22
+ #include <mutex>
23
+
24
+ #include "utils.hpp"
25
+ #include "z_magic.hpp"
26
+
27
+ // IMPORTANT NOTICE:
28
+ // This header is special in the library since it enables all threading
29
+ // functionality in the product including tests.
30
+ // tests/test_thread.{c,h}pp files rely on this header file by:
31
+ // * Substituting `threadpool_utils` namespace to re-use threadpool functions
32
+ // and enable a second threadpool different from the library;
33
+ // * Re-defining `DNNL_CPU_THREADING_RUNTIME` macro value when it is supposed
34
+ // to be `DNNL_RUNTIME_SEQ`, e.g., for CPU_NONE configuration.
35
+ // 1. It implies all parts of code relying on this macro should stay in the
36
+ // file.
37
+ // 2. It implies there are no function bodies in the translation units
38
+ // related to the library. Tests threading layer uses dnnl::impl::func
39
+ // signature, and if library has symbols defined, regardless of
40
+ // redefinition, it will take those that were compiled with original
41
+ // macro value.
42
+ //
43
+ // Potential drawback could be increased binary size but it doesn't happen much
44
+ // due to linker optimizations. The newer compiler and C++ standard, the less
45
+ // binary size will be achieved.
46
+
47
+ #if DNNL_CPU_THREADING_RUNTIME == DNNL_RUNTIME_THREADPOOL
48
+ #include "counting_barrier.hpp"
49
+ #endif
50
+
51
+ #if defined(DNNL_ENABLE_ITT_TASKS)
52
+ #include "common/ittnotify.hpp"
53
+ #endif
54
+
55
+ #if DNNL_CPU_THREADING_RUNTIME == DNNL_RUNTIME_SEQ
56
+ #define DNNL_THR_SYNC 1
57
+ inline int dnnl_get_max_threads() {
58
+ return 1;
59
+ }
60
+ inline int dnnl_in_parallel() {
61
+ return 0;
62
+ }
63
+ inline void dnnl_thr_barrier() {}
64
+
65
+ #elif DNNL_CPU_THREADING_RUNTIME == DNNL_RUNTIME_OMP
66
+ #include "omp.h"
67
+ #define DNNL_THR_SYNC 1
68
+ inline int dnnl_get_max_threads() {
69
+ return omp_get_max_threads();
70
+ }
71
+ inline int dnnl_in_parallel() {
72
+ return omp_in_parallel();
73
+ }
74
+ inline void dnnl_thr_barrier() {
75
+ #pragma omp barrier
76
+ }
77
+
78
+ #elif DNNL_CPU_THREADING_RUNTIME == DNNL_RUNTIME_TBB
79
+ #include "tbb/parallel_for.h"
80
+ #include "tbb/task_arena.h"
81
+ #define DNNL_THR_SYNC 0
82
+ inline int dnnl_get_max_threads() {
83
+ return tbb::this_task_arena::max_concurrency();
84
+ }
85
+ inline int dnnl_in_parallel() {
86
+ return 0;
87
+ }
88
+ inline void dnnl_thr_barrier() {
89
+ assert(!"no barrier in TBB");
90
+ }
91
+
92
+ #elif DNNL_CPU_THREADING_RUNTIME == DNNL_RUNTIME_THREADPOOL
93
+ #include <thread>
94
+ #include "oneapi/dnnl/dnnl_threadpool_iface.hpp"
95
+ #define DNNL_THR_SYNC 0
96
+
97
+ #include "cpu/platform.hpp"
98
+
99
+ namespace dnnl {
100
+ namespace impl {
101
+ namespace threadpool_utils {
102
+
103
+ // Each thread maintains a thread-local pointer to a threadpool which is
104
+ // 'active' for the current thread. If this pointer is a nullptr, all the work
105
+ // is executed sequentially.
106
+
107
+ // Sets `tp` to be the active threadpool for the calling thread. This will
108
+ // make all calls to `get_active_threadpool()` to return `tp` thus enabling
109
+ // `parallel()` and `parallel_nd()` to submit work to `tp`.
110
+ void activate_threadpool(dnnl::threadpool_interop::threadpool_iface *tp);
111
+
112
+ // Resets the active threadpool for the calling thread to nullptr. After this
113
+ // call `parallel()` and `parallel_nd()` would execute work sequentially.
114
+ void deactivate_threadpool();
115
+
116
+ // Returns the active threadpool for the calling thread.
117
+ dnnl::threadpool_interop::threadpool_iface *get_active_threadpool();
118
+
119
+ // returns the maximum concurrency available in the given global context
120
+ int get_max_concurrency();
121
+
122
+ int &get_threadlocal_max_concurrency();
123
+
124
+ } // namespace threadpool_utils
125
+ } // namespace impl
126
+ } // namespace dnnl
127
+
128
+ inline int dnnl_get_max_threads() {
129
+ using namespace dnnl::impl::threadpool_utils;
130
+ dnnl::threadpool_interop::threadpool_iface *tp = get_active_threadpool();
131
+
132
+ // This is the maximum number of threads oneDNN will use by default
133
+ int max_concurrency = dnnl::impl::threadpool_utils::get_max_concurrency();
134
+
135
+ // Use the default max_concurrency only when no tp is passed by
136
+ // user (e.g. primitive creation).
137
+ return tp ? std::max(1, tp->get_num_threads()) : max_concurrency;
138
+ }
139
+ inline int dnnl_in_parallel() {
140
+ using namespace dnnl::impl::threadpool_utils;
141
+ dnnl::threadpool_interop::threadpool_iface *tp = get_active_threadpool();
142
+ return tp ? tp->get_in_parallel() : 0;
143
+ }
144
+ inline void dnnl_thr_barrier() {
145
+ assert(!"no barrier with THREADPOOL");
146
+ }
147
+ #endif
148
+
149
+ /* The purpose of this function is to provide the number of threads the library
150
+ * is aware of when this function is invoked. Since oneDNN does not allow nested
151
+ * parallelism, inside a parallel region the number of available threads is 1.
152
+ * Otherwise, the number of current threads varies between threading runtimes:
153
+ * - for OpenMP and TBB, return the max number of threads since the number of
154
+ * threads is held in a global object throughout the entire execution.
155
+ * - for Threadpool, since the global object in oneDNN changes throughout
156
+ * execution, two situations can occur:
157
+ * a) if the library *is* aware of a threadpool when this function is invoked,
158
+ * return the number of available threads in the threadpool;
159
+ * b) if the library *is not* aware of a threadpool when this function is
160
+ * invoked, return 1 since the main thread will do the work.
161
+ */
162
+ inline int dnnl_get_current_num_threads() {
163
+ if (dnnl_in_parallel()) return 1;
164
+ #if DNNL_CPU_THREADING_RUNTIME == DNNL_RUNTIME_OMP
165
+ return omp_get_max_threads();
166
+ #elif DNNL_CPU_THREADING_RUNTIME == DNNL_RUNTIME_TBB
167
+ return tbb::this_task_arena::max_concurrency();
168
+ #elif DNNL_CPU_THREADING_RUNTIME == DNNL_RUNTIME_THREADPOOL
169
+ using namespace dnnl::impl::threadpool_utils;
170
+ dnnl::threadpool_interop::threadpool_iface *tp = get_active_threadpool();
171
+ return (tp) ? dnnl_get_max_threads() : 1;
172
+ #else
173
+ return 1;
174
+ #endif
175
+ }
176
+
177
+ #if DNNL_CPU_THREADING_RUNTIME == DNNL_RUNTIME_OMP
178
+ #define PRAGMA_OMP(...) PRAGMA_MACRO(CHAIN2(omp, __VA_ARGS__))
179
+ #define OMP_GET_THREAD_NUM() omp_get_thread_num()
180
+ #define OMP_GET_NUM_THREADS() omp_get_num_threads()
181
+ #else
182
+ #define PRAGMA_OMP(...)
183
+ #define OMP_GET_THREAD_NUM() 0
184
+ #define OMP_GET_NUM_THREADS() 1
185
+ #endif
186
+
187
+ // MSVC still supports omp 2.0 only
188
+ #if defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER)
189
+ #define collapse(x)
190
+ #define PRAGMA_OMP_SIMD(...)
191
+ #else
192
+ #define PRAGMA_OMP_SIMD(...) PRAGMA_MACRO(CHAIN2(omp, simd __VA_ARGS__))
193
+ #endif // defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER)
194
+
195
+ // process simdlen; it is supported for Clang >= 3.9; ICC >= 17.0; GCC >= 6.1
196
+ // No support on Windows.
197
+ #if (defined(__clang_major__) \
198
+ && (__clang_major__ < 3 \
199
+ || (__clang_major__ == 3 && __clang_minor__ < 9))) \
200
+ || (defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1700) \
201
+ || (!defined(__INTEL_COMPILER) && !defined(__clang__) \
202
+ && (defined(_MSC_VER) || __GNUC__ < 6 \
203
+ || (__GNUC__ == 6 && __GNUC_MINOR__ < 1)))
204
+ #define simdlen(x)
205
+ #endif // long simdlen if
206
+
207
+ namespace dnnl {
208
+ namespace impl {
209
+
210
+ inline bool dnnl_thr_syncable() {
211
+ return DNNL_THR_SYNC == 1;
212
+ }
213
+
214
+ template <typename T, typename U>
215
+ inline void balance211(T n, U team, U tid, T &n_start, T &n_end) {
216
+ T n_min = 1;
217
+ T &n_my = n_end;
218
+ if (team <= 1 || n == 0) {
219
+ n_start = 0;
220
+ n_my = n;
221
+ } else if (n_min == 1) {
222
+ // team = T1 + T2
223
+ // n = T1*n1 + T2*n2 (n1 - n2 = 1)
224
+ T n1 = utils::div_up(n, (T)team);
225
+ T n2 = n1 - 1;
226
+ T T1 = n - n2 * (T)team;
227
+ n_my = (T)tid < T1 ? n1 : n2;
228
+ n_start = (T)tid <= T1 ? (T)tid * n1 : T1 * n1 + ((T)tid - T1) * n2;
229
+ }
230
+
231
+ n_end += n_start;
232
+ }
233
+
234
+ template <typename T, typename U>
235
+ void balance2D(U nthr, U ithr, T ny, T &ny_start, T &ny_end, T nx, T &nx_start,
236
+ T &nx_end, T nx_divider) {
237
+ const T grp_count = nstl::min(nx_divider, static_cast<T>(nthr));
238
+ const int grp_size_big = nthr / static_cast<int>(grp_count) + 1;
239
+ const int grp_size_small = nthr / static_cast<int>(grp_count);
240
+ const int n_grp_big = nthr % static_cast<int>(grp_count);
241
+ const int threads_in_big_groups = n_grp_big * grp_size_big;
242
+
243
+ const int ithr_bound_distance = ithr - threads_in_big_groups;
244
+ T grp, grp_ithr, grp_nthr;
245
+ if (ithr_bound_distance < 0) { // ithr in first groups
246
+ grp = ithr / grp_size_big;
247
+ grp_ithr = ithr % grp_size_big;
248
+ grp_nthr = grp_size_big;
249
+ } else { // ithr in last groups
250
+ grp = n_grp_big + ithr_bound_distance / grp_size_small;
251
+ grp_ithr = ithr_bound_distance % grp_size_small;
252
+ grp_nthr = grp_size_small;
253
+ }
254
+
255
+ balance211(nx, grp_count, grp, nx_start, nx_end);
256
+ balance211(ny, grp_nthr, grp_ithr, ny_start, ny_end);
257
+ }
258
+
259
+ /* Functions:
260
+ * - parallel(nthr, f) - executes f in parallel using at
261
+ * most nthr threads. If nthr equals
262
+ * 0 dnnl_get_current_num_threads() threads
263
+ * is used
264
+ * - for_nd(ithr, nthr, dims..., f) - multidimensional for loop for
265
+ * already created threads
266
+ * - for_nd_ext(ithr, nthr, dims..., f) - multidimensional for loop for
267
+ * already created threads that passes
268
+ * ithr and nthr
269
+ * - parallel_nd(dims..., f) - creates a parallel section and then
270
+ * calls for_nd
271
+ * - parallel_nd_ext(dims..., f) - creates a parallel section and then
272
+ * calls for_nd_ext
273
+ * - parallel_nd_in_omp(dims..., f) - queries current nthr and ithr and
274
+ * then calls for_nd (mostly for
275
+ * convenience)
276
+ */
277
+
278
+ /* general parallelization */
279
+ inline int adjust_num_threads(int nthr, dim_t work_amount) {
280
+ if (nthr == 0) nthr = dnnl_get_current_num_threads();
281
+ #if DNNL_CPU_THREADING_RUNTIME == DNNL_RUNTIME_OMP
282
+ return (work_amount == 1 || omp_in_parallel()) ? 1 : nthr;
283
+ #else
284
+ return (int)std::min((dim_t)nthr, work_amount);
285
+ #endif
286
+ }
287
+
288
// Executes f(ithr, nthr) in parallel on at most `nthr` threads (0 means
// "use the runtime default"; see adjust_num_threads). The concrete
// execution strategy is selected at compile time by
// DNNL_CPU_THREADING_RUNTIME: sequential loop, OpenMP, TBB, or an
// externally supplied threadpool.
static inline void parallel(int nthr, const std::function<void(int, int)> &f) {
    // INT64_MAX: work amount is unknown here, so only the runtime state
    // (e.g. already inside a parallel region) can reduce nthr.
    nthr = adjust_num_threads(nthr, INT64_MAX);
#if DNNL_CPU_THREADING_RUNTIME == DNNL_RUNTIME_SEQ
    // Sequential build: just call f once per "thread" index.
    for (int i = 0; i < nthr; ++i) {
        f(i, nthr);
    }
#else
#if defined(DNNL_ENABLE_ITT_TASKS)
    // Capture the ITT task kind on the calling thread so worker threads
    // can attribute their work to the same primitive in profiles.
    auto task_primitive_kind = itt::primitive_task_get_current_kind();
    bool itt_enable = itt::get_itt(itt::__itt_task_level_high);
#endif
    // Fast path: no parallel machinery needed for a single thread.
    if (nthr == 1) {
        f(0, 1);
        return;
    }
#if DNNL_CPU_THREADING_RUNTIME == DNNL_RUNTIME_OMP
#pragma omp parallel num_threads(nthr)
    {
        int nthr_ = omp_get_num_threads();
        int ithr_ = omp_get_thread_num();
        assert(nthr_ == nthr);
#if defined(DNNL_ENABLE_ITT_TASKS)
        // Master thread (ithr_ == 0) already carries the task annotation.
        if (ithr_ && itt_enable) itt::primitive_task_start(task_primitive_kind);
#endif
        f(ithr_, nthr_);
#if defined(DNNL_ENABLE_ITT_TASKS)
        if (ithr_ && itt_enable) itt::primitive_task_end();
#endif
    }
#elif DNNL_CPU_THREADING_RUNTIME == DNNL_RUNTIME_TBB
    // static_partitioner gives each of the nthr iterations to a distinct
    // worker, matching the (ithr, nthr) contract of f.
    tbb::parallel_for(
            0, nthr,
            [&](int ithr) {
#if defined(DNNL_ENABLE_ITT_TASKS)
                // Only annotate threads that are not already inside a task.
                bool mark_task = itt::primitive_task_get_current_kind()
                        == primitive_kind::undefined;
                if (mark_task && itt_enable)
                    itt::primitive_task_start(task_primitive_kind);
#endif
                f(ithr, nthr);
#if defined(DNNL_ENABLE_ITT_TASKS)
                if (mark_task && itt_enable) itt::primitive_task_end();
#endif
            },
            tbb::static_partitioner());
#elif DNNL_CPU_THREADING_RUNTIME == DNNL_RUNTIME_THREADPOOL
    using namespace dnnl::impl::threadpool_utils;
    dnnl::threadpool_interop::threadpool_iface *tp = get_active_threadpool();
    // No threadpool attached, or already running inside one: fall back to
    // a sequential loop (with the pool temporarily deactivated so nested
    // calls do not try to re-enter it).
    if (!tp || dnnl_in_parallel()) {
        threadpool_utils::deactivate_threadpool();
        for (int ithr = 0; ithr < nthr; ithr++) {
            f(ithr, nthr);
        }
        threadpool_utils::activate_threadpool(tp);
    } else {
        bool async = tp->get_flags()
                & dnnl::threadpool_interop::threadpool_iface::ASYNCHRONOUS;
        // For asynchronous pools, each worker signals the barrier and the
        // caller waits below until all nthr workers have run.
        counting_barrier_t b;
        if (async) b.init(nthr);
        tp->parallel_for(nthr, [&, tp](int ithr, int nthr) {
            // Workers borrowed from the pool must see it as the active
            // threadpool for the duration of f; the master already does.
            bool is_master = threadpool_utils::get_active_threadpool() == tp;
            if (!is_master) {
                threadpool_utils::activate_threadpool(tp);
#if defined(DNNL_ENABLE_ITT_TASKS)
                if (itt_enable) itt::primitive_task_start(task_primitive_kind);
#endif
            }
            f(ithr, nthr);
            if (!is_master) {
#if defined(DNNL_ENABLE_ITT_TASKS)
                if (itt_enable) itt::primitive_task_end();
#endif
                threadpool_utils::deactivate_threadpool();
            }
            if (async) b.notify();
        });
        if (async) b.wait();
    }
#endif
#endif
}
369
+
370
+ // XXX: IMPORTANT!!!
371
+ // Keep the functions below static.
372
+ //
373
+ // The threading file is included in gtests and benchdnn. When
374
+ // the functions are not static it can cause a crash in gtests and
375
+ // benchdnn on macOS with Intel 2021 compiler.
376
+
377
+ /* for_nd section */
378
+ static inline void for_nd(const int ithr, const int nthr, dim_t D0,
379
+ const std::function<void(dim_t)> &f) {
380
+ dim_t start {0}, end {0};
381
+ balance211(D0, nthr, ithr, start, end);
382
+ for (dim_t d0 = start; d0 < end; ++d0)
383
+ f(d0);
384
+ }
385
// 2D..6D variants: the N-dimensional index space D0 x ... x Dk is
// flattened into [0, work_amount), split across threads with balance211,
// and walked with utils::nd_iterator_init/_step so that f receives the
// multi-dimensional coordinates of each flat index.
static inline void for_nd(const int ithr, const int nthr, dim_t D0, dim_t D1,
        const std::function<void(dim_t, dim_t)> &f) {
    const dim_t work_amount = D0 * D1;
    if (work_amount == 0) return; // nothing to do for an empty space
    dim_t start {0}, end {0};
    balance211(work_amount, nthr, ithr, start, end);

    // Position (d0, d1) at flat index `start`, then step once per iteration.
    dim_t d0 {0}, d1 {0};
    utils::nd_iterator_init(start, d0, D0, d1, D1);
    for (dim_t iwork = start; iwork < end; ++iwork) {
        f(d0, d1);
        utils::nd_iterator_step(d0, D0, d1, D1);
    }
}
static inline void for_nd(const int ithr, const int nthr, dim_t D0, dim_t D1,
        dim_t D2, const std::function<void(dim_t, dim_t, dim_t)> &f) {
    const dim_t work_amount = D0 * D1 * D2;
    if (work_amount == 0) return;
    dim_t start {0}, end {0};
    balance211(work_amount, nthr, ithr, start, end);

    dim_t d0 {0}, d1 {0}, d2 {0};
    utils::nd_iterator_init(start, d0, D0, d1, D1, d2, D2);
    for (dim_t iwork = start; iwork < end; ++iwork) {
        f(d0, d1, d2);
        utils::nd_iterator_step(d0, D0, d1, D1, d2, D2);
    }
}
static inline void for_nd(const int ithr, const int nthr, dim_t D0, dim_t D1,
        dim_t D2, dim_t D3,
        const std::function<void(dim_t, dim_t, dim_t, dim_t)> &f) {
    const dim_t work_amount = D0 * D1 * D2 * D3;
    if (work_amount == 0) return;
    dim_t start {0}, end {0};
    balance211(work_amount, nthr, ithr, start, end);

    dim_t d0 {0}, d1 {0}, d2 {0}, d3 {0};
    utils::nd_iterator_init(start, d0, D0, d1, D1, d2, D2, d3, D3);
    for (dim_t iwork = start; iwork < end; ++iwork) {
        f(d0, d1, d2, d3);
        utils::nd_iterator_step(d0, D0, d1, D1, d2, D2, d3, D3);
    }
}
static inline void for_nd(const int ithr, const int nthr, dim_t D0, dim_t D1,
        dim_t D2, dim_t D3, dim_t D4,
        const std::function<void(dim_t, dim_t, dim_t, dim_t, dim_t)> &f) {
    const dim_t work_amount = D0 * D1 * D2 * D3 * D4;
    if (work_amount == 0) return;
    dim_t start {0}, end {0};
    balance211(work_amount, nthr, ithr, start, end);

    dim_t d0 {0}, d1 {0}, d2 {0}, d3 {0}, d4 {0};
    utils::nd_iterator_init(start, d0, D0, d1, D1, d2, D2, d3, D3, d4, D4);
    for (dim_t iwork = start; iwork < end; ++iwork) {
        f(d0, d1, d2, d3, d4);
        utils::nd_iterator_step(d0, D0, d1, D1, d2, D2, d3, D3, d4, D4);
    }
}
static inline void for_nd(const int ithr, const int nthr, dim_t D0, dim_t D1,
        dim_t D2, dim_t D3, dim_t D4, dim_t D5,
        const std::function<void(dim_t, dim_t, dim_t, dim_t, dim_t, dim_t)>
                &f) {
    const dim_t work_amount = D0 * D1 * D2 * D3 * D4 * D5;
    if (work_amount == 0) return;
    dim_t start {0}, end {0};
    balance211(work_amount, nthr, ithr, start, end);

    dim_t d0 {0}, d1 {0}, d2 {0}, d3 {0}, d4 {0}, d5 {0};
    utils::nd_iterator_init(
            start, d0, D0, d1, D1, d2, D2, d3, D3, d4, D4, d5, D5);
    for (dim_t iwork = start; iwork < end; ++iwork) {
        f(d0, d1, d2, d3, d4, d5);
        utils::nd_iterator_step(d0, D0, d1, D1, d2, D2, d3, D3, d4, D4, d5, D5);
    }
}
460
+
461
+ /* for_nd_ext section */
462
// for_nd_ext: identical work distribution to for_nd, but the callback
// additionally receives (ithr, nthr) so it can use per-thread scratch or
// thread-aware logic.
static inline void for_nd_ext(const int ithr, const int nthr, dim_t D0,
        const std::function<void(int, int, dim_t)> &f) {
    dim_t start {0}, end {0};
    balance211(D0, nthr, ithr, start, end);
    for (dim_t d0 = start; d0 < end; ++d0)
        f(ithr, nthr, d0);
}
static inline void for_nd_ext(const int ithr, const int nthr, dim_t D0,
        dim_t D1, const std::function<void(int, int, dim_t, dim_t)> &f) {
    const dim_t work_amount = D0 * D1;
    if (work_amount == 0) return; // empty index space
    dim_t start {0}, end {0};
    balance211(work_amount, nthr, ithr, start, end);

    // Walk the flattened range, tracking multi-dimensional coordinates.
    dim_t d0 {0}, d1 {0};
    utils::nd_iterator_init(start, d0, D0, d1, D1);
    for (dim_t iwork = start; iwork < end; ++iwork) {
        f(ithr, nthr, d0, d1);
        utils::nd_iterator_step(d0, D0, d1, D1);
    }
}
static inline void for_nd_ext(const int ithr, const int nthr, dim_t D0,
        dim_t D1, dim_t D2,
        const std::function<void(int, int, dim_t, dim_t, dim_t)> &f) {
    const dim_t work_amount = D0 * D1 * D2;
    if (work_amount == 0) return;
    dim_t start {0}, end {0};
    balance211(work_amount, nthr, ithr, start, end);

    dim_t d0 {0}, d1 {0}, d2 {0};
    utils::nd_iterator_init(start, d0, D0, d1, D1, d2, D2);
    for (dim_t iwork = start; iwork < end; ++iwork) {
        f(ithr, nthr, d0, d1, d2);
        utils::nd_iterator_step(d0, D0, d1, D1, d2, D2);
    }
}
static inline void for_nd_ext(const int ithr, const int nthr, dim_t D0,
        dim_t D1, dim_t D2, dim_t D3,
        const std::function<void(int, int, dim_t, dim_t, dim_t, dim_t)> &f) {
    const dim_t work_amount = D0 * D1 * D2 * D3;
    if (work_amount == 0) return;
    dim_t start {0}, end {0};
    balance211(work_amount, nthr, ithr, start, end);

    dim_t d0 {0}, d1 {0}, d2 {0}, d3 {0};
    utils::nd_iterator_init(start, d0, D0, d1, D1, d2, D2, d3, D3);
    for (dim_t iwork = start; iwork < end; ++iwork) {
        f(ithr, nthr, d0, d1, d2, d3);
        utils::nd_iterator_step(d0, D0, d1, D1, d2, D2, d3, D3);
    }
}
static inline void for_nd_ext(const int ithr, const int nthr, dim_t D0,
        dim_t D1, dim_t D2, dim_t D3, dim_t D4,
        const std::function<void(int, int, dim_t, dim_t, dim_t, dim_t, dim_t)>
                &f) {
    const dim_t work_amount = D0 * D1 * D2 * D3 * D4;
    if (work_amount == 0) return;
    dim_t start {0}, end {0};
    balance211(work_amount, nthr, ithr, start, end);

    dim_t d0 {0}, d1 {0}, d2 {0}, d3 {0}, d4 {0};
    utils::nd_iterator_init(start, d0, D0, d1, D1, d2, D2, d3, D3, d4, D4);
    for (dim_t iwork = start; iwork < end; ++iwork) {
        f(ithr, nthr, d0, d1, d2, d3, d4);
        utils::nd_iterator_step(d0, D0, d1, D1, d2, D2, d3, D3, d4, D4);
    }
}
static inline void for_nd_ext(const int ithr, const int nthr, dim_t D0,
        dim_t D1, dim_t D2, dim_t D3, dim_t D4, dim_t D5,
        const std::function<void(
                int, int, dim_t, dim_t, dim_t, dim_t, dim_t, dim_t)> &f) {
    const dim_t work_amount = D0 * D1 * D2 * D3 * D4 * D5;
    if (work_amount == 0) return;
    dim_t start {0}, end {0};
    balance211(work_amount, nthr, ithr, start, end);

    dim_t d0 {0}, d1 {0}, d2 {0}, d3 {0}, d4 {0}, d5 {0};
    utils::nd_iterator_init(
            start, d0, D0, d1, D1, d2, D2, d3, D3, d4, D4, d5, D5);
    for (dim_t iwork = start; iwork < end; ++iwork) {
        f(ithr, nthr, d0, d1, d2, d3, d4, d5);
        utils::nd_iterator_step(d0, D0, d1, D1, d2, D2, d3, D3, d4, D4, d5, D5);
    }
}
546
+
547
+ /* parallel_nd_ext section */
548
// parallel_nd_ext: opens a parallel section sized to the work amount and
// dispatches to the matching for_nd_ext overload. The callback receives
// (ithr, nthr) in addition to the coordinates. A zero work amount makes
// adjust_num_threads return 0, so the `if (nthr)` guard skips execution.
static inline void parallel_nd_ext(
        int nthr, dim_t D0, const std::function<void(int, int, dim_t)> &f) {
    const dim_t work_amount = D0;
    nthr = adjust_num_threads(nthr, work_amount);
    if (nthr)
        parallel(nthr,
                [&](int ithr, int nthr) { for_nd_ext(ithr, nthr, D0, f); });
}
static inline void parallel_nd_ext(int nthr, dim_t D0, dim_t D1,
        const std::function<void(int, int, dim_t, dim_t)> &f) {
    const dim_t work_amount = D0 * D1;
    nthr = adjust_num_threads(nthr, work_amount);
    if (nthr)
        parallel(nthr,
                [&](int ithr, int nthr) { for_nd_ext(ithr, nthr, D0, D1, f); });
}
static inline void parallel_nd_ext(int nthr, dim_t D0, dim_t D1, dim_t D2,
        const std::function<void(int, int, dim_t, dim_t, dim_t)> &f) {
    const dim_t work_amount = D0 * D1 * D2;
    nthr = adjust_num_threads(nthr, work_amount);
    if (nthr)
        parallel(nthr, [&](int ithr, int nthr) {
            for_nd_ext(ithr, nthr, D0, D1, D2, f);
        });
}
static inline void parallel_nd_ext(int nthr, dim_t D0, dim_t D1, dim_t D2,
        dim_t D3,
        const std::function<void(int, int, dim_t, dim_t, dim_t, dim_t)> &f) {
    const dim_t work_amount = D0 * D1 * D2 * D3;
    nthr = adjust_num_threads(nthr, work_amount);
    if (nthr)
        parallel(nthr, [&](int ithr, int nthr) {
            for_nd_ext(ithr, nthr, D0, D1, D2, D3, f);
        });
}
static inline void parallel_nd_ext(int nthr, dim_t D0, dim_t D1, dim_t D2,
        dim_t D3, dim_t D4,
        const std::function<void(int, int, dim_t, dim_t, dim_t, dim_t, dim_t)>
                &f) {
    const dim_t work_amount = D0 * D1 * D2 * D3 * D4;
    nthr = adjust_num_threads(nthr, work_amount);
    if (nthr)
        parallel(nthr, [&](int ithr, int nthr) {
            for_nd_ext(ithr, nthr, D0, D1, D2, D3, D4, f);
        });
}
static inline void parallel_nd_ext(int nthr, dim_t D0, dim_t D1, dim_t D2,
        dim_t D3, dim_t D4, dim_t D5,
        const std::function<void(
                int, int, dim_t, dim_t, dim_t, dim_t, dim_t, dim_t)> &f) {
    const dim_t work_amount = D0 * D1 * D2 * D3 * D4 * D5;
    nthr = adjust_num_threads(nthr, work_amount);
    if (nthr)
        parallel(nthr, [&](int ithr, int nthr) {
            for_nd_ext(ithr, nthr, D0, D1, D2, D3, D4, D5, f);
        });
}
605
+
606
+ /* parallel_nd section */
607
// parallel_nd: convenience entry point — sizes the thread team from the
// current runtime limit and the work amount, then opens a parallel
// section and runs the matching for_nd overload. The callback receives
// only the coordinates.
static inline void parallel_nd(dim_t D0, const std::function<void(dim_t)> &f) {
    int nthr = adjust_num_threads(dnnl_get_current_num_threads(), D0);
    if (nthr)
        parallel(nthr, [&](int ithr, int nthr) { for_nd(ithr, nthr, D0, f); });
}
static inline void parallel_nd(
        dim_t D0, dim_t D1, const std::function<void(dim_t, dim_t)> &f) {
    const dim_t work_amount = D0 * D1;
    int nthr = adjust_num_threads(dnnl_get_current_num_threads(), work_amount);
    if (nthr)
        parallel(nthr,
                [&](int ithr, int nthr) { for_nd(ithr, nthr, D0, D1, f); });
}
static inline void parallel_nd(dim_t D0, dim_t D1, dim_t D2,
        const std::function<void(dim_t, dim_t, dim_t)> &f) {
    const dim_t work_amount = D0 * D1 * D2;
    int nthr = adjust_num_threads(dnnl_get_current_num_threads(), work_amount);
    if (nthr)
        parallel(nthr,
                [&](int ithr, int nthr) { for_nd(ithr, nthr, D0, D1, D2, f); });
}
static inline void parallel_nd(dim_t D0, dim_t D1, dim_t D2, dim_t D3,
        const std::function<void(dim_t, dim_t, dim_t, dim_t)> &f) {
    const dim_t work_amount = D0 * D1 * D2 * D3;
    int nthr = adjust_num_threads(dnnl_get_current_num_threads(), work_amount);
    if (nthr)
        parallel(nthr, [&](int ithr, int nthr) {
            for_nd(ithr, nthr, D0, D1, D2, D3, f);
        });
}
static inline void parallel_nd(dim_t D0, dim_t D1, dim_t D2, dim_t D3, dim_t D4,
        const std::function<void(dim_t, dim_t, dim_t, dim_t, dim_t)> &f) {
    const dim_t work_amount = D0 * D1 * D2 * D3 * D4;
    int nthr = adjust_num_threads(dnnl_get_current_num_threads(), work_amount);
    if (nthr)
        parallel(nthr, [&](int ithr, int nthr) {
            for_nd(ithr, nthr, D0, D1, D2, D3, D4, f);
        });
}
static inline void parallel_nd(dim_t D0, dim_t D1, dim_t D2, dim_t D3, dim_t D4,
        dim_t D5,
        const std::function<void(dim_t, dim_t, dim_t, dim_t, dim_t, dim_t)>
                &f) {
    const dim_t work_amount = D0 * D1 * D2 * D3 * D4 * D5;
    int nthr = adjust_num_threads(dnnl_get_current_num_threads(), work_amount);
    if (nthr)
        parallel(nthr, [&](int ithr, int nthr) {
            for_nd(ithr, nthr, D0, D1, D2, D3, D4, D5, f);
        });
}
657
+
658
+ /* parallel_nd_in_omp section */
659
+
660
// Runs for_nd from inside an already-open OpenMP parallel region, using
// the calling thread's (ithr, nthr) from the OMP runtime. Only meaningful
// for the SEQ and OMP runtimes; TBB/threadpool builds assert at runtime.
template <typename... Args>
void parallel_nd_in_omp(Args &&...args) {
#if DNNL_CPU_THREADING_RUNTIME == DNNL_RUNTIME_SEQ
    // Sequential build: behave as the single thread of a 1-thread team.
    for_nd(0, 1, utils::forward<Args>(args)...);
#elif DNNL_CPU_THREADING_RUNTIME == DNNL_RUNTIME_OMP
    for_nd(omp_get_thread_num(), omp_get_num_threads(),
            utils::forward<Args>(args)...);
#elif (DNNL_CPU_THREADING_RUNTIME == DNNL_RUNTIME_TBB \
        || DNNL_CPU_THREADING_RUNTIME == DNNL_RUNTIME_THREADPOOL)
    assert(!"parallel_nd_in_omp() is not supported by this DNNL_CPU_RUNTIME");
#endif
}
672
+
673
+ } // namespace impl
674
+ } // namespace dnnl
675
+
676
+ #endif
677
+
678
+ // vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/dnnl_traits.hpp ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2016-2023 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef COMMON_DNNL_TRAITS_HPP
18
+ #define COMMON_DNNL_TRAITS_HPP
19
+
20
+ #include <assert.h>
21
+ #include <stdint.h>
22
+
23
+ #include "oneapi/dnnl/dnnl.h"
24
+
25
+ #include "bfloat16.hpp"
26
+ #include "c_types_map.hpp"
27
+ #include "float16.hpp"
28
+ #include "nstl.hpp"
29
+ #include "opdesc.hpp"
30
+ #include "utils.hpp"
31
+ #include "z_magic.hpp"
32
+
33
+ namespace dnnl {
34
+ namespace impl {
35
+
36
// Compile-time trait tables that connect runtime enum values with C++
// types:
//   prec_traits<dt>::type      - element type for a data_type_t value
//   data_traits<T>::data_type  - inverse mapping, type -> data_type_t
//   typesize_traits<N>::type   - a representative type of N bytes
//   pkind_traits<pk>::desc_type - descriptor struct for a primitive kind
template <data_type_t>
struct prec_traits {}; /* ::type -> float */
template <typename>
struct data_traits {}; /* ::data_type -> f32 */
template <int>
struct typesize_traits {}; /* ::data_type_size -> f32 */
template <primitive_kind_t>
struct pkind_traits {}; /* ::desc_type, ::query_d */

// data_type_t -> storage type.
template <>
struct prec_traits<data_type::f16> {
    typedef float16_t type;
};
template <>
struct prec_traits<data_type::bf16> {
    typedef bfloat16_t type;
};
template <>
struct prec_traits<data_type::f32> {
    typedef float type;
};
template <>
struct prec_traits<data_type::f64> {
    typedef double type;
};
template <>
struct prec_traits<data_type::s32> {
    typedef int32_t type;
};
template <>
struct prec_traits<data_type::s8> {
    typedef int8_t type;
};
template <>
struct prec_traits<data_type::u8> {
    typedef uint8_t type;
};
template <>
struct prec_traits<data_type::boolean> {
    typedef bool type;
};

// Storage type -> data_type_t. Note: no f64/double entry is defined here.
template <>
struct data_traits<float16_t> {
    static constexpr data_type_t data_type = data_type::f16;
};
template <>
struct data_traits<bfloat16_t> {
    static constexpr data_type_t data_type = data_type::bf16;
};
template <>
struct data_traits<float> {
    static constexpr data_type_t data_type = data_type::f32;
};
template <>
struct data_traits<int32_t> {
    static constexpr data_type_t data_type = data_type::s32;
};
template <>
struct data_traits<int8_t> {
    static constexpr data_type_t data_type = data_type::s8;
};
template <>
struct data_traits<uint8_t> {
    static constexpr data_type_t data_type = data_type::u8;
};
template <>
struct data_traits<bool> {
    static constexpr data_type_t data_type = data_type::boolean;
};

// Byte size -> representative type (4, 2 and 1 byte widths only).
template <>
struct typesize_traits<4> {
    typedef float type;
};
template <>
struct typesize_traits<2> {
    typedef int16_t type;
};
template <>
struct typesize_traits<1> {
    typedef uint8_t type;
};

// Instantiates pkind_traits<primitive_kind::op> with
// ::desc_type == op_desc_t (name built via token concatenation).
#define PKIND_TRAITS_INST(op) \
    template <> \
    struct pkind_traits<primitive_kind::op> { \
        typedef CONCAT2(op, _desc_t) desc_type; \
    }
PKIND_TRAITS_INST(convolution);
PKIND_TRAITS_INST(deconvolution);
PKIND_TRAITS_INST(shuffle);
PKIND_TRAITS_INST(eltwise);
PKIND_TRAITS_INST(softmax);
PKIND_TRAITS_INST(pooling);
PKIND_TRAITS_INST(prelu);
PKIND_TRAITS_INST(lrn);
PKIND_TRAITS_INST(batch_normalization);
PKIND_TRAITS_INST(group_normalization);
PKIND_TRAITS_INST(layer_normalization);
PKIND_TRAITS_INST(inner_product);
PKIND_TRAITS_INST(rnn);
PKIND_TRAITS_INST(gemm);
PKIND_TRAITS_INST(zero_pad);
PKIND_TRAITS_INST(binary);
PKIND_TRAITS_INST(matmul);
PKIND_TRAITS_INST(resampling);
PKIND_TRAITS_INST(reduction);
#undef PKIND_TRAITS_INST
145
+
146
+ } // namespace impl
147
+ } // namespace dnnl
148
+
149
+ #endif
150
+
151
+ // vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/eltwise_pd.hpp ADDED
@@ -0,0 +1,298 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2016-2023 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef COMMON_ELTWISE_PD_HPP
18
+ #define COMMON_ELTWISE_PD_HPP
19
+
20
+ #include "oneapi/dnnl/dnnl.h"
21
+
22
+ #include "c_types_map.hpp"
23
+ #include "primitive_desc.hpp"
24
+
25
+ namespace dnnl {
26
+ namespace impl {
27
+
28
+ struct eltwise_fwd_pd_t;
29
+
30
+ status_t eltwise_desc_init(eltwise_desc_t *eltwise_desc, prop_kind_t prop_kind,
31
+ alg_kind_t alg_kind, const memory_desc_t *src_desc,
32
+ const memory_desc_t *dst_desc, const memory_desc_t *diff_src_desc,
33
+ const memory_desc_t *diff_dst_desc, float alpha, float beta);
34
+
35
// Common base primitive descriptor for eltwise (forward and backward).
// Holds the operation descriptor, a hint to the forward pd (used by the
// backward pass), and the src/dst memory descriptors.
struct eltwise_pd_t : public primitive_desc_t {
    static constexpr auto base_pkind = primitive_kind::eltwise;

    const eltwise_desc_t *desc() const { return &desc_; }
    const op_desc_t *op_desc() const override {
        return reinterpret_cast<const op_desc_t *>(this->desc());
    }

    // Answers prop_kind / alg_kind / alpha / beta queries locally;
    // everything else is delegated to the base class.
    status_t query(query_t what, int idx, void *result) const override {
        switch (what) {
            case query::prop_kind:
                *(prop_kind_t *)result = desc()->prop_kind;
                break;
            case query::alg_kind:
                *(alg_kind_t *)result = desc()->alg_kind;
                break;
            case query::alpha_f32: *(float *)result = desc()->alpha; break;
            case query::beta_f32: *(float *)result = desc()->beta; break;
            default: return primitive_desc_t::query(what, idx, result);
        }
        return status::success;
    }

    /* common eltwise aux functions */

    // Logical dimensions of the data tensor: minibatch, channels, and
    // spatial depth/height/width. Missing trailing dims default to 1.
    dim_t MB() const { return data_md()->dims[0]; }
    dim_t C() const { return ndims() >= 2 ? data_md()->dims[1] : 1; }
    dim_t D() const { return ndims() >= 5 ? data_md()->dims[ndims() - 3] : 1; }
    dim_t H() const { return ndims() >= 4 ? data_md()->dims[ndims() - 2] : 1; }
    dim_t W() const { return ndims() >= 3 ? data_md()->dims[ndims() - 1] : 1; }

    int ndims() const { return data_md()->ndims; }

    bool is_fwd() const {
        return utils::one_of(desc_.prop_kind, prop_kind::forward_training,
                prop_kind::forward_inference);
    }

    bool has_zero_dim_memory() const {
        return memory_desc_wrapper(data_md()).has_zero_dim();
    }

    // True for backward algorithms that compute the gradient from the
    // forward *output* (dst) instead of the input (src).
    bool use_dst() const {
        using namespace alg_kind;
        return !is_fwd()
                && utils::one_of(desc_.alg_kind, eltwise_relu_use_dst_for_bwd,
                        eltwise_tanh_use_dst_for_bwd,
                        eltwise_elu_use_dst_for_bwd,
                        eltwise_sqrt_use_dst_for_bwd,
                        eltwise_logistic_use_dst_for_bwd,
                        eltwise_exp_use_dst_for_bwd,
                        eltwise_clip_v2_use_dst_for_bwd);
    }

protected:
    eltwise_desc_t desc_; // copy of the user-provided op descriptor
    const eltwise_fwd_pd_t *hint_fwd_pd_; // forward pd hint (bwd only)

    memory_desc_t src_md_;
    memory_desc_t dst_md_;

    eltwise_pd_t(const eltwise_desc_t *adesc, const primitive_attr_t *attr,
            const eltwise_fwd_pd_t *hint_fwd_pd)
        : primitive_desc_t(attr, base_pkind)
        , desc_(*adesc)
        , hint_fwd_pd_(hint_fwd_pd)
        , src_md_(desc_.src_desc)
        , dst_md_(desc_.dst_desc) {}

private:
    // The "data" tensor whose shape the aux accessors above describe:
    // dst when the bwd algorithm works on dst, src otherwise.
    const memory_desc_t *data_md(int index = 0) const {
        return use_dst() ? dst_md(index) : src_md(index);
    }
};
109
+
110
// Primitive descriptor for the forward eltwise pass: one input (src),
// one output (dst), plus any binary post-op inputs.
struct eltwise_fwd_pd_t : public eltwise_pd_t {
    typedef eltwise_fwd_pd_t base_class;
    typedef eltwise_fwd_pd_t hint_class;

    arg_usage_t arg_usage(int arg) const override {
        if (arg == DNNL_ARG_SRC) return arg_usage_t::input;

        if (arg == DNNL_ARG_DST) return arg_usage_t::output;

        return primitive_desc_t::arg_usage(arg);
    }

    const memory_desc_t *arg_md(
            int arg, bool user_input = false) const override {
        switch (arg) {
            case DNNL_ARG_SRC: return src_md(0);
            case DNNL_ARG_DST: return dst_md(0, user_input);
            default: return eltwise_pd_t::arg_md(arg);
        }
    }

    // user_input selects the descriptor exactly as the user provided it
    // (before any format resolution) vs the possibly-updated internal one.
    const memory_desc_t *src_md(
            int index = 0, bool user_input = false) const override {
        if (index == 0) return user_input ? &desc()->src_desc : &src_md_;
        return &glob_zero_md;
    }
    const memory_desc_t *dst_md(
            int index = 0, bool user_input = false) const override {
        if (index == 0) return user_input ? &desc()->dst_desc : &dst_md_;
        return &glob_zero_md;
    }

    int n_inputs() const override { return 1 + n_binary_po_inputs(); }
    int n_outputs() const override { return 1; }

    // True when alg(0) == 0 for the given alpha/beta, i.e. zero padding
    // in the input stays zero in the output.
    static bool eltwise_preserves_zero(
            alg_kind_t alg, float alpha, float beta) {
        using namespace alg_kind;
        using namespace utils;
        return one_of(alg, eltwise_relu, eltwise_tanh, eltwise_elu,
                       eltwise_square, eltwise_abs, eltwise_sqrt, eltwise_swish,
                       eltwise_gelu_tanh, eltwise_gelu_erf, eltwise_round,
                       eltwise_hardswish)
                || one_of(alg, eltwise_relu_use_dst_for_bwd,
                        eltwise_tanh_use_dst_for_bwd,
                        eltwise_elu_use_dst_for_bwd,
                        eltwise_sqrt_use_dst_for_bwd)
                // clip keeps zero only if the clamp window contains zero
                || (one_of(alg, eltwise_clip, eltwise_clip_v2) && alpha <= 0
                        && beta >= 0)
                || (alg == eltwise_linear && beta == 0)
                || (alg == eltwise_pow && beta > 0);
    }

    static bool eltwise_preserves_zero(
            const post_ops_t::entry_t::eltwise_t &eltwise) {
        return eltwise_preserves_zero(eltwise.alg, eltwise.alpha, eltwise.beta);
    }

    bool is_zero_preserved() const {
        return eltwise_preserves_zero(desc_.alg_kind, desc_.alpha, desc_.beta);
    }

protected:
    eltwise_fwd_pd_t(const eltwise_desc_t *adesc, const primitive_attr_t *attr,
            const eltwise_fwd_pd_t *hint_fwd_pd)
        : eltwise_pd_t(adesc, attr, hint_fwd_pd) {}

    // If the user left dst format as `any`, derive it from src's layout
    // while keeping dst's data type.
    bool set_default_formats_common() {
        return IMPLICATION(dst_md_.format_kind == format_kind::any,
                memory_desc_init_by_md_and_dt(
                        dst_md_, src_md_, dst_md_.data_type)
                        == status::success);
    }
};
184
+
185
// Primitive descriptor for the backward eltwise pass: inputs are
// diff_dst plus either src or dst (depending on use_dst()); output is
// diff_src.
struct eltwise_bwd_pd_t : public eltwise_pd_t {
    typedef eltwise_bwd_pd_t base_class;
    typedef eltwise_fwd_pd_t hint_class;

    arg_usage_t arg_usage(int arg) const override {
        // The forward tensor consumed depends on the algorithm flavor.
        if (use_dst() ? arg == DNNL_ARG_DST : arg == DNNL_ARG_SRC)
            return arg_usage_t::input;

        if (arg == DNNL_ARG_DIFF_DST) return arg_usage_t::input;
        if (arg == DNNL_ARG_DIFF_SRC) return arg_usage_t::output;

        return primitive_desc_t::arg_usage(arg);
    }

    const memory_desc_t *arg_md(
            int arg, bool user_input = false) const override {
        switch (arg) {
            case DNNL_ARG_SRC: return src_md(0);
            case DNNL_ARG_DST: return dst_md(0, user_input);
            case DNNL_ARG_DIFF_SRC: return diff_src_md(0);
            case DNNL_ARG_DIFF_DST: return diff_dst_md(0, user_input);
            default: return eltwise_pd_t::arg_md(arg);
        }
    }

    // To avoid additional logic in implementations
    const memory_desc_t *data_md(int index = 0) const {
        return use_dst() ? dst_md(index) : src_md(index);
    }
    // src/dst accessors return the real descriptor only for the tensor
    // this flavor actually uses; the other collapses to glob_zero_md.
    const memory_desc_t *src_md(
            int index = 0, bool user_input = false) const override {
        if (index == 0 && !use_dst())
            return user_input ? &desc()->src_desc : &src_md_;
        return &glob_zero_md;
    }
    const memory_desc_t *dst_md(
            int index = 0, bool user_input = false) const override {
        if (index == 0 && use_dst())
            return user_input ? &desc()->dst_desc : &dst_md_;
        return &glob_zero_md;
    }
    const memory_desc_t *diff_dst_md(
            int index = 0, bool user_input = false) const override {
        if (index == 0)
            return user_input ? &desc()->diff_dst_desc : &diff_dst_md_;
        return &glob_zero_md;
    }
    const memory_desc_t *diff_src_md(
            int index = 0, bool user_input = false) const override {
        if (index == 0)
            return user_input ? &desc()->diff_src_desc : &diff_src_md_;
        return &glob_zero_md;
    }

    int n_inputs() const override { return 2; }
    int n_outputs() const override { return 1; }

    static bool eltwise_preserves_zero(
            alg_kind_t alg, float alpha, float beta) {
        // Unlike forward counterpart, bwd works on two tensors (with same formats)
        // and if alg moves zero to non-zero, it's fine, because diff_dst will
        // still have zeros in padding and multiplication of zero and non-zero
        // gives desired result. However, it doesn't work in case of special fp
        // values which are NaN or infinity which give NaN when multiplying on
        // zero, so excluding all those algs from here.
        using namespace alg_kind;
        using namespace utils;
        return one_of(alg, eltwise_abs, eltwise_clip, eltwise_clip_v2,
                       eltwise_elu, eltwise_exp, eltwise_gelu_erf,
                       eltwise_gelu_tanh, eltwise_hardsigmoid, eltwise_linear,
                       eltwise_logistic, eltwise_mish, eltwise_relu,
                       eltwise_soft_relu, eltwise_square, eltwise_swish,
                       eltwise_tanh)
                || one_of(alg, eltwise_elu_use_dst_for_bwd,
                        eltwise_exp_use_dst_for_bwd,
                        eltwise_logistic_use_dst_for_bwd,
                        eltwise_relu_use_dst_for_bwd,
                        eltwise_tanh_use_dst_for_bwd,
                        eltwise_clip_v2_use_dst_for_bwd)
                || (alg == eltwise_pow && beta >= 1);
    }

    bool is_zero_preserved() const {
        return eltwise_preserves_zero(desc_.alg_kind, desc_.alpha, desc_.beta);
    }

protected:
    memory_desc_t diff_src_md_;
    memory_desc_t diff_dst_md_;

    eltwise_bwd_pd_t(const eltwise_desc_t *adesc, const primitive_attr_t *attr,
            const eltwise_fwd_pd_t *hint_fwd_pd)
        : eltwise_pd_t(adesc, attr, hint_fwd_pd)
        , diff_src_md_(desc_.diff_src_desc)
        , diff_dst_md_(desc_.diff_dst_desc) {}

    // Resolve `any` formats for both diff tensors from the forward data
    // tensor's layout, keeping each tensor's own data type.
    bool set_default_formats_common() {
        return IMPLICATION(diff_dst_md_.format_kind == format_kind::any,
                       memory_desc_init_by_md_and_dt(
                               diff_dst_md_, *data_md(), diff_dst_md_.data_type)
                               == status::success)
                && IMPLICATION(diff_src_md_.format_kind == format_kind::any,
                        memory_desc_init_by_md_and_dt(diff_src_md_, *data_md(),
                                diff_src_md_.data_type)
                                == status::success);
    }
};
292
+
293
+ } // namespace impl
294
+ } // namespace dnnl
295
+
296
+ #endif
297
+
298
+ // vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/experimental.hpp ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2022 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+ #ifndef EXPERIMENTAL_HPP
17
+ #define EXPERIMENTAL_HPP
18
+
19
+ namespace dnnl {
20
+ namespace impl {
21
+ namespace experimental {
22
+
23
+ bool use_bnorm_stats_one_pass();
24
+
25
+ } // namespace experimental
26
+ } // namespace impl
27
+ } // namespace dnnl
28
+
29
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/gemm_pd.hpp ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2019-2023 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef COMMON_GEMM_PD_HPP
18
+ #define COMMON_GEMM_PD_HPP
19
+
20
+ #include "oneapi/dnnl/dnnl.h"
21
+
22
+ #include "common/c_types_map.hpp"
23
+ #include "common/gemm_utils.hpp"
24
+ #include "common/primitive_desc.hpp"
25
+ #include "common/utils.hpp"
26
+
27
+ namespace dnnl {
28
+ namespace impl {
29
+
30
+ struct gemm_pd_t : public primitive_desc_t {
31
+ static constexpr auto base_pkind = primitive_kind::gemm;
32
+
33
+ typedef gemm_pd_t base_class;
34
+ typedef gemm_pd_t hint_class;
35
+
36
+ const gemm_desc_t *desc() const { return &desc_; }
37
+ const op_desc_t *op_desc() const override {
38
+ return reinterpret_cast<const op_desc_t *>(this->desc());
39
+ }
40
+
41
+ arg_usage_t arg_usage(int arg) const override {
42
+ if (utils::one_of(arg, DNNL_ARG_SRC_0, DNNL_ARG_SRC_1))
43
+ return arg_usage_t::input;
44
+
45
+ if (arg == DNNL_ARG_DST) return arg_usage_t::output;
46
+
47
+ return primitive_desc_t::arg_usage(arg);
48
+ }
49
+
50
+ const memory_desc_t *arg_md(
51
+ int arg, bool user_input = false) const override {
52
+ switch (arg) {
53
+ case DNNL_ARG_SRC_0: return src_md(0);
54
+ case DNNL_ARG_SRC_1: return src_md(1);
55
+ case DNNL_ARG_BIAS: return src_md(2);
56
+ case DNNL_ARG_DST: return dst_md(0, user_input);
57
+ default: return primitive_desc_t::arg_md(arg);
58
+ }
59
+ }
60
+
61
+ const memory_desc_t *src_md(
62
+ int index = 0, bool user_input = false) const override {
63
+ switch (index) {
64
+ case 0: return &desc_.a_desc;
65
+ case 1: return &desc_.b_desc;
66
+ case 2: return &desc_.bias_desc;
67
+ default: return &glob_zero_md;
68
+ }
69
+ }
70
+ const memory_desc_t *dst_md(
71
+ int index = 0, bool user_input = false) const override {
72
+ return index == 0 ? &desc_.c_desc : &glob_zero_md;
73
+ }
74
+ bool with_bias() const { return desc_.bias_desc.ndims != 0; }
75
+
76
+ int n_inputs() const override { return 2; }
77
+ int n_outputs() const override { return 1; }
78
+
79
+ protected:
80
+ // Note: we do not copy memory desc locally to avoid
81
+ // overheads. This means we lose the users memory descs when we
82
+ // resolve the 'any' tags.
83
+ gemm_desc_t desc_;
84
+
85
+ gemm_pd_t(const gemm_desc_t *adesc, const primitive_attr_t *attr,
86
+ const hint_class *hint_fwd_pd)
87
+ : primitive_desc_t(attr, base_pkind), desc_(*adesc) {}
88
+
89
+ // By default, we just resolve 'any' with blocked layout and trivial strides
90
+ bool set_default_format(memory_desc_t *md) {
91
+ memory_desc_wrapper mdw(md);
92
+ if (mdw.format_any()) {
93
+ if (mdw.has_runtime_dims_or_strides()) return false;
94
+ status_t status = memory_desc_init_by_strides(*md, nullptr);
95
+ if (status != status::success) return false;
96
+ }
97
+
98
+ return true;
99
+ }
100
+
101
+ bool set_default_formats() {
102
+ bool ok = true;
103
+
104
+ for (auto md : {&desc_.a_desc, &desc_.b_desc, &desc_.bias_desc,
105
+ &desc_.c_desc}) {
106
+ ok = ok && set_default_format(md);
107
+ }
108
+
109
+ auto status = attr_.post_ops_.set_default_formats(&desc_.c_desc);
110
+ ok = ok && (status == status::success);
111
+
112
+ return ok;
113
+ }
114
+ };
115
+
116
+ } // namespace impl
117
+ } // namespace dnnl
118
+
119
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/gemm_types.hpp ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2019-2023 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef COMMON_GEMM_TYPES_HPP
18
+ #define COMMON_GEMM_TYPES_HPP
19
+
20
+ #include <assert.h>
21
+ #include "common/c_types_map.hpp"
22
+ #include "common/memory_desc.hpp"
23
+
24
+ namespace dnnl {
25
+ namespace impl {
26
+
27
+ enum transpose_t { dnnl_notrans, dnnl_trans };
28
+
29
+ namespace transpose {
30
+ const transpose_t notrans = dnnl_notrans;
31
+ const transpose_t trans = dnnl_trans;
32
+ } // namespace transpose
33
+
34
+ enum offsetc_t { dnnl_fixed, dnnl_column, dnnl_row };
35
+
36
+ namespace offsetc {
37
+ const offsetc_t fixed = dnnl_fixed;
38
+ const offsetc_t column = dnnl_column;
39
+ const offsetc_t row = dnnl_row;
40
+ } // namespace offsetc
41
+
42
+ enum sum_ab_t { dnnl_sum_a_row, dnnl_sum_b_col, dnnl_sum_none };
43
+ namespace sum_ab {
44
+ const sum_ab_t sum_a_row = dnnl_sum_a_row;
45
+ const sum_ab_t sum_b_col = dnnl_sum_b_col;
46
+ const sum_ab_t sum_none = dnnl_sum_none;
47
+ } // namespace sum_ab
48
+
49
+ // A descriptor for a matrix multiplication (gemm) operation. To make the
50
+ // interface consistent, the descriptor represent the GEMM operation in row
51
+ // major.
52
+ struct gemm_desc_t {
53
+ // The kind of primitive. Used for self identifying the primitive
54
+ // descriptor. Must be #dnnl_gemm.
55
+ dnnl_primitive_kind_t primitive_kind;
56
+ memory_desc_t a_desc;
57
+ memory_desc_t b_desc;
58
+ memory_desc_t c_desc;
59
+ memory_desc_t bias_desc;
60
+ // Type for accumulating A*B.
61
+ dnnl_data_type_t acc_type;
62
+ // Sum across k dimension in either A or B tensor
63
+ // and output to sum_ab tensor.
64
+ sum_ab_t sum_ab;
65
+ dnnl_data_type_t sum_ab_type;
66
+
67
+ // These accessors are to be used by the GEMM implementation. Because the
68
+ // GEMM implementation currently assumes column major. These accessors
69
+ // return data in column major fashion.
70
+
71
+ inline bool is_batched() const { return c_desc.ndims >= 3; }
72
+
73
+ // Simplified accessors that comply to GEMM API
74
+ transpose_t get_trans(const memory_desc_t &md) const {
75
+ return md.format_desc.blocking.strides[md.ndims - 1] != 1
76
+ ? transpose::trans
77
+ : transpose::notrans;
78
+ }
79
+ transpose_t transa() const { return get_trans(b_desc); };
80
+ transpose_t transb() const { return get_trans(a_desc); };
81
+ transpose_t trans_bias() const { return get_trans(bias_desc); }
82
+
83
+ dnnl_dim_t batch() const {
84
+ // if ndims < 3, it should return 1
85
+ int64_t batch = 1;
86
+ for (int i = 0; i < c_desc.ndims - 2; ++i) {
87
+ if (c_desc.dims[i] == DNNL_RUNTIME_DIM_VAL)
88
+ return DNNL_RUNTIME_DIM_VAL;
89
+ batch *= c_desc.dims[i];
90
+ }
91
+ return batch;
92
+ }
93
+
94
+ // Number of rows of C.
95
+ dnnl_dim_t m() const { return c_desc.dims[c_desc.ndims - 1]; }
96
+ // Number of columns of C.
97
+ dnnl_dim_t n() const { return c_desc.dims[c_desc.ndims - 2]; }
98
+ // Size of inner dimension shared between A and B.
99
+ dnnl_dim_t k() const { return a_desc.dims[a_desc.ndims - 1]; }
100
+
101
+ static dnnl_dim_t get_stride(const memory_desc_t &md, int dim = 0) {
102
+ return (dim >= md.ndims - 2 || md.dims[dim] == 1)
103
+ ? 0
104
+ : md.format_desc.blocking.strides[dim];
105
+ }
106
+
107
+ /** Stride between 2 matrices A in a batch. */
108
+ dnnl_dim_t stride_a(int dim = 0) const { return get_stride(b_desc, dim); };
109
+ /** Stride between 2 matrices B in a batch. */
110
+ dnnl_dim_t stride_b(int dim = 0) const { return get_stride(a_desc, dim); };
111
+ /** Stride between 2 matrices C in a batch. */
112
+ dnnl_dim_t stride_c(int dim = 0) const { return get_stride(c_desc, dim); };
113
+
114
+ // This assumes that one of the dimensions has strides 1
115
+ static dnnl_dim_t get_ld(const memory_desc_t &md) {
116
+ auto strides = md.format_desc.blocking.strides;
117
+ assert(strides[md.ndims - 1] == 1 || strides[md.ndims - 2] == 1);
118
+ return strides[md.ndims - 1] != 1 ? strides[md.ndims - 1]
119
+ : strides[md.ndims - 2];
120
+ }
121
+ // Leading dimension of A.
122
+ dnnl_dim_t lda() const { return get_ld(b_desc); }
123
+ // Leading dimension of B.
124
+ dnnl_dim_t ldb() const { return get_ld(a_desc); }
125
+ // Leading dimension of C.
126
+ dnnl_dim_t ldc() const { return get_ld(c_desc); }
127
+ /** Leading dimension of bias. */
128
+ dnnl_dim_t ld_bias() const { return get_ld(bias_desc); }
129
+
130
+ // Type of matrix A.
131
+ dnnl_data_type_t a_type() const { return b_desc.data_type; }
132
+ // Type of matrix B.
133
+ dnnl_data_type_t b_type() const { return a_desc.data_type; }
134
+ // Type of matrix C.
135
+ dnnl_data_type_t c_type() const { return c_desc.data_type; }
136
+ // Type of bias.
137
+ dnnl_data_type_t bias_type() const { return bias_desc.data_type; }
138
+ // Type of bias.
139
+ int bias_mask() const {
140
+ assert(bias_desc.ndims <= 3);
141
+ int mask = 0;
142
+ // TODO: update the mask for batched dimension if we start
143
+ // supporting more batch dimensions
144
+ if (is_batched()) mask |= (bias_desc.dims[0] > 1) ? 1 << 0 : 0;
145
+
146
+ // because the bias mask is in row major, we have to convert
147
+ // to col major here by swapping two last dimensions
148
+ int m_idx = is_batched();
149
+ mask |= (bias_desc.dims[m_idx] > 1) ? 1 << (bias_desc.ndims - m_idx)
150
+ : 0;
151
+ mask |= (bias_desc.dims[m_idx + 1] > 1)
152
+ ? 1 << (bias_desc.ndims - (m_idx + 1))
153
+ : 0;
154
+ return mask;
155
+ }
156
+ };
157
+
158
+ } // namespace impl
159
+ } // namespace dnnl
160
+
161
+ #endif // COMMON_GEMM_TYPES_HPP
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/impl_registration.hpp ADDED
@@ -0,0 +1,235 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2021-2023 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef COMMON_IMPL_REGISTRATION_HPP
18
+ #define COMMON_IMPL_REGISTRATION_HPP
19
+
20
+ #include "oneapi/dnnl/dnnl_config.h"
21
+
22
+ // Workload section
23
+
24
+ // Note: REG_BWD_D_PK is a dedicated macro for deconv to enable bwd_d conv.
25
+ #if BUILD_TRAINING
26
+ #define REG_BWD_PK(...) __VA_ARGS__
27
+ #define REG_BWD_D_PK(...) __VA_ARGS__
28
+ #else
29
+ #define REG_BWD_PK(...) \
30
+ { nullptr }
31
+ #define REG_BWD_D_PK(...) \
32
+ { nullptr }
33
+ #endif
34
+
35
+ // Primitives section
36
+
37
+ // Note:
38
+ // `_P` is a mandatory suffix for macros. This is to avoid a conflict with
39
+ // `REG_BINARY`, Windows-defined macro.
40
+
41
+ #if BUILD_PRIMITIVE_ALL || BUILD_BATCH_NORMALIZATION
42
+ #define REG_BNORM_P(...) __VA_ARGS__
43
+ #else
44
+ #define REG_BNORM_P(...) \
45
+ {}
46
+ #endif
47
+
48
+ #if BUILD_PRIMITIVE_ALL || BUILD_BINARY
49
+ #define REG_BINARY_P(...) __VA_ARGS__
50
+ #else
51
+ #define REG_BINARY_P(...) \
52
+ { nullptr }
53
+ #endif
54
+
55
+ #if BUILD_PRIMITIVE_ALL || BUILD_CONCAT
56
+ #define REG_CONCAT_P(...) __VA_ARGS__
57
+ #else
58
+ #define REG_CONCAT_P(...) \
59
+ { nullptr }
60
+ #endif
61
+
62
+ #if BUILD_PRIMITIVE_ALL || BUILD_CONVOLUTION
63
+ #define REG_CONV_P(...) __VA_ARGS__
64
+ #else
65
+ #define REG_CONV_P(...) \
66
+ {}
67
+ #endif
68
+
69
+ #if BUILD_PRIMITIVE_ALL || BUILD_DECONVOLUTION
70
+ #define REG_DECONV_P(...) __VA_ARGS__
71
+ // This case is special, it requires handling of convolution_bwd_d internally
72
+ // since major optimizations are based on convolution implementations.
73
+ #ifndef REG_CONV_P
74
+ #error "REG_CONV_P is not defined. Check that convolution is defined prior deconvolution."
75
+ #else
76
+ #undef REG_CONV_P
77
+ #define REG_CONV_P(...) __VA_ARGS__
78
+ #endif
79
+
80
+ #ifndef REG_BWD_D_PK
81
+ #error "REG_BWD_D_PK is not defined. Dedicated macro was not enabled."
82
+ #else
83
+ #undef REG_BWD_D_PK
84
+ #define REG_BWD_D_PK(...) __VA_ARGS__
85
+ #endif
86
+
87
+ #else // BUILD_PRIMITIVE_ALL || BUILD_DECONVOLUTION
88
+ #define REG_DECONV_P(...) \
89
+ {}
90
+ #endif
91
+
92
+ #if BUILD_PRIMITIVE_ALL || BUILD_ELTWISE
93
+ #define REG_ELTWISE_P(...) __VA_ARGS__
94
+ #else
95
+ #define REG_ELTWISE_P(...) \
96
+ {}
97
+ #endif
98
+
99
+ #if BUILD_PRIMITIVE_ALL || BUILD_GROUP_NORMALIZATION
100
+ #define REG_GNORM_P(...) __VA_ARGS__
101
+ #else
102
+ #define REG_GNORM_P(...) \
103
+ {}
104
+ #endif
105
+
106
+ #if BUILD_PRIMITIVE_ALL || BUILD_INNER_PRODUCT
107
+ #define REG_IP_P(...) __VA_ARGS__
108
+ #else
109
+ #define REG_IP_P(...) \
110
+ {}
111
+ #endif
112
+
113
+ #if BUILD_PRIMITIVE_ALL || BUILD_LAYER_NORMALIZATION
114
+ #define REG_LNORM_P(...) __VA_ARGS__
115
+ #else
116
+ #define REG_LNORM_P(...) \
117
+ {}
118
+ #endif
119
+
120
+ #if BUILD_PRIMITIVE_ALL || BUILD_LRN
121
+ #define REG_LRN_P(...) __VA_ARGS__
122
+ #else
123
+ #define REG_LRN_P(...) \
124
+ {}
125
+ #endif
126
+
127
+ #if BUILD_PRIMITIVE_ALL || BUILD_MATMUL
128
+ #define REG_MATMUL_P(...) __VA_ARGS__
129
+ #else
130
+ #define REG_MATMUL_P(...) \
131
+ { nullptr }
132
+ #endif
133
+
134
+ #if BUILD_PRIMITIVE_ALL || BUILD_POOLING
135
+ #define REG_POOLING_P(...) __VA_ARGS__
136
+ #else
137
+ #define REG_POOLING_P(...) \
138
+ {}
139
+ #endif
140
+
141
+ #if BUILD_PRIMITIVE_ALL || BUILD_PRELU
142
+ #define REG_PRELU_P(...) __VA_ARGS__
143
+ #else
144
+ #define REG_PRELU_P(...) \
145
+ {}
146
+ #endif
147
+
148
+ #if BUILD_PRIMITIVE_ALL || BUILD_REDUCTION
149
+ #define REG_REDUCTION_P(...) __VA_ARGS__
150
+ #else
151
+ #define REG_REDUCTION_P(...) \
152
+ { nullptr }
153
+ #endif
154
+
155
+ #if BUILD_PRIMITIVE_ALL || BUILD_REORDER
156
+ #define REG_REORDER_P(...) __VA_ARGS__
157
+ #else
158
+ #define REG_REORDER_P(...) \
159
+ {}
160
+ #endif
161
+
162
+ #if BUILD_PRIMITIVE_ALL || BUILD_RESAMPLING
163
+ #define REG_RESAMPLING_P(...) __VA_ARGS__
164
+ #else
165
+ #define REG_RESAMPLING_P(...) \
166
+ {}
167
+ #endif
168
+
169
+ #if BUILD_PRIMITIVE_ALL || BUILD_RNN
170
+ #define REG_RNN_P(...) __VA_ARGS__
171
+ #else
172
+ #define REG_RNN_P(...) \
173
+ {}
174
+ #endif
175
+
176
+ #if BUILD_PRIMITIVE_ALL || BUILD_SHUFFLE
177
+ #define REG_SHUFFLE_P(...) __VA_ARGS__
178
+ #else
179
+ #define REG_SHUFFLE_P(...) \
180
+ { nullptr }
181
+ #endif
182
+
183
+ #if BUILD_PRIMITIVE_ALL || BUILD_SOFTMAX
184
+ #define REG_SOFTMAX_P(...) __VA_ARGS__
185
+ #else
186
+ #define REG_SOFTMAX_P(...) \
187
+ {}
188
+ #endif
189
+
190
+ #if BUILD_PRIMITIVE_ALL || BUILD_SUM
191
+ #define REG_SUM_P(...) __VA_ARGS__
192
+ #else
193
+ #define REG_SUM_P(...) \
194
+ { nullptr }
195
+ #endif
196
+
197
+ // Primitive CPU ISA section is in src/cpu/platform.hpp
198
+
199
+ #if BUILD_PRIMITIVE_GPU_ISA_ALL || BUILD_GEN9
200
+ #define REG_GEN9_ISA(...) __VA_ARGS__
201
+ #else
202
+ #define REG_GEN9_ISA(...)
203
+ #endif
204
+
205
+ #if BUILD_PRIMITIVE_GPU_ISA_ALL || BUILD_GEN11
206
+ #define REG_GEN11_ISA(...) __VA_ARGS__
207
+ #else
208
+ #define REG_GEN11_ISA(...)
209
+ #endif
210
+
211
+ #if BUILD_PRIMITIVE_GPU_ISA_ALL || BUILD_XELP
212
+ #define REG_XELP_ISA(...) __VA_ARGS__
213
+ #else
214
+ #define REG_XELP_ISA(...)
215
+ #endif
216
+
217
+ #if BUILD_PRIMITIVE_GPU_ISA_ALL || BUILD_XEHP
218
+ #define REG_XEHP_ISA(...) __VA_ARGS__
219
+ #else
220
+ #define REG_XEHP_ISA(...)
221
+ #endif
222
+
223
+ #if BUILD_PRIMITIVE_GPU_ISA_ALL || BUILD_XEHPG
224
+ #define REG_XEHPG_ISA(...) __VA_ARGS__
225
+ #else
226
+ #define REG_XEHPG_ISA(...)
227
+ #endif
228
+
229
+ #if BUILD_PRIMITIVE_GPU_ISA_ALL || BUILD_XEHPC
230
+ #define REG_XEHPC_ISA(...) __VA_ARGS__
231
+ #else
232
+ #define REG_XEHPC_ISA(...)
233
+ #endif
234
+
235
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/internal_defs.hpp ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef COMMON_INTERNAL_DEFS_HPP
18
+ #define COMMON_INTERNAL_DEFS_HPP
19
+
20
+ #if defined(DNNL_DLL)
21
+ #define DNNL_WEAK DNNL_HELPER_DLL_EXPORT
22
+ #else
23
+ #if defined(__GNUC__) || defined(__clang__)
24
+ #define DNNL_WEAK __attribute__((weak))
25
+ #else
26
+ #define DNNL_WEAK
27
+ #endif
28
+ #endif
29
+
30
+ #if defined(DNNL_DLL)
31
+ #define DNNL_STRONG DNNL_HELPER_DLL_EXPORT
32
+ #else
33
+ #define DNNL_STRONG
34
+ #endif
35
+
36
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/ittnotify/disable_warnings.h ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (C) 2005-2019 Intel Corporation
3
+
4
+ SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
5
+ */
6
+
7
+ #include "ittnotify_config.h"
8
+
9
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
10
+
11
+ #if defined _MSC_VER
12
+
13
+ #pragma warning (disable: 593) /* parameter "XXXX" was set but never used */
14
+ #pragma warning (disable: 344) /* typedef name has already been declared (with same type) */
15
+ #pragma warning (disable: 174) /* expression has no effect */
16
+ #pragma warning (disable: 4127) /* conditional expression is constant */
17
+ #pragma warning (disable: 4306) /* conversion from '?' to '?' of greater size */
18
+
19
+ #endif
20
+
21
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
22
+
23
+ #if defined __INTEL_COMPILER
24
+
25
+ #pragma warning (disable: 869) /* parameter "XXXXX" was never referenced */
26
+ #pragma warning (disable: 1418) /* external function definition with no prior declaration */
27
+ #pragma warning (disable: 1419) /* external declaration in primary source file */
28
+
29
+ #endif /* __INTEL_COMPILER */
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/ittnotify/ittnotify.h ADDED
The diff for this file is too large to render. See raw diff
 
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/ittnotify/ittnotify_config.h ADDED
@@ -0,0 +1,667 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (C) 2005-2019 Intel Corporation
3
+
4
+ SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
5
+ */
6
+ #ifndef _ITTNOTIFY_CONFIG_H_
7
+ #define _ITTNOTIFY_CONFIG_H_
8
+
9
+ /** @cond exclude_from_documentation */
10
+ #ifndef ITT_OS_WIN
11
+ # define ITT_OS_WIN 1
12
+ #endif /* ITT_OS_WIN */
13
+
14
+ #ifndef ITT_OS_LINUX
15
+ # define ITT_OS_LINUX 2
16
+ #endif /* ITT_OS_LINUX */
17
+
18
+ #ifndef ITT_OS_MAC
19
+ # define ITT_OS_MAC 3
20
+ #endif /* ITT_OS_MAC */
21
+
22
+ #ifndef ITT_OS_FREEBSD
23
+ # define ITT_OS_FREEBSD 4
24
+ #endif /* ITT_OS_FREEBSD */
25
+
26
+ #ifndef ITT_OS
27
+ # if defined WIN32 || defined _WIN32
28
+ # define ITT_OS ITT_OS_WIN
29
+ # elif defined( __APPLE__ ) && defined( __MACH__ )
30
+ # define ITT_OS ITT_OS_MAC
31
+ # elif defined( __FreeBSD__ )
32
+ # define ITT_OS ITT_OS_FREEBSD
33
+ # else
34
+ # define ITT_OS ITT_OS_LINUX
35
+ # endif
36
+ #endif /* ITT_OS */
37
+
38
+ #ifndef ITT_PLATFORM_WIN
39
+ # define ITT_PLATFORM_WIN 1
40
+ #endif /* ITT_PLATFORM_WIN */
41
+
42
+ #ifndef ITT_PLATFORM_POSIX
43
+ # define ITT_PLATFORM_POSIX 2
44
+ #endif /* ITT_PLATFORM_POSIX */
45
+
46
+ #ifndef ITT_PLATFORM_MAC
47
+ # define ITT_PLATFORM_MAC 3
48
+ #endif /* ITT_PLATFORM_MAC */
49
+
50
+ #ifndef ITT_PLATFORM_FREEBSD
51
+ # define ITT_PLATFORM_FREEBSD 4
52
+ #endif /* ITT_PLATFORM_FREEBSD */
53
+
54
+ #ifndef ITT_PLATFORM
55
+ # if ITT_OS==ITT_OS_WIN
56
+ # define ITT_PLATFORM ITT_PLATFORM_WIN
57
+ # elif ITT_OS==ITT_OS_MAC
58
+ # define ITT_PLATFORM ITT_PLATFORM_MAC
59
+ # elif ITT_OS==ITT_OS_FREEBSD
60
+ # define ITT_PLATFORM ITT_PLATFORM_FREEBSD
61
+ # else
62
+ # define ITT_PLATFORM ITT_PLATFORM_POSIX
63
+ # endif
64
+ #endif /* ITT_PLATFORM */
65
+
66
+ #if defined(_UNICODE) && !defined(UNICODE)
67
+ #define UNICODE
68
+ #endif
69
+
70
+ #include <stddef.h>
71
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
72
+ #include <tchar.h>
73
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
74
+ #include <stdint.h>
75
+ #if defined(UNICODE) || defined(_UNICODE)
76
+ #include <wchar.h>
77
+ #endif /* UNICODE || _UNICODE */
78
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
79
+
80
+ #ifndef ITTAPI_CDECL
81
+ # if ITT_PLATFORM==ITT_PLATFORM_WIN
82
+ # define ITTAPI_CDECL __cdecl
83
+ # else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
84
+ # if defined _M_IX86 || defined __i386__
85
+ # define ITTAPI_CDECL __attribute__ ((cdecl))
86
+ # else /* _M_IX86 || __i386__ */
87
+ # define ITTAPI_CDECL /* actual only on x86 platform */
88
+ # endif /* _M_IX86 || __i386__ */
89
+ # endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
90
+ #endif /* ITTAPI_CDECL */
91
+
92
+ #ifndef STDCALL
93
+ # if ITT_PLATFORM==ITT_PLATFORM_WIN
94
+ # define STDCALL __stdcall
95
+ # else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
96
+ # if defined _M_IX86 || defined __i386__
97
+ # define STDCALL __attribute__ ((stdcall))
98
+ # else /* _M_IX86 || __i386__ */
99
+ # define STDCALL /* supported only on x86 platform */
100
+ # endif /* _M_IX86 || __i386__ */
101
+ # endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
102
+ #endif /* STDCALL */
103
+
104
+ #define ITTAPI ITTAPI_CDECL
105
+ #define LIBITTAPI ITTAPI_CDECL
106
+
107
+ /* TODO: Temporary for compatibility! */
108
+ #define ITTAPI_CALL ITTAPI_CDECL
109
+ #define LIBITTAPI_CALL ITTAPI_CDECL
110
+
111
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
112
+ /* use __forceinline (VC++ specific) */
113
+ #if defined(__MINGW32__) && !defined(__cplusplus)
114
+ #define ITT_INLINE static __inline__ __attribute__((__always_inline__,__gnu_inline__))
115
+ #else
116
+ #define ITT_INLINE static __forceinline
117
+ #endif /* __MINGW32__ */
118
+
119
+ #define ITT_INLINE_ATTRIBUTE /* nothing */
120
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
121
+ /*
122
+ * Generally, functions are not inlined unless optimization is specified.
123
+ * For functions declared inline, this attribute inlines the function even
124
+ * if no optimization level was specified.
125
+ */
126
+ #ifdef __STRICT_ANSI__
127
+ #define ITT_INLINE static
128
+ #define ITT_INLINE_ATTRIBUTE __attribute__((unused))
129
+ #else /* __STRICT_ANSI__ */
130
+ #define ITT_INLINE static inline
131
+ #define ITT_INLINE_ATTRIBUTE __attribute__((always_inline, unused))
132
+ #endif /* __STRICT_ANSI__ */
133
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
134
+ /** @endcond */
135
+
136
+ #ifndef ITT_ARCH_IA32
137
+ # define ITT_ARCH_IA32 1
138
+ #endif /* ITT_ARCH_IA32 */
139
+
140
+ #ifndef ITT_ARCH_IA32E
141
+ # define ITT_ARCH_IA32E 2
142
+ #endif /* ITT_ARCH_IA32E */
143
+
144
+ #ifndef ITT_ARCH_IA64
145
+ # define ITT_ARCH_IA64 3
146
+ #endif /* ITT_ARCH_IA64 */
147
+
148
+ #ifndef ITT_ARCH_ARM
149
+ # define ITT_ARCH_ARM 4
150
+ #endif /* ITT_ARCH_ARM */
151
+
152
+ #ifndef ITT_ARCH_PPC64
153
+ # define ITT_ARCH_PPC64 5
154
+ #endif /* ITT_ARCH_PPC64 */
155
+
156
+ #ifndef ITT_ARCH_ARM64
157
+ # define ITT_ARCH_ARM64 6
158
+ #endif /* ITT_ARCH_ARM64 */
159
+
160
+ #ifndef ITT_ARCH
161
+ # if defined _M_IX86 || defined __i386__
162
+ # define ITT_ARCH ITT_ARCH_IA32
163
+ # elif defined _M_X64 || defined _M_AMD64 || defined __x86_64__
164
+ # define ITT_ARCH ITT_ARCH_IA32E
165
+ # elif defined _M_IA64 || defined __ia64__
166
+ # define ITT_ARCH ITT_ARCH_IA64
167
+ # elif defined _M_ARM || defined __arm__
168
+ # define ITT_ARCH ITT_ARCH_ARM
169
+ # elif defined __aarch64__
170
+ # define ITT_ARCH ITT_ARCH_ARM64
171
+ # elif defined __powerpc64__
172
+ # define ITT_ARCH ITT_ARCH_PPC64
173
+ # endif
174
+ #endif
175
+
176
+ #ifdef __cplusplus
177
+ # define ITT_EXTERN_C extern "C"
178
+ # define ITT_EXTERN_C_BEGIN extern "C" {
179
+ # define ITT_EXTERN_C_END }
180
+ #else
181
+ # define ITT_EXTERN_C /* nothing */
182
+ # define ITT_EXTERN_C_BEGIN /* nothing */
183
+ # define ITT_EXTERN_C_END /* nothing */
184
+ #endif /* __cplusplus */
185
+
186
+ #define ITT_TO_STR_AUX(x) #x
187
+ #define ITT_TO_STR(x) ITT_TO_STR_AUX(x)
188
+
189
+ #define __ITT_BUILD_ASSERT(expr, suffix) do { \
190
+ static char __itt_build_check_##suffix[(expr) ? 1 : -1]; \
191
+ __itt_build_check_##suffix[0] = 0; \
192
+ } while(0)
193
+ #define _ITT_BUILD_ASSERT(expr, suffix) __ITT_BUILD_ASSERT((expr), suffix)
194
+ #define ITT_BUILD_ASSERT(expr) _ITT_BUILD_ASSERT((expr), __LINE__)
195
+
196
+ #define ITT_MAGIC { 0xED, 0xAB, 0xAB, 0xEC, 0x0D, 0xEE, 0xDA, 0x30 }
197
+
198
+ /* Replace with snapshot date YYYYMMDD for promotion build. */
199
+ #define API_VERSION_BUILD 20180723
200
+
201
+ #ifndef API_VERSION_NUM
202
+ #define API_VERSION_NUM 3.23.0
203
+ #endif /* API_VERSION_NUM */
204
+
205
+ #define API_VERSION "ITT-API-Version " ITT_TO_STR(API_VERSION_NUM) \
206
+ " (" ITT_TO_STR(API_VERSION_BUILD) ")"
207
+
208
+ /* OS communication functions */
209
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
210
+ #include <windows.h>
211
+ typedef HMODULE lib_t;
212
+ typedef DWORD TIDT;
213
+ typedef CRITICAL_SECTION mutex_t;
214
+ #ifdef __cplusplus
215
+ #define MUTEX_INITIALIZER {}
216
+ #else
217
+ #define MUTEX_INITIALIZER { 0 }
218
+ #endif
219
+ #define strong_alias(name, aliasname) /* empty for Windows */
220
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
221
+ #include <dlfcn.h>
222
+ #if defined(UNICODE) || defined(_UNICODE)
223
+ #include <wchar.h>
224
+ #endif /* UNICODE */
225
+ #ifndef _GNU_SOURCE
226
+ #define _GNU_SOURCE 1 /* need for PTHREAD_MUTEX_RECURSIVE */
227
+ #endif /* _GNU_SOURCE */
228
+ #ifndef __USE_UNIX98
229
+ #define __USE_UNIX98 1 /* need for PTHREAD_MUTEX_RECURSIVE, on SLES11.1 with gcc 4.3.4 wherein pthread.h missing dependency on __USE_XOPEN2K8 */
230
+ #endif /*__USE_UNIX98*/
231
+ #include <pthread.h>
232
+ typedef void* lib_t;
233
+ typedef pthread_t TIDT;
234
+ typedef pthread_mutex_t mutex_t;
235
+ #define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
236
+ #define _strong_alias(name, aliasname) \
237
+ extern __typeof (name) aliasname __attribute__ ((alias (#name)));
238
+ #define strong_alias(name, aliasname) _strong_alias(name, aliasname)
239
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
240
+
241
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
242
+ #define __itt_get_proc(lib, name) GetProcAddress(lib, name)
243
+ #define __itt_mutex_init(mutex) InitializeCriticalSection(mutex)
244
+ #define __itt_mutex_lock(mutex) EnterCriticalSection(mutex)
245
+ #define __itt_mutex_unlock(mutex) LeaveCriticalSection(mutex)
246
+ #define __itt_mutex_destroy(mutex) DeleteCriticalSection(mutex)
247
+ #define __itt_load_lib(name) LoadLibraryA(name)
248
+ #define __itt_unload_lib(handle) FreeLibrary(handle)
249
+ #define __itt_system_error() (int)GetLastError()
250
+ #define __itt_fstrcmp(s1, s2) lstrcmpA(s1, s2)
251
+ #define __itt_fstrnlen(s, l) strnlen_s(s, l)
252
+ #define __itt_fstrcpyn(s1, b, s2, l) strncpy_s(s1, b, s2, l)
253
+ #define __itt_thread_id() GetCurrentThreadId()
254
+ #define __itt_thread_yield() SwitchToThread()
255
+ #ifndef ITT_SIMPLE_INIT
256
+ ITT_INLINE long
257
+ __itt_interlocked_increment(volatile long* ptr) ITT_INLINE_ATTRIBUTE;
258
+ ITT_INLINE long __itt_interlocked_increment(volatile long* ptr)
259
+ {
260
+ return InterlockedIncrement(ptr);
261
+ }
262
+ ITT_INLINE long
263
+ __itt_interlocked_compare_exchange(volatile long* ptr, long exchange, long comperand) ITT_INLINE_ATTRIBUTE;
264
+ ITT_INLINE long
265
+ __itt_interlocked_compare_exchange(volatile long* ptr, long exchange, long comperand)
266
+ {
267
+ return InterlockedCompareExchange(ptr, exchange, comperand);
268
+ }
269
+ #endif /* ITT_SIMPLE_INIT */
270
+
271
+ #define DL_SYMBOLS (1)
272
+ #define PTHREAD_SYMBOLS (1)
273
+
274
+ #else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
275
+ #define __itt_get_proc(lib, name) dlsym(lib, name)
276
+ #define __itt_mutex_init(mutex) {\
277
+ pthread_mutexattr_t mutex_attr; \
278
+ int error_code = pthread_mutexattr_init(&mutex_attr); \
279
+ if (error_code) \
280
+ __itt_report_error(__itt_error_system, "pthread_mutexattr_init", \
281
+ error_code); \
282
+ error_code = pthread_mutexattr_settype(&mutex_attr, \
283
+ PTHREAD_MUTEX_RECURSIVE); \
284
+ if (error_code) \
285
+ __itt_report_error(__itt_error_system, "pthread_mutexattr_settype", \
286
+ error_code); \
287
+ error_code = pthread_mutex_init(mutex, &mutex_attr); \
288
+ if (error_code) \
289
+ __itt_report_error(__itt_error_system, "pthread_mutex_init", \
290
+ error_code); \
291
+ error_code = pthread_mutexattr_destroy(&mutex_attr); \
292
+ if (error_code) \
293
+ __itt_report_error(__itt_error_system, "pthread_mutexattr_destroy", \
294
+ error_code); \
295
+ }
296
+ #define __itt_mutex_lock(mutex) pthread_mutex_lock(mutex)
297
+ #define __itt_mutex_unlock(mutex) pthread_mutex_unlock(mutex)
298
+ #define __itt_mutex_destroy(mutex) pthread_mutex_destroy(mutex)
299
+ #define __itt_load_lib(name) dlopen(name, RTLD_LAZY)
300
+ #define __itt_unload_lib(handle) dlclose(handle)
301
+ #define __itt_system_error() errno
302
+ #define __itt_fstrcmp(s1, s2) strcmp(s1, s2)
303
+
304
+ /* makes customer code define safe APIs for SDL_STRNLEN_S and SDL_STRNCPY_S */
305
+ #ifdef SDL_STRNLEN_S
306
+ #define __itt_fstrnlen(s, l) SDL_STRNLEN_S(s, l)
307
+ #else
308
+ #define __itt_fstrnlen(s, l) strlen(s)
309
+ #endif /* SDL_STRNLEN_S */
310
+ #ifdef SDL_STRNCPY_S
311
+ #define __itt_fstrcpyn(s1, b, s2, l) SDL_STRNCPY_S(s1, b, s2, l)
312
+ #else
313
+ #define __itt_fstrcpyn(s1, b, s2, l) { \
314
+ if (b > 0) { \
315
+ /* 'volatile' is used to suppress the warning that a destination */ \
316
+ /* bound depends on the length of the source. */ \
317
+ volatile size_t num_to_copy = (size_t)(b - 1) < (size_t)(l) ? \
318
+ (size_t)(b - 1) : (size_t)(l); \
319
+ strncpy(s1, s2, num_to_copy); \
320
+ s1[num_to_copy] = 0; \
321
+ } \
322
+ }
323
+ #endif /* SDL_STRNCPY_S */
324
+
325
+ #define __itt_thread_id() pthread_self()
326
+ #define __itt_thread_yield() sched_yield()
327
+ #if ITT_ARCH==ITT_ARCH_IA64
328
+ #ifdef __INTEL_COMPILER
329
+ #define __TBB_machine_fetchadd4(addr, val) __fetchadd4_acq((void *)addr, val)
330
+ #else /* __INTEL_COMPILER */
331
+ /* TODO: Add Support for not Intel compilers for IA-64 architecture */
332
+ #endif /* __INTEL_COMPILER */
333
+ #elif ITT_ARCH==ITT_ARCH_IA32 || ITT_ARCH==ITT_ARCH_IA32E /* ITT_ARCH!=ITT_ARCH_IA64 */
334
+ ITT_INLINE long
335
+ __TBB_machine_fetchadd4(volatile void* ptr, long addend) ITT_INLINE_ATTRIBUTE;
336
+ ITT_INLINE long __TBB_machine_fetchadd4(volatile void* ptr, long addend)
337
+ {
338
+ long result;
339
+ __asm__ __volatile__("lock\nxadd %0,%1"
340
+ : "=r"(result),"=m"(*(volatile int*)ptr)
341
+ : "0"(addend), "m"(*(volatile int*)ptr)
342
+ : "memory");
343
+ return result;
344
+ }
345
+ #else
346
+ #define __TBB_machine_fetchadd4(addr, val) __sync_fetch_and_add(addr, val)
347
+ #endif /* ITT_ARCH==ITT_ARCH_IA64 */
348
+ #ifndef ITT_SIMPLE_INIT
349
+ ITT_INLINE long
350
+ __itt_interlocked_increment(volatile long* ptr) ITT_INLINE_ATTRIBUTE;
351
+ ITT_INLINE long __itt_interlocked_increment(volatile long* ptr)
352
+ {
353
+ return __TBB_machine_fetchadd4(ptr, 1) + 1L;
354
+ }
355
+ ITT_INLINE long
356
+ __itt_interlocked_compare_exchange(volatile long* ptr, long exchange, long comperand) ITT_INLINE_ATTRIBUTE;
357
+ ITT_INLINE long
358
+ __itt_interlocked_compare_exchange(volatile long* ptr, long exchange, long comperand)
359
+ {
360
+ return __sync_val_compare_and_swap(ptr, exchange, comperand);
361
+ }
362
+ #endif /* ITT_SIMPLE_INIT */
363
+
364
+ void* dlopen(const char*, int) __attribute__((weak));
365
+ void* dlsym(void*, const char*) __attribute__((weak));
366
+ int dlclose(void*) __attribute__((weak));
367
+ #define DL_SYMBOLS (dlopen && dlsym && dlclose)
368
+
369
+ int pthread_mutex_init(pthread_mutex_t*, const pthread_mutexattr_t*) __attribute__((weak));
370
+ int pthread_mutex_lock(pthread_mutex_t*) __attribute__((weak));
371
+ int pthread_mutex_unlock(pthread_mutex_t*) __attribute__((weak));
372
+ int pthread_mutex_destroy(pthread_mutex_t*) __attribute__((weak));
373
+ int pthread_mutexattr_init(pthread_mutexattr_t*) __attribute__((weak));
374
+ int pthread_mutexattr_settype(pthread_mutexattr_t*, int) __attribute__((weak));
375
+ int pthread_mutexattr_destroy(pthread_mutexattr_t*) __attribute__((weak));
376
+ pthread_t pthread_self(void) __attribute__((weak));
377
+ #define PTHREAD_SYMBOLS (pthread_mutex_init && pthread_mutex_lock && pthread_mutex_unlock && pthread_mutex_destroy && pthread_mutexattr_init && pthread_mutexattr_settype && pthread_mutexattr_destroy && pthread_self)
378
+
379
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
380
+
381
+ /* strdup() is not included into C99 which results in a compiler warning about
382
+ * implicitly declared symbol. To avoid the issue strdup is implemented
383
+ * manually.
384
+ */
385
+ #define ITT_STRDUP_MAX_STRING_SIZE 4096
386
+ #define __itt_fstrdup(s, new_s) do { \
387
+ if (s != NULL) { \
388
+ size_t s_len = __itt_fstrnlen(s, ITT_STRDUP_MAX_STRING_SIZE); \
389
+ new_s = (char *)malloc(s_len + 1); \
390
+ if (new_s != NULL) { \
391
+ __itt_fstrcpyn(new_s, s_len + 1, s, s_len); \
392
+ } \
393
+ } \
394
+ } while(0)
395
+
396
+ typedef enum {
397
+ __itt_thread_normal = 0,
398
+ __itt_thread_ignored = 1
399
+ } __itt_thread_state;
400
+
401
+ #pragma pack(push, 8)
402
+
403
+ typedef struct ___itt_thread_info
404
+ {
405
+ const char* nameA; /*!< Copy of original name in ASCII. */
406
+ #if defined(UNICODE) || defined(_UNICODE)
407
+ const wchar_t* nameW; /*!< Copy of original name in UNICODE. */
408
+ #else /* UNICODE || _UNICODE */
409
+ void* nameW;
410
+ #endif /* UNICODE || _UNICODE */
411
+ TIDT tid;
412
+ __itt_thread_state state; /*!< Thread state (paused or normal) */
413
+ int extra1; /*!< Reserved to the runtime */
414
+ void* extra2; /*!< Reserved to the runtime */
415
+ struct ___itt_thread_info* next;
416
+ } __itt_thread_info;
417
+
418
+ #include "ittnotify_types.h" /* For __itt_group_id definition */
419
+
420
+ typedef struct ___itt_api_info_20101001
421
+ {
422
+ const char* name;
423
+ void** func_ptr;
424
+ void* init_func;
425
+ __itt_group_id group;
426
+ } __itt_api_info_20101001;
427
+
428
+ typedef struct ___itt_api_info
429
+ {
430
+ const char* name;
431
+ void** func_ptr;
432
+ void* init_func;
433
+ void* null_func;
434
+ __itt_group_id group;
435
+ } __itt_api_info;
436
+
437
+ typedef struct __itt_counter_info
438
+ {
439
+ const char* nameA; /*!< Copy of original name in ASCII. */
440
+ #if defined(UNICODE) || defined(_UNICODE)
441
+ const wchar_t* nameW; /*!< Copy of original name in UNICODE. */
442
+ #else /* UNICODE || _UNICODE */
443
+ void* nameW;
444
+ #endif /* UNICODE || _UNICODE */
445
+ const char* domainA; /*!< Copy of original name in ASCII. */
446
+ #if defined(UNICODE) || defined(_UNICODE)
447
+ const wchar_t* domainW; /*!< Copy of original name in UNICODE. */
448
+ #else /* UNICODE || _UNICODE */
449
+ void* domainW;
450
+ #endif /* UNICODE || _UNICODE */
451
+ int type;
452
+ long index;
453
+ int extra1; /*!< Reserved to the runtime */
454
+ void* extra2; /*!< Reserved to the runtime */
455
+ struct __itt_counter_info* next;
456
+ } __itt_counter_info_t;
457
+
458
+ struct ___itt_domain;
459
+ struct ___itt_string_handle;
460
+ struct ___itt_histogram;
461
+
462
+ #include "ittnotify.h"
463
+
464
+ typedef struct ___itt_global
465
+ {
466
+ unsigned char magic[8];
467
+ unsigned long version_major;
468
+ unsigned long version_minor;
469
+ unsigned long version_build;
470
+ volatile long api_initialized;
471
+ volatile long mutex_initialized;
472
+ volatile long atomic_counter;
473
+ mutex_t mutex;
474
+ lib_t lib;
475
+ void* error_handler;
476
+ const char** dll_path_ptr;
477
+ __itt_api_info* api_list_ptr;
478
+ struct ___itt_global* next;
479
+ /* Joinable structures below */
480
+ __itt_thread_info* thread_list;
481
+ struct ___itt_domain* domain_list;
482
+ struct ___itt_string_handle* string_list;
483
+ __itt_collection_state state;
484
+ __itt_counter_info_t* counter_list;
485
+ unsigned int ipt_collect_events;
486
+ struct ___itt_histogram* histogram_list;
487
+ } __itt_global;
488
+
489
+ #pragma pack(pop)
490
+
491
+ #define NEW_THREAD_INFO_W(gptr,h,h_tail,t,s,n) { \
492
+ h = (__itt_thread_info*)malloc(sizeof(__itt_thread_info)); \
493
+ if (h != NULL) { \
494
+ h->tid = t; \
495
+ h->nameA = NULL; \
496
+ h->nameW = n ? _wcsdup(n) : NULL; \
497
+ h->state = s; \
498
+ h->extra1 = 0; /* reserved */ \
499
+ h->extra2 = NULL; /* reserved */ \
500
+ h->next = NULL; \
501
+ if (h_tail == NULL) \
502
+ (gptr)->thread_list = h; \
503
+ else \
504
+ h_tail->next = h; \
505
+ } \
506
+ }
507
+
508
+ #define NEW_THREAD_INFO_A(gptr,h,h_tail,t,s,n) { \
509
+ h = (__itt_thread_info*)malloc(sizeof(__itt_thread_info)); \
510
+ if (h != NULL) { \
511
+ h->tid = t; \
512
+ char *n_copy = NULL; \
513
+ __itt_fstrdup(n, n_copy); \
514
+ h->nameA = n_copy; \
515
+ h->nameW = NULL; \
516
+ h->state = s; \
517
+ h->extra1 = 0; /* reserved */ \
518
+ h->extra2 = NULL; /* reserved */ \
519
+ h->next = NULL; \
520
+ if (h_tail == NULL) \
521
+ (gptr)->thread_list = h; \
522
+ else \
523
+ h_tail->next = h; \
524
+ } \
525
+ }
526
+
527
+ #define NEW_DOMAIN_W(gptr,h,h_tail,name) { \
528
+ h = (__itt_domain*)malloc(sizeof(__itt_domain)); \
529
+ if (h != NULL) { \
530
+ h->flags = 1; /* domain is enabled by default */ \
531
+ h->nameA = NULL; \
532
+ h->nameW = name ? _wcsdup(name) : NULL; \
533
+ h->extra1 = 0; /* reserved */ \
534
+ h->extra2 = NULL; /* reserved */ \
535
+ h->next = NULL; \
536
+ if (h_tail == NULL) \
537
+ (gptr)->domain_list = h; \
538
+ else \
539
+ h_tail->next = h; \
540
+ } \
541
+ }
542
+
543
+ #define NEW_DOMAIN_A(gptr,h,h_tail,name) { \
544
+ h = (__itt_domain*)malloc(sizeof(__itt_domain)); \
545
+ if (h != NULL) { \
546
+ h->flags = 1; /* domain is enabled by default */ \
547
+ char *name_copy = NULL; \
548
+ __itt_fstrdup(name, name_copy); \
549
+ h->nameA = name_copy; \
550
+ h->nameW = NULL; \
551
+ h->extra1 = 0; /* reserved */ \
552
+ h->extra2 = NULL; /* reserved */ \
553
+ h->next = NULL; \
554
+ if (h_tail == NULL) \
555
+ (gptr)->domain_list = h; \
556
+ else \
557
+ h_tail->next = h; \
558
+ } \
559
+ }
560
+
561
+ #define NEW_STRING_HANDLE_W(gptr,h,h_tail,name) { \
562
+ h = (__itt_string_handle*)malloc(sizeof(__itt_string_handle)); \
563
+ if (h != NULL) { \
564
+ h->strA = NULL; \
565
+ h->strW = name ? _wcsdup(name) : NULL; \
566
+ h->extra1 = 0; /* reserved */ \
567
+ h->extra2 = NULL; /* reserved */ \
568
+ h->next = NULL; \
569
+ if (h_tail == NULL) \
570
+ (gptr)->string_list = h; \
571
+ else \
572
+ h_tail->next = h; \
573
+ } \
574
+ }
575
+
576
+ #define NEW_STRING_HANDLE_A(gptr,h,h_tail,name) { \
577
+ h = (__itt_string_handle*)malloc(sizeof(__itt_string_handle)); \
578
+ if (h != NULL) { \
579
+ char *name_copy = NULL; \
580
+ __itt_fstrdup(name, name_copy); \
581
+ h->strA = name_copy; \
582
+ h->strW = NULL; \
583
+ h->extra1 = 0; /* reserved */ \
584
+ h->extra2 = NULL; /* reserved */ \
585
+ h->next = NULL; \
586
+ if (h_tail == NULL) \
587
+ (gptr)->string_list = h; \
588
+ else \
589
+ h_tail->next = h; \
590
+ } \
591
+ }
592
+
593
+ #define NEW_COUNTER_W(gptr,h,h_tail,name,domain,type) { \
594
+ h = (__itt_counter_info_t*)malloc(sizeof(__itt_counter_info_t)); \
595
+ if (h != NULL) { \
596
+ h->nameA = NULL; \
597
+ h->nameW = name ? _wcsdup(name) : NULL; \
598
+ h->domainA = NULL; \
599
+ h->domainW = name ? _wcsdup(domain) : NULL; \
600
+ h->type = (int)type; \
601
+ h->index = 0; \
602
+ h->next = NULL; \
603
+ if (h_tail == NULL) \
604
+ (gptr)->counter_list = h; \
605
+ else \
606
+ h_tail->next = h; \
607
+ } \
608
+ }
609
+
610
+ #define NEW_COUNTER_A(gptr,h,h_tail,name,domain,type) { \
611
+ h = (__itt_counter_info_t*)malloc(sizeof(__itt_counter_info_t)); \
612
+ if (h != NULL) { \
613
+ char *name_copy = NULL; \
614
+ __itt_fstrdup(name, name_copy); \
615
+ h->nameA = name_copy; \
616
+ h->nameW = NULL; \
617
+ char *domain_copy = NULL; \
618
+ __itt_fstrdup(domain, domain_copy); \
619
+ h->domainA = domain_copy; \
620
+ h->domainW = NULL; \
621
+ h->type = (int)type; \
622
+ h->index = 0; \
623
+ h->next = NULL; \
624
+ if (h_tail == NULL) \
625
+ (gptr)->counter_list = h; \
626
+ else \
627
+ h_tail->next = h; \
628
+ } \
629
+ }
630
+
631
+ #define NEW_HISTOGRAM_W(gptr,h,h_tail,domain,name,x_type,y_type) { \
632
+ h = (__itt_histogram*)malloc(sizeof(__itt_histogram)); \
633
+ if (h != NULL) { \
634
+ h->domain = domain; \
635
+ h->nameA = NULL; \
636
+ h->nameW = name ? _wcsdup(name) : NULL; \
637
+ h->x_type = x_type; \
638
+ h->y_type = y_type; \
639
+ h->extra1 = 0; \
640
+ h->extra2 = NULL; \
641
+ if (h_tail == NULL) \
642
+ (gptr)->histogram_list = h; \
643
+ else \
644
+ h_tail->next = h; \
645
+ } \
646
+ }
647
+
648
+ #define NEW_HISTOGRAM_A(gptr,h,h_tail,domain,name,x_type,y_type) { \
649
+ h = (__itt_histogram*)malloc(sizeof(__itt_histogram)); \
650
+ if (h != NULL) { \
651
+ h->domain = domain; \
652
+ char *name_copy = NULL; \
653
+ __itt_fstrdup(name, name_copy); \
654
+ h->nameA = name_copy; \
655
+ h->nameW = NULL; \
656
+ h->x_type = x_type; \
657
+ h->y_type = y_type; \
658
+ h->extra1 = 0; \
659
+ h->extra2 = NULL; \
660
+ if (h_tail == NULL) \
661
+ (gptr)->histogram_list = h; \
662
+ else \
663
+ h_tail->next = h; \
664
+ } \
665
+ }
666
+
667
+ #endif /* _ITTNOTIFY_CONFIG_H_ */
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/ittnotify/ittnotify_static.h ADDED
@@ -0,0 +1,355 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (C) 2005-2019 Intel Corporation
3
+
4
+ SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
5
+ */
6
+
7
+ #include "ittnotify_config.h"
8
+
9
+ #ifndef ITT_FORMAT_DEFINED
10
+ # ifndef ITT_FORMAT
11
+ # define ITT_FORMAT
12
+ # endif /* ITT_FORMAT */
13
+ # ifndef ITT_NO_PARAMS
14
+ # define ITT_NO_PARAMS
15
+ # endif /* ITT_NO_PARAMS */
16
+ #endif /* ITT_FORMAT_DEFINED */
17
+
18
+ /*
19
+ * parameters for macro expected:
20
+ * ITT_STUB(api, type, func_name, arguments, params, func_name_in_dll, group, printf_fmt)
21
+ */
22
+ #ifdef __ITT_INTERNAL_INIT
23
+
24
+ #ifndef __ITT_INTERNAL_BODY
25
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
26
+ ITT_STUB(ITTAPI, __itt_domain*, domain_createA, (const char *name), (ITT_FORMAT name), domain_createA, __itt_group_structure, "\"%s\"")
27
+ ITT_STUB(ITTAPI, __itt_domain*, domain_createW, (const wchar_t *name), (ITT_FORMAT name), domain_createW, __itt_group_structure, "\"%S\"")
28
+ #else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
29
+ ITT_STUB(ITTAPI, __itt_domain*, domain_create, (const char *name), (ITT_FORMAT name), domain_create, __itt_group_structure, "\"%s\"")
30
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
31
+
32
+ ITT_STUBV(ITTAPI, void, module_load_with_sections, (__itt_module_object* module_obj), (ITT_FORMAT module_obj), module_load_with_sections, __itt_group_module, "%p")
33
+ ITT_STUBV(ITTAPI, void, module_unload_with_sections, (__itt_module_object* module_obj), (ITT_FORMAT module_obj), module_unload_with_sections, __itt_group_module, "%p")
34
+
35
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
36
+ ITT_STUB(ITTAPI, __itt_string_handle*, string_handle_createA, (const char *name), (ITT_FORMAT name), string_handle_createA, __itt_group_structure, "\"%s\"")
37
+ ITT_STUB(ITTAPI, __itt_string_handle*, string_handle_createW, (const wchar_t *name), (ITT_FORMAT name), string_handle_createW, __itt_group_structure, "\"%S\"")
38
+ #else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
39
+ ITT_STUB(ITTAPI, __itt_string_handle*, string_handle_create, (const char *name), (ITT_FORMAT name), string_handle_create, __itt_group_structure, "\"%s\"")
40
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
41
+
42
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
43
+ ITT_STUB(ITTAPI, __itt_counter, counter_createA, (const char *name, const char *domain), (ITT_FORMAT name, domain), counter_createA, __itt_group_counter, "\"%s\", \"%s\"")
44
+ ITT_STUB(ITTAPI, __itt_counter, counter_createW, (const wchar_t *name, const wchar_t *domain), (ITT_FORMAT name, domain), counter_createW, __itt_group_counter, "\"%s\", \"%s\"")
45
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
46
+ ITT_STUB(ITTAPI, __itt_counter, counter_create, (const char *name, const char *domain), (ITT_FORMAT name, domain), counter_create, __itt_group_counter, "\"%s\", \"%s\"")
47
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
48
+
49
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
50
+ ITT_STUB(ITTAPI, __itt_counter, counter_create_typedA, (const char *name, const char *domain, __itt_metadata_type type), (ITT_FORMAT name, domain, type), counter_create_typedA, __itt_group_counter, "\"%s\", \"%s\", %d")
51
+ ITT_STUB(ITTAPI, __itt_counter, counter_create_typedW, (const wchar_t *name, const wchar_t *domain, __itt_metadata_type type), (ITT_FORMAT name, domain, type), counter_create_typedW, __itt_group_counter, "\"%s\", \"%s\", %d")
52
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
53
+ ITT_STUB(ITTAPI, __itt_counter, counter_create_typed, (const char *name, const char *domain, __itt_metadata_type type), (ITT_FORMAT name, domain, type), counter_create_typed, __itt_group_counter, "\"%s\", \"%s\", %d")
54
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
55
+
56
+
57
+ ITT_STUBV(ITTAPI, void, pause, (void), (ITT_NO_PARAMS), pause, __itt_group_control | __itt_group_legacy, "no args")
58
+ ITT_STUBV(ITTAPI, void, resume, (void), (ITT_NO_PARAMS), resume, __itt_group_control | __itt_group_legacy, "no args")
59
+
60
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
61
+ ITT_STUBV(ITTAPI, void, thread_set_nameA, (const char *name), (ITT_FORMAT name), thread_set_nameA, __itt_group_thread, "\"%s\"")
62
+ ITT_STUBV(ITTAPI, void, thread_set_nameW, (const wchar_t *name), (ITT_FORMAT name), thread_set_nameW, __itt_group_thread, "\"%S\"")
63
+ #else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
64
+ ITT_STUBV(ITTAPI, void, thread_set_name, (const char *name), (ITT_FORMAT name), thread_set_name, __itt_group_thread, "\"%s\"")
65
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
66
+ ITT_STUBV(ITTAPI, void, thread_ignore, (void), (ITT_NO_PARAMS), thread_ignore, __itt_group_thread, "no args")
67
+
68
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
69
+ ITT_STUB(LIBITTAPI, int, thr_name_setA, (const char *name, int namelen), (ITT_FORMAT name, namelen), thr_name_setA, __itt_group_thread | __itt_group_legacy, "\"%s\", %d")
70
+ ITT_STUB(LIBITTAPI, int, thr_name_setW, (const wchar_t *name, int namelen), (ITT_FORMAT name, namelen), thr_name_setW, __itt_group_thread | __itt_group_legacy, "\"%S\", %d")
71
+ #else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
72
+ ITT_STUB(LIBITTAPI, int, thr_name_set, (const char *name, int namelen), (ITT_FORMAT name, namelen), thr_name_set, __itt_group_thread | __itt_group_legacy, "\"%s\", %d")
73
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
74
+ ITT_STUBV(LIBITTAPI, void, thr_ignore, (void), (ITT_NO_PARAMS), thr_ignore, __itt_group_thread | __itt_group_legacy, "no args")
75
+
76
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
77
+ ITT_STUB(ITTAPI, __itt_histogram*, histogram_createA, (const __itt_domain* domain, const char* name, __itt_metadata_type x_type, __itt_metadata_type y_type), (ITT_FORMAT domain, name, x_type, y_type), histogram_createA, __itt_group_structure, "%p, \"%s\", %d, %d")
78
+ ITT_STUB(ITTAPI, __itt_histogram*, histogram_createW, (const __itt_domain* domain, const wchar_t* name, __itt_metadata_type x_type, __itt_metadata_type y_type), (ITT_FORMAT domain, name, x_type, y_type), histogram_createW, __itt_group_structure, "%p, \"%s\", %d, %d")
79
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
80
+ ITT_STUB(ITTAPI, __itt_histogram*, histogram_create, (const __itt_domain* domain, const char* name, __itt_metadata_type x_type, __itt_metadata_type y_type), (ITT_FORMAT domain, name, x_type, y_type), histogram_create, __itt_group_structure, "%p, \"%s\", %d, %d")
81
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
82
+
83
+ #endif /* __ITT_INTERNAL_BODY */
84
+
85
+ ITT_STUBV(ITTAPI, void, enable_attach, (void), (ITT_NO_PARAMS), enable_attach, __itt_group_all, "no args")
86
+
87
+ #else /* __ITT_INTERNAL_INIT */
88
+
89
+ ITT_STUBV(ITTAPI, void, detach, (void), (ITT_NO_PARAMS), detach, __itt_group_control | __itt_group_legacy, "no args")
90
+
91
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
92
+ ITT_STUBV(ITTAPI, void, sync_createA, (void *addr, const char *objtype, const char *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_createA, __itt_group_sync | __itt_group_fsync, "%p, \"%s\", \"%s\", %x")
93
+ ITT_STUBV(ITTAPI, void, sync_createW, (void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_createW, __itt_group_sync | __itt_group_fsync, "%p, \"%S\", \"%S\", %x")
94
+ ITT_STUBV(ITTAPI, void, sync_renameA, (void *addr, const char *name), (ITT_FORMAT addr, name), sync_renameA, __itt_group_sync | __itt_group_fsync, "%p, \"%s\"")
95
+ ITT_STUBV(ITTAPI, void, sync_renameW, (void *addr, const wchar_t *name), (ITT_FORMAT addr, name), sync_renameW, __itt_group_sync | __itt_group_fsync, "%p, \"%S\"")
96
+ #else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
97
+ ITT_STUBV(ITTAPI, void, sync_create, (void *addr, const char *objtype, const char *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_create, __itt_group_sync | __itt_group_fsync, "%p, \"%s\", \"%s\", %x")
98
+ ITT_STUBV(ITTAPI, void, sync_rename, (void *addr, const char *name), (ITT_FORMAT addr, name), sync_rename, __itt_group_sync | __itt_group_fsync, "%p, \"%s\"")
99
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
100
+ ITT_STUBV(ITTAPI, void, sync_destroy, (void *addr), (ITT_FORMAT addr), sync_destroy, __itt_group_sync | __itt_group_fsync, "%p")
101
+
102
+ ITT_STUBV(ITTAPI, void, sync_prepare, (void* addr), (ITT_FORMAT addr), sync_prepare, __itt_group_sync, "%p")
103
+ ITT_STUBV(ITTAPI, void, sync_cancel, (void *addr), (ITT_FORMAT addr), sync_cancel, __itt_group_sync, "%p")
104
+ ITT_STUBV(ITTAPI, void, sync_acquired, (void *addr), (ITT_FORMAT addr), sync_acquired, __itt_group_sync, "%p")
105
+ ITT_STUBV(ITTAPI, void, sync_releasing, (void* addr), (ITT_FORMAT addr), sync_releasing, __itt_group_sync, "%p")
106
+
107
+ ITT_STUBV(ITTAPI, void, suppress_push, (unsigned int mask), (ITT_FORMAT mask), suppress_push, __itt_group_suppress, "%p")
108
+ ITT_STUBV(ITTAPI, void, suppress_pop, (void), (ITT_NO_PARAMS), suppress_pop, __itt_group_suppress, "no args")
109
+ ITT_STUBV(ITTAPI, void, suppress_mark_range, (__itt_suppress_mode_t mode, unsigned int mask, void * address, size_t size),(ITT_FORMAT mode, mask, address, size), suppress_mark_range, __itt_group_suppress, "%d, %p, %p, %d")
110
+ ITT_STUBV(ITTAPI, void, suppress_clear_range,(__itt_suppress_mode_t mode, unsigned int mask, void * address, size_t size),(ITT_FORMAT mode, mask, address, size), suppress_clear_range,__itt_group_suppress, "%d, %p, %p, %d")
111
+
112
+ ITT_STUBV(ITTAPI, void, fsync_prepare, (void* addr), (ITT_FORMAT addr), sync_prepare, __itt_group_fsync, "%p")
113
+ ITT_STUBV(ITTAPI, void, fsync_cancel, (void *addr), (ITT_FORMAT addr), sync_cancel, __itt_group_fsync, "%p")
114
+ ITT_STUBV(ITTAPI, void, fsync_acquired, (void *addr), (ITT_FORMAT addr), sync_acquired, __itt_group_fsync, "%p")
115
+ ITT_STUBV(ITTAPI, void, fsync_releasing, (void* addr), (ITT_FORMAT addr), sync_releasing, __itt_group_fsync, "%p")
116
+
117
+ ITT_STUBV(ITTAPI, void, model_site_begin, (__itt_model_site *site, __itt_model_site_instance *instance, const char *name), (ITT_FORMAT site, instance, name), model_site_begin, __itt_group_model, "%p, %p, \"%s\"")
118
+ ITT_STUBV(ITTAPI, void, model_site_end, (__itt_model_site *site, __itt_model_site_instance *instance), (ITT_FORMAT site, instance), model_site_end, __itt_group_model, "%p, %p")
119
+ ITT_STUBV(ITTAPI, void, model_task_begin, (__itt_model_task *task, __itt_model_task_instance *instance, const char *name), (ITT_FORMAT task, instance, name), model_task_begin, __itt_group_model, "%p, %p, \"%s\"")
120
+ ITT_STUBV(ITTAPI, void, model_task_end, (__itt_model_task *task, __itt_model_task_instance *instance), (ITT_FORMAT task, instance), model_task_end, __itt_group_model, "%p, %p")
121
+ ITT_STUBV(ITTAPI, void, model_lock_acquire, (void *lock), (ITT_FORMAT lock), model_lock_acquire, __itt_group_model, "%p")
122
+ ITT_STUBV(ITTAPI, void, model_lock_release, (void *lock), (ITT_FORMAT lock), model_lock_release, __itt_group_model, "%p")
123
+ ITT_STUBV(ITTAPI, void, model_record_allocation, (void *addr, size_t size), (ITT_FORMAT addr, size), model_record_allocation, __itt_group_model, "%p, %d")
124
+ ITT_STUBV(ITTAPI, void, model_record_deallocation, (void *addr), (ITT_FORMAT addr), model_record_deallocation, __itt_group_model, "%p")
125
+ ITT_STUBV(ITTAPI, void, model_induction_uses, (void* addr, size_t size), (ITT_FORMAT addr, size), model_induction_uses, __itt_group_model, "%p, %d")
126
+ ITT_STUBV(ITTAPI, void, model_reduction_uses, (void* addr, size_t size), (ITT_FORMAT addr, size), model_reduction_uses, __itt_group_model, "%p, %d")
127
+ ITT_STUBV(ITTAPI, void, model_observe_uses, (void* addr, size_t size), (ITT_FORMAT addr, size), model_observe_uses, __itt_group_model, "%p, %d")
128
+ ITT_STUBV(ITTAPI, void, model_clear_uses, (void* addr), (ITT_FORMAT addr), model_clear_uses, __itt_group_model, "%p")
129
+
130
+ #ifndef __ITT_INTERNAL_BODY
131
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
132
+ ITT_STUBV(ITTAPI, void, model_site_beginW, (const wchar_t *name), (ITT_FORMAT name), model_site_beginW, __itt_group_model, "\"%s\"")
133
+ ITT_STUBV(ITTAPI, void, model_task_beginW, (const wchar_t *name), (ITT_FORMAT name), model_task_beginW, __itt_group_model, "\"%s\"")
134
+ ITT_STUBV(ITTAPI, void, model_iteration_taskW, (const wchar_t *name), (ITT_FORMAT name), model_iteration_taskW, __itt_group_model, "\"%s\"")
135
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
136
+ ITT_STUBV(ITTAPI, void, model_site_beginA, (const char *name), (ITT_FORMAT name), model_site_beginA, __itt_group_model, "\"%s\"")
137
+ ITT_STUBV(ITTAPI, void, model_site_beginAL, (const char *name, size_t len), (ITT_FORMAT name, len), model_site_beginAL, __itt_group_model, "\"%s\", %d")
138
+ ITT_STUBV(ITTAPI, void, model_task_beginA, (const char *name), (ITT_FORMAT name), model_task_beginA, __itt_group_model, "\"%s\"")
139
+ ITT_STUBV(ITTAPI, void, model_task_beginAL, (const char *name, size_t len), (ITT_FORMAT name, len), model_task_beginAL, __itt_group_model, "\"%s\", %d")
140
+ ITT_STUBV(ITTAPI, void, model_iteration_taskA, (const char *name), (ITT_FORMAT name), model_iteration_taskA, __itt_group_model, "\"%s\"")
141
+ ITT_STUBV(ITTAPI, void, model_iteration_taskAL, (const char *name, size_t len), (ITT_FORMAT name, len), model_iteration_taskAL, __itt_group_model, "\"%s\", %d")
142
+ ITT_STUBV(ITTAPI, void, model_site_end_2, (void), (ITT_NO_PARAMS), model_site_end_2, __itt_group_model, "no args")
143
+ ITT_STUBV(ITTAPI, void, model_task_end_2, (void), (ITT_NO_PARAMS), model_task_end_2, __itt_group_model, "no args")
144
+ ITT_STUBV(ITTAPI, void, model_lock_acquire_2, (void *lock), (ITT_FORMAT lock), model_lock_acquire_2, __itt_group_model, "%p")
145
+ ITT_STUBV(ITTAPI, void, model_lock_release_2, (void *lock), (ITT_FORMAT lock), model_lock_release_2, __itt_group_model, "%p")
146
+ ITT_STUBV(ITTAPI, void, model_aggregate_task, (size_t count), (ITT_FORMAT count), model_aggregate_task, __itt_group_model, "%d")
147
+ ITT_STUBV(ITTAPI, void, model_disable_push, (__itt_model_disable x), (ITT_FORMAT x), model_disable_push, __itt_group_model, "%p")
148
+ ITT_STUBV(ITTAPI, void, model_disable_pop, (void), (ITT_NO_PARAMS), model_disable_pop, __itt_group_model, "no args")
149
+ #endif /* __ITT_INTERNAL_BODY */
150
+
151
+ #ifndef __ITT_INTERNAL_BODY
152
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
153
+ ITT_STUB(ITTAPI, __itt_heap_function, heap_function_createA, (const char *name, const char *domain), (ITT_FORMAT name, domain), heap_function_createA, __itt_group_heap, "\"%s\", \"%s\"")
154
+ ITT_STUB(ITTAPI, __itt_heap_function, heap_function_createW, (const wchar_t *name, const wchar_t *domain), (ITT_FORMAT name, domain), heap_function_createW, __itt_group_heap, "\"%s\", \"%s\"")
155
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
156
+ ITT_STUB(ITTAPI, __itt_heap_function, heap_function_create, (const char *name, const char *domain), (ITT_FORMAT name, domain), heap_function_create, __itt_group_heap, "\"%s\", \"%s\"")
157
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
158
+ #endif /* __ITT_INTERNAL_BODY */
159
+ ITT_STUBV(ITTAPI, void, heap_allocate_begin, (__itt_heap_function h, size_t size, int initialized), (ITT_FORMAT h, size, initialized), heap_allocate_begin, __itt_group_heap, "%p, %lu, %d")
160
+ ITT_STUBV(ITTAPI, void, heap_allocate_end, (__itt_heap_function h, void** addr, size_t size, int initialized), (ITT_FORMAT h, addr, size, initialized), heap_allocate_end, __itt_group_heap, "%p, %p, %lu, %d")
161
+ ITT_STUBV(ITTAPI, void, heap_free_begin, (__itt_heap_function h, void* addr), (ITT_FORMAT h, addr), heap_free_begin, __itt_group_heap, "%p, %p")
162
+ ITT_STUBV(ITTAPI, void, heap_free_end, (__itt_heap_function h, void* addr), (ITT_FORMAT h, addr), heap_free_end, __itt_group_heap, "%p, %p")
163
+ ITT_STUBV(ITTAPI, void, heap_reallocate_begin, (__itt_heap_function h, void* addr, size_t new_size, int initialized), (ITT_FORMAT h, addr, new_size, initialized), heap_reallocate_begin, __itt_group_heap, "%p, %p, %lu, %d")
164
+ ITT_STUBV(ITTAPI, void, heap_reallocate_end, (__itt_heap_function h, void* addr, void** new_addr, size_t new_size, int initialized), (ITT_FORMAT h, addr, new_addr, new_size, initialized), heap_reallocate_end, __itt_group_heap, "%p, %p, %p, %lu, %d")
165
+ ITT_STUBV(ITTAPI, void, heap_internal_access_begin, (void), (ITT_NO_PARAMS), heap_internal_access_begin, __itt_group_heap, "no args")
166
+ ITT_STUBV(ITTAPI, void, heap_internal_access_end, (void), (ITT_NO_PARAMS), heap_internal_access_end, __itt_group_heap, "no args")
167
+ ITT_STUBV(ITTAPI, void, heap_record_memory_growth_begin, (void), (ITT_NO_PARAMS), heap_record_memory_growth_begin, __itt_group_heap, "no args")
168
+ ITT_STUBV(ITTAPI, void, heap_record_memory_growth_end, (void), (ITT_NO_PARAMS), heap_record_memory_growth_end, __itt_group_heap, "no args")
169
+ ITT_STUBV(ITTAPI, void, heap_reset_detection, (unsigned int reset_mask), (ITT_FORMAT reset_mask), heap_reset_detection, __itt_group_heap, "%u")
170
+ ITT_STUBV(ITTAPI, void, heap_record, (unsigned int record_mask), (ITT_FORMAT record_mask), heap_record, __itt_group_heap, "%u")
171
+
172
+ ITT_STUBV(ITTAPI, void, id_create, (const __itt_domain *domain, __itt_id id), (ITT_FORMAT domain, id), id_create, __itt_group_structure, "%p, %lu")
173
+ ITT_STUBV(ITTAPI, void, id_destroy, (const __itt_domain *domain, __itt_id id), (ITT_FORMAT domain, id), id_destroy, __itt_group_structure, "%p, %lu")
174
+
175
+ ITT_STUB(ITTAPI, __itt_timestamp, get_timestamp, (void), (ITT_NO_PARAMS), get_timestamp, __itt_group_structure, "no args")
176
+
177
+ ITT_STUBV(ITTAPI, void, region_begin, (const __itt_domain *domain, __itt_id id, __itt_id parent, __itt_string_handle *name), (ITT_FORMAT domain, id, parent, name), region_begin, __itt_group_structure, "%p, %lu, %lu, %p")
178
+ ITT_STUBV(ITTAPI, void, region_end, (const __itt_domain *domain, __itt_id id), (ITT_FORMAT domain, id), region_end, __itt_group_structure, "%p, %lu")
179
+
180
+ #ifndef __ITT_INTERNAL_BODY
181
+ ITT_STUBV(ITTAPI, void, frame_begin_v3, (const __itt_domain *domain, __itt_id *id), (ITT_FORMAT domain, id), frame_begin_v3, __itt_group_structure, "%p, %p")
182
+ ITT_STUBV(ITTAPI, void, frame_end_v3, (const __itt_domain *domain, __itt_id *id), (ITT_FORMAT domain, id), frame_end_v3, __itt_group_structure, "%p, %p")
183
+ ITT_STUBV(ITTAPI, void, frame_submit_v3, (const __itt_domain *domain, __itt_id *id, __itt_timestamp begin, __itt_timestamp end), (ITT_FORMAT domain, id, begin, end), frame_submit_v3, __itt_group_structure, "%p, %p, %lu, %lu")
184
+ #endif /* __ITT_INTERNAL_BODY */
185
+
186
+ ITT_STUBV(ITTAPI, void, task_group, (const __itt_domain *domain, __itt_id id, __itt_id parent, __itt_string_handle *name), (ITT_FORMAT domain, id, parent, name), task_group, __itt_group_structure, "%p, %lu, %lu, %p")
187
+
188
+ ITT_STUBV(ITTAPI, void, task_begin, (const __itt_domain *domain, __itt_id id, __itt_id parent, __itt_string_handle *name), (ITT_FORMAT domain, id, parent, name), task_begin, __itt_group_structure, "%p, %lu, %lu, %p")
189
+ ITT_STUBV(ITTAPI, void, task_begin_fn, (const __itt_domain *domain, __itt_id id, __itt_id parent, void* fn), (ITT_FORMAT domain, id, parent, fn), task_begin_fn, __itt_group_structure, "%p, %lu, %lu, %p")
190
+ ITT_STUBV(ITTAPI, void, task_end, (const __itt_domain *domain), (ITT_FORMAT domain), task_end, __itt_group_structure, "%p")
191
+
192
+ ITT_STUBV(ITTAPI, void, counter_inc_v3, (const __itt_domain *domain, __itt_string_handle *name), (ITT_FORMAT domain, name), counter_inc_v3, __itt_group_structure, "%p, %p")
193
+ ITT_STUBV(ITTAPI, void, counter_inc_delta_v3, (const __itt_domain *domain, __itt_string_handle *name, unsigned long long value), (ITT_FORMAT domain, name, value), counter_inc_delta_v3, __itt_group_structure, "%p, %p, %lu")
194
+ ITT_STUBV(ITTAPI, void, counter_dec_v3, (const __itt_domain *domain, __itt_string_handle *name), (ITT_FORMAT domain, name), counter_dec_v3, __itt_group_structure, "%p, %p")
195
+ ITT_STUBV(ITTAPI, void, counter_dec_delta_v3, (const __itt_domain *domain, __itt_string_handle *name, unsigned long long value), (ITT_FORMAT domain, name, value), counter_dec_delta_v3, __itt_group_structure, "%p, %p, %lu")
196
+
197
+ ITT_STUBV(ITTAPI, void, marker, (const __itt_domain *domain, __itt_id id, __itt_string_handle *name, __itt_scope scope), (ITT_FORMAT domain, id, name, scope), marker, __itt_group_structure, "%p, %lu, %p, %d")
198
+
199
+ ITT_STUBV(ITTAPI, void, metadata_add, (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, __itt_metadata_type type, size_t count, void *data), (ITT_FORMAT domain, id, key, type, count, data), metadata_add, __itt_group_structure, "%p, %lu, %p, %d, %lu, %p")
200
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
201
+ ITT_STUBV(ITTAPI, void, metadata_str_addA, (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const char* data, size_t length), (ITT_FORMAT domain, id, key, data, length), metadata_str_addA, __itt_group_structure, "%p, %lu, %p, %p, %lu")
202
+ ITT_STUBV(ITTAPI, void, metadata_str_addW, (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const wchar_t* data, size_t length), (ITT_FORMAT domain, id, key, data, length), metadata_str_addW, __itt_group_structure, "%p, %lu, %p, %p, %lu")
203
+ #else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
204
+ ITT_STUBV(ITTAPI, void, metadata_str_add, (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const char* data, size_t length), (ITT_FORMAT domain, id, key, data, length), metadata_str_add, __itt_group_structure, "%p, %lu, %p, %p, %lu")
205
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
206
+
207
+ ITT_STUBV(ITTAPI, void, relation_add_to_current, (const __itt_domain *domain, __itt_relation relation, __itt_id tail), (ITT_FORMAT domain, relation, tail), relation_add_to_current, __itt_group_structure, "%p, %lu, %p")
208
+ ITT_STUBV(ITTAPI, void, relation_add, (const __itt_domain *domain, __itt_id head, __itt_relation relation, __itt_id tail), (ITT_FORMAT domain, head, relation, tail), relation_add, __itt_group_structure, "%p, %p, %lu, %p")
209
+
210
+ #ifndef __ITT_INTERNAL_BODY
211
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
212
+ ITT_STUB(LIBITTAPI, __itt_event, event_createA, (const char *name, int namelen), (ITT_FORMAT name, namelen), event_createA, __itt_group_mark | __itt_group_legacy, "\"%s\", %d")
213
+ ITT_STUB(LIBITTAPI, __itt_event, event_createW, (const wchar_t *name, int namelen), (ITT_FORMAT name, namelen), event_createW, __itt_group_mark | __itt_group_legacy, "\"%S\", %d")
214
+ #else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
215
+ ITT_STUB(LIBITTAPI, __itt_event, event_create, (const char *name, int namelen), (ITT_FORMAT name, namelen), event_create, __itt_group_mark | __itt_group_legacy, "\"%s\", %d")
216
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
217
+ ITT_STUB(LIBITTAPI, int, event_start, (__itt_event event), (ITT_FORMAT event), event_start, __itt_group_mark | __itt_group_legacy, "%d")
218
+ ITT_STUB(LIBITTAPI, int, event_end, (__itt_event event), (ITT_FORMAT event), event_end, __itt_group_mark | __itt_group_legacy, "%d")
219
+ #endif /* __ITT_INTERNAL_BODY */
220
+
221
+ #ifndef __ITT_INTERNAL_BODY
222
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
223
+ ITT_STUBV(ITTAPI, void, sync_set_nameA, (void *addr, const char *objtype, const char *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_set_nameA, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p, \"%s\", \"%s\", %x")
224
+ ITT_STUBV(ITTAPI, void, sync_set_nameW, (void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_set_nameW, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p, \"%S\", \"%S\", %x")
225
+ #else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
226
+ ITT_STUBV(ITTAPI, void, sync_set_name, (void *addr, const char *objtype, const char *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_set_name, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "p, \"%s\", \"%s\", %x")
227
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
228
+
229
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
230
+ ITT_STUB(LIBITTAPI, int, notify_sync_nameA, (void *p, const char *objtype, int typelen, const char *objname, int namelen, int attribute), (ITT_FORMAT p, objtype, typelen, objname, namelen, attribute), notify_sync_nameA, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p, \"%s\", %d, \"%s\", %d, %x")
231
+ ITT_STUB(LIBITTAPI, int, notify_sync_nameW, (void *p, const wchar_t *objtype, int typelen, const wchar_t *objname, int namelen, int attribute), (ITT_FORMAT p, objtype, typelen, objname, namelen, attribute), notify_sync_nameW, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p, \"%S\", %d, \"%S\", %d, %x")
232
+ #else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
233
+ ITT_STUB(LIBITTAPI, int, notify_sync_name, (void *p, const char *objtype, int typelen, const char *objname, int namelen, int attribute), (ITT_FORMAT p, objtype, typelen, objname, namelen, attribute), notify_sync_name, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p, \"%s\", %d, \"%s\", %d, %x")
234
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
235
+
236
+ ITT_STUBV(LIBITTAPI, void, notify_sync_prepare, (void *p), (ITT_FORMAT p), notify_sync_prepare, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p")
237
+ ITT_STUBV(LIBITTAPI, void, notify_sync_cancel, (void *p), (ITT_FORMAT p), notify_sync_cancel, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p")
238
+ ITT_STUBV(LIBITTAPI, void, notify_sync_acquired, (void *p), (ITT_FORMAT p), notify_sync_acquired, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p")
239
+ ITT_STUBV(LIBITTAPI, void, notify_sync_releasing, (void *p), (ITT_FORMAT p), notify_sync_releasing, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p")
240
+ #endif /* __ITT_INTERNAL_BODY */
241
+
242
+ ITT_STUBV(LIBITTAPI, void, memory_read, (void *addr, size_t size), (ITT_FORMAT addr, size), memory_read, __itt_group_legacy, "%p, %lu")
243
+ ITT_STUBV(LIBITTAPI, void, memory_write, (void *addr, size_t size), (ITT_FORMAT addr, size), memory_write, __itt_group_legacy, "%p, %lu")
244
+ ITT_STUBV(LIBITTAPI, void, memory_update, (void *addr, size_t size), (ITT_FORMAT addr, size), memory_update, __itt_group_legacy, "%p, %lu")
245
+
246
+ ITT_STUB(LIBITTAPI, __itt_state_t, state_get, (void), (ITT_NO_PARAMS), state_get, __itt_group_legacy, "no args")
247
+ ITT_STUB(LIBITTAPI, __itt_state_t, state_set, (__itt_state_t s), (ITT_FORMAT s), state_set, __itt_group_legacy, "%d")
248
+ ITT_STUB(LIBITTAPI, __itt_obj_state_t, obj_mode_set, (__itt_obj_prop_t p, __itt_obj_state_t s), (ITT_FORMAT p, s), obj_mode_set, __itt_group_legacy, "%d, %d")
249
+ ITT_STUB(LIBITTAPI, __itt_thr_state_t, thr_mode_set, (__itt_thr_prop_t p, __itt_thr_state_t s), (ITT_FORMAT p, s), thr_mode_set, __itt_group_legacy, "%d, %d")
250
+
251
+ #ifndef __ITT_INTERNAL_BODY
252
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
253
+ ITT_STUB(ITTAPI, __itt_frame, frame_createA, (const char *domain), (ITT_FORMAT domain), frame_createA, __itt_group_frame, "\"%s\"")
254
+ ITT_STUB(ITTAPI, __itt_frame, frame_createW, (const wchar_t *domain), (ITT_FORMAT domain), frame_createW, __itt_group_frame, "\"%s\"")
255
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
256
+ ITT_STUB(ITTAPI, __itt_frame, frame_create, (const char *domain), (ITT_FORMAT domain), frame_create, __itt_group_frame, "\"%s\"")
257
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
258
+
259
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
260
+ ITT_STUB(ITTAPI, __itt_pt_region, pt_region_createA, (const char *name), (ITT_FORMAT name), pt_region_createA, __itt_group_structure, "\"%s\"")
261
+ ITT_STUB(ITTAPI, __itt_pt_region, pt_region_createW, (const wchar_t *name), (ITT_FORMAT name), pt_region_createW, __itt_group_structure, "\"%S\"")
262
+ #else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
263
+ ITT_STUB(ITTAPI, __itt_pt_region, pt_region_create, (const char *name), (ITT_FORMAT name), pt_region_create, __itt_group_structure, "\"%s\"")
264
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
265
+ #endif /* __ITT_INTERNAL_BODY */
266
+ ITT_STUBV(ITTAPI, void, frame_begin, (__itt_frame frame), (ITT_FORMAT frame), frame_begin, __itt_group_frame, "%p")
267
+ ITT_STUBV(ITTAPI, void, frame_end, (__itt_frame frame), (ITT_FORMAT frame), frame_end, __itt_group_frame, "%p")
268
+
269
+ ITT_STUBV(ITTAPI, void, counter_destroy, (__itt_counter id), (ITT_FORMAT id), counter_destroy, __itt_group_counter, "%p")
270
+ ITT_STUBV(ITTAPI, void, counter_inc, (__itt_counter id), (ITT_FORMAT id), counter_inc, __itt_group_counter, "%p")
271
+ ITT_STUBV(ITTAPI, void, counter_inc_delta, (__itt_counter id, unsigned long long value), (ITT_FORMAT id, value), counter_inc_delta, __itt_group_counter, "%p, %lu")
272
+ ITT_STUBV(ITTAPI, void, counter_dec, (__itt_counter id), (ITT_FORMAT id), counter_dec, __itt_group_counter, "%p")
273
+ ITT_STUBV(ITTAPI, void, counter_dec_delta, (__itt_counter id, unsigned long long value), (ITT_FORMAT id, value), counter_dec_delta, __itt_group_counter, "%p, %lu")
274
+ ITT_STUBV(ITTAPI, void, counter_set_value, (__itt_counter id, void *value_ptr), (ITT_FORMAT id, value_ptr), counter_set_value, __itt_group_counter, "%p, %p")
275
+ ITT_STUBV(ITTAPI, void, counter_set_value_ex, (__itt_counter id, __itt_clock_domain *clock_domain, unsigned long long timestamp, void *value_ptr), (ITT_FORMAT id, clock_domain, timestamp, value_ptr), counter_set_value_ex, __itt_group_counter, "%p, %p, %llu, %p")
276
+
277
+ #ifndef __ITT_INTERNAL_BODY
278
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
279
+ ITT_STUB(ITTAPI, __itt_mark_type, mark_createA, (const char *name), (ITT_FORMAT name), mark_createA, __itt_group_mark, "\"%s\"")
280
+ ITT_STUB(ITTAPI, __itt_mark_type, mark_createW, (const wchar_t *name), (ITT_FORMAT name), mark_createW, __itt_group_mark, "\"%S\"")
281
+ #else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
282
+ ITT_STUB(ITTAPI, __itt_mark_type, mark_create, (const char *name), (ITT_FORMAT name), mark_create, __itt_group_mark, "\"%s\"")
283
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
284
+ #endif /* __ITT_INTERNAL_BODY */
285
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
286
+ ITT_STUB(ITTAPI, int, markA, (__itt_mark_type mt, const char *parameter), (ITT_FORMAT mt, parameter), markA, __itt_group_mark, "%d, \"%s\"")
287
+ ITT_STUB(ITTAPI, int, markW, (__itt_mark_type mt, const wchar_t *parameter), (ITT_FORMAT mt, parameter), markW, __itt_group_mark, "%d, \"%S\"")
288
+ #else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
289
+ ITT_STUB(ITTAPI, int, mark, (__itt_mark_type mt, const char *parameter), (ITT_FORMAT mt, parameter), mark, __itt_group_mark, "%d, \"%s\"")
290
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
291
+ ITT_STUB(ITTAPI, int, mark_off, (__itt_mark_type mt), (ITT_FORMAT mt), mark_off, __itt_group_mark, "%d")
292
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
293
+ ITT_STUB(ITTAPI, int, mark_globalA, (__itt_mark_type mt, const char *parameter), (ITT_FORMAT mt, parameter), mark_globalA, __itt_group_mark, "%d, \"%s\"")
294
+ ITT_STUB(ITTAPI, int, mark_globalW, (__itt_mark_type mt, const wchar_t *parameter), (ITT_FORMAT mt, parameter), mark_globalW, __itt_group_mark, "%d, \"%S\"")
295
+ #else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
296
+ ITT_STUB(ITTAPI, int, mark_global, (__itt_mark_type mt, const char *parameter), (ITT_FORMAT mt, parameter), mark_global, __itt_group_mark, "%d, \"%S\"")
297
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
298
+ ITT_STUB(ITTAPI, int, mark_global_off, (__itt_mark_type mt), (ITT_FORMAT mt), mark_global_off, __itt_group_mark, "%d")
299
+
300
+ #ifndef __ITT_INTERNAL_BODY
301
+ ITT_STUB(ITTAPI, __itt_caller, stack_caller_create, (void), (ITT_NO_PARAMS), stack_caller_create, __itt_group_stitch, "no args")
302
+ #endif /* __ITT_INTERNAL_BODY */
303
+ ITT_STUBV(ITTAPI, void, stack_caller_destroy, (__itt_caller id), (ITT_FORMAT id), stack_caller_destroy, __itt_group_stitch, "%p")
304
+ ITT_STUBV(ITTAPI, void, stack_callee_enter, (__itt_caller id), (ITT_FORMAT id), stack_callee_enter, __itt_group_stitch, "%p")
305
+ ITT_STUBV(ITTAPI, void, stack_callee_leave, (__itt_caller id), (ITT_FORMAT id), stack_callee_leave, __itt_group_stitch, "%p")
306
+
307
+ ITT_STUB(ITTAPI, __itt_clock_domain*, clock_domain_create, (__itt_get_clock_info_fn fn, void* fn_data), (ITT_FORMAT fn, fn_data), clock_domain_create, __itt_group_structure, "%p, %p")
308
+ ITT_STUBV(ITTAPI, void, clock_domain_reset, (void), (ITT_NO_PARAMS), clock_domain_reset, __itt_group_structure, "no args")
309
+ ITT_STUBV(ITTAPI, void, id_create_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id), (ITT_FORMAT domain, clock_domain, timestamp, id), id_create_ex, __itt_group_structure, "%p, %p, %lu, %lu")
310
+ ITT_STUBV(ITTAPI, void, id_destroy_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id), (ITT_FORMAT domain, clock_domain, timestamp, id), id_destroy_ex, __itt_group_structure, "%p, %p, %lu, %lu")
311
+ ITT_STUBV(ITTAPI, void, task_begin_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_id parentid, __itt_string_handle *name), (ITT_FORMAT domain, clock_domain, timestamp, id, parentid, name), task_begin_ex, __itt_group_structure, "%p, %p, %lu, %lu, %lu, %p")
312
+ ITT_STUBV(ITTAPI, void, task_begin_fn_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_id parentid, void* fn), (ITT_FORMAT domain, clock_domain, timestamp, id, parentid, fn), task_begin_fn_ex, __itt_group_structure, "%p, %p, %lu, %lu, %lu, %p")
313
+ ITT_STUBV(ITTAPI, void, task_end_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp), (ITT_FORMAT domain, clock_domain, timestamp), task_end_ex, __itt_group_structure, "%p, %p, %lu")
314
+ ITT_STUBV(ITTAPI, void, task_begin_overlapped, (const __itt_domain *domain, __itt_id id, __itt_id parent, __itt_string_handle *name), (ITT_FORMAT domain, id, parent, name), task_begin_overlapped, __itt_group_structure, "%p, %lu, %lu, %p")
315
+ ITT_STUBV(ITTAPI, void, task_begin_overlapped_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_id parentid, __itt_string_handle *name), (ITT_FORMAT domain, clock_domain, timestamp, id, parentid, name), task_begin_overlapped_ex, __itt_group_structure, "%p, %p, %lu, %lu, %lu, %p")
316
+ ITT_STUBV(ITTAPI, void, task_end_overlapped, (const __itt_domain *domain, __itt_id id), (ITT_FORMAT domain, id), task_end_overlapped, __itt_group_structure, "%p, %lu")
317
+ ITT_STUBV(ITTAPI, void, task_end_overlapped_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id), (ITT_FORMAT domain, clock_domain, timestamp, id), task_end_overlapped_ex, __itt_group_structure, "%p, %p, %lu, %lu")
318
+ ITT_STUBV(ITTAPI, void, marker_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_string_handle *name, __itt_scope scope), (ITT_FORMAT domain, clock_domain, timestamp, id, name, scope), marker_ex, __itt_group_structure, "%p, %p, %lu, %lu, %p, %d")
319
+ ITT_STUBV(ITTAPI, void, metadata_add_with_scope, (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, __itt_metadata_type type, size_t count, void *data), (ITT_FORMAT domain, scope, key, type, count, data), metadata_add_with_scope, __itt_group_structure, "%p, %d, %p, %d, %lu, %p")
320
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
321
+ ITT_STUBV(ITTAPI, void, metadata_str_add_with_scopeA, (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const char *data, size_t length), (ITT_FORMAT domain, scope, key, data, length), metadata_str_add_with_scopeA, __itt_group_structure, "%p, %d, %p, %p, %lu")
322
+ ITT_STUBV(ITTAPI, void, metadata_str_add_with_scopeW, (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const wchar_t *data, size_t length), (ITT_FORMAT domain, scope, key, data, length), metadata_str_add_with_scopeW, __itt_group_structure, "%p, %d, %p, %p, %lu")
323
+ #else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
324
+ ITT_STUBV(ITTAPI, void, metadata_str_add_with_scope, (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const char *data, size_t length), (ITT_FORMAT domain, scope, key, data, length), metadata_str_add_with_scope, __itt_group_structure, "%p, %d, %p, %p, %lu")
325
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
326
+ ITT_STUBV(ITTAPI, void, relation_add_to_current_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_relation relation, __itt_id tail), (ITT_FORMAT domain, clock_domain, timestamp, relation, tail), relation_add_to_current_ex, __itt_group_structure, "%p, %p, %lu, %d, %lu")
327
+ ITT_STUBV(ITTAPI, void, relation_add_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id head, __itt_relation relation, __itt_id tail), (ITT_FORMAT domain, clock_domain, timestamp, head, relation, tail), relation_add_ex, __itt_group_structure, "%p, %p, %lu, %lu, %d, %lu")
328
+ ITT_STUB(ITTAPI, __itt_track_group*, track_group_create, (__itt_string_handle* name, __itt_track_group_type track_group_type), (ITT_FORMAT name, track_group_type), track_group_create, __itt_group_structure, "%p, %d")
329
+ ITT_STUB(ITTAPI, __itt_track*, track_create, (__itt_track_group* track_group,__itt_string_handle* name, __itt_track_type track_type), (ITT_FORMAT track_group, name, track_type), track_create, __itt_group_structure, "%p, %p, %d")
330
+ ITT_STUBV(ITTAPI, void, set_track, (__itt_track *track), (ITT_FORMAT track), set_track, __itt_group_structure, "%p")
331
+
332
+ #ifndef __ITT_INTERNAL_BODY
333
+ ITT_STUB(ITTAPI, const char*, api_version, (void), (ITT_NO_PARAMS), api_version, __itt_group_all & ~__itt_group_legacy, "no args")
334
+ #endif /* __ITT_INTERNAL_BODY */
335
+
336
+ #ifndef __ITT_INTERNAL_BODY
337
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
338
+ ITT_STUB(ITTAPI, int, av_saveA, (void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder), (ITT_FORMAT data, rank, dimensions, type, filePath, columnOrder), av_saveA, __itt_group_arrays, "%p, %d, %p, %d, \"%s\", %d")
339
+ ITT_STUB(ITTAPI, int, av_saveW, (void *data, int rank, const int *dimensions, int type, const wchar_t *filePath, int columnOrder), (ITT_FORMAT data, rank, dimensions, type, filePath, columnOrder), av_saveW, __itt_group_arrays, "%p, %d, %p, %d, \"%S\", %d")
340
+ #else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
341
+ ITT_STUB(ITTAPI, int, av_save, (void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder), (ITT_FORMAT data, rank, dimensions, type, filePath, columnOrder), av_save, __itt_group_arrays, "%p, %d, %p, %d, \"%s\", %d")
342
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
343
+ #endif /* __ITT_INTERNAL_BODY */
344
+
345
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
346
+ ITT_STUBV(ITTAPI, void, module_loadA, (void *start_addr, void* end_addr, const char *path), (ITT_FORMAT start_addr, end_addr, path), module_loadA, __itt_group_module, "%p, %p, %p")
347
+ ITT_STUBV(ITTAPI, void, module_loadW, (void *start_addr, void* end_addr, const wchar_t *path), (ITT_FORMAT start_addr, end_addr, path), module_loadW, __itt_group_module, "%p, %p, %p")
348
+ #else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
349
+ ITT_STUBV(ITTAPI, void, module_load, (void *start_addr, void *end_addr, const char *path), (ITT_FORMAT start_addr, end_addr, path), module_load, __itt_group_module, "%p, %p, %p")
350
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
351
+ ITT_STUBV(ITTAPI, void, module_unload, (void *start_addr), (ITT_FORMAT start_addr), module_unload, __itt_group_module, "%p")
352
+
353
+ ITT_STUBV(ITTAPI, void, histogram_submit, (__itt_histogram* hist, size_t length, void* x_data, void* y_data), (ITT_FORMAT hist, length, x_data, y_data), histogram_submit, __itt_group_structure, "%p, %lu, %p, %p")
354
+
355
+ #endif /* __ITT_INTERNAL_INIT */
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/ittnotify/ittnotify_types.h ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (C) 2005-2019 Intel Corporation
3
+
4
+ SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
5
+ */
6
+
7
+ #ifndef _ITTNOTIFY_TYPES_H_
8
+ #define _ITTNOTIFY_TYPES_H_
9
+
10
+ typedef enum ___itt_group_id
11
+ {
12
+ __itt_group_none = 0,
13
+ __itt_group_legacy = 1<<0,
14
+ __itt_group_control = 1<<1,
15
+ __itt_group_thread = 1<<2,
16
+ __itt_group_mark = 1<<3,
17
+ __itt_group_sync = 1<<4,
18
+ __itt_group_fsync = 1<<5,
19
+ __itt_group_jit = 1<<6,
20
+ __itt_group_model = 1<<7,
21
+ __itt_group_splitter_min = 1<<7,
22
+ __itt_group_counter = 1<<8,
23
+ __itt_group_frame = 1<<9,
24
+ __itt_group_stitch = 1<<10,
25
+ __itt_group_heap = 1<<11,
26
+ __itt_group_splitter_max = 1<<12,
27
+ __itt_group_structure = 1<<12,
28
+ __itt_group_suppress = 1<<13,
29
+ __itt_group_arrays = 1<<14,
30
+ __itt_group_module = 1<<15,
31
+ __itt_group_all = -1
32
+ } __itt_group_id;
33
+
34
+ #pragma pack(push, 8)
35
+
36
+ typedef struct ___itt_group_list
37
+ {
38
+ __itt_group_id id;
39
+ const char* name;
40
+ } __itt_group_list;
41
+
42
+ #pragma pack(pop)
43
+
44
+ #define ITT_GROUP_LIST(varname) \
45
+ static __itt_group_list varname[] = { \
46
+ { __itt_group_all, "all" }, \
47
+ { __itt_group_control, "control" }, \
48
+ { __itt_group_thread, "thread" }, \
49
+ { __itt_group_mark, "mark" }, \
50
+ { __itt_group_sync, "sync" }, \
51
+ { __itt_group_fsync, "fsync" }, \
52
+ { __itt_group_jit, "jit" }, \
53
+ { __itt_group_model, "model" }, \
54
+ { __itt_group_counter, "counter" }, \
55
+ { __itt_group_frame, "frame" }, \
56
+ { __itt_group_stitch, "stitch" }, \
57
+ { __itt_group_heap, "heap" }, \
58
+ { __itt_group_structure, "structure" }, \
59
+ { __itt_group_suppress, "suppress" }, \
60
+ { __itt_group_arrays, "arrays" }, \
61
+ { __itt_group_module, "module" }, \
62
+ { __itt_group_none, NULL } \
63
+ }
64
+
65
+ #endif /* _ITTNOTIFY_TYPES_H_ */
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/ittnotify/jitprofiling.h ADDED
@@ -0,0 +1,642 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (C) 2005-2019 Intel Corporation
3
+
4
+ SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
5
+ */
6
+
7
+ #ifndef __JITPROFILING_H__
8
+ #define __JITPROFILING_H__
9
+
10
+ /**
11
+ * @brief JIT Profiling APIs
12
+ *
13
+ * The JIT Profiling API is used to report information about just-in-time
14
+ * generated code that can be used by performance tools. The user inserts
15
+ * calls in the code generator to report information before JIT-compiled
16
+ * code goes to execution. This information is collected at runtime and used
17
+ * by tools like Intel(R) VTune(TM) Amplifier to display performance metrics
18
+ * associated with JIT-compiled code.
19
+ *
20
+ * These APIs can be used to\n
21
+ * - **Profile trace-based and method-based JIT-compiled
22
+ * code**. Some examples of environments that you can profile with these APIs:
23
+ * dynamic JIT compilation of JavaScript code traces, JIT execution in OpenCL(TM)
24
+ * software technology, Java/.NET managed execution environments, and custom
25
+ * ISV JIT engines.
26
+ * @code
27
+ * #include <jitprofiling.h>
28
+ *
29
+ * if (iJIT_IsProfilingActive != iJIT_SAMPLING_ON) {
30
+ * return;
31
+ * }
32
+ *
33
+ * iJIT_Method_Load jmethod = {0};
34
+ * jmethod.method_id = iJIT_GetNewMethodID();
35
+ * jmethod.method_name = "method_name";
36
+ * jmethod.class_file_name = "class_name";
37
+ * jmethod.source_file_name = "source_file_name";
38
+ * jmethod.method_load_address = code_addr;
39
+ * jmethod.method_size = code_size;
40
+ *
41
+ * iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, (void*)&jmethod);
42
+ * iJIT_NotifyEvent(iJVM_EVENT_TYPE_SHUTDOWN, NULL);
43
+ * @endcode
44
+ *
45
+ * * Expected behavior:
46
+ * * If any iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED event overwrites an
47
+ * already reported method, then such a method becomes invalid and its
48
+ * memory region is treated as unloaded. VTune Amplifier displays the metrics
49
+ * collected by the method until it is overwritten.
50
+ * * If supplied line number information contains multiple source lines for
51
+ * the same assembly instruction (code location), then VTune Amplifier picks up
52
+ * the first line number.
53
+ * * Dynamically generated code can be associated with a module name.
54
+ * Use the iJIT_Method_Load_V2 structure.\n
55
+ * Clarification of some cases:
56
+ * * If you register a function with the same method ID multiple times,
57
+ * specifying different module names, then the VTune Amplifier picks up
58
+ * the module name registered first. If you want to distinguish the same
59
+ * function between different JIT engines, supply different method IDs for
60
+ * each function. Other symbolic information (for example, source file)
61
+ * can be identical.
62
+ *
63
+ * - **Analyze split functions** (multiple joint or disjoint code regions
64
+ * belonging to the same function) **including re-JIT**
65
+ * with potential overlapping of code regions in time, which is common in
66
+ * resource-limited environments.
67
+ * @code
68
+ * #include <jitprofiling.h>
69
+ *
70
+ * unsigned int method_id = iJIT_GetNewMethodID();
71
+ *
72
+ * iJIT_Method_Load a = {0};
73
+ * a.method_id = method_id;
74
+ * a.method_load_address = 0x100;
75
+ * a.method_size = 0x20;
76
+ *
77
+ * iJIT_Method_Load b = {0};
78
+ * b.method_id = method_id;
79
+ * b.method_load_address = 0x200;
80
+ * b.method_size = 0x30;
81
+ *
82
+ * iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, (void*)&a);
83
+ * iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, (void*)&b);
84
+ * @endcode
85
+ *
86
+ * * Expected behaviour:
87
+ * * If a iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED event overwrites an
88
+ * already reported method, then such a method becomes invalid and
89
+ * its memory region is treated as unloaded.
90
+ * * All code regions reported with the same method ID are considered as
91
+ * belonging to the same method. Symbolic information (method name,
92
+ * source file name) will be taken from the first notification, and all
93
+ * subsequent notifications with the same method ID will be processed
94
+ * only for line number table information. So, the VTune Amplifier will map
95
+ * samples to a source line using the line number table from the current
96
+ * notification while taking the source file name from the very first one.\n
97
+ * Clarification of some cases:\n
98
+ * * If you register a second code region with a different source file
99
+ * name and the same method ID, then this information will be saved and
100
+ * will not be considered as an extension of the first code region, but
101
+ * VTune Amplifier will use the source file of the first code region and map
102
+ * performance metrics incorrectly.
103
+ * * If you register a second code region with the same source file as
104
+ * for the first region and the same method ID, then the source file will be
105
+ * discarded but VTune Amplifier will map metrics to the source file correctly.
106
+ * * If you register a second code region with a null source file and
107
+ * the same method ID, then provided line number info will be associated
108
+ * with the source file of the first code region.
109
+ *
110
+ * - **Explore inline functions** including multi-level hierarchy of
111
+ * nested inline methods which shows how performance metrics are distributed through them.
112
+ * @code
113
+ * #include <jitprofiling.h>
114
+ *
115
+ * // method_id parent_id
116
+ * // [-- c --] 3000 2000
117
+ * // [---- d -----] 2001 1000
118
+ * // [---- b ----] 2000 1000
119
+ * // [------------ a ----------------] 1000 n/a
120
+ *
121
+ * iJIT_Method_Load a = {0};
122
+ * a.method_id = 1000;
123
+ *
124
+ * iJIT_Method_Inline_Load b = {0};
125
+ * b.method_id = 2000;
126
+ * b.parent_method_id = 1000;
127
+ *
128
+ * iJIT_Method_Inline_Load c = {0};
129
+ * c.method_id = 3000;
130
+ * c.parent_method_id = 2000;
131
+ *
132
+ * iJIT_Method_Inline_Load d = {0};
133
+ * d.method_id = 2001;
134
+ * d.parent_method_id = 1000;
135
+ *
136
+ * iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, (void*)&a);
137
+ * iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_INLINE_LOAD_FINISHED, (void*)&b);
138
+ * iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_INLINE_LOAD_FINISHED, (void*)&c);
139
+ * iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_INLINE_LOAD_FINISHED, (void*)&d);
140
+ * @endcode
141
+ *
142
+ * * Requirements:
143
+ * * Each inline (iJIT_Method_Inline_Load) method should be associated
144
+ * with two method IDs: one for itself; one for its immediate parent.
145
+ * * Address regions of inline methods of the same parent method cannot
146
+ * overlap each other.
147
+ * * Execution of the parent method must not be started until it and all
148
+ * its inline methods are reported.
149
+ * * Expected behaviour:
150
+ * * In case of nested inline methods an order of
151
+ * iJVM_EVENT_TYPE_METHOD_INLINE_LOAD_FINISHED events is not important.
152
+ * * If any event overwrites either inline method or top parent method,
153
+ * then the parent, including inline methods, becomes invalid and its memory
154
+ * region is treated as unloaded.
155
+ *
156
+ * **Life time of allocated data**\n
157
+ * The client sends an event notification to the agent with event-specific
158
+ * data, which is a structure. The pointers in the structure refer to memory
159
+ * allocated by the client, which is responsible for releasing it. The pointers are
160
+ * used by the iJIT_NotifyEvent method to copy client's data in a trace file,
161
+ * and they are not used after the iJIT_NotifyEvent method returns.
162
+ */
163
+
164
+ /**
165
+ * @defgroup jitapi JIT Profiling
166
+ * @ingroup internal
167
+ * @{
168
+ */
169
+
170
+ /**
171
+ * @brief Enumerator for the types of notifications
172
+ */
173
+ typedef enum iJIT_jvm_event
174
+ {
175
+ iJVM_EVENT_TYPE_SHUTDOWN = 2, /**<\brief Send this to shutdown the agent.
176
+ * Use NULL for event data. */
177
+
178
+ iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED = 13, /**<\brief Send when dynamic code is
179
+ * JIT compiled and loaded into
180
+ * memory by the JIT engine, but
181
+ * before the code is executed.
182
+ * Use iJIT_Method_Load as event
183
+ * data. */
184
+ /** @cond exclude_from_documentation */
185
+ iJVM_EVENT_TYPE_METHOD_UNLOAD_START, /**<\brief Send when compiled dynamic
186
+ * code is being unloaded from memory.
187
+ * Use iJIT_Method_Load as event data.*/
188
+ /** @endcond */
189
+
190
+ iJVM_EVENT_TYPE_METHOD_UPDATE, /**<\brief Send to provide new content for
191
+ * a previously reported dynamic code.
192
+ * The previous content will be invalidated
193
+ * starting from the time of the notification.
194
+ * Use iJIT_Method_Load as event data but
195
+ * required fields are following:
196
+ * - method_id identify the code to update.
197
+ * - method_load_address specify start address
198
+ * within identified code range
199
+ * where update should be started.
200
+ * - method_size specify length of updated code
201
+ * range. */
202
+
203
+
204
+ iJVM_EVENT_TYPE_METHOD_INLINE_LOAD_FINISHED, /**<\brief Send when an inline dynamic
205
+ * code is JIT compiled and loaded
206
+ * into memory by the JIT engine,
207
+ * but before the parent code region
208
+ * starts executing.
209
+ * Use iJIT_Method_Inline_Load as event data.*/
210
+
211
+ /** @cond exclude_from_documentation */
212
+ iJVM_EVENT_TYPE_METHOD_UPDATE_V2,
213
+ /** @endcond */
214
+
215
+ iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED_V2 = 21, /**<\brief Send when a dynamic code is
216
+ * JIT compiled and loaded into
217
+ * memory by the JIT engine, but
218
+ * before the code is executed.
219
+ * Use iJIT_Method_Load_V2 as event data. */
220
+
221
+ iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED_V3 /**<\brief Send when a dynamic code is
222
+ * JIT compiled and loaded into
223
+ * memory by the JIT engine, but
224
+ * before the code is executed.
225
+ * Use iJIT_Method_Load_V3 as event data. */
226
+ } iJIT_JVM_EVENT;
227
+
228
+ /**
229
+ * @brief Enumerator for the agent's mode
230
+ */
231
+ typedef enum _iJIT_IsProfilingActiveFlags
232
+ {
233
+ iJIT_NOTHING_RUNNING = 0x0000, /**<\brief The agent is not running;
234
+ * iJIT_NotifyEvent calls will
235
+ * not be processed. */
236
+ iJIT_SAMPLING_ON = 0x0001, /**<\brief The agent is running and
237
+ * ready to process notifications. */
238
+ } iJIT_IsProfilingActiveFlags;
239
+
240
+ /**
241
+ * @brief Description of a single entry in the line number information of a code region.
242
+ * @details A table of line number entries gives information about how the reported code region
243
+ * is mapped to source file.
244
+ * Intel(R) VTune(TM) Amplifier uses line number information to attribute
245
+ * the samples (virtual address) to a line number. \n
246
+ * It is acceptable to report different code addresses for the same source line:
247
+ * @code
248
+ * Offset LineNumber
249
+ * 1 2
250
+ * 12 4
251
+ * 15 2
252
+ * 18 1
253
+ * 21 30
254
+ *
255
+ * VTune Amplifier constructs the following table using the client data
256
+ *
257
+ * Code subrange Line number
258
+ * 0-1 2
259
+ * 1-12 4
260
+ * 12-15 2
261
+ * 15-18 1
262
+ * 18-21 30
263
+ * @endcode
264
+ */
265
+ typedef struct _LineNumberInfo
266
+ {
267
+ unsigned int Offset; /**<\brief Offset from the beginning of the code region. */
268
+ unsigned int LineNumber; /**<\brief Matching source line number offset (from beginning of source file). */
269
+
270
+ } *pLineNumberInfo, LineNumberInfo;
271
+
272
+ /**
273
+ * @brief Enumerator for the code architecture.
274
+ */
275
+ typedef enum _iJIT_CodeArchitecture
276
+ {
277
+ iJIT_CA_NATIVE = 0, /**<\brief Native to the process architecture that is calling it. */
278
+
279
+ iJIT_CA_32, /**<\brief 32-bit machine code. */
280
+
281
+ iJIT_CA_64 /**<\brief 64-bit machine code. */
282
+
283
+ } iJIT_CodeArchitecture;
284
+
285
+ #pragma pack(push, 8)
286
+
287
+ /**
288
+ * @brief Description of a JIT-compiled method
289
+ * @details When you use the iJIT_Method_Load structure to describe
290
+ * the JIT compiled method, use iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED
291
+ * as an event type to report it.
292
+ */
293
+ typedef struct _iJIT_Method_Load
294
+ {
295
+ unsigned int method_id; /**<\brief Unique method ID. Cannot be 0.
296
+ * You must either use the API function
297
+ * iJIT_GetNewMethodID to get a valid and unique
298
+ * method ID, or else manage ID uniqueness
299
+ * and correct range by yourself.\n
300
+ * You must use the same method ID for all code
301
+ * regions of the same method, otherwise different
302
+ * method IDs specify different methods. */
303
+
304
+ char* method_name; /**<\brief The name of the method. It can be optionally
305
+ * prefixed with its class name and appended with
306
+ * its complete signature. Can't be NULL. */
307
+
308
+ void* method_load_address; /**<\brief The start virtual address of the method code
309
+ * region. If NULL, data provided with
310
+ * event are not accepted. */
311
+
312
+ unsigned int method_size; /**<\brief The code size of the method in memory.
313
+ * If 0, then data provided with the event are not
314
+ * accepted. */
315
+
316
+ unsigned int line_number_size; /**<\brief The number of entries in the line number
317
+ * table. 0 if none. */
318
+
319
+ pLineNumberInfo line_number_table; /**<\brief Pointer to the line numbers info
320
+ * array. Can be NULL if
321
+ * line_number_size is 0. See
322
+ * LineNumberInfo Structure for a
323
+ * description of a single entry in
324
+ * the line number info array */
325
+
326
+ unsigned int class_id; /**<\brief This field is obsolete. */
327
+
328
+ char* class_file_name; /**<\brief Class name. Can be NULL.*/
329
+
330
+ char* source_file_name; /**<\brief Source file name. Can be NULL.*/
331
+
332
+ } *piJIT_Method_Load, iJIT_Method_Load;
333
+
334
+ /**
335
+ * @brief Description of a JIT-compiled method
336
+ * @details When you use the iJIT_Method_Load_V2 structure to describe
337
+ * the JIT compiled method, use iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED_V2
338
+ * as an event type to report it.
339
+ */
340
+ typedef struct _iJIT_Method_Load_V2
341
+ {
342
+ unsigned int method_id; /**<\brief Unique method ID. Cannot be 0.
343
+ * You must either use the API function
344
+ * iJIT_GetNewMethodID to get a valid and unique
345
+ * method ID, or else manage ID uniqueness
346
+ * and correct range by yourself.\n
347
+ * You must use the same method ID for all code
348
+ * regions of the same method, otherwise different
349
+ * method IDs specify different methods. */
350
+
351
+ char* method_name; /**<\brief The name of the method. It can be optionally
352
+ * prefixed with its class name and appended with
353
+ * its complete signature. Can't be NULL. */
354
+
355
+ void* method_load_address; /**<\brief The start virtual address of the method code
356
+ * region. If NULL, then data provided with the
357
+ * event are not accepted. */
358
+
359
+ unsigned int method_size; /**<\brief The code size of the method in memory.
360
+ * If 0, then data provided with the event are not
361
+ * accepted. */
362
+
363
+ unsigned int line_number_size; /**<\brief The number of entries in the line number
364
+ * table. 0 if none. */
365
+
366
+ pLineNumberInfo line_number_table; /**<\brief Pointer to the line numbers info
367
+ * array. Can be NULL if
368
+ * line_number_size is 0. See
369
+ * LineNumberInfo Structure for a
370
+ * description of a single entry in
371
+ * the line number info array. */
372
+
373
+ char* class_file_name; /**<\brief Class name. Can be NULL. */
374
+
375
+ char* source_file_name; /**<\brief Source file name. Can be NULL. */
376
+
377
+ char* module_name; /**<\brief Module name. Can be NULL.
378
+ The module name can be useful for distinguishing among
379
+ different JIT engines. VTune Amplifier will display
380
+ reported methods grouped by specific module. */
381
+
382
+ } *piJIT_Method_Load_V2, iJIT_Method_Load_V2;
383
+
384
+ /**
385
+ * @brief Description of a JIT-compiled method
386
+ * @details The iJIT_Method_Load_V3 structure is the same as iJIT_Method_Load_V2
387
+ * with a newly introduced 'arch' field that specifies architecture of the code region.
388
+ * When you use the iJIT_Method_Load_V3 structure to describe
389
+ * the JIT compiled method, use iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED_V3
390
+ * as an event type to report it.
391
+ */
392
+ typedef struct _iJIT_Method_Load_V3
393
+ {
394
+ unsigned int method_id; /**<\brief Unique method ID. Cannot be 0.
395
+ * You must either use the API function
396
+ * iJIT_GetNewMethodID to get a valid and unique
397
+ * method ID, or manage ID uniqueness
398
+ * and correct range by yourself.\n
399
+ * You must use the same method ID for all code
400
+ * regions of the same method, otherwise they are
401
+ * treated as regions of different methods. */
402
+
403
+ char* method_name; /**<\brief The name of the method. It can be optionally
404
+ * prefixed with its class name and appended with
405
+ * its complete signature. Cannot be NULL. */
406
+
407
+ void* method_load_address; /**<\brief The start virtual address of the method code
408
+ * region. If NULL, then data provided with the
409
+ * event are not accepted. */
410
+
411
+ unsigned int method_size; /**<\brief The code size of the method in memory.
412
+ * If 0, then data provided with the event are not
413
+ * accepted. */
414
+
415
+ unsigned int line_number_size; /**<\brief The number of entries in the line number
416
+ * table. 0 if none. */
417
+
418
+ pLineNumberInfo line_number_table; /**<\brief Pointer to the line numbers info
419
+ * array. Can be NULL if
420
+ * line_number_size is 0. See
421
+ * LineNumberInfo Structure for a
422
+ * description of a single entry in
423
+ * the line number info array. */
424
+
425
+ char* class_file_name; /**<\brief Class name. Can be NULL. */
426
+
427
+ char* source_file_name; /**<\brief Source file name. Can be NULL. */
428
+
429
+ char* module_name; /**<\brief Module name. Can be NULL.
430
+ * The module name can be useful for distinguishing among
431
+ * different JIT engines. VTune Amplifier will display
432
+ * reported methods grouped by specific module. */
433
+
434
+ iJIT_CodeArchitecture module_arch; /**<\brief Architecture of the method's code region.
435
+ * By default, it is the same as the process
436
+ * architecture that is calling it.
437
+ * For example, you can use it if your 32-bit JIT
438
+ * engine generates 64-bit code.
439
+ *
440
+ * If JIT engine reports both 32-bit and 64-bit types
441
+ * of methods then VTune Amplifier splits the methods
442
+ * with the same module name but with different
443
+ * architectures in two different modules. VTune Amplifier
444
+ * modifies the original name provided with a 64-bit method
445
+ * version by ending it with '(64)' */
446
+
447
+ } *piJIT_Method_Load_V3, iJIT_Method_Load_V3;
448
+
449
+ /**
450
+ * @brief Description of an inline JIT-compiled method
451
+ * @details When you use the_iJIT_Method_Inline_Load structure to describe
452
+ * the JIT compiled method, use iJVM_EVENT_TYPE_METHOD_INLINE_LOAD_FINISHED
453
+ * as an event type to report it.
454
+ */
455
+ typedef struct _iJIT_Method_Inline_Load
456
+ {
457
+ unsigned int method_id; /**<\brief Unique method ID. Cannot be 0.
458
+ * You must either use the API function
459
+ * iJIT_GetNewMethodID to get a valid and unique
460
+ * method ID, or else manage ID uniqueness
461
+ * and correct range by yourself. */
462
+
463
+ unsigned int parent_method_id; /**<\brief Unique immediate parent's method ID.
464
+ * Cannot be 0.
465
+ * You must either use the API function
466
+ * iJIT_GetNewMethodID to get a valid and unique
467
+ * method ID, or else manage ID uniqueness
468
+ * and correct range by yourself. */
469
+
470
+ char* method_name; /**<\brief The name of the method. It can be optionally
471
+ * prefixed with its class name and appended with
472
+ * its complete signature. Can't be NULL. */
473
+
474
+ void* method_load_address; /**<\brief The virtual address on which the method
475
+ * is inlined. If NULL, then data provided with
476
+ * the event are not accepted. */
477
+
478
+ unsigned int method_size; /**<\brief The code size of the method in memory.
479
+ * If 0, then data provided with the event are not
480
+ * accepted. */
481
+
482
+ unsigned int line_number_size; /**<\brief The number of entries in the line number
483
+ * table. 0 if none. */
484
+
485
+ pLineNumberInfo line_number_table; /**<\brief Pointer to the line numbers info
486
+ * array. Can be NULL if
487
+ * line_number_size is 0. See
488
+ * LineNumberInfo Structure for a
489
+ * description of a single entry in
490
+ * the line number info array */
491
+
492
+ char* class_file_name; /**<\brief Class name. Can be NULL.*/
493
+
494
+ char* source_file_name; /**<\brief Source file name. Can be NULL.*/
495
+
496
+ } *piJIT_Method_Inline_Load, iJIT_Method_Inline_Load;
497
+
498
+ /** @cond exclude_from_documentation */
499
+ /**
500
+ * @brief Description of a segment type
501
+ * @details Use the segment type to specify a type of data supplied
502
+ * with the iJVM_EVENT_TYPE_METHOD_UPDATE_V2 event to be applied to
503
+ * a certain code trace.
504
+ */
505
+ typedef enum _iJIT_SegmentType
506
+ {
507
+ iJIT_CT_UNKNOWN = 0,
508
+
509
+ iJIT_CT_CODE, /**<\brief Executable code. */
510
+
511
+ iJIT_CT_DATA, /**<\brief Data (not executable code).
512
+ * VTune Amplifier uses the format string
513
+ * (see iJIT_Method_Update) to represent
514
+ * this data in the VTune Amplifier GUI */
515
+
516
+ iJIT_CT_KEEP, /**<\brief Use the previous markup for the trace.
517
+ * Can be used for the following
518
+ * iJVM_EVENT_TYPE_METHOD_UPDATE_V2 events,
519
+ * if the type of the previously reported segment
520
+ * type is the same. */
521
+ iJIT_CT_EOF
522
+ } iJIT_SegmentType;
523
+
524
+ /**
525
+ * @brief Description of a dynamic update of the content within JIT-compiled method
526
+ * @details The JIT engine may generate the methods that are updated at runtime
527
+ * partially by mixed (data + executable code) content. When you use the iJIT_Method_Update
528
+ * structure to describe the update of the content within a JIT-compiled method,
529
+ * use iJVM_EVENT_TYPE_METHOD_UPDATE_V2 as an event type to report it.
530
+ *
531
+ * On the first Update event, VTune Amplifier copies the original code range reported by
532
+ * the iJVM_EVENT_TYPE_METHOD_LOAD event, then modifies it with the supplied bytes and
533
+ * adds the modified range to the original method. For next update events, VTune Amplifier
534
+ * does the same but it uses the latest modified version of a code region for update.
535
+ * Eventually, VTune Amplifier GUI displays multiple code ranges for the method reported by
536
+ * the iJVM_EVENT_TYPE_METHOD_LOAD event.
537
+ * Notes:
538
+ * - Multiple update events with different types for the same trace are allowed
539
+ * but they must be reported for the same code ranges.
540
+ * Example,
541
+ * @code
542
+ * [-- data---] Allowed
543
+ * [-- code --] Allowed
544
+ * [code] Ignored
545
+ * [-- data---] Allowed
546
+ * [-- code --] Allowed
547
+ * [------------ trace ---------]
548
+ * @endcode
549
+ * - The types of previously reported events can be changed but they must be reported
550
+ * for the same code ranges.
551
+ * Example,
552
+ * @code
553
+ * [-- data---] Allowed
554
+ * [-- code --] Allowed
555
+ * [-- data---] Allowed
556
+ * [-- code --] Allowed
557
+ * [------------ trace ---------]
558
+ * @endcode
559
+ */
560
+
561
+ typedef struct _iJIT_Method_Update
562
+ {
563
+ void* load_address; /**<\brief Start address of the update within a method */
564
+
565
+ unsigned int size; /**<\brief The update size */
566
+
567
+ iJIT_SegmentType type; /**<\brief Type of the update */
568
+
569
+ const char* data_format; /**<\brief C string that contains a format string
570
+ * that follows the same specifications as format in printf.
571
+ * The format string is used for iJIT_CT_CODE only
572
+ * and cannot be NULL.
573
+ * Format can be changed on the fly. */
574
+ } *piJIT_Method_Update, iJIT_Method_Update;
575
+
576
+ /** @endcond */
577
+
578
+ #pragma pack(pop)
579
+
580
+ /** @cond exclude_from_documentation */
581
+ #ifdef __cplusplus
582
+ extern "C" {
583
+ #endif /* __cplusplus */
584
+
585
+ #ifndef JITAPI_CDECL
586
+ # if defined WIN32 || defined _WIN32
587
+ # define JITAPI_CDECL __cdecl
588
+ # else /* defined WIN32 || defined _WIN32 */
589
+ # if defined _M_IX86 || defined __i386__
590
+ # define JITAPI_CDECL __attribute__ ((cdecl))
591
+ # else /* _M_IX86 || __i386__ */
592
+ # define JITAPI_CDECL /* actual only on x86_64 platform */
593
+ # endif /* _M_IX86 || __i386__ */
594
+ # endif /* defined WIN32 || defined _WIN32 */
595
+ #endif /* JITAPI_CDECL */
596
+
597
+ #define JITAPI JITAPI_CDECL
598
+ /** @endcond */
599
+
600
+ /**
601
+ * @brief Generates a new unique method ID.
602
+ *
603
+ * You must use this API to obtain unique and valid method IDs for methods or
604
+ * traces reported to the agent if you don't have your own mechanism to generate
605
+ * unique method IDs.
606
+ *
607
+ * @return a new unique method ID. When out of unique method IDs, this API
608
+ * returns 0, which is not an accepted value.
609
+ */
610
+ unsigned int JITAPI iJIT_GetNewMethodID(void);
611
+
612
+ /**
613
+ * @brief Returns the current mode of the agent.
614
+ *
615
+ * @return iJIT_SAMPLING_ON, indicating that agent is running, or
616
+ * iJIT_NOTHING_RUNNING if no agent is running.
617
+ */
618
+ iJIT_IsProfilingActiveFlags JITAPI iJIT_IsProfilingActive(void);
619
+
620
+ /**
621
+ * @brief Reports information about JIT-compiled code to the agent.
622
+ *
623
+ * The reported information is used to attribute samples obtained from any
624
+ * Intel(R) VTune(TM) Amplifier collector. This API needs to be called
625
+ * after JIT compilation and before the first entry into the JIT-compiled
626
+ * code.
627
+ *
628
+ * @param[in] event_type - type of the data sent to the agent
629
+ * @param[in] EventSpecificData - pointer to event-specific data
630
+ *
631
+ * @returns 1 on success, otherwise 0.
632
+ */
633
+ int JITAPI iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventSpecificData);
634
+
635
+ #ifdef __cplusplus
636
+ }
637
+ #endif /* __cplusplus */
638
+ /** @endcond */
639
+
640
+ /** @} jitapi group */
641
+
642
+ #endif /* __JITPROFILING_H__ */
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/ittnotify/legacy/ittnotify.h ADDED
@@ -0,0 +1,992 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (C) 2005-2019 Intel Corporation
3
+
4
+ SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
5
+ */
6
+ #ifndef _LEGACY_ITTNOTIFY_H_
7
+ #define _LEGACY_ITTNOTIFY_H_
8
+
9
+ /**
10
+ * @file
11
+ * @brief Legacy User API functions and types
12
+ */
13
+
14
+ /** @cond exclude_from_documentation */
15
+ #ifndef ITT_OS_WIN
16
+ # define ITT_OS_WIN 1
17
+ #endif /* ITT_OS_WIN */
18
+
19
+ #ifndef ITT_OS_LINUX
20
+ # define ITT_OS_LINUX 2
21
+ #endif /* ITT_OS_LINUX */
22
+
23
+ #ifndef ITT_OS_MAC
24
+ # define ITT_OS_MAC 3
25
+ #endif /* ITT_OS_MAC */
26
+
27
+ #ifndef ITT_OS_FREEBSD
28
+ # define ITT_OS_FREEBSD 4
29
+ #endif /* ITT_OS_FREEBSD */
30
+
31
+ #ifndef ITT_OS
32
+ # if defined WIN32 || defined _WIN32
33
+ # define ITT_OS ITT_OS_WIN
34
+ # elif defined( __APPLE__ ) && defined( __MACH__ )
35
+ # define ITT_OS ITT_OS_MAC
36
+ # elif defined( __FreeBSD__ )
37
+ # define ITT_OS ITT_OS_FREEBSD
38
+ # else
39
+ # define ITT_OS ITT_OS_LINUX
40
+ # endif
41
+ #endif /* ITT_OS */
42
+
43
+ #ifndef ITT_PLATFORM_WIN
44
+ # define ITT_PLATFORM_WIN 1
45
+ #endif /* ITT_PLATFORM_WIN */
46
+
47
+ #ifndef ITT_PLATFORM_POSIX
48
+ # define ITT_PLATFORM_POSIX 2
49
+ #endif /* ITT_PLATFORM_POSIX */
50
+
51
+ #ifndef ITT_PLATFORM_MAC
52
+ # define ITT_PLATFORM_MAC 3
53
+ #endif /* ITT_PLATFORM_MAC */
54
+
55
+ #ifndef ITT_PLATFORM_FREEBSD
56
+ # define ITT_PLATFORM_FREEBSD 4
57
+ #endif /* ITT_PLATFORM_FREEBSD */
58
+
59
+ #ifndef ITT_PLATFORM
60
+ # if ITT_OS==ITT_OS_WIN
61
+ # define ITT_PLATFORM ITT_PLATFORM_WIN
62
+ # elif ITT_OS==ITT_OS_MAC
63
+ # define ITT_PLATFORM ITT_PLATFORM_MAC
64
+ # elif ITT_OS==ITT_OS_FREEBSD
65
+ # define ITT_PLATFORM ITT_PLATFORM_FREEBSD
66
+ # else
67
+ # define ITT_PLATFORM ITT_PLATFORM_POSIX
68
+ # endif
69
+ #endif /* ITT_PLATFORM */
70
+
71
+ #if defined(_UNICODE) && !defined(UNICODE)
72
+ #define UNICODE
73
+ #endif
74
+
75
+ #include <stddef.h>
76
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
77
+ #include <tchar.h>
78
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
79
+ #include <stdint.h>
80
+ #if defined(UNICODE) || defined(_UNICODE)
81
+ #include <wchar.h>
82
+ #endif /* UNICODE || _UNICODE */
83
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
84
+
85
+ #ifndef ITTAPI_CDECL
86
+ # if ITT_PLATFORM==ITT_PLATFORM_WIN
87
+ # define ITTAPI_CDECL __cdecl
88
+ # else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
89
+ # if defined _M_IX86 || defined __i386__
90
+ # define ITTAPI_CDECL __attribute__ ((cdecl))
91
+ # else /* _M_IX86 || __i386__ */
92
+ # define ITTAPI_CDECL /* actual only on x86 platform */
93
+ # endif /* _M_IX86 || __i386__ */
94
+ # endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
95
+ #endif /* ITTAPI_CDECL */
96
+
97
+ #ifndef STDCALL
98
+ # if ITT_PLATFORM==ITT_PLATFORM_WIN
99
+ # define STDCALL __stdcall
100
+ # else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
101
+ # if defined _M_IX86 || defined __i386__
102
+ # define STDCALL __attribute__ ((stdcall))
103
+ # else /* _M_IX86 || __i386__ */
104
+ # define STDCALL /* supported only on x86 platform */
105
+ # endif /* _M_IX86 || __i386__ */
106
+ # endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
107
+ #endif /* STDCALL */
108
+
109
+ #define ITTAPI ITTAPI_CDECL
110
+ #define LIBITTAPI ITTAPI_CDECL
111
+
112
+ /* TODO: Temporary for compatibility! */
113
+ #define ITTAPI_CALL ITTAPI_CDECL
114
+ #define LIBITTAPI_CALL ITTAPI_CDECL
115
+
116
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
117
+ /* use __forceinline (VC++ specific) */
118
+ #if defined(__MINGW32__) && !defined(__cplusplus)
119
+ #define ITT_INLINE static __inline__ __attribute__((__always_inline__,__gnu_inline__))
120
+ #else
121
+ #define ITT_INLINE static __forceinline
122
+ #endif /* __MINGW32__ */
123
+
124
+ #define ITT_INLINE_ATTRIBUTE /* nothing */
125
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
126
+ /*
127
+ * Generally, functions are not inlined unless optimization is specified.
128
+ * For functions declared inline, this attribute inlines the function even
129
+ * if no optimization level was specified.
130
+ */
131
+ #ifdef __STRICT_ANSI__
132
+ #define ITT_INLINE static
133
+ #define ITT_INLINE_ATTRIBUTE __attribute__((unused))
134
+ #else /* __STRICT_ANSI__ */
135
+ #define ITT_INLINE static inline
136
+ #define ITT_INLINE_ATTRIBUTE __attribute__((always_inline, unused))
137
+ #endif /* __STRICT_ANSI__ */
138
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
139
+ /** @endcond */
140
+
141
+ /** @cond exclude_from_documentation */
142
+ /* Helper macro for joining tokens */
143
+ #define ITT_JOIN_AUX(p,n) p##n
144
+ #define ITT_JOIN(p,n) ITT_JOIN_AUX(p,n)
145
+
146
+ #ifdef ITT_MAJOR
147
+ #undef ITT_MAJOR
148
+ #endif
149
+ #ifdef ITT_MINOR
150
+ #undef ITT_MINOR
151
+ #endif
152
+ #define ITT_MAJOR 3
153
+ #define ITT_MINOR 0
154
+
155
+ /* Standard versioning of a token with major and minor version numbers */
156
+ #define ITT_VERSIONIZE(x) \
157
+ ITT_JOIN(x, \
158
+ ITT_JOIN(_, \
159
+ ITT_JOIN(ITT_MAJOR, \
160
+ ITT_JOIN(_, ITT_MINOR))))
161
+
162
+ #ifndef INTEL_ITTNOTIFY_PREFIX
163
+ # define INTEL_ITTNOTIFY_PREFIX __itt_
164
+ #endif /* INTEL_ITTNOTIFY_PREFIX */
165
+ #ifndef INTEL_ITTNOTIFY_POSTFIX
166
+ # define INTEL_ITTNOTIFY_POSTFIX _ptr_
167
+ #endif /* INTEL_ITTNOTIFY_POSTFIX */
168
+
169
+ #define ITTNOTIFY_NAME_AUX(n) ITT_JOIN(INTEL_ITTNOTIFY_PREFIX,n)
170
+ #define ITTNOTIFY_NAME(n) ITT_VERSIONIZE(ITTNOTIFY_NAME_AUX(ITT_JOIN(n,INTEL_ITTNOTIFY_POSTFIX)))
171
+
172
+ #define ITTNOTIFY_VOID(n) (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)
173
+ #define ITTNOTIFY_DATA(n) (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)
174
+
175
+ #define ITTNOTIFY_VOID_D0(n,d) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d)
176
+ #define ITTNOTIFY_VOID_D1(n,d,x) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x)
177
+ #define ITTNOTIFY_VOID_D2(n,d,x,y) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y)
178
+ #define ITTNOTIFY_VOID_D3(n,d,x,y,z) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z)
179
+ #define ITTNOTIFY_VOID_D4(n,d,x,y,z,a) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a)
180
+ #define ITTNOTIFY_VOID_D5(n,d,x,y,z,a,b) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b)
181
+ #define ITTNOTIFY_VOID_D6(n,d,x,y,z,a,b,c) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c)
182
+ #define ITTNOTIFY_DATA_D0(n,d) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d)
183
+ #define ITTNOTIFY_DATA_D1(n,d,x) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x)
184
+ #define ITTNOTIFY_DATA_D2(n,d,x,y) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y)
185
+ #define ITTNOTIFY_DATA_D3(n,d,x,y,z) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z)
186
+ #define ITTNOTIFY_DATA_D4(n,d,x,y,z,a) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a)
187
+ #define ITTNOTIFY_DATA_D5(n,d,x,y,z,a,b) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b)
188
+ #define ITTNOTIFY_DATA_D6(n,d,x,y,z,a,b,c) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c)
189
+
190
+ #ifdef ITT_STUB
191
+ #undef ITT_STUB
192
+ #endif
193
+ #ifdef ITT_STUBV
194
+ #undef ITT_STUBV
195
+ #endif
196
+ #define ITT_STUBV(api,type,name,args) \
197
+ typedef type (api* ITT_JOIN(ITTNOTIFY_NAME(name),_t)) args; \
198
+ extern ITT_JOIN(ITTNOTIFY_NAME(name),_t) ITTNOTIFY_NAME(name);
199
+ #define ITT_STUB ITT_STUBV
200
+ /** @endcond */
201
+
202
+ #ifdef __cplusplus
203
+ extern "C" {
204
+ #endif /* __cplusplus */
205
+
206
+ /**
207
+ * @defgroup legacy Legacy API
208
+ * @{
209
+ * @}
210
+ */
211
+
212
+ /**
213
+ * @defgroup legacy_control Collection Control
214
+ * @ingroup legacy
215
+ * General behavior: application continues to run, but no profiling information is being collected
216
+ *
217
+ * Pausing occurs not only for the current thread but for all process as well as spawned processes
218
+ * - Intel(R) Parallel Inspector and Intel(R) Inspector XE:
219
+ * - Does not analyze or report errors that involve memory access.
220
+ * - Other errors are reported as usual. Pausing data collection in
221
+ * Intel(R) Parallel Inspector and Intel(R) Inspector XE
222
+ * only pauses tracing and analyzing memory access.
223
+ * It does not pause tracing or analyzing threading APIs.
224
+ * .
225
+ * - Intel(R) Parallel Amplifier and Intel(R) VTune(TM) Amplifier XE:
226
+ * - Does continue to record when new threads are started.
227
+ * .
228
+ * - Other effects:
229
+ * - Possible reduction of runtime overhead.
230
+ * .
231
+ * @{
232
+ */
233
+ #ifndef _ITTNOTIFY_H_
234
+ /** @brief Pause collection */
235
+ void ITTAPI __itt_pause(void);
236
+ /** @brief Resume collection */
237
+ void ITTAPI __itt_resume(void);
238
+ /** @brief Detach collection */
239
+ void ITTAPI __itt_detach(void);
240
+
241
+ /** @cond exclude_from_documentation */
242
+ #ifndef INTEL_NO_MACRO_BODY
243
+ #ifndef INTEL_NO_ITTNOTIFY_API
244
+ ITT_STUBV(ITTAPI, void, pause, (void))
245
+ ITT_STUBV(ITTAPI, void, resume, (void))
246
+ ITT_STUBV(ITTAPI, void, detach, (void))
247
+ #define __itt_pause ITTNOTIFY_VOID(pause)
248
+ #define __itt_pause_ptr ITTNOTIFY_NAME(pause)
249
+ #define __itt_resume ITTNOTIFY_VOID(resume)
250
+ #define __itt_resume_ptr ITTNOTIFY_NAME(resume)
251
+ #define __itt_detach ITTNOTIFY_VOID(detach)
252
+ #define __itt_detach_ptr ITTNOTIFY_NAME(detach)
253
+ #else /* INTEL_NO_ITTNOTIFY_API */
254
+ #define __itt_pause()
255
+ #define __itt_pause_ptr 0
256
+ #define __itt_resume()
257
+ #define __itt_resume_ptr 0
258
+ #define __itt_detach()
259
+ #define __itt_detach_ptr 0
260
+ #endif /* INTEL_NO_ITTNOTIFY_API */
261
+ #else /* INTEL_NO_MACRO_BODY */
262
+ #define __itt_pause_ptr 0
263
+ #define __itt_resume_ptr 0
264
+ #define __itt_detach_ptr 0
265
+ #endif /* INTEL_NO_MACRO_BODY */
266
+ /** @endcond */
267
+ #endif /* _ITTNOTIFY_H_ */
268
+ /** @} legacy_control group */
269
+
270
+ /**
271
+ * @defgroup legacy_threads Threads
272
+ * @ingroup legacy
273
+ * Threads group
274
+ * @warning Legacy API
275
+ * @{
276
+ */
277
+ /**
278
+ * @deprecated Legacy API
279
+ * @brief Set name to be associated with thread in analysis GUI.
280
+ * @return __itt_err upon failure (name or namelen being null,name and namelen mismatched)
281
+ */
282
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
283
+ int LIBITTAPI __itt_thr_name_setA(const char *name, int namelen);
284
+ int LIBITTAPI __itt_thr_name_setW(const wchar_t *name, int namelen);
285
+ #if defined(UNICODE) || defined(_UNICODE)
286
+ # define __itt_thr_name_set __itt_thr_name_setW
287
+ # define __itt_thr_name_set_ptr __itt_thr_name_setW_ptr
288
+ #else
289
+ # define __itt_thr_name_set __itt_thr_name_setA
290
+ # define __itt_thr_name_set_ptr __itt_thr_name_setA_ptr
291
+ #endif /* UNICODE */
292
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
293
+ int LIBITTAPI __itt_thr_name_set(const char *name, int namelen);
294
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
295
+
296
+ /** @cond exclude_from_documentation */
297
+ #ifndef INTEL_NO_MACRO_BODY
298
+ #ifndef INTEL_NO_ITTNOTIFY_API
299
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
300
+ ITT_STUB(LIBITTAPI, int, thr_name_setA, (const char *name, int namelen))
301
+ ITT_STUB(LIBITTAPI, int, thr_name_setW, (const wchar_t *name, int namelen))
302
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
303
+ ITT_STUB(LIBITTAPI, int, thr_name_set, (const char *name, int namelen))
304
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
305
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
306
+ #define __itt_thr_name_setA ITTNOTIFY_DATA(thr_name_setA)
307
+ #define __itt_thr_name_setA_ptr ITTNOTIFY_NAME(thr_name_setA)
308
+ #define __itt_thr_name_setW ITTNOTIFY_DATA(thr_name_setW)
309
+ #define __itt_thr_name_setW_ptr ITTNOTIFY_NAME(thr_name_setW)
310
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
311
+ #define __itt_thr_name_set ITTNOTIFY_DATA(thr_name_set)
312
+ #define __itt_thr_name_set_ptr ITTNOTIFY_NAME(thr_name_set)
313
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
314
+ #else /* INTEL_NO_ITTNOTIFY_API */
315
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
316
+ #define __itt_thr_name_setA(name, namelen)
317
+ #define __itt_thr_name_setA_ptr 0
318
+ #define __itt_thr_name_setW(name, namelen)
319
+ #define __itt_thr_name_setW_ptr 0
320
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
321
+ #define __itt_thr_name_set(name, namelen)
322
+ #define __itt_thr_name_set_ptr 0
323
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
324
+ #endif /* INTEL_NO_ITTNOTIFY_API */
325
+ #else /* INTEL_NO_MACRO_BODY */
326
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
327
+ #define __itt_thr_name_setA_ptr 0
328
+ #define __itt_thr_name_setW_ptr 0
329
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
330
+ #define __itt_thr_name_set_ptr 0
331
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
332
+ #endif /* INTEL_NO_MACRO_BODY */
333
+ /** @endcond */
334
+
335
+ /**
336
+ * @deprecated Legacy API
337
+ * @brief Mark current thread as ignored from this point on, for the duration of its existence.
338
+ */
339
+ void LIBITTAPI __itt_thr_ignore(void);
340
+
341
+ /** @cond exclude_from_documentation */
342
+ #ifndef INTEL_NO_MACRO_BODY
343
+ #ifndef INTEL_NO_ITTNOTIFY_API
344
+ ITT_STUBV(LIBITTAPI, void, thr_ignore, (void))
345
+ #define __itt_thr_ignore ITTNOTIFY_VOID(thr_ignore)
346
+ #define __itt_thr_ignore_ptr ITTNOTIFY_NAME(thr_ignore)
347
+ #else /* INTEL_NO_ITTNOTIFY_API */
348
+ #define __itt_thr_ignore()
349
+ #define __itt_thr_ignore_ptr 0
350
+ #endif /* INTEL_NO_ITTNOTIFY_API */
351
+ #else /* INTEL_NO_MACRO_BODY */
352
+ #define __itt_thr_ignore_ptr 0
353
+ #endif /* INTEL_NO_MACRO_BODY */
354
+ /** @endcond */
355
+ /** @} legacy_threads group */
356
+
357
+ /**
358
+ * @defgroup legacy_sync Synchronization
359
+ * @ingroup legacy
360
+ * Synchronization group
361
+ * @warning Legacy API
362
+ * @{
363
+ */
364
+ /**
365
+ * @hideinitializer
366
+ * @brief possible value of attribute argument for sync object type
367
+ */
368
+ #define __itt_attr_barrier 1
369
+
370
+ /**
371
+ * @hideinitializer
372
+ * @brief possible value of attribute argument for sync object type
373
+ */
374
+ #define __itt_attr_mutex 2
375
+
376
+ /**
377
+ * @deprecated Legacy API
378
+ * @brief Assign a name to a sync object using char or Unicode string
379
+ * @param[in] addr - pointer to the sync object. You should use a real pointer to your object
380
+ * to make sure that the values don't clash with other object addresses
381
+ * @param[in] objtype - null-terminated object type string. If NULL is passed, the object will
382
+ * be assumed to be of generic "User Synchronization" type
383
+ * @param[in] objname - null-terminated object name string. If NULL, no name will be assigned
384
+ * to the object -- you can use the __itt_sync_rename call later to assign
385
+ * the name
386
+ * @param[in] attribute - one of [#__itt_attr_barrier, #__itt_attr_mutex] values which defines the
387
+ * exact semantics of how prepare/acquired/releasing calls work.
388
+ */
389
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
390
+ void ITTAPI __itt_sync_set_nameA(void *addr, const char *objtype, const char *objname, int attribute);
391
+ void ITTAPI __itt_sync_set_nameW(void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute);
392
+ #if defined(UNICODE) || defined(_UNICODE)
393
+ # define __itt_sync_set_name __itt_sync_set_nameW
394
+ # define __itt_sync_set_name_ptr __itt_sync_set_nameW_ptr
395
+ #else /* UNICODE */
396
+ # define __itt_sync_set_name __itt_sync_set_nameA
397
+ # define __itt_sync_set_name_ptr __itt_sync_set_nameA_ptr
398
+ #endif /* UNICODE */
399
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
400
+ void ITTAPI __itt_sync_set_name(void *addr, const char* objtype, const char* objname, int attribute);
401
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
402
+
403
+ /** @cond exclude_from_documentation */
404
+ #ifndef INTEL_NO_MACRO_BODY
405
+ #ifndef INTEL_NO_ITTNOTIFY_API
406
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
407
+ ITT_STUBV(ITTAPI, void, sync_set_nameA, (void *addr, const char *objtype, const char *objname, int attribute))
408
+ ITT_STUBV(ITTAPI, void, sync_set_nameW, (void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute))
409
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
410
+ ITT_STUBV(ITTAPI, void, sync_set_name, (void *addr, const char *objtype, const char *objname, int attribute))
411
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
412
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
413
+ #define __itt_sync_set_nameA ITTNOTIFY_VOID(sync_set_nameA)
414
+ #define __itt_sync_set_nameA_ptr ITTNOTIFY_NAME(sync_set_nameA)
415
+ #define __itt_sync_set_nameW ITTNOTIFY_VOID(sync_set_nameW)
416
+ #define __itt_sync_set_nameW_ptr ITTNOTIFY_NAME(sync_set_nameW)
417
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
418
+ #define __itt_sync_set_name ITTNOTIFY_VOID(sync_set_name)
419
+ #define __itt_sync_set_name_ptr ITTNOTIFY_NAME(sync_set_name)
420
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
421
+ #else /* INTEL_NO_ITTNOTIFY_API */
422
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
423
+ #define __itt_sync_set_nameA(addr, objtype, objname, attribute)
424
+ #define __itt_sync_set_nameA_ptr 0
425
+ #define __itt_sync_set_nameW(addr, objtype, objname, attribute)
426
+ #define __itt_sync_set_nameW_ptr 0
427
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
428
+ #define __itt_sync_set_name(addr, objtype, objname, attribute)
429
+ #define __itt_sync_set_name_ptr 0
430
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
431
+ #endif /* INTEL_NO_ITTNOTIFY_API */
432
+ #else /* INTEL_NO_MACRO_BODY */
433
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
434
+ #define __itt_sync_set_nameA_ptr 0
435
+ #define __itt_sync_set_nameW_ptr 0
436
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
437
+ #define __itt_sync_set_name_ptr 0
438
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
439
+ #endif /* INTEL_NO_MACRO_BODY */
440
+ /** @endcond */
441
+
442
+ /**
443
+ * @deprecated Legacy API
444
+ * @brief Assign a name and type to a sync object using char or Unicode string
445
+ * @param[in] addr - pointer to the sync object. You should use a real pointer to your object
446
+ * to make sure that the values don't clash with other object addresses
447
+ * @param[in] objtype - null-terminated object type string. If NULL is passed, the object will
448
+ * be assumed to be of generic "User Synchronization" type
449
+ * @param[in] objname - null-terminated object name string. If NULL, no name will be assigned
450
+ * to the object -- you can use the __itt_sync_rename call later to assign
451
+ * the name
452
+ * @param[in] typelen, namelen - a length of string for appropriate objtype and objname parameter
453
+ * @param[in] attribute - one of [#__itt_attr_barrier, #__itt_attr_mutex] values which defines the
454
+ * exact semantics of how prepare/acquired/releasing calls work.
455
+ * @return __itt_err upon failure (name or namelen being null,name and namelen mismatched)
456
+ */
457
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
458
+ int LIBITTAPI __itt_notify_sync_nameA(void *addr, const char *objtype, int typelen, const char *objname, int namelen, int attribute);
459
+ int LIBITTAPI __itt_notify_sync_nameW(void *addr, const wchar_t *objtype, int typelen, const wchar_t *objname, int namelen, int attribute);
460
+ #if defined(UNICODE) || defined(_UNICODE)
461
+ # define __itt_notify_sync_name __itt_notify_sync_nameW
462
+ #else
463
+ # define __itt_notify_sync_name __itt_notify_sync_nameA
464
+ #endif
465
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
466
+ int LIBITTAPI __itt_notify_sync_name(void *addr, const char *objtype, int typelen, const char *objname, int namelen, int attribute);
467
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
468
+
469
+ /** @cond exclude_from_documentation */
470
+ #ifndef INTEL_NO_MACRO_BODY
471
+ #ifndef INTEL_NO_ITTNOTIFY_API
472
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
473
+ ITT_STUB(LIBITTAPI, int, notify_sync_nameA, (void *addr, const char *objtype, int typelen, const char *objname, int namelen, int attribute))
474
+ ITT_STUB(LIBITTAPI, int, notify_sync_nameW, (void *addr, const wchar_t *objtype, int typelen, const wchar_t *objname, int namelen, int attribute))
475
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
476
+ ITT_STUB(LIBITTAPI, int, notify_sync_name, (void *addr, const char *objtype, int typelen, const char *objname, int namelen, int attribute))
477
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
478
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
479
+ #define __itt_notify_sync_nameA ITTNOTIFY_DATA(notify_sync_nameA)
480
+ #define __itt_notify_sync_nameA_ptr ITTNOTIFY_NAME(notify_sync_nameA)
481
+ #define __itt_notify_sync_nameW ITTNOTIFY_DATA(notify_sync_nameW)
482
+ #define __itt_notify_sync_nameW_ptr ITTNOTIFY_NAME(notify_sync_nameW)
483
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
484
+ #define __itt_notify_sync_name ITTNOTIFY_DATA(notify_sync_name)
485
+ #define __itt_notify_sync_name_ptr ITTNOTIFY_NAME(notify_sync_name)
486
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
487
+ #else /* INTEL_NO_ITTNOTIFY_API */
488
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
489
+ #define __itt_notify_sync_nameA(addr, objtype, typelen, objname, namelen, attribute)
490
+ #define __itt_notify_sync_nameA_ptr 0
491
+ #define __itt_notify_sync_nameW(addr, objtype, typelen, objname, namelen, attribute)
492
+ #define __itt_notify_sync_nameW_ptr 0
493
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
494
+ #define __itt_notify_sync_name(addr, objtype, typelen, objname, namelen, attribute)
495
+ #define __itt_notify_sync_name_ptr 0
496
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
497
+ #endif /* INTEL_NO_ITTNOTIFY_API */
498
+ #else /* INTEL_NO_MACRO_BODY */
499
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
500
+ #define __itt_notify_sync_nameA_ptr 0
501
+ #define __itt_notify_sync_nameW_ptr 0
502
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
503
+ #define __itt_notify_sync_name_ptr 0
504
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
505
+ #endif /* INTEL_NO_MACRO_BODY */
506
+ /** @endcond */
507
+
508
+ /**
509
+ * @deprecated Legacy API
510
+ * @brief Enter spin loop on user-defined sync object
511
+ */
512
+ void LIBITTAPI __itt_notify_sync_prepare(void* addr);
513
+
514
+ /** @cond exclude_from_documentation */
515
+ #ifndef INTEL_NO_MACRO_BODY
516
+ #ifndef INTEL_NO_ITTNOTIFY_API
517
+ ITT_STUBV(LIBITTAPI, void, notify_sync_prepare, (void *addr))
518
+ #define __itt_notify_sync_prepare ITTNOTIFY_VOID(notify_sync_prepare)
519
+ #define __itt_notify_sync_prepare_ptr ITTNOTIFY_NAME(notify_sync_prepare)
520
+ #else /* INTEL_NO_ITTNOTIFY_API */
521
+ #define __itt_notify_sync_prepare(addr)
522
+ #define __itt_notify_sync_prepare_ptr 0
523
+ #endif /* INTEL_NO_ITTNOTIFY_API */
524
+ #else /* INTEL_NO_MACRO_BODY */
525
+ #define __itt_notify_sync_prepare_ptr 0
526
+ #endif /* INTEL_NO_MACRO_BODY */
527
+ /** @endcond */
528
+
529
+ /**
530
+ * @deprecated Legacy API
531
+ * @brief Quit spin loop without acquiring spin object
532
+ */
533
+ void LIBITTAPI __itt_notify_sync_cancel(void *addr);
534
+
535
+ /** @cond exclude_from_documentation */
536
+ #ifndef INTEL_NO_MACRO_BODY
537
+ #ifndef INTEL_NO_ITTNOTIFY_API
538
+ ITT_STUBV(LIBITTAPI, void, notify_sync_cancel, (void *addr))
539
+ #define __itt_notify_sync_cancel ITTNOTIFY_VOID(notify_sync_cancel)
540
+ #define __itt_notify_sync_cancel_ptr ITTNOTIFY_NAME(notify_sync_cancel)
541
+ #else /* INTEL_NO_ITTNOTIFY_API */
542
+ #define __itt_notify_sync_cancel(addr)
543
+ #define __itt_notify_sync_cancel_ptr 0
544
+ #endif /* INTEL_NO_ITTNOTIFY_API */
545
+ #else /* INTEL_NO_MACRO_BODY */
546
+ #define __itt_notify_sync_cancel_ptr 0
547
+ #endif /* INTEL_NO_MACRO_BODY */
548
+ /** @endcond */
549
+
550
+ /**
551
+ * @deprecated Legacy API
552
+ * @brief Successful spin loop completion (sync object acquired)
553
+ */
554
+ void LIBITTAPI __itt_notify_sync_acquired(void *addr);
555
+
556
+ /** @cond exclude_from_documentation */
557
+ #ifndef INTEL_NO_MACRO_BODY
558
+ #ifndef INTEL_NO_ITTNOTIFY_API
559
+ ITT_STUBV(LIBITTAPI, void, notify_sync_acquired, (void *addr))
560
+ #define __itt_notify_sync_acquired ITTNOTIFY_VOID(notify_sync_acquired)
561
+ #define __itt_notify_sync_acquired_ptr ITTNOTIFY_NAME(notify_sync_acquired)
562
+ #else /* INTEL_NO_ITTNOTIFY_API */
563
+ #define __itt_notify_sync_acquired(addr)
564
+ #define __itt_notify_sync_acquired_ptr 0
565
+ #endif /* INTEL_NO_ITTNOTIFY_API */
566
+ #else /* INTEL_NO_MACRO_BODY */
567
+ #define __itt_notify_sync_acquired_ptr 0
568
+ #endif /* INTEL_NO_MACRO_BODY */
569
+ /** @endcond */
570
+
571
+ /**
572
+ * @deprecated Legacy API
573
+ * @brief Start sync object releasing code. Is called before the lock release call.
574
+ */
575
+ void LIBITTAPI __itt_notify_sync_releasing(void* addr);
576
+
577
+ /** @cond exclude_from_documentation */
578
+ #ifndef INTEL_NO_MACRO_BODY
579
+ #ifndef INTEL_NO_ITTNOTIFY_API
580
+ ITT_STUBV(LIBITTAPI, void, notify_sync_releasing, (void *addr))
581
+ #define __itt_notify_sync_releasing ITTNOTIFY_VOID(notify_sync_releasing)
582
+ #define __itt_notify_sync_releasing_ptr ITTNOTIFY_NAME(notify_sync_releasing)
583
+ #else /* INTEL_NO_ITTNOTIFY_API */
584
+ #define __itt_notify_sync_releasing(addr)
585
+ #define __itt_notify_sync_releasing_ptr 0
586
+ #endif /* INTEL_NO_ITTNOTIFY_API */
587
+ #else /* INTEL_NO_MACRO_BODY */
588
+ #define __itt_notify_sync_releasing_ptr 0
589
+ #endif /* INTEL_NO_MACRO_BODY */
590
+ /** @endcond */
591
+ /** @} legacy_sync group */
592
+
593
+ #ifndef _ITTNOTIFY_H_
594
+ /**
595
+ * @defgroup legacy_events Events
596
+ * @ingroup legacy
597
+ * Events group
598
+ * @{
599
+ */
600
+
601
+ /** @brief user event type */
602
+ typedef int __itt_event;
603
+
604
+ /**
605
+ * @brief Create an event notification
606
+ * @note name or namelen being null/name and namelen not matching, user event feature not enabled
607
+ * @return non-zero event identifier upon success and __itt_err otherwise
608
+ */
609
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
610
+ __itt_event LIBITTAPI __itt_event_createA(const char *name, int namelen);
611
+ __itt_event LIBITTAPI __itt_event_createW(const wchar_t *name, int namelen);
612
+ #if defined(UNICODE) || defined(_UNICODE)
613
+ # define __itt_event_create __itt_event_createW
614
+ # define __itt_event_create_ptr __itt_event_createW_ptr
615
+ #else
616
+ # define __itt_event_create __itt_event_createA
617
+ # define __itt_event_create_ptr __itt_event_createA_ptr
618
+ #endif /* UNICODE */
619
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
620
+ __itt_event LIBITTAPI __itt_event_create(const char *name, int namelen);
621
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
622
+
623
+ /** @cond exclude_from_documentation */
624
+ #ifndef INTEL_NO_MACRO_BODY
625
+ #ifndef INTEL_NO_ITTNOTIFY_API
626
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
627
+ ITT_STUB(LIBITTAPI, __itt_event, event_createA, (const char *name, int namelen))
628
+ ITT_STUB(LIBITTAPI, __itt_event, event_createW, (const wchar_t *name, int namelen))
629
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
630
+ ITT_STUB(LIBITTAPI, __itt_event, event_create, (const char *name, int namelen))
631
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
632
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
633
+ #define __itt_event_createA ITTNOTIFY_DATA(event_createA)
634
+ #define __itt_event_createA_ptr ITTNOTIFY_NAME(event_createA)
635
+ #define __itt_event_createW ITTNOTIFY_DATA(event_createW)
636
+ #define __itt_event_createW_ptr ITTNOTIFY_NAME(event_createW)
637
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
638
+ #define __itt_event_create ITTNOTIFY_DATA(event_create)
639
+ #define __itt_event_create_ptr ITTNOTIFY_NAME(event_create)
640
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
641
+ #else /* INTEL_NO_ITTNOTIFY_API */
642
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
643
+ #define __itt_event_createA(name, namelen) (__itt_event)0
644
+ #define __itt_event_createA_ptr 0
645
+ #define __itt_event_createW(name, namelen) (__itt_event)0
646
+ #define __itt_event_createW_ptr 0
647
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
648
+ #define __itt_event_create(name, namelen) (__itt_event)0
649
+ #define __itt_event_create_ptr 0
650
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
651
+ #endif /* INTEL_NO_ITTNOTIFY_API */
652
+ #else /* INTEL_NO_MACRO_BODY */
653
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
654
+ #define __itt_event_createA_ptr 0
655
+ #define __itt_event_createW_ptr 0
656
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
657
+ #define __itt_event_create_ptr 0
658
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
659
+ #endif /* INTEL_NO_MACRO_BODY */
660
+ /** @endcond */
661
+
662
+ /**
663
+ * @brief Record an event occurrence.
664
+ * @return __itt_err upon failure (invalid event id/user event feature not enabled)
665
+ */
666
+ int LIBITTAPI __itt_event_start(__itt_event event);
667
+
668
+ /** @cond exclude_from_documentation */
669
+ #ifndef INTEL_NO_MACRO_BODY
670
+ #ifndef INTEL_NO_ITTNOTIFY_API
671
+ ITT_STUB(LIBITTAPI, int, event_start, (__itt_event event))
672
+ #define __itt_event_start ITTNOTIFY_DATA(event_start)
673
+ #define __itt_event_start_ptr ITTNOTIFY_NAME(event_start)
674
+ #else /* INTEL_NO_ITTNOTIFY_API */
675
+ #define __itt_event_start(event) (int)0
676
+ #define __itt_event_start_ptr 0
677
+ #endif /* INTEL_NO_ITTNOTIFY_API */
678
+ #else /* INTEL_NO_MACRO_BODY */
679
+ #define __itt_event_start_ptr 0
680
+ #endif /* INTEL_NO_MACRO_BODY */
681
+ /** @endcond */
682
+
683
+ /**
684
+ * @brief Record an event end occurrence.
685
+ * @note It is optional if events do not have durations.
686
+ * @return __itt_err upon failure (invalid event id/user event feature not enabled)
687
+ */
688
+ int LIBITTAPI __itt_event_end(__itt_event event);
689
+
690
+ /** @cond exclude_from_documentation */
691
+ #ifndef INTEL_NO_MACRO_BODY
692
+ #ifndef INTEL_NO_ITTNOTIFY_API
693
+ ITT_STUB(LIBITTAPI, int, event_end, (__itt_event event))
694
+ #define __itt_event_end ITTNOTIFY_DATA(event_end)
695
+ #define __itt_event_end_ptr ITTNOTIFY_NAME(event_end)
696
+ #else /* INTEL_NO_ITTNOTIFY_API */
697
+ #define __itt_event_end(event) (int)0
698
+ #define __itt_event_end_ptr 0
699
+ #endif /* INTEL_NO_ITTNOTIFY_API */
700
+ #else /* INTEL_NO_MACRO_BODY */
701
+ #define __itt_event_end_ptr 0
702
+ #endif /* INTEL_NO_MACRO_BODY */
703
+ /** @endcond */
704
+ /** @} legacy_events group */
705
+ #endif /* _ITTNOTIFY_H_ */
706
+
707
+ /**
708
+ * @defgroup legacy_memory Memory Accesses
709
+ * @ingroup legacy
710
+ */
711
+
712
+ /**
713
+ * @deprecated Legacy API
714
+ * @brief Inform the tool of memory accesses on reading
715
+ */
716
+ void LIBITTAPI __itt_memory_read(void *addr, size_t size);
717
+
718
+ /** @cond exclude_from_documentation */
719
+ #ifndef INTEL_NO_MACRO_BODY
720
+ #ifndef INTEL_NO_ITTNOTIFY_API
721
+ ITT_STUBV(LIBITTAPI, void, memory_read, (void *addr, size_t size))
722
+ #define __itt_memory_read ITTNOTIFY_VOID(memory_read)
723
+ #define __itt_memory_read_ptr ITTNOTIFY_NAME(memory_read)
724
+ #else /* INTEL_NO_ITTNOTIFY_API */
725
+ #define __itt_memory_read(addr, size)
726
+ #define __itt_memory_read_ptr 0
727
+ #endif /* INTEL_NO_ITTNOTIFY_API */
728
+ #else /* INTEL_NO_MACRO_BODY */
729
+ #define __itt_memory_read_ptr 0
730
+ #endif /* INTEL_NO_MACRO_BODY */
731
+ /** @endcond */
732
+
733
+ /**
734
+ * @deprecated Legacy API
735
+ * @brief Inform the tool of memory accesses on writing
736
+ */
737
+ void LIBITTAPI __itt_memory_write(void *addr, size_t size);
738
+
739
+ /** @cond exclude_from_documentation */
740
+ #ifndef INTEL_NO_MACRO_BODY
741
+ #ifndef INTEL_NO_ITTNOTIFY_API
742
+ ITT_STUBV(LIBITTAPI, void, memory_write, (void *addr, size_t size))
743
+ #define __itt_memory_write ITTNOTIFY_VOID(memory_write)
744
+ #define __itt_memory_write_ptr ITTNOTIFY_NAME(memory_write)
745
+ #else /* INTEL_NO_ITTNOTIFY_API */
746
+ #define __itt_memory_write(addr, size)
747
+ #define __itt_memory_write_ptr 0
748
+ #endif /* INTEL_NO_ITTNOTIFY_API */
749
+ #else /* INTEL_NO_MACRO_BODY */
750
+ #define __itt_memory_write_ptr 0
751
+ #endif /* INTEL_NO_MACRO_BODY */
752
+ /** @endcond */
753
+
754
+ /**
755
+ * @deprecated Legacy API
756
+ * @brief Inform the tool of memory accesses on updating
757
+ */
758
+ void LIBITTAPI __itt_memory_update(void *address, size_t size);
759
+
760
+ /** @cond exclude_from_documentation */
761
+ #ifndef INTEL_NO_MACRO_BODY
762
+ #ifndef INTEL_NO_ITTNOTIFY_API
763
+ ITT_STUBV(LIBITTAPI, void, memory_update, (void *addr, size_t size))
764
+ #define __itt_memory_update ITTNOTIFY_VOID(memory_update)
765
+ #define __itt_memory_update_ptr ITTNOTIFY_NAME(memory_update)
766
+ #else /* INTEL_NO_ITTNOTIFY_API */
767
+ #define __itt_memory_update(addr, size)
768
+ #define __itt_memory_update_ptr 0
769
+ #endif /* INTEL_NO_ITTNOTIFY_API */
770
+ #else /* INTEL_NO_MACRO_BODY */
771
+ #define __itt_memory_update_ptr 0
772
+ #endif /* INTEL_NO_MACRO_BODY */
773
+ /** @endcond */
774
+ /** @} legacy_memory group */
775
+
776
+ /**
777
+ * @defgroup legacy_state Thread and Object States
778
+ * @ingroup legacy
779
+ */
780
+
781
+ /** @brief state type */
782
+ typedef int __itt_state_t;
783
+
784
+ /** @cond exclude_from_documentation */
785
+ typedef enum __itt_obj_state {
786
+ __itt_obj_state_err = 0,
787
+ __itt_obj_state_clr = 1,
788
+ __itt_obj_state_set = 2,
789
+ __itt_obj_state_use = 3
790
+ } __itt_obj_state_t;
791
+
792
+ typedef enum __itt_thr_state {
793
+ __itt_thr_state_err = 0,
794
+ __itt_thr_state_clr = 1,
795
+ __itt_thr_state_set = 2
796
+ } __itt_thr_state_t;
797
+
798
+ typedef enum __itt_obj_prop {
799
+ __itt_obj_prop_watch = 1,
800
+ __itt_obj_prop_ignore = 2,
801
+ __itt_obj_prop_sharable = 3
802
+ } __itt_obj_prop_t;
803
+
804
+ typedef enum __itt_thr_prop {
805
+ __itt_thr_prop_quiet = 1
806
+ } __itt_thr_prop_t;
807
+ /** @endcond */
808
+
809
+ /**
810
+ * @deprecated Legacy API
811
+ * @brief managing thread and object states
812
+ */
813
+ __itt_state_t LIBITTAPI __itt_state_get(void);
814
+
815
+ /** @cond exclude_from_documentation */
816
+ #ifndef INTEL_NO_MACRO_BODY
817
+ #ifndef INTEL_NO_ITTNOTIFY_API
818
+ ITT_STUB(ITTAPI, __itt_state_t, state_get, (void))
819
+ #define __itt_state_get ITTNOTIFY_DATA(state_get)
820
+ #define __itt_state_get_ptr ITTNOTIFY_NAME(state_get)
821
+ #else /* INTEL_NO_ITTNOTIFY_API */
822
+ #define __itt_state_get(void) (__itt_state_t)0
823
+ #define __itt_state_get_ptr 0
824
+ #endif /* INTEL_NO_ITTNOTIFY_API */
825
+ #else /* INTEL_NO_MACRO_BODY */
826
+ #define __itt_state_get_ptr 0
827
+ #endif /* INTEL_NO_MACRO_BODY */
828
+ /** @endcond */
829
+
830
+ /**
831
+ * @deprecated Legacy API
832
+ * @brief managing thread and object states
833
+ */
834
+ __itt_state_t LIBITTAPI __itt_state_set(__itt_state_t s);
835
+
836
+ /** @cond exclude_from_documentation */
837
+ #ifndef INTEL_NO_MACRO_BODY
838
+ #ifndef INTEL_NO_ITTNOTIFY_API
839
+ ITT_STUB(ITTAPI, __itt_state_t, state_set, (__itt_state_t s))
840
+ #define __itt_state_set ITTNOTIFY_DATA(state_set)
841
+ #define __itt_state_set_ptr ITTNOTIFY_NAME(state_set)
842
+ #else /* INTEL_NO_ITTNOTIFY_API */
843
+ #define __itt_state_set(s) (__itt_state_t)0
844
+ #define __itt_state_set_ptr 0
845
+ #endif /* INTEL_NO_ITTNOTIFY_API */
846
+ #else /* INTEL_NO_MACRO_BODY */
847
+ #define __itt_state_set_ptr 0
848
+ #endif /* INTEL_NO_MACRO_BODY */
849
+ /** @endcond */
850
+
851
+ /**
852
+ * @deprecated Legacy API
853
+ * @brief managing thread and object modes
854
+ */
855
+ __itt_thr_state_t LIBITTAPI __itt_thr_mode_set(__itt_thr_prop_t p, __itt_thr_state_t s);
856
+
857
+ /** @cond exclude_from_documentation */
858
+ #ifndef INTEL_NO_MACRO_BODY
859
+ #ifndef INTEL_NO_ITTNOTIFY_API
860
+ ITT_STUB(ITTAPI, __itt_thr_state_t, thr_mode_set, (__itt_thr_prop_t p, __itt_thr_state_t s))
861
+ #define __itt_thr_mode_set ITTNOTIFY_DATA(thr_mode_set)
862
+ #define __itt_thr_mode_set_ptr ITTNOTIFY_NAME(thr_mode_set)
863
+ #else /* INTEL_NO_ITTNOTIFY_API */
864
+ #define __itt_thr_mode_set(p, s) (__itt_thr_state_t)0
865
+ #define __itt_thr_mode_set_ptr 0
866
+ #endif /* INTEL_NO_ITTNOTIFY_API */
867
+ #else /* INTEL_NO_MACRO_BODY */
868
+ #define __itt_thr_mode_set_ptr 0
869
+ #endif /* INTEL_NO_MACRO_BODY */
870
+ /** @endcond */
871
+
872
+ /**
873
+ * @deprecated Legacy API
874
+ * @brief managing thread and object modes
875
+ */
876
+ __itt_obj_state_t LIBITTAPI __itt_obj_mode_set(__itt_obj_prop_t p, __itt_obj_state_t s);
877
+
878
+ /** @cond exclude_from_documentation */
879
+ #ifndef INTEL_NO_MACRO_BODY
880
+ #ifndef INTEL_NO_ITTNOTIFY_API
881
+ ITT_STUB(ITTAPI, __itt_obj_state_t, obj_mode_set, (__itt_obj_prop_t p, __itt_obj_state_t s))
882
+ #define __itt_obj_mode_set ITTNOTIFY_DATA(obj_mode_set)
883
+ #define __itt_obj_mode_set_ptr ITTNOTIFY_NAME(obj_mode_set)
884
+ #else /* INTEL_NO_ITTNOTIFY_API */
885
+ #define __itt_obj_mode_set(p, s) (__itt_obj_state_t)0
886
+ #define __itt_obj_mode_set_ptr 0
887
+ #endif /* INTEL_NO_ITTNOTIFY_API */
888
+ #else /* INTEL_NO_MACRO_BODY */
889
+ #define __itt_obj_mode_set_ptr 0
890
+ #endif /* INTEL_NO_MACRO_BODY */
891
+ /** @endcond */
892
+ /** @} legacy_state group */
893
+
894
+ /**
895
+ * @defgroup frames Frames
896
+ * @ingroup legacy
897
+ * Frames group
898
+ * @{
899
+ */
900
+ /**
901
+ * @brief opaque structure for frame identification
902
+ */
903
+ typedef struct __itt_frame_t *__itt_frame;
904
+
905
+ /**
906
+ * @brief Create a global frame with given domain
907
+ */
908
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
909
+ __itt_frame ITTAPI __itt_frame_createA(const char *domain);
910
+ __itt_frame ITTAPI __itt_frame_createW(const wchar_t *domain);
911
+ #if defined(UNICODE) || defined(_UNICODE)
912
+ # define __itt_frame_create __itt_frame_createW
913
+ # define __itt_frame_create_ptr __itt_frame_createW_ptr
914
+ #else /* UNICODE */
915
+ # define __itt_frame_create __itt_frame_createA
916
+ # define __itt_frame_create_ptr __itt_frame_createA_ptr
917
+ #endif /* UNICODE */
918
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
919
+ __itt_frame ITTAPI __itt_frame_create(const char *domain);
920
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
921
+
922
+ /** @cond exclude_from_documentation */
923
+ #ifndef INTEL_NO_MACRO_BODY
924
+ #ifndef INTEL_NO_ITTNOTIFY_API
925
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
926
+ ITT_STUB(ITTAPI, __itt_frame, frame_createA, (const char *domain))
927
+ ITT_STUB(ITTAPI, __itt_frame, frame_createW, (const wchar_t *domain))
928
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
929
+ ITT_STUB(ITTAPI, __itt_frame, frame_create, (const char *domain))
930
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
931
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
932
+ #define __itt_frame_createA ITTNOTIFY_DATA(frame_createA)
933
+ #define __itt_frame_createA_ptr ITTNOTIFY_NAME(frame_createA)
934
+ #define __itt_frame_createW ITTNOTIFY_DATA(frame_createW)
935
+ #define __itt_frame_createW_ptr ITTNOTIFY_NAME(frame_createW)
936
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
937
+ #define __itt_frame_create ITTNOTIFY_DATA(frame_create)
938
+ #define __itt_frame_create_ptr ITTNOTIFY_NAME(frame_create)
939
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
940
+ #else /* INTEL_NO_ITTNOTIFY_API */
941
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
942
+ #define __itt_frame_createA(domain)
943
+ #define __itt_frame_createA_ptr 0
944
+ #define __itt_frame_createW(domain)
945
+ #define __itt_frame_createW_ptr 0
946
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
947
+ #define __itt_frame_create(domain)
948
+ #define __itt_frame_create_ptr 0
949
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
950
+ #endif /* INTEL_NO_ITTNOTIFY_API */
951
+ #else /* INTEL_NO_MACRO_BODY */
952
+ #if ITT_PLATFORM==ITT_PLATFORM_WIN
953
+ #define __itt_frame_createA_ptr 0
954
+ #define __itt_frame_createW_ptr 0
955
+ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
956
+ #define __itt_frame_create_ptr 0
957
+ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
958
+ #endif /* INTEL_NO_MACRO_BODY */
959
+ /** @endcond */
960
+
961
+ /** @brief Record a frame begin occurrence. */
962
+ void ITTAPI __itt_frame_begin(__itt_frame frame);
963
+ /** @brief Record a frame end occurrence. */
964
+ void ITTAPI __itt_frame_end (__itt_frame frame);
965
+
966
+ /** @cond exclude_from_documentation */
967
+ #ifndef INTEL_NO_MACRO_BODY
968
+ #ifndef INTEL_NO_ITTNOTIFY_API
969
+ ITT_STUBV(ITTAPI, void, frame_begin, (__itt_frame frame))
970
+ ITT_STUBV(ITTAPI, void, frame_end, (__itt_frame frame))
971
+ #define __itt_frame_begin ITTNOTIFY_VOID(frame_begin)
972
+ #define __itt_frame_begin_ptr ITTNOTIFY_NAME(frame_begin)
973
+ #define __itt_frame_end ITTNOTIFY_VOID(frame_end)
974
+ #define __itt_frame_end_ptr ITTNOTIFY_NAME(frame_end)
975
+ #else /* INTEL_NO_ITTNOTIFY_API */
976
+ #define __itt_frame_begin(frame)
977
+ #define __itt_frame_begin_ptr 0
978
+ #define __itt_frame_end(frame)
979
+ #define __itt_frame_end_ptr 0
980
+ #endif /* INTEL_NO_ITTNOTIFY_API */
981
+ #else /* INTEL_NO_MACRO_BODY */
982
+ #define __itt_frame_begin_ptr 0
983
+ #define __itt_frame_end_ptr 0
984
+ #endif /* INTEL_NO_MACRO_BODY */
985
+ /** @endcond */
986
+ /** @} frames group */
987
+
988
+ #ifdef __cplusplus
989
+ }
990
+ #endif /* __cplusplus */
991
+
992
+ #endif /* _LEGACY_ITTNOTIFY_H_ */
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/common/kernel_cache.hpp ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*******************************************************************************
2
+ * Copyright 2023 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef COMMON_KERNEL_CACHE_HPP
18
+ #define COMMON_KERNEL_CACHE_HPP
19
+
20
+ #include <cstddef>
21
+ #include <memory>
22
+ #include <thread>
23
+
24
+ #include "c_types_map.hpp"
25
+
26
+ namespace dnnl {
27
+ namespace impl {
28
+ namespace kernel_cache {
29
+
30
+ struct key_impl_t {
31
+ key_impl_t() = default;
32
+ virtual ~key_impl_t() = default;
33
+
34
+ key_impl_t(const key_impl_t &) = delete;
35
+ key_impl_t &operator=(const key_impl_t &) = delete;
36
+
37
+ virtual bool compare(const key_impl_t *key_impl) const = 0;
38
+ virtual size_t hash() const = 0;
39
+ };
40
+
41
+ // Key
42
+ struct key_t {
43
+ key_t(const std::shared_ptr<key_impl_t> &impl,
44
+ bool has_runtime_dependencies = false)
45
+ : impl_(impl)
46
+ , thread_id_(std::this_thread::get_id())
47
+ , has_runtime_dependencies_(has_runtime_dependencies) {}
48
+ key_t(std::shared_ptr<key_impl_t> &&impl,
49
+ bool has_runtime_dependencies = false)
50
+ : impl_(std::move(impl))
51
+ , thread_id_(std::this_thread::get_id())
52
+ , has_runtime_dependencies_(has_runtime_dependencies) {}
53
+ virtual ~key_t() = default;
54
+
55
+ bool operator==(const key_t &other) const {
56
+ return impl_->compare(other.impl_.get());
57
+ };
58
+ size_t hash() const { return impl_->hash(); };
59
+
60
+ const std::thread::id &thread_id() const { return thread_id_; }
61
+ bool has_runtime_dependencies() const { return has_runtime_dependencies_; }
62
+
63
+ protected:
64
+ std::shared_ptr<key_impl_t> impl_;
65
+
66
+ private:
67
+ // Thread ID is not used as part of the key, it's only used to get
68
+ // information about what thread inserted the key and the corresponding
69
+ // primitive to handle some multithreaded scenarios.
70
+ std::thread::id thread_id_;
71
+
72
+ // Used to correctly handle destruction on process termination. If there are
73
+ // runtime dependencies, attempts to destroy the cached object may fail.
74
+ bool has_runtime_dependencies_;
75
+ };
76
+
77
+ struct value_impl_t {
78
+ value_impl_t() = default;
79
+ virtual ~value_impl_t() = default;
80
+
81
+ value_impl_t(const value_impl_t &) = delete;
82
+ value_impl_t &operator=(const value_impl_t &) = delete;
83
+ };
84
+
85
+ struct value_t {
86
+ value_t() = default;
87
+ value_t(std::nullptr_t) : value_t() {};
88
+ value_t(const std::shared_ptr<value_impl_t> &impl) : impl_(impl) {}
89
+ value_t(std::shared_ptr<value_impl_t> &&impl) : impl_(std::move(impl)) {}
90
+ virtual ~value_t() = default;
91
+ const std::shared_ptr<value_impl_t> &impl() const { return impl_; }
92
+ std::shared_ptr<value_impl_t> &impl() { return impl_; }
93
+ std::shared_ptr<value_impl_t> release() {
94
+ std::shared_ptr<value_impl_t> ret = nullptr;
95
+ std::swap(ret, impl_);
96
+ return ret;
97
+ }
98
+ bool is_empty() const { return impl_ == nullptr; }
99
+
100
+ private:
101
+ std::shared_ptr<value_impl_t> impl_;
102
+ };
103
+
104
+ struct iface_t {
105
+ struct cache_t;
106
+ struct result_t {
107
+ result_t() : status(status::success) {};
108
+ result_t(value_t p, status_t s) : value(std::move(p)), status(s) {}
109
+ bool is_empty() const { return value.is_empty(); }
110
+ value_t &get_value() { return value; }
111
+ value_t value;
112
+ status_t status;
113
+ };
114
+
115
+ using create_func_t = result_t (&)(void *);
116
+ using create_func_ptr_t = result_t (*)(void *);
117
+
118
+ iface_t(cache_t &cache) : cache_(cache) {};
119
+
120
+ ~iface_t() = default;
121
+
122
+ status_t set_capacity(int capacity);
123
+ int get_capacity() const;
124
+ int get_size() const;
125
+
126
+ result_t get_or_create(
127
+ const key_t &key, create_func_t create, void *create_context);
128
+
129
+ private:
130
+ cache_t &cache_;
131
+ };
132
+
133
+ iface_t get();
134
+
135
+ } // namespace kernel_cache
136
+ } // namespace impl
137
+ } // namespace dnnl
138
+
139
+ #endif