// XNNPACK: XNNPACK-master/src/xnnpack/packb.h
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stdint.h>
#include <stddef.h>

#include <xnnpack/common.h>
#include <xnnpack/microparams.h>

#ifdef __cplusplus
extern "C" {
#endif

#define DECLARE_X32_PACKB_GEMM_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
    size_t groups, \
    size_t channels, \
    const uint32_t* bias, \
    uint32_t* packed_weights, \
    size_t channel_tile_stride, \
    size_t channel_subtile_stride, \
    const union xnn_x32_packb_params* params);

DECLARE_X32_PACKB_GEMM_UKERNEL_FUNCTION(xnn_x32_packb_gemm_ukernel_2c1s1r__scalar_float)
DECLARE_X32_PACKB_GEMM_UKERNEL_FUNCTION(xnn_x32_packb_gemm_ukernel_2c1s1r__scalar_int)
DECLARE_X32_PACKB_GEMM_UKERNEL_FUNCTION(xnn_x32_packb_gemm_ukernel_2c2s1r__scalar_float)
DECLARE_X32_PACKB_GEMM_UKERNEL_FUNCTION(xnn_x32_packb_gemm_ukernel_2c2s1r__scalar_int)
DECLARE_X32_PACKB_GEMM_UKERNEL_FUNCTION(xnn_x32_packb_gemm_ukernel_4c1s1r__scalar_float)
DECLARE_X32_PACKB_GEMM_UKERNEL_FUNCTION(xnn_x32_packb_gemm_ukernel_4c1s1r__scalar_int)
DECLARE_X32_PACKB_GEMM_UKERNEL_FUNCTION(xnn_x32_packb_gemm_ukernel_4c4s1r__scalar_float)
DECLARE_X32_PACKB_GEMM_UKERNEL_FUNCTION(xnn_x32_packb_gemm_ukernel_4c4s1r__scalar_int)

#ifdef __cplusplus
}  // extern "C"
#endif
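// Editor's note (illustration, not part of the original header): each
// DECLARE_X32_PACKB_GEMM_UKERNEL_FUNCTION(name) invocation above expands to
// an ordinary prototype, so every x32 packb microkernel shares this shape:
//
//   XNN_INTERNAL void xnn_x32_packb_gemm_ukernel_2c1s1r__scalar_float(
//       size_t groups,
//       size_t channels,
//       const uint32_t* bias,
//       uint32_t* packed_weights,
//       size_t channel_tile_stride,
//       size_t channel_subtile_stride,
//       const union xnn_x32_packb_params* params);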
// XNNPACK: XNNPACK-master/src/xnnpack/packw.h
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stdint.h>
#include <stddef.h>

#include <xnnpack/common.h>
#include <xnnpack/microparams.h>

#ifdef __cplusplus
extern "C" {
#endif

#define DECLARE_X8_PACKW_GEMM_GOI_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
    size_t g, \
    size_t nc, \
    size_t kc, \
    size_t nr, \
    size_t kr, \
    size_t sr, \
    const int8_t* k, \
    const uint32_t* b, \
    int8_t* packed_weights, \
    size_t extra_bytes, \
    const void* params);

DECLARE_X8_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x8_packw_gemm_goi_ukernel_x2__scalar_int_x4)
DECLARE_X8_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x8_packw_gemm_goi_ukernel_x4__scalar_int_x4)
DECLARE_X8_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x8_packw_gemm_goi_ukernel_x8__scalar_int_x4)
DECLARE_X8_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x8_packw_gemm_goi_ukernel_x16__scalar_int_x4)

#define DECLARE_X16_PACKW_GEMM_GOI_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
    size_t g, \
    size_t nc, \
    size_t kc, \
    size_t nr, \
    size_t kr, \
    size_t sr, \
    const uint16_t* k, \
    const uint16_t* b, \
    uint16_t* packed_weights, \
    size_t extra_bytes, \
    const void* params);

DECLARE_X16_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x16_packw_gemm_goi_ukernel_x8__scalar_int_x4)
DECLARE_X16_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x16_packw_gemm_goi_ukernel_x16__scalar_int_x4)
DECLARE_X16_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x16_packw_gemm_goi_ukernel_x8__neon_ld4lane_x4)
DECLARE_X16_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x16_packw_gemm_goi_ukernel_x8__neon_ld4lane_x4_prfm)
DECLARE_X16_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x16_packw_gemm_goi_ukernel_x8__neon_ld4lane_x8)
DECLARE_X16_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x16_packw_gemm_goi_ukernel_x8__neon_ld4lane_x8_prfm)
DECLARE_X16_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x16_packw_gemm_goi_ukernel_x8__neon_ld4lane_x12)
DECLARE_X16_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x16_packw_gemm_goi_ukernel_x8__neon_ld4lane_x12_prfm)
DECLARE_X16_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x16_packw_gemm_goi_ukernel_x8__neon_ld4lane_x16)
DECLARE_X16_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x16_packw_gemm_goi_ukernel_x8__neon_ld4lane_x16_prfm)
DECLARE_X16_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x16_packw_gemm_goi_ukernel_x16__neon_ld4lane_x4)
DECLARE_X16_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x16_packw_gemm_goi_ukernel_x16__neon_ld4lane_x4_prfm)
DECLARE_X16_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x16_packw_gemm_goi_ukernel_x16__neon_ld4lane_x8)
DECLARE_X16_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x16_packw_gemm_goi_ukernel_x16__neon_ld4lane_x8_prfm)
DECLARE_X16_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x16_packw_gemm_goi_ukernel_x16__neon_ld4lane_x12)
DECLARE_X16_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x16_packw_gemm_goi_ukernel_x16__neon_ld4lane_x12_prfm)
DECLARE_X16_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x16_packw_gemm_goi_ukernel_x16__neon_ld4lane_x16)
DECLARE_X16_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x16_packw_gemm_goi_ukernel_x16__neon_ld4lane_x16_prfm)
DECLARE_X16_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x16_packw_gemm_goi_ukernel_x8__avx2_x16)
DECLARE_X16_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x16_packw_gemm_goi_ukernel_x8__avx2_x16_prfm)
DECLARE_X16_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x16_packw_gemm_goi_ukernel_x16__avx2_x16)
DECLARE_X16_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x16_packw_gemm_goi_ukernel_x16__avx2_x16_prfm)

#define DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
    size_t g, \
    size_t nc, \
    size_t kc, \
    size_t nr, \
    size_t kr, \
    size_t sr, \
    const uint32_t* k, \
    const uint32_t* b, \
    uint32_t* packed_weights, \
    size_t extra_bytes, \
    const void* params);

DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x2__scalar_float_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x2__scalar_int_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x3__scalar_float_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x3__scalar_int_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x4__scalar_float_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x4__scalar_int_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8__scalar_float_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8__scalar_int_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x16__scalar_float_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x16__scalar_int_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x2__neon_ld2lane_x2)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x2__neon_ld2lane_x2_prfm)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8__neon_ld4lane_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8__neon_ld4lane_x4_prfm)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8__neon_ld4lane_x8)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8__neon_ld4lane_x8_prfm)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8s4__neon_ld4lane_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8s4__neon_ld4lane_x4_prfm)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8s4__neon_ld4lane_x8)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8s4__neon_ld4lane_x8_prfm)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x12__neon_ld4lane_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x12__neon_ld4lane_x4_prfm)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x12__neon_ld4lane_x8)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x12__neon_ld4lane_x8_prfm)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x16__neon_ld4lane_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x16__neon_ld4lane_x4_prfm)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x16__neon_ld4lane_x8)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x16__neon_ld4lane_x8_prfm)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x2c4__sse2_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x2c4__sse2_x4_prfm)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8__sse2_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8__sse2_x4_prfm)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8__sse2_x8)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8__sse2_x8_prfm)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8s4__sse2_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8s4__sse2_x4_prfm)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8s4__sse2_x8)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8s4__sse2_x8_prfm)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x16__sse2_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x16__sse2_x4_prfm)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x16__sse2_x8)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x16__sse2_x8_prfm)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x16s4__sse2_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x16s4__sse2_x4_prfm)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x16s4__sse2_x8)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x16s4__sse2_x8_prfm)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8__avx_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8__avx_x4_prfm)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8s4__avx_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8s4__avx_x4_prfm)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x16__avx_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x16__avx_x4_prfm)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x16s4__avx_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x16s4__avx_x4_prfm)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x16__avx512f_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x16__avx512f_x4_prfm)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x2c4__wasmsimd_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8__wasmsimd_x4)
DECLARE_X32_PACKW_GEMM_GOI_UKERNEL_FUNCTION(xnn_x32_packw_gemm_goi_ukernel_x8s4__wasmsimd_x4)

#ifdef __cplusplus
}  // extern "C"
#endif
// XNNPACK: XNNPACK-master/src/xnnpack/packx.h
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>

#ifdef __cplusplus
extern "C" {
#endif

#define DECLARE_X32_PACKX_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
    size_t m, \
    size_t k, \
    const uint32_t* x, \
    size_t x_stride, \
    uint32_t* y);

DECLARE_X32_PACKX_UKERNEL_FUNCTION(xnn_x32_packx_ukernel_2x__scalar)
DECLARE_X32_PACKX_UKERNEL_FUNCTION(xnn_x32_packx_ukernel_3x__scalar)
DECLARE_X32_PACKX_UKERNEL_FUNCTION(xnn_x32_packx_ukernel_4x__scalar)
DECLARE_X32_PACKX_UKERNEL_FUNCTION(xnn_x32_packx_ukernel_4x__sse)
DECLARE_X32_PACKX_UKERNEL_FUNCTION(xnn_x32_packx_ukernel_4x__wasmsimd)
DECLARE_X32_PACKX_UKERNEL_FUNCTION(xnn_x32_packx_ukernel_4x__neon_st4_x4)
DECLARE_X32_PACKX_UKERNEL_FUNCTION(xnn_x32_packx_ukernel_4x__neon_st4_x4_prfm)
DECLARE_X32_PACKX_UKERNEL_FUNCTION(xnn_x32_packx_ukernel_4x__neon_st4_x8)
DECLARE_X32_PACKX_UKERNEL_FUNCTION(xnn_x32_packx_ukernel_4x__neon_st4_x8_prfm)
DECLARE_X32_PACKX_UKERNEL_FUNCTION(xnn_x32_packx_ukernel_8x__neon_st4_x4)
DECLARE_X32_PACKX_UKERNEL_FUNCTION(xnn_x32_packx_ukernel_8x__neon_st4_x4_prfm)
DECLARE_X32_PACKX_UKERNEL_FUNCTION(xnn_x32_packx_ukernel_8x__neon_st4_x8)
DECLARE_X32_PACKX_UKERNEL_FUNCTION(xnn_x32_packx_ukernel_8x__neon_st4_x8_prfm)

#ifdef __cplusplus
}  // extern "C"
#endif
// XNNPACK: XNNPACK-master/src/xnnpack/pad.h
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>

#ifdef __cplusplus
extern "C" {
#endif

#define DECLARE_PAD_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
    size_t rows, \
    size_t channels, \
    size_t pre_padding, \
    size_t post_padding, \
    const void* input, \
    size_t input_stride, \
    void* output, \
    size_t output_stride, \
    const uint32_t fill_pattern);

DECLARE_PAD_UKERNEL_FUNCTION(xnn_xx_pad_ukernel__neon)
DECLARE_PAD_UKERNEL_FUNCTION(xnn_xx_pad_ukernel__scalar)
DECLARE_PAD_UKERNEL_FUNCTION(xnn_xx_pad_ukernel__sse2)
DECLARE_PAD_UKERNEL_FUNCTION(xnn_xx_pad_ukernel__wasmsimd)

#ifdef __cplusplus
}  // extern "C"
#endif
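// Illustrative usage sketch (editor's addition, not in the original header):
// padding one 4-byte row with 2 bytes of fill on each side. Treating
// `channels` and the paddings as byte counts is an assumption suggested by
// the void* typing and the type-agnostic xx_ prefix, and the fill_pattern
// word is assumed to supply the fill bytes (0 used here to stay safe).
static void example_pad_one_row(void) {
  const uint8_t input[4] = {1, 2, 3, 4};
  uint8_t output[8];
  xnn_xx_pad_ukernel__scalar(
      /*rows=*/1, /*channels=*/4,
      /*pre_padding=*/2, /*post_padding=*/2,
      input, /*input_stride=*/4,
      output, /*output_stride=*/8,
      /*fill_pattern=*/0);
  // Expected result under these assumptions: {0, 0, 1, 2, 3, 4, 0, 0}.
}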
// XNNPACK: XNNPACK-master/src/xnnpack/params.h
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include <xnnpack.h>
#include <xnnpack/common.h>
#include <xnnpack/microfnptr.h>
#include <xnnpack/microparams.h>
#include <xnnpack/config.h>

// Indicates that XNNPACK as a whole has initialized.
// This does not guarantee that any particular microkernels are available.
#define XNN_INIT_FLAG_XNNPACK 0x00000001

struct xnn_parameters {
  // Bitwise combination of XNN_INIT_FLAG_* flags
  uint32_t init_flags;
  struct xnn_allocator allocator;
};

#ifdef __cplusplus
extern "C" XNN_INTERNAL struct xnn_parameters xnn_params;
#else
extern XNN_INTERNAL struct xnn_parameters xnn_params;
#endif
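// Illustrative sketch (editor's addition, not in the original header):
// internal code can test whether XNNPACK as a whole was initialized by
// checking the flag bit declared above against the global parameters struct.
// The helper name is hypothetical.
static inline bool example_xnnpack_is_initialized(void) {
  return (xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) != 0;
}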
// XNNPACK: XNNPACK-master/src/xnnpack/pavgpool.h
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>
#include <xnnpack/microparams.h>

#ifdef __cplusplus
extern "C" {
#endif

#define DECLARE_F16_PAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
    size_t output_pixels, \
    size_t kernel_elements, \
    size_t channels, \
    const void** input, \
    size_t input_offset, \
    const void* zero, \
    const void* multiplier, \
    void* buffer, \
    void* output, \
    size_t input_increment, \
    size_t output_increment, \
    const union xnn_f16_minmax_params* params);

DECLARE_F16_PAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f16_pavgpool_minmax_ukernel_9p8x__avx2_c8)
DECLARE_F16_PAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f16_pavgpool_minmax_ukernel_9p8x__neonfp16arith_c8)

#define DECLARE_F16_PAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
    size_t output_pixels, \
    size_t kernel_elements, \
    size_t channels, \
    const void** input, \
    size_t input_offset, \
    const void* zero, \
    const void* multiplier, \
    void* output, \
    size_t input_increment, \
    size_t output_increment, \
    const union xnn_f16_minmax_params* params);

DECLARE_F16_PAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f16_pavgpool_minmax_ukernel_9x__avx2_c8)
DECLARE_F16_PAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f16_pavgpool_minmax_ukernel_9x__neonfp16arith_c8)

#define DECLARE_F32_PAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
    size_t output_pixels, \
    size_t kernel_elements, \
    size_t channels, \
    const float** input, \
    size_t input_offset, \
    const float* zero, \
    const float* multiplier, \
    float* buffer, \
    float* output, \
    size_t input_increment, \
    size_t output_increment, \
    const union xnn_f32_minmax_params* params);

DECLARE_F32_PAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f32_pavgpool_minmax_ukernel_9p8x__neon_c4)
DECLARE_F32_PAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f32_pavgpool_minmax_ukernel_9p8x__scalar_c1)
DECLARE_F32_PAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f32_pavgpool_minmax_ukernel_9p8x__sse_c4)
DECLARE_F32_PAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f32_pavgpool_minmax_ukernel_9p8x__wasm_c1)
DECLARE_F32_PAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f32_pavgpool_minmax_ukernel_9p8x__wasmsimd_arm_c4)
DECLARE_F32_PAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f32_pavgpool_minmax_ukernel_9p8x__wasmsimd_x86_c4)

#define DECLARE_F32_PAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
    size_t output_pixels, \
    size_t kernel_elements, \
    size_t channels, \
    const float** input, \
    size_t input_offset, \
    const float* zero, \
    const float* multiplier, \
    float* output, \
    size_t input_increment, \
    size_t output_increment, \
    const union xnn_f32_minmax_params* params);

DECLARE_F32_PAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_pavgpool_minmax_ukernel_9x__neon_c4)
DECLARE_F32_PAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_pavgpool_minmax_ukernel_9x__scalar_c1)
DECLARE_F32_PAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_pavgpool_minmax_ukernel_9x__sse_c4)
DECLARE_F32_PAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_pavgpool_minmax_ukernel_9x__wasm_c1)
DECLARE_F32_PAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_pavgpool_minmax_ukernel_9x__wasmsimd_arm_c4)
DECLARE_F32_PAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_pavgpool_minmax_ukernel_9x__wasmsimd_x86_c4)

#ifdef __cplusplus
}  // extern "C"
#endif
// XNNPACK: XNNPACK-master/src/xnnpack/post-operation.h
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>

// Operators that can be applied post convolution.
enum xnn_post_operation_type {
  xnn_post_operation_type_none,
  xnn_post_operation_type_hardswish,
};

// Struct representing a post operation and its associated data. For example,
// an addition with constant will specify the constant in the arg1 field, a
// clamp will specify min in arg1, and max in arg2.
struct xnn_post_operation {
  enum xnn_post_operation_type op_type;
  float arg1;
  float arg2;
};

#ifdef __cplusplus
extern "C" {
#endif

// Allocate space for params required for post_operations and initialize all
// params. This allocation will be freed when the operator holding these
// params is deleted.
char* allocate_and_initialize_post_operation_params(
    size_t num_post_operations,
    const struct xnn_post_operation* post_operations);

#ifdef __cplusplus
}  // extern "C"
#endif
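// Illustrative usage sketch (editor's addition, not in the original header):
// describing a hardswish post-operation and handing it to the allocator
// declared above. Hardswish takes no extra data, so arg1/arg2 are left zero
// here; per the comment above, the returned buffer is freed when the owning
// operator is deleted. The helper name is hypothetical.
static char* example_post_operation_params(void) {
  const struct xnn_post_operation ops[1] = {
      {.op_type = xnn_post_operation_type_hardswish, .arg1 = 0.0f, .arg2 = 0.0f},
  };
  return allocate_and_initialize_post_operation_params(1, ops);
}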
// XNNPACK: XNNPACK-master/src/xnnpack/ppmm.h
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>
#include <xnnpack/microparams.h>

#ifdef __cplusplus
extern "C" {
#endif

#define DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
    size_t mr, \
    size_t nc, \
    size_t kc, \
    const float* a, \
    const float* w, \
    float* c, \
    size_t cm_stride, \
    size_t cn_stride, \
    const union xnn_f32_minmax_params* params);

DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a75)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a75_prfm)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld128)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld128_prfm)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_8x8__asm_aarch64_neonfma_cortex_a75)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_8x8__asm_aarch64_neonfma_cortex_a75_prfm)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_8x8__asm_aarch64_neonfma_ld128)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_8x8__asm_aarch64_neonfma_ld128_prfm)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_4x8__aarch64_neonfma)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_4x8__aarch64_neonfma_prfm)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_4x8__neon)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_4x8__neon_prfm)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_4x16__aarch64_neonfma)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_4x16__aarch64_neonfma_prfm)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_4x16__neon)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_4x16__neon_prfm)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_8x8__aarch64_neonfma)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_8x8__aarch64_neonfma_prfm)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_8x8__neon)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_8x8__neon_prfm)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_4x8__sse)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_2x4__scalar)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_3x3__scalar)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_4x2__scalar)
DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_4x4__scalar)

#ifdef __cplusplus
}  // extern "C"
#endif
// XNNPACK: XNNPACK-master/src/xnnpack/prefetch.h
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#ifdef _MSC_VER
#include <intrin.h>
#endif

#include <xnnpack/common.h>

XNN_INLINE static void xnn_prefetch_to_l1(const void* address) {
#if defined(__GNUC__)
  __builtin_prefetch(address);
#elif defined(_MSC_VER)
  #if defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC)
    __prefetch(address);
  #elif defined(_M_X64)
    _mm_prefetch(address, _MM_HINT_T0);
  #elif defined(_M_IX86)
    #if _M_IX86_FP >= 1
      // Targeting SSE+
      _mm_prefetch(address, _MM_HINT_T0);
    #else
      _m_prefetch((void*) address);
    #endif
  #else
    #error "Architecture-specific implementation of xnn_prefetch_to_l1 required"
  #endif
#else
  #error "Compiler-specific implementation of xnn_prefetch_to_l1 required"
#endif
}
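// Illustrative usage sketch (editor's addition, not in the original header):
// a common pattern is to prefetch data a fixed distance ahead of the current
// read position so it is resident in L1 by the time it is consumed. The
// 64-byte (one cache line) lookahead and the function name are arbitrary
// choices made for illustration only.
#include <stddef.h>  // size_t, for this sketch

static float example_sum_with_prefetch(const float* data, size_t n) {
  float sum = 0.0f;
  for (size_t i = 0; i < n; i++) {
    xnn_prefetch_to_l1((const char*) &data[i] + 64);
    sum += data[i];
  }
  return sum;
}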
// XNNPACK: XNNPACK-master/src/xnnpack/prelu.h
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>

#ifdef __cplusplus
extern "C" {
#endif

#define DECLARE_F16_PRELU_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
    size_t rows, \
    size_t channels, \
    const void* input, \
    size_t input_stride, \
    const void* weights, \
    void* output, \
    size_t output_stride);

DECLARE_F16_PRELU_UKERNEL_FUNCTION(xnn_f16_prelu_ukernel__neonfp16arith_2x8)
DECLARE_F16_PRELU_UKERNEL_FUNCTION(xnn_f16_prelu_ukernel__neonfp16arith_2x16)
DECLARE_F16_PRELU_UKERNEL_FUNCTION(xnn_f16_prelu_ukernel__f16c_2x8)
DECLARE_F16_PRELU_UKERNEL_FUNCTION(xnn_f16_prelu_ukernel__f16c_2x16)

#define DECLARE_F32_PRELU_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
    size_t rows, \
    size_t channels, \
    const float* input, \
    size_t input_stride, \
    const float* weights, \
    float* output, \
    size_t output_stride);

DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__neon_1x4)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__neon_1x8)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__neon_1x16)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__neon_2x4)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__neon_2x8)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__neon_2x16)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__neon_4x4)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__neon_4x8)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__neon_4x16)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__sse_2x4)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__sse_2x8)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__sse2_2x4)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__sse2_2x8)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__sse41_2x4)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__sse41_2x8)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__avx_2x8)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__avx_2x16)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__avx512f_2x16)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__avx512f_2x32)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmsimd_laneselect_1x4)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmsimd_laneselect_1x8)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmsimd_laneselect_1x16)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmsimd_laneselect_2x4)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmsimd_laneselect_2x8)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmsimd_laneselect_2x16)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmsimd_laneselect_4x4)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmsimd_laneselect_4x8)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmsimd_laneselect_4x16)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmsimd_iminmax_1x4)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmsimd_iminmax_1x8)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmsimd_iminmax_1x16)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmsimd_iminmax_2x4)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmsimd_iminmax_2x8)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmsimd_iminmax_2x16)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmsimd_iminmax_4x4)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmsimd_iminmax_4x8)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmsimd_iminmax_4x16)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmrelaxedsimd_laneselect_1x4)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmrelaxedsimd_laneselect_1x8)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmrelaxedsimd_laneselect_1x16)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmrelaxedsimd_laneselect_2x4)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmrelaxedsimd_laneselect_2x8)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmrelaxedsimd_laneselect_2x16)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmrelaxedsimd_laneselect_4x4)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmrelaxedsimd_laneselect_4x8)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmrelaxedsimd_laneselect_4x16)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmrelaxedsimd_iminmax_1x4)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmrelaxedsimd_iminmax_1x8)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmrelaxedsimd_iminmax_1x16)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmrelaxedsimd_iminmax_2x4)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmrelaxedsimd_iminmax_2x8)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmrelaxedsimd_iminmax_2x16)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmrelaxedsimd_iminmax_4x4)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmrelaxedsimd_iminmax_4x8)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasmrelaxedsimd_iminmax_4x16)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasm_2x1)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__wasm_2x4)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__scalar_2x1)
DECLARE_F32_PRELU_UKERNEL_FUNCTION(xnn_f32_prelu_ukernel__scalar_2x4)

#ifdef __cplusplus
}  // extern "C"
#endif
// XNNPACK: XNNPACK-master/src/xnnpack/quantization.h
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <assert.h>
#include <stdint.h>
#include <math.h>

#include <xnnpack/math.h>
#include <xnnpack/microparams.h>

static inline struct xnn_qd8_quantization_params xnn_f32_qd8_asymmetric_quantization_params(
  float min,
  float max)
{
  struct xnn_qd8_quantization_params quantization_params;
  const float qmin = INT8_MIN;
  const float qmax = INT8_MAX;
  const float rmin = math_min_f32(0.0f, min);
  const float rmax = math_max_f32(0.0f, max);
  const float scale = rmin == rmax ? 1.f : (qmax - qmin) / (rmax - rmin);
  const float rmin_scale = rmin * scale;
  const float rmax_scale = rmax * scale;
  const float zero_point_from_min_error = qmin + rmin_scale;
  const float zero_point_from_max_error = qmax + rmax_scale;
  float zero_point = zero_point_from_min_error + zero_point_from_max_error > 0
    ? qmin - rmin_scale
    : qmax - rmax_scale;
  zero_point = math_max_f32(zero_point, qmin);
  zero_point = math_min_f32(zero_point, qmax);
  assert(zero_point >= INT8_MIN);
  assert(zero_point <= INT8_MAX);
  const int8_t nudged_zero_point = ((int8_t) rintf(zero_point));
  quantization_params.scale = scale;
  quantization_params.zero_point = nudged_zero_point;
  return quantization_params;
}
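// Worked example (editor's addition, not in the original header): for
// min = -1.0f and max = +1.0f the helper above computes
//   scale = (127 - (-128)) / (1 - (-1)) = 127.5,
// and since the zero-point errors from the two range ends sum to -1.0
// (not > 0), zero_point = qmax - rmax * scale = 127 - 127.5 = -0.5, which
// rintf() rounds to even, giving 0. The example function name is hypothetical.
static void example_qd8_quantization_params(void) {
  const struct xnn_qd8_quantization_params p =
      xnn_f32_qd8_asymmetric_quantization_params(-1.0f, 1.0f);
  assert(p.scale == 127.5f);
  assert(p.zero_point == 0);
  (void) p;
}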
// XNNPACK: XNNPACK-master/src/xnnpack/raddexpminusmax.h
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>

#ifdef __cplusplus
extern "C" {
#endif

#define DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
    size_t n, \
    const float* input, \
    float* sum, \
    float max);

DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx2_p5_x64)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx2_p5_x64_acc2)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx2_p5_x64_acc4)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx2_p5_x72)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx2_p5_x72_acc3)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx2_p5_x80)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx2_p5_x80_acc2)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx2_p5_x80_acc5)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx2_p5_x96)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx2_p5_x96_acc2)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx2_p5_x96_acc3)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx2_p5_x96_acc6)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x128)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x128_acc2)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x128_acc4)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x144)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x144_acc3)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x160)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x160_acc2)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x160_acc5)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x192)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x192_acc2)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x192_acc3)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x192_acc6)

#ifdef __cplusplus
} /* extern "C" */
#endif
// XNNPACK: XNNPACK-master/src/xnnpack/raddextexp.h
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>

#ifdef __cplusplus
extern "C" {
#endif

#define DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
    size_t n, \
    const float* input, \
    float* sum);

DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx2_p5_x64)
DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx2_p5_x64_acc2)
DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx2_p5_x64_acc4)
DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx2_p5_x72)
DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx2_p5_x72_acc3)
DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx2_p5_x80)
DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx2_p5_x80_acc2)
DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx2_p5_x80_acc5)
DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx2_p5_x96)
DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx2_p5_x96_acc2)
DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx2_p5_x96_acc3)
DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx2_p5_x96_acc6)
DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x128)
DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x128_acc2)
DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x128_acc4)
DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x144)
DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x144_acc3)
DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x160)
DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x160_acc2)
DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x160_acc5)
DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x192)
DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x192_acc2)
DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x192_acc3)
DECLARE_F32_RADDEXTEXP_UKERNEL_FUNCTION(xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x192_acc6)

#ifdef __cplusplus
} /* extern "C" */
#endif
// XNNPACK: XNNPACK-master/src/xnnpack/raddstoreexpminusmax.h
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>
#include <xnnpack/microparams.h>

#ifdef __cplusplus
extern "C" {
#endif

#define DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
    size_t n, \
    const void* input, \
    const void* max, \
    void* output, \
    void* sum, \
    const union xnn_f16_expminus_params* params);

DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x32)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x32_acc2)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x32_acc4)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x40)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x40_acc2)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x40_acc5)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x48)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x48_acc2)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x48_acc3)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x64)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x64_acc2)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x64_acc4)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x72)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x72_acc3)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x80)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x80_acc2)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x80_acc5)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x96)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x96_acc2)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x96_acc3)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x96_acc6)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x32)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x32_acc2)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x32_acc4)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x40)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x40_acc2)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x40_acc5)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x48)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x48_acc2)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x48_acc3)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x64)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x64_acc2)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x64_acc4)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x72)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x72_acc3)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x80)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x80_acc2)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x80_acc5)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x96)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x96_acc2)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x96_acc3)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x96_acc6)

#define DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
    size_t n, \
    const float* input, \
    const float* max, \
    float* output, \
    float* sum, \
    const union xnn_f32_expminus_params* params);

DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x4)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x8)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x8_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x12)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x12_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x12_acc3)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x16)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x16_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x16_acc4)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x20)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x20_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x20_acc5)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x4)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x8)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x8_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x12)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x12_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x12_acc3)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x16)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x16_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x16_acc4)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x20)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x20_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x20_acc5)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x4)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x8)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x8_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x12)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x12_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x12_acc3)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x16)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x16_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x16_acc4)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x20)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x20_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x20_acc5)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x4)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x8)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x8_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x12)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x12_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x12_acc3)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x16)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x16_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x16_acc4)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x20)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x20_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x20_acc5)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x4)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x8)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x8_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x12)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x12_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x12_acc3)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x16)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x16_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x16_acc4)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x20)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x20_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x20_acc5)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x64)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x64_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x64_acc4)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x72)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x72_acc3)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x80)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x80_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x80_acc5)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x96)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x96_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x96_acc3)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x96_acc6)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x128)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x128_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x128_acc4)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x144)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x144_acc3)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x160)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x160_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x160_acc5)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x192)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x192_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x192_acc3)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x192_acc6)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x4)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x8)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x8_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x12)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x12_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x12_acc3)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x16)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x16_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x16_acc4)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x20)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x20_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x20_acc5)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x4)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x8)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x8_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x12)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x12_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x12_acc3)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x16)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x16_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x16_acc4)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x20)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x20_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x20_acc5)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x1)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x2_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x4)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x4_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x4_acc4)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_lut64_p2_x1)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_lut64_p2_x2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_lut64_p2_x2_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_lut64_p2_x4)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_lut64_p2_x4_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_lut64_p2_x4_acc4)

#ifdef __cplusplus
} /* extern "C" */
#endif
// XNNPACK: XNNPACK-master/src/xnnpack/reduce.h
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>
#include <xnnpack/microparams.h>

#ifdef __cplusplus
extern "C" {
#endif

#define DECLARE_F16_RSUM_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name(                       \
    size_t batch,                                  \
    const void* input,                             \
    void* output,                                  \
    const union xnn_f16_scale_params* params);

DECLARE_F16_RSUM_UKERNEL_FUNCTION(xnn_f16_rsum_ukernel__neonfp16arith_x8)
DECLARE_F16_RSUM_UKERNEL_FUNCTION(xnn_f16_rsum_ukernel__neonfp16arith_x16_acc2)
DECLARE_F16_RSUM_UKERNEL_FUNCTION(xnn_f16_rsum_ukernel__neonfp16arith_x24_acc3)
DECLARE_F16_RSUM_UKERNEL_FUNCTION(xnn_f16_rsum_ukernel__neonfp16arith_x32_acc2)
DECLARE_F16_RSUM_UKERNEL_FUNCTION(xnn_f16_rsum_ukernel__neonfp16arith_x32_acc4)

#define DECLARE_F16_F32ACC_RSUM_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name(                              \
    size_t batch,                                         \
    const void* input,                                    \
    void* output,                                         \
    const union xnn_f16_f32acc_scale_params* params);

DECLARE_F16_F32ACC_RSUM_UKERNEL_FUNCTION(xnn_f16_f32acc_rsum_ukernel__f16c_x8)
DECLARE_F16_F32ACC_RSUM_UKERNEL_FUNCTION(xnn_f16_f32acc_rsum_ukernel__f16c_x16_acc2)
DECLARE_F16_F32ACC_RSUM_UKERNEL_FUNCTION(xnn_f16_f32acc_rsum_ukernel__f16c_x24_acc3)
DECLARE_F16_F32ACC_RSUM_UKERNEL_FUNCTION(xnn_f16_f32acc_rsum_ukernel__f16c_x32_acc2)
DECLARE_F16_F32ACC_RSUM_UKERNEL_FUNCTION(xnn_f16_f32acc_rsum_ukernel__f16c_x32_acc4)
DECLARE_F16_F32ACC_RSUM_UKERNEL_FUNCTION(xnn_f16_f32acc_rsum_ukernel__neonfp16_x4)
DECLARE_F16_F32ACC_RSUM_UKERNEL_FUNCTION(xnn_f16_f32acc_rsum_ukernel__neonfp16_x8)
DECLARE_F16_F32ACC_RSUM_UKERNEL_FUNCTION(xnn_f16_f32acc_rsum_ukernel__neonfp16_x16_acc2)
DECLARE_F16_F32ACC_RSUM_UKERNEL_FUNCTION(xnn_f16_f32acc_rsum_ukernel__neonfp16_x24_acc3)
DECLARE_F16_F32ACC_RSUM_UKERNEL_FUNCTION(xnn_f16_f32acc_rsum_ukernel__neonfp16_x32_acc2)
DECLARE_F16_F32ACC_RSUM_UKERNEL_FUNCTION(xnn_f16_f32acc_rsum_ukernel__neonfp16_x32_acc4)

#define DECLARE_F32_REDUCE_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name(                         \
    size_t batch,                                    \
    const float* input,                              \
    float* output,                                   \
    const union xnn_f32_default_params* params);

DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__neon_x4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__neon_x8_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__neon_x12_acc3)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__neon_x16_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__neon_x16_acc4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__scalar_x1)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__scalar_x2_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__scalar_x3_acc3)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__scalar_x4_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__scalar_x4_acc4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__sse_x4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__sse_x8_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__sse_x12_acc3)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__sse_x16_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__sse_x16_acc4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__wasm_x1)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__wasm_x2_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__wasm_x3_acc3)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__wasm_x4_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__wasm_x4_acc4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__wasmsimd_minmax_x4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__wasmsimd_minmax_x8_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__wasmsimd_minmax_x12_acc3)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__wasmsimd_minmax_x16_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__wasmsimd_minmax_x16_acc4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__wasmsimd_pminmax_x4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__wasmsimd_pminmax_x8_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__wasmsimd_pminmax_x12_acc3)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__wasmsimd_pminmax_x16_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__wasmsimd_pminmax_x16_acc4)

DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__neon_x4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__neon_x8_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__neon_x12_acc3)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__neon_x16_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__neon_x16_acc4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__scalar_x1)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__scalar_x2_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__scalar_x3_acc3)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__scalar_x4_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__scalar_x4_acc4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__sse_x4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__sse_x8_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__sse_x12_acc3)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__sse_x16_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__sse_x16_acc4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__wasm_x1)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__wasm_x2_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__wasm_x3_acc3)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__wasm_x4_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__wasm_x4_acc4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__wasmsimd_minmax_x4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__wasmsimd_minmax_x8_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__wasmsimd_minmax_x12_acc3)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__wasmsimd_minmax_x16_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__wasmsimd_minmax_x16_acc4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__wasmsimd_pminmax_x4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__wasmsimd_pminmax_x8_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__wasmsimd_pminmax_x12_acc3)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__wasmsimd_pminmax_x16_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmin_ukernel__wasmsimd_pminmax_x16_acc4)

DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__neon_x4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__neon_x8_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__neon_x12_acc3)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__neon_x16_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__neon_x16_acc4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__scalar_x1)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__scalar_x2_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__scalar_x3_acc3)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__scalar_x4_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__scalar_x4_acc4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__sse_x4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__sse_x8_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__sse_x12_acc3)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__sse_x16_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__sse_x16_acc4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__wasm_x1)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__wasm_x2_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__wasm_x3_acc3)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__wasm_x4_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__wasm_x4_acc4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__wasmsimd_minmax_x4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__wasmsimd_minmax_x8_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__wasmsimd_minmax_x12_acc3)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__wasmsimd_minmax_x16_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__wasmsimd_minmax_x16_acc4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__wasmsimd_pminmax_x4)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__wasmsimd_pminmax_x8_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__wasmsimd_pminmax_x12_acc3)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__wasmsimd_pminmax_x16_acc2)
DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rminmax_ukernel__wasmsimd_pminmax_x16_acc4)

#define DECLARE_F32_RSUM_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name(                       \
    size_t batch,                                  \
    const float* input,                            \
    float* output,                                 \
    const union xnn_f32_scale_params* params);

DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__avx_x8)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__avx_x16_acc2)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__avx_x24_acc3)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__avx_x32_acc2)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__avx_x32_acc4)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__neon_x4)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__neon_x8_acc2)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__neon_x12_acc3)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__neon_x16_acc2)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__neon_x16_acc4)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__scalar_x1)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__scalar_x2_acc2)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__scalar_x3_acc3)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__scalar_x4_acc2)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__scalar_x4_acc4)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__sse_x4)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__sse_x8_acc2)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__sse_x12_acc3)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__sse_x16_acc2)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__sse_x16_acc4)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__wasmsimd_x4)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__wasmsimd_x8_acc2)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__wasmsimd_x12_acc3)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__wasmsimd_x16_acc2)
DECLARE_F32_RSUM_UKERNEL_FUNCTION(xnn_f32_rsum_ukernel__wasmsimd_x16_acc4)

#ifdef __cplusplus
}  // extern "C"
#endif
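/*
 * Illustrative note (not part of the original header): each DECLARE_* macro
 * invocation above stamps out one microkernel prototype. For example,
 *
 *   DECLARE_F32_REDUCE_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__scalar_x1)
 *
 * expands to
 *
 *   XNN_INTERNAL void xnn_f32_rmax_ukernel__scalar_x1(
 *       size_t batch,
 *       const float* input,
 *       float* output,
 *       const union xnn_f32_default_params* params);
 *
 * The __neon/__sse/__wasm/__scalar infix names the target ISA; the trailing
 * xN[_accM] suffix is assumed to name the per-iteration unroll factor and
 * the number of parallel accumulators.
 */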
11,653
61.994595
88
h
XNNPACK
XNNPACK-master/src/xnnpack/requantization-stubs.h
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stdint.h>
#include <stddef.h>

#ifdef __cplusplus
extern "C" {
#endif

typedef void (*xnn_qu8_requantization_fn)(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output);

#define DECLARE_QU8_REQUANTIZATION_FUNCTION(fn_name) \
  void fn_name(                                      \
      size_t n,                                      \
      const int32_t* input,                          \
      float scale,                                   \
      uint8_t zero_point,                            \
      uint8_t qmin,                                  \
      uint8_t qmax,                                  \
      uint8_t* output);

DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_fp32__neon)
DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_fp32__scalar_fmagic)
DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_fp32__scalar_lrintf)
DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_fp32__sse2)
DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_fp32__wasmsimd)
DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_gemmlowp__neon)
DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_gemmlowp__scalar)
DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_gemmlowp__sse2)
DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_gemmlowp__sse41)
DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_gemmlowp__ssse3)
DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_gemmlowp__wasmsimd)
DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_rndna__neon)
DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_rndna__scalar_signed64)
DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_rndna__scalar_unsigned32)
DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_rndna__scalar_unsigned64)
DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_rndna__sse2)
DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_rndna__sse41)
DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_rndna__ssse3)

typedef void (*xnn_qs8_requantization_fn)(
    size_t n,
    const int32_t* input,
    float scale,
    int8_t zero_point,
    int8_t qmin,
    int8_t qmax,
    int8_t* output);

#define DECLARE_QS8_REQUANTIZATION_FUNCTION(fn_name) \
  void fn_name(                                      \
      size_t n,                                      \
      const int32_t* input,                          \
      float scale,                                   \
      int8_t zero_point,                             \
      int8_t qmin,                                   \
      int8_t qmax,                                   \
      int8_t* output);

DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_fp32__neon)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_fp32__scalar_fmagic)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_fp32__scalar_lrintf)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_fp32__sse2)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_fp32__sse41)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_fp32__wasmsimd)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_gemmlowp__neon)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_gemmlowp__scalar)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_gemmlowp__sse2)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_gemmlowp__sse41)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_gemmlowp__ssse3)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_gemmlowp__wasmsimd)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_rndna__neon)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_rndna__scalar_signed64)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_rndna__scalar_unsigned32)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_rndna__scalar_unsigned64)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_rndna__sse2)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_rndna__sse41)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_rndna__ssse3)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_rndnu__neon_mull)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_rndnu__neon_qdmulh)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_rndnu__scalar)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_rndnu__sse41_sra)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_rndnu__sse41_srl)

#ifdef __cplusplus
}  // extern "C"
#endif
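/*
 * Minimal usage sketch (illustrative, not part of the original header; assumes
 * the declarations above are in scope). The typedefs let a caller select a
 * requantization variant at runtime and invoke it through a common pointer.
 * The scale/zero-point/clamping values below are arbitrary example choices,
 * and n is assumed to satisfy the variant's granularity requirements.
 */
static void example_requantize_qu8(const int32_t* acc, size_t n, uint8_t* out) {
  /* Pick the portable fp32-based scalar variant; any declared QU8 variant
     with the same signature could be substituted. */
  xnn_qu8_requantization_fn requantize = xnn_qu8_requantize_fp32__scalar_lrintf;
  /* Requantize n int32 accumulators: roughly out = clamp(round(acc * 0.5f) + 128, 0, 255). */
  requantize(n, acc, 0.5f, 128, 0, 255, out);
}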
4,731
41.630631
80
h
XNNPACK
XNNPACK-master/src/xnnpack/rmax.h
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>

#ifdef __cplusplus
extern "C" {
#endif

#define DECLARE_F16_RMAX_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name(                       \
    size_t n,                                      \
    const void* x,                                 \
    void* y);

DECLARE_F16_RMAX_UKERNEL_FUNCTION(xnn_f16_rmax_ukernel__f16c)
DECLARE_F16_RMAX_UKERNEL_FUNCTION(xnn_f16_rmax_ukernel__neonfp16arith)

#define DECLARE_F32_RMAX_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name(                       \
    size_t n,                                      \
    const float* x,                                \
    float* y);

DECLARE_F32_RMAX_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__avx)
DECLARE_F32_RMAX_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__avx512f)
DECLARE_F32_RMAX_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__neon)
DECLARE_F32_RMAX_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__scalar)
DECLARE_F32_RMAX_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__sse)
DECLARE_F32_RMAX_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__wasmsimd_arm)
DECLARE_F32_RMAX_UKERNEL_FUNCTION(xnn_f32_rmax_ukernel__wasmsimd_x86)

#define DECLARE_U8_RMAX_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name(                      \
    size_t n,                                     \
    const uint8_t* x,                             \
    uint8_t* y);

DECLARE_U8_RMAX_UKERNEL_FUNCTION(xnn_u8_rmax_ukernel__neon)
DECLARE_U8_RMAX_UKERNEL_FUNCTION(xnn_u8_rmax_ukernel__scalar)
DECLARE_U8_RMAX_UKERNEL_FUNCTION(xnn_u8_rmax_ukernel__sse2)

#ifdef __cplusplus
}  // extern "C"
#endif
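/*
 * Minimal usage sketch (illustrative, not part of the original header).
 * Like many XNNPACK microkernels, the first argument is assumed here to be
 * the input size in bytes rather than elements; verify against the kernel
 * implementation before relying on this.
 */
static float example_rmax_f32(const float* x, size_t count) {
  float y;
  xnn_f32_rmax_ukernel__scalar(count * sizeof(float), x, &y);  /* running max of x[0..count-1] */
  return y;
}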
1,873
30.233333
72
h
XNNPACK
XNNPACK-master/src/xnnpack/rmaxabs.h
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>

#ifdef __cplusplus
extern "C" {
#endif

#define DECLARE_S16_RMAXABS_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name(                          \
    size_t batch_size,                                \
    const int16_t* input,                             \
    uint16_t* output);

DECLARE_S16_RMAXABS_UKERNEL_FUNCTION(xnn_s16_rmaxabs_ukernel__neon_x8)
DECLARE_S16_RMAXABS_UKERNEL_FUNCTION(xnn_s16_rmaxabs_ukernel__neon_x16)
DECLARE_S16_RMAXABS_UKERNEL_FUNCTION(xnn_s16_rmaxabs_ukernel__neon_x24)
DECLARE_S16_RMAXABS_UKERNEL_FUNCTION(xnn_s16_rmaxabs_ukernel__neon_x32)
DECLARE_S16_RMAXABS_UKERNEL_FUNCTION(xnn_s16_rmaxabs_ukernel__scalar_x1)
DECLARE_S16_RMAXABS_UKERNEL_FUNCTION(xnn_s16_rmaxabs_ukernel__scalar_x2)
DECLARE_S16_RMAXABS_UKERNEL_FUNCTION(xnn_s16_rmaxabs_ukernel__scalar_x3)
DECLARE_S16_RMAXABS_UKERNEL_FUNCTION(xnn_s16_rmaxabs_ukernel__scalar_x4)

#ifdef __cplusplus
}  // extern "C"
#endif
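/*
 * Minimal usage sketch (illustrative, not part of the original header).
 * batch_size is assumed here to count int16 elements; the __scalar_x1
 * variant processes one element per step, so any non-zero count should be
 * acceptable under that assumption.
 */
static uint16_t example_rmaxabs_s16(const int16_t* x, size_t count) {
  uint16_t y;
  xnn_s16_rmaxabs_ukernel__scalar_x1(count, x, &y);  /* max(|x[i]|) over the batch */
  return y;
}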
1,163
29.631579
72
h
XNNPACK
XNNPACK-master/src/xnnpack/spmm.h
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>
#include <xnnpack/microparams.h>

#ifdef __cplusplus
extern "C" {
#endif

#define DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name(                              \
    size_t mc,                                            \
    size_t nc,                                            \
    const float* input,                                   \
    const float* weights,                                 \
    const int32_t* widx_dmap,                             \
    const uint32_t* nidx_nnzmap,                          \
    float* output,                                        \
    size_t output_stride,                                 \
    const union xnn_f32_minmax_params* params);

DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_1x1__scalar)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_1x1__scalar_pipelined)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_2x1__scalar)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_2x1__scalar_pipelined)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__neon)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__neon_pipelined)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__neon_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__neonfma)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__neonfma_pipelined)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__neonfma_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__scalar)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__scalar_pipelined)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__sse)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__wasmsimd_arm)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__wasmsimd_arm_pipelined)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__wasmsimd_arm_pipelined_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__wasmsimd_arm_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__wasmsimd_arm_x4)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__wasmsimd_x86)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__wasmsimd_x86_pipelined)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__wasmsimd_x86_pipelined_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__wasmsimd_x86_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__wasmsimd_x86_x4)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x2__aarch64_neonfma)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x4__aarch64_neonfma)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__neon)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__neon_pipelined)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__neon_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__neonfma)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__neonfma_pipelined)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__neonfma_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__scalar)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__scalar_pipelined)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__sse)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__wasmsimd_arm)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__wasmsimd_arm_pipelined)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__wasmsimd_arm_pipelined_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__wasmsimd_arm_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__wasmsimd_arm_x4)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__wasmsimd_x86)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__wasmsimd_x86_pipelined)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__wasmsimd_x86_pipelined_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__wasmsimd_x86_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__wasmsimd_x86_x4)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x2__aarch64_neonfma)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x2__scalar)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x4__aarch64_neonfma)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x4__scalar)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_12x1__neon)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_12x1__neonfma)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_12x2__aarch64_neonfma)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_12x4__aarch64_neonfma)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__neon)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__neon_pipelined)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__neon_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__neonfma)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__neonfma_pipelined)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__neonfma_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__sse)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_arm)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_arm_pipelined)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_arm_pipelined_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_arm_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_arm_x4)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_x86)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_x86_pipelined)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_x86_pipelined_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_x86_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_x86_x4)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x2__aarch64_neonfma)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x4__aarch64_neonfma)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x1__neon)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x1__neon_pipelined)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x1__neon_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x1__neonfma)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x1__neonfma_pipelined)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x1__neonfma_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x1__sse)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_arm)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_arm_pipelined)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_arm_pipelined_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_arm_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_arm_x4)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_x86)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_x86_pipelined)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_x86_pipelined_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_x86_x2)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_x86_x4)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x2__aarch64_neonfma)
DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x4__aarch64_neonfma)

#define DECLARE_F16_SPMM_MINMAX_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name(                              \
    size_t mc,                                            \
    size_t nc,                                            \
    const void* input,                                    \
    const void* weights,                                  \
    const int32_t* widx_dmap,                             \
    const uint32_t* nidx_nnzmap,                          \
    void* output,                                         \
    size_t output_stride,                                 \
    const union xnn_f16_minmax_params* params);

DECLARE_F16_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f16_spmm_minmax_ukernel_8x1__neonfp16arith)
DECLARE_F16_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f16_spmm_minmax_ukernel_8x1__neonfp16arith_pipelined)
DECLARE_F16_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f16_spmm_minmax_ukernel_8x1__neonfp16arith_x2)
DECLARE_F16_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f16_spmm_minmax_ukernel_16x1__neonfp16arith)
DECLARE_F16_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f16_spmm_minmax_ukernel_16x1__neonfp16arith_pipelined)
DECLARE_F16_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f16_spmm_minmax_ukernel_16x1__neonfp16arith_x2)
DECLARE_F16_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f16_spmm_minmax_ukernel_24x1__neonfp16arith)
DECLARE_F16_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f16_spmm_minmax_ukernel_24x1__neonfp16arith_pipelined)
DECLARE_F16_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f16_spmm_minmax_ukernel_24x1__neonfp16arith_x2)
DECLARE_F16_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f16_spmm_minmax_ukernel_32x1__neonfp16arith)
DECLARE_F16_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f16_spmm_minmax_ukernel_32x1__neonfp16arith_pipelined)
DECLARE_F16_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f16_spmm_minmax_ukernel_32x1__neonfp16arith_x2)

#ifdef __cplusplus
}  // extern "C"
#endif
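/*
 * Sketch of the assumed sparse-weight encoding consumed by these kernels
 * (inferred from the scalar implementations; verify against the kernel
 * source before relying on it):
 *  - nidx_nnzmap[j] holds the number of non-zero weights for output channel j;
 *  - widx_dmap holds, for each non-zero weight in order, the byte offset to
 *    add to the input pointer to reach the next input element it multiplies;
 *  - weights interleaves one bias value per output channel with that
 *    channel's non-zero weight values;
 *  - mc, like most XNNPACK size arguments, is given in bytes.
 */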
10,898
71.66
101
h
XNNPACK
XNNPACK-master/src/xnnpack/subgraph-validation.h
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <xnnpack.h>
#include <xnnpack/subgraph.h>

#ifdef __cplusplus
extern "C" {
#endif

enum xnn_status xnn_subgraph_check_xnnpack_initialized(enum xnn_node_type node_type);

enum xnn_status xnn_subgraph_check_input_node_id(enum xnn_node_type node_type, uint32_t input_id, size_t num_values);

enum xnn_status xnn_subgraph_check_nth_input_node_id(
  enum xnn_node_type node_type, uint32_t input_id, size_t num_values, size_t nth);

enum xnn_status xnn_subgraph_check_input_type_dense(
  enum xnn_node_type node_type, uint32_t input_id, const struct xnn_value* input_value);

enum xnn_status xnn_subgraph_check_nth_input_type_dense(
  enum xnn_node_type node_type, uint32_t input_id, const struct xnn_value* input_value, size_t nth);

enum xnn_status xnn_subgraph_check_output_node_id(enum xnn_node_type node_type, uint32_t output_id, size_t num_values);

enum xnn_status xnn_subgraph_check_output_type_dense(
  enum xnn_node_type node_type, uint32_t output_id, const struct xnn_value* output_value);

enum xnn_status xnn_subgraph_check_datatype_matches(
  enum xnn_node_type node_type,
  uint32_t input_id, const struct xnn_value* input_value,
  uint32_t output_id, const struct xnn_value* output_value);

enum xnn_status xnn_subgraph_check_datatype_matches_two_inputs(
  enum xnn_node_type node_type,
  uint32_t input1_id, const struct xnn_value* input1_value,
  uint32_t input2_id, const struct xnn_value* input2_value,
  uint32_t output_id, const struct xnn_value* output_value);

enum xnn_status xnn_subgraph_check_output_min_max(enum xnn_node_type node_type, float output_min, float output_max);

enum xnn_status xnn_subgraph_check_quantization_parameter_matches(
  enum xnn_node_type node_type,
  uint32_t input_id, const struct xnn_value* input_value,
  uint32_t output_id, const struct xnn_value* output_value);

// Check that two tensors have the same shape.
enum xnn_status xnn_subgraph_check_all_dims_match(
  enum xnn_node_type node_type,
  uint32_t tensor1_id, const struct xnn_value* tensor1_value,
  uint32_t tensor2_id, const struct xnn_value* tensor2_value);

#ifdef __cplusplus
}  // extern "C"
#endif
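/*
 * Minimal usage sketch (illustrative, not part of the original header; assumes
 * the declarations above and <xnnpack/subgraph.h> are in scope). A node
 * definition helper would chain these checks and bail out on the first
 * failure; xnn_node_type_abs is just an example node type.
 */
static enum xnn_status example_check_unary_io(
  xnn_subgraph_t subgraph, uint32_t input_id, uint32_t output_id)
{
  // Validate that the input ID names an existing Value.
  enum xnn_status status =
    xnn_subgraph_check_input_node_id(xnn_node_type_abs, input_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  // Validate that the input Value is a dense tensor.
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_abs, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  // Finally validate the output ID.
  return xnn_subgraph_check_output_node_id(xnn_node_type_abs, output_id, subgraph->num_values);
}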
2,332
32.328571
119
h
XNNPACK
XNNPACK-master/src/xnnpack/subgraph.h
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack.h>
#include <xnnpack/common.h>
#include <xnnpack/cache.h>
#include <xnnpack/node-type.h>

#if defined(EMSCRIPTEN)
#include <emscripten/emscripten.h>
#elif XNN_PLATFORM_WINDOWS
#include <windows.h>
#else
#include <time.h>
#endif

#define XNN_MAX_INPUTS 4
#define XNN_MAX_OUTPUTS 4

#define XNN_MAX_RUNTIME_INPUTS 4
#define XNN_MAX_RUNTIME_OUTPUTS 4

#define XNN_INVALID_NODE_ID UINT32_MAX

#define XNN_MAX_OPERATOR_OBJECTS 4

/// Disable fusion of nodes in subgraph. Fusion is enabled by default; set this flag to turn it off.
#define XNN_FLAG_NO_OPERATOR_FUSION 0x80000000

#ifdef __cplusplus
extern "C" {
#endif

struct xnn_shape {
  size_t num_dims;
  size_t dim[XNN_MAX_TENSOR_DIMS];
};

enum xnn_value_type {
  xnn_value_type_invalid = 0,
  xnn_value_type_dense_tensor = 1,
};

enum xnn_layout_type {
  xnn_layout_type_nhwc = 0,
  xnn_layout_type_nchw = 1,
};

enum xnn_allocation_type {
  xnn_allocation_type_invalid = 0,
  /// Static data that is provided by the caller; must outlive the xnn_runtime.
  xnn_allocation_type_static,
  /// Lives in the XNNPACK-managed internal workspace.
  xnn_allocation_type_workspace,
  /// Non-static data that is external to the runtime, provided by the caller, specified in xnn_setup_runtime.
  xnn_allocation_type_external,
  /// Persistent data is internal to the XNNPACK-managed workspace, but shared by multiple runtimes/subgraphs.
  xnn_allocation_type_persistent,
  /// Data allocated dynamically and managed by XNNPACK, not part of the workspace.
  xnn_allocation_type_dynamic,
};

/// Abstraction for a collection of elements produced and consumed by nodes.
struct xnn_value {
  /// Unique ID for the value.
  uint32_t id;
  /// Type of the collection of elements.
  ///
  /// Currently only dense tensors are supported.
  /// Other types (e.g. sparse tensors) might be supported in the future.
  enum xnn_value_type type;
  /// Type of elements in the collection.
  enum xnn_datatype datatype;
  /// Per-value quantization parameters.
  struct {
    /// Offset from zero of the quantized elements.
    int32_t zero_point;
    union {
      /// Multiplication factor to convert quantized elements to real representation.
      float scale;
      struct {
        /// Per-channel multiplication factor to convert quantized elements to real representation.
        const float* channelwise_scale;
        /// Index of the channel dimension with per-channel quantization parameters.
        size_t channel_dimension;
      };
    };
  } quantization;
  /// Tensor shape.
  struct xnn_shape shape;
  /// Size of the tensor.
  size_t size;
  /// Type of allocation for this tensor's data.
  enum xnn_allocation_type allocation_type;
  /// Binary features of the tensor. Supported values are any combination of:
  /// - XNN_VALUE_FLAG_EXTERNAL_INPUT
  /// - XNN_VALUE_FLAG_EXTERNAL_OUTPUT
  /// - XNN_VALUE_FLAG_PERSISTENT
  uint32_t flags;
  /// Static initialization data. Must be null for non-static values.
  void* data;
  /// Index of the Subgraph node that produced the value, or XNN_INVALID_NODE_ID if the Value is an external input.
  uint32_t producer;
  /// Index of the first Node that consumes the value, or XNN_INVALID_NODE_ID if the Value has no consumers within the
  /// graph (e.g. the Value is an external output).
  uint32_t first_consumer;
  /// Number of Nodes that consume the value.
  /// If multiple inputs in a Node refer to this Value as input, the Node is counted as a consumer multiple times.
  /// If the Value is an external output, it counts as having an extra consumer.
  uint32_t num_consumers;
  uint32_t num_nchw_compatible_consumers;
  enum xnn_layout_type layout;
  /// Set during analysis in xnn_subgraph_rewrite_for_fp16.
  /// Indicates that this value should be converted to FP16.
  bool fp16_compatible;
  /// Set during analysis in xnn_subgraph_rewrite_for_fp16.
  /// Indicates the Value ID of the FP16 variant of this Value.
  uint32_t fp16_id;
  /// Set during analysis in xnn_subgraph_rewrite_for_fp16.
  /// Indicates the Value ID of the FP32 variant of this Value.
  uint32_t fp32_id;
  /// Used during analysis in xnn_subgraph_rewrite_for_fp16.
  /// Temporary buffer to convert static data to FP16.
  void* fp16_temp_data;
  // Pointer to the original fp32 data if this value was converted from fp32 to fp16 (only for static values). This is
  // used for nodes like Convolution, where the filter is expected to be kept as fp32, but could have been converted to
  // fp16 if another node (like Subtraction) also consumed the weights.
  // If NULL, no conversion to fp16 was done; use the field `data`.
  // If not NULL, points to the original fp32 data (which is what `data` pointed to before it was overwritten to point
  // to the converted fp16 buffer).
  const void* fp32_data;
};

XNN_INLINE bool xnn_value_is_external(const struct xnn_value* value) {
  return (value->flags & (XNN_VALUE_FLAG_EXTERNAL_INPUT | XNN_VALUE_FLAG_EXTERNAL_OUTPUT)) != 0;
}

XNN_INLINE bool xnn_value_is_external_output(const struct xnn_value* value) {
  return (value->flags & XNN_VALUE_FLAG_EXTERNAL_OUTPUT) != 0;
}

XNN_INLINE bool xnn_value_is_external_input(const struct xnn_value* value) {
  return (value->flags & XNN_VALUE_FLAG_EXTERNAL_INPUT) != 0;
}

XNN_INLINE bool xnn_value_is_internal(const struct xnn_value* value) {
  return (
    (value->flags & (XNN_VALUE_FLAG_EXTERNAL_INPUT | XNN_VALUE_FLAG_EXTERNAL_OUTPUT | XNN_VALUE_FLAG_PERSISTENT)) ==
    0);
}

XNN_INLINE bool xnn_value_is_persistent(const struct xnn_value* value) {
  return value->allocation_type == xnn_allocation_type_persistent;
}

XNN_INLINE bool xnn_value_is_valid(const struct xnn_value* value) {
  return value->type != xnn_value_type_invalid;
}

XNN_INLINE bool xnn_value_is_static(const struct xnn_value* value) {
  return value->allocation_type == xnn_allocation_type_static;
}

struct xnn_node;
struct xnn_operator_data;

typedef enum xnn_status (*xnn_create_operator_fn)(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache);

typedef enum xnn_status (*xnn_reshape_operator_fn)(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool);

typedef enum xnn_status (*xnn_setup_operator_fn)(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool);

enum xnn_compute_type {
  xnn_compute_type_invalid = 0,
  xnn_compute_type_fp32,
  xnn_compute_type_fp16,
  xnn_compute_type_qc8,
  xnn_compute_type_qs8,
  xnn_compute_type_qu8,
  xnn_compute_type_fp32_to_fp16,
  xnn_compute_type_fp32_to_qs8,
  xnn_compute_type_fp32_to_qu8,
  xnn_compute_type_fp16_to_fp32,
  xnn_compute_type_qs8_to_fp32,
  xnn_compute_type_qu8_to_fp32,
};

struct xnn_node {
  enum xnn_node_type type;
  uint32_t id;
  enum xnn_compute_type compute_type;
  /// Static parameters of the operator node.
  union {
    struct {
      size_t axis;
    } concatenate;
    struct {
      uint32_t input_padding_top;
      uint32_t input_padding_right;
      uint32_t input_padding_bottom;
      uint32_t input_padding_left;
      uint32_t kernel_height;
      uint32_t kernel_width;
      uint32_t subsampling_height;
      uint32_t subsampling_width;
      uint32_t dilation_height;
      uint32_t dilation_width;
      uint32_t groups;
      size_t group_input_channels;
      size_t group_output_channels;
    } convolution_2d;
    struct {
      uint32_t padding_top;
      uint32_t padding_right;
      uint32_t padding_bottom;
      uint32_t padding_left;
      uint32_t adjustment_height;
      uint32_t adjustment_width;
      uint32_t kernel_height;
      uint32_t kernel_width;
      uint32_t upsampling_height;
      uint32_t upsampling_width;
      uint32_t dilation_height;
      uint32_t dilation_width;
      uint32_t groups;
      size_t group_input_channels;
      size_t group_output_channels;
    } deconvolution_2d;
    struct {
      uint32_t input_padding_top;
      uint32_t input_padding_right;
      uint32_t input_padding_bottom;
      uint32_t input_padding_left;
      uint32_t kernel_height;
      uint32_t kernel_width;
      uint32_t subsampling_height;
      uint32_t subsampling_width;
      uint32_t dilation_height;
      uint32_t dilation_width;
      uint32_t depth_multiplier;
      size_t input_channels;
    } depthwise_convolution_2d;
    struct {
      uint32_t block_size;
    } depth_to_space;
    struct {
      size_t axis;
    } even_split;
    struct {
      uint32_t padding_top;
      uint32_t padding_right;
      uint32_t padding_bottom;
      uint32_t padding_left;
      uint32_t pooling_height;
      uint32_t pooling_width;
      uint32_t stride_height;
      uint32_t stride_width;
      uint32_t dilation_height;
      uint32_t dilation_width;
    } pooling_2d;
    struct {
      float alpha;
    } elu;
    struct {
      float negative_slope;
    } leaky_relu;
    struct {
      size_t pre_paddings[XNN_MAX_TENSOR_DIMS];
      size_t post_paddings[XNN_MAX_TENSOR_DIMS];
      uint32_t padding_value;
    } static_pad;
    struct {
      struct xnn_shape new_shape;
    } static_reshape;
    struct {
      size_t new_height;
      size_t new_width;
    } static_resize;
    struct {
      size_t max_sequence_size;
    } rope;
    struct {
      size_t num_dims;
      size_t offsets[XNN_MAX_TENSOR_DIMS];
      size_t sizes[XNN_MAX_TENSOR_DIMS];
    } slice;
    struct {
      uint32_t block_size;
    } space_to_depth_2d;
    struct {
      size_t num_reduction_axes;
      size_t reduction_axes[XNN_MAX_TENSOR_DIMS];
    } reduce;
    struct {
      size_t perm[XNN_MAX_TENSOR_DIMS];
      size_t num_dims;
    } transpose;
  } params;
  struct {
    float output_min;
    float output_max;
  } activation;
  /// Value IDs for node inputs.
  uint32_t inputs[XNN_MAX_INPUTS];
  uint32_t num_inputs;
  /// Value IDs for node outputs.
  uint32_t outputs[XNN_MAX_OUTPUTS];
  uint32_t num_outputs;
  uint32_t flags;
  uint32_t layout_flags;
  uint32_t cluster_leader;
  // Number of filter parameters in all 1x1 Convolutions of the sparse cluster.
  // This value is properly initialized only in sparse inference analysis of 1x1 Convolutions.
  size_t num_params;
  // Number of zero filter parameters in all 1x1 Convolutions of the sparse cluster.
  // This value is properly initialized only in sparse inference analysis of 1x1 Convolutions.
  size_t num_zeroes;
  // Factory function to create an operator object from the node.
  xnn_create_operator_fn create;
  // Function to reshape an operator using opdata.
  xnn_reshape_operator_fn reshape;
  // Function to set up an operator using opdata.
  xnn_setup_operator_fn setup;
};

#ifdef __MACH__
typedef uint64_t xnn_timestamp;
#elif __EMSCRIPTEN__
typedef double xnn_timestamp;
#elif XNN_PLATFORM_WINDOWS
typedef LARGE_INTEGER xnn_timestamp;
#else
typedef struct timespec xnn_timestamp;
#endif

struct xnn_operator_data {
  enum xnn_node_type type;
  uint32_t id;
  xnn_operator_t operator_objects[XNN_MAX_OPERATOR_OBJECTS];
  xnn_reshape_operator_fn reshape;
  xnn_setup_operator_fn setup;
  size_t batch_size;
  size_t sequence_size;
  size_t heads;
  size_t input_height;
  size_t input_width;
  size_t output_height;
  size_t output_width;
  size_t input_channels;
  size_t output_channels;
  struct xnn_shape shape1;
  struct xnn_shape shape2;
  size_t num_reduction_axes;
  size_t reduction_axes[XNN_MAX_TENSOR_DIMS];
  size_t pre_paddings[XNN_MAX_TENSOR_DIMS];
  size_t post_paddings[XNN_MAX_TENSOR_DIMS];
  // TODO(zhin): merge this with pre_paddings/post_paddings to reduce size of this struct.
  size_t offsets[XNN_MAX_TENSOR_DIMS];
  size_t sizes[XNN_MAX_TENSOR_DIMS];
  uint32_t adjustment_height;
  uint32_t adjustment_width;
  uint32_t num_inputs;
  uint32_t inputs[XNN_MAX_RUNTIME_INPUTS];
  uint32_t num_outputs;
  uint32_t outputs[XNN_MAX_RUNTIME_OUTPUTS];
  xnn_timestamp end_ts[XNN_MAX_OPERATOR_OBJECTS];
  void* workspace;
  size_t workspace_size;
  size_t workspace_alignment;
};

struct xnn_subgraph {
  /// Number of Value IDs reserved for communication with the external graph representation.
  /// Values created during subgraph transformation avoid using IDs in the [0, external_value_ids-1] range.
  uint32_t external_value_ids;

  uint32_t num_reserved_values;
  uint32_t num_values;
  struct xnn_value* values;

  uint32_t num_reserved_nodes;
  uint32_t num_nodes;
  struct xnn_node* nodes;
};

/// Runtime is a combination of an execution plan for subgraph Nodes and a memory manager for subgraph Values.
struct xnn_runtime {
  uint32_t num_external_values;

  /// List of operators in the execution plan, in execution order.
  struct xnn_operator_data* opdata;
  /// Number of operators in the execution plan.
  size_t num_ops;

  struct xnn_value* values;
  size_t num_values;

  struct xnn_workspace* workspace;
  struct xnn_runtime* next_workspace_user;

#if XNN_PLATFORM_JIT
  struct xnn_code_cache code_cache;
#endif  // XNN_PLATFORM_JIT

  pthreadpool_t threadpool;

  bool profiling;
  // The start timestamp of the first operator in the subgraph. This is set when profiling is true.
  xnn_timestamp start_ts;

  // True if the runtime has ever been set up. If it has, the pointers inside opdata need to be updated whenever the
  // workspace changes.
  bool has_been_setup;
};

struct xnn_value* xnn_subgraph_new_internal_value(xnn_subgraph_t subgraph);

struct xnn_node* xnn_subgraph_new_node(xnn_subgraph_t subgraph);

enum xnn_status xnn_subgraph_add_nodes(xnn_subgraph_t subgraph, size_t num_nodes);

size_t xnn_tensor_get_size(const struct xnn_value* value);

size_t xnn_tensor_get_size_by_id(xnn_subgraph_t subgraph, uint32_t value_id);

// Product of all shape dimensions.
size_t xnn_shape_multiply_all_dims(
  const struct xnn_shape shape[1]);

// Product of all shape dimensions, except for the specified number of the last dimensions.
size_t xnn_shape_multiply_batch_dims(
  const struct xnn_shape shape[1], size_t num_nonbatch_dims);

// Product of all shape dimensions, except for the last (channel) one.
size_t xnn_shape_multiply_non_channel_dims(
  const struct xnn_shape shape[1]);

enum xnn_status xnn_subgraph_optimize(xnn_subgraph_t subgraph, uint32_t flags);

void xnn_subgraph_rewrite_for_nchw(xnn_subgraph_t subgraph);

// Rewrites the subgraph for FP16; returns true on success, false if the rewrite failed.
bool xnn_subgraph_rewrite_for_fp16(xnn_subgraph_t subgraph);

void xnn_node_clear(struct xnn_node* node);
void xnn_value_clear(struct xnn_value* value);

void xnn_value_copy(struct xnn_value* dst_value, const struct xnn_value* src_value);

void xnn_init_convert_node(
  struct xnn_node* node,
  enum xnn_compute_type compute_type,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags);

struct xnn_workspace {
  void* data;
  size_t size;
  struct xnn_runtime* first_user;
  // The workspace will be destroyed in xnn_delete_runtime or xnn_delete_workspace when ref_count reaches 0.
  size_t ref_count;
  size_t persistent_size;
};

void xnn_subgraph_analyze_consumers_and_producers(xnn_subgraph_t subgraph);

#ifdef __cplusplus
}  // extern "C"
#endif
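/*
 * Minimal usage sketch (illustrative, not part of the original header):
 * computing the element count of a 2x3x4 tensor with the shape helper
 * declared above.
 */
static size_t example_tensor_elements(void) {
  struct xnn_shape shape;
  shape.num_dims = 3;
  shape.dim[0] = 2;
  shape.dim[1] = 3;
  shape.dim[2] = 4;
  return xnn_shape_multiply_all_dims(&shape);  /* 2 * 3 * 4 == 24 */
}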
15,400
30.366599
120
h
XNNPACK
XNNPACK-master/src/xnnpack/transpose.h
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>
#include <xnnpack/microparams.h>

#ifdef __cplusplus
extern "C" {
#endif

#define DECLARE_XX_TRANSPOSEV_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name(                            \
    const void* input,                                  \
    void* output,                                       \
    size_t input_row_stride,                            \
    size_t output_row_stride,                           \
    size_t input_element_stride,                        \
    size_t output_element_stride,                       \
    size_t element_size,                                \
    size_t block_width,                                 \
    size_t block_height);

DECLARE_XX_TRANSPOSEV_UKERNEL_FUNCTION(xnn_xx_transposev_ukernel__1x1_scalar_memcpy)

#define DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name(                             \
    const uint64_t* input,                               \
    uint64_t* output,                                    \
    size_t input_stride,                                 \
    size_t output_stride,                                \
    size_t block_width,                                  \
    size_t block_height,                                 \
    const union xnn_x64_transpose_params* params);

DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__1x2_scalar_float)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__1x2_scalar_int)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__2x1_scalar_float)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__2x1_scalar_int)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__2x2_multi_dec_zip_neon)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__2x2_multi_mov_sse2)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__2x2_multi_mov_zip_neon)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__2x2_multi_multi_sse2)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__2x2_multi_multi_zip_neon)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__2x2_multi_switch_sse2)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__2x2_multi_switch_zip_neon)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__2x2_reuse_dec_zip_neon)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__2x2_reuse_mov_sse2)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__2x2_reuse_mov_zip_neon)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__2x2_reuse_multi_sse2)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__2x2_reuse_multi_zip_neon)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__2x2_reuse_switch_sse2)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__2x2_reuse_switch_zip_neon)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__2x2_scalar_float)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__2x2_scalar_int)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__4x1_scalar_float)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__4x1_scalar_int)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__4x2_scalar_float)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__4x2_scalar_int)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__4x4_multi_mov_avx)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__4x4_multi_multi_avx)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__4x4_multi_switch_avx)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__4x4_reuse_mov_avx)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__4x4_reuse_multi_avx)
DECLARE_X64_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x64_transposec_ukernel__4x4_reuse_switch_avx)

#define DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name(                             \
    const uint32_t* input,                               \
    uint32_t* output,                                    \
    size_t input_stride,                                 \
    size_t output_stride,                                \
    size_t block_width,                                  \
    size_t block_height,                                 \
    const union xnn_x32_transpose_params* params);

DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__1x2_scalar_float)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__1x2_scalar_int)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__1x4_scalar_float)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__1x4_scalar_int)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__2x1_scalar_float)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__2x1_scalar_int)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__2x2_multi_dec_zip_neon)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__2x2_multi_mov_zip_neon)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__2x2_multi_multi_zip_neon)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__2x2_multi_switch_zip_neon)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__2x2_reuse_dec_zip_neon)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__2x2_reuse_mov_zip_neon)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__2x2_reuse_multi_zip_neon)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__2x2_reuse_switch_zip_neon)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__2x2_scalar_float)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__2x2_scalar_int)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__2x4_scalar_float)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__2x4_scalar_int)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x1_scalar_float)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x1_scalar_int)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x2_scalar_float)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x2_scalar_int)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_aarch64_neon_tbl128)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_multi_dec_zip_neon)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_multi_mov_sse2)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_multi_mov_wasmsimd)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_multi_mov_zip_neon)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_multi_multi_sse2)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_multi_multi_wasmsimd)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_multi_multi_zip_neon)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_multi_switch_sse2)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_multi_switch_wasmsimd)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_multi_switch_zip_neon)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_reuse_dec_zip_neon)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_reuse_mov_sse2)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_reuse_mov_wasmsimd)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_reuse_mov_zip_neon)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_reuse_multi_sse2)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_reuse_multi_wasmsimd)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_reuse_multi_zip_neon)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_reuse_switch_sse2)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_reuse_switch_wasmsimd)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_reuse_switch_zip_neon)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_scalar_float)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_scalar_int)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__4x4_sse)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__8x8_multi_mov_avx)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__8x8_multi_switch_avx)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__8x8_reuse_mov_avx)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__8x8_reuse_multi_avx)
DECLARE_X32_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x32_transposec_ukernel__8x8_reuse_switch_avx)

#define DECLARE_X24_TRANSPOSEC_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name(                             \
    const void* input,                                   \
    void* output,                                        \
    size_t input_stride,                                 \
    size_t output_stride,                                \
    size_t block_width,                                  \
    size_t block_height,                                 \
    const union xnn_x24_transpose_params* params);

DECLARE_X24_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x24_transposec_ukernel__1x2_scalar)
DECLARE_X24_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x24_transposec_ukernel__1x4_scalar)
DECLARE_X24_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x24_transposec_ukernel__2x1_scalar)
DECLARE_X24_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x24_transposec_ukernel__2x2_neon_tbl64)
DECLARE_X24_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x24_transposec_ukernel__2x2_scalar)
DECLARE_X24_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x24_transposec_ukernel__2x4_scalar)
DECLARE_X24_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x24_transposec_ukernel__4x1_scalar)
DECLARE_X24_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x24_transposec_ukernel__4x2_scalar)
DECLARE_X24_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x24_transposec_ukernel__4x4_aarch64_neon_tbl128)
DECLARE_X24_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x24_transposec_ukernel__4x4_scalar)
DECLARE_X24_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x24_transposec_ukernel__4x4_ssse3)

#define DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name(                             \
    const uint16_t* input,                               \
    uint16_t* output,                                    \
    size_t input_stride,                                 \
    size_t output_stride,                                \
    size_t block_width,                                  \
    size_t block_height,                                 \
    const union xnn_x16_transpose_params* params);

DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__1x2_scalar_int)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__1x4_scalar_int)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__2x1_scalar_int)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__2x2_scalar_int)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__2x4_scalar_int)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__4x1_scalar_int)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__4x2_scalar_int)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__4x4_multi_dec_zip_neon)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__4x4_multi_mov_zip_neon)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__4x4_multi_multi_zip_neon)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__4x4_multi_switch_zip_neon)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__4x4_reuse_dec_zip_neon)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__4x4_reuse_mov_zip_neon)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__4x4_reuse_multi_zip_neon)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__4x4_reuse_switch_zip_neon)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__4x4_scalar_int)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__4x8_sse2)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__8x8_multi_dec_zip_neon)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__8x8_multi_mov_sse2)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__8x8_multi_mov_wasmsimd)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__8x8_multi_mov_zip_neon)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__8x8_multi_switch_sse2)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__8x8_multi_switch_wasmsimd)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__8x8_multi_switch_zip_neon)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__8x8_reuse_dec_zip_neon)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__8x8_reuse_mov_sse2)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__8x8_reuse_mov_wasmsimd)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__8x8_reuse_mov_zip_neon)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__8x8_reuse_multi_sse2)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__8x8_reuse_multi_wasmsimd)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__8x8_reuse_multi_zip_neon)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__8x8_reuse_switch_sse2)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__8x8_reuse_switch_wasmsimd)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__8x8_reuse_switch_zip_neon)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__16x16_reuse_mov_avx2)
DECLARE_X16_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x16_transposec_ukernel__16x16_reuse_switch_avx2)

#define DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name(                            \
    const uint8_t* input,                               \
    uint8_t* output,                                    \
    size_t input_stride,                                \
    size_t output_stride,                               \
    size_t block_width,                                 \
    size_t block_height,                                \
    const union xnn_x8_transpose_params* params);

DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__1x2_scalar_int)
DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__1x4_scalar_int)
DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__2x1_scalar_int)
DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__2x2_scalar_int)
DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__2x4_scalar_int)
DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__4x1_scalar_int)
DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__4x2_scalar_int)
DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__4x4_scalar_int)
DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__8x8_multi_dec_zip_neon)
DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__8x8_multi_mov_zip_neon)
DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__8x8_multi_switch_zip_neon)
DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__8x8_reuse_dec_zip_neon)
DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__8x8_reuse_mov_zip_neon)
DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__8x8_reuse_multi_zip_neon)
DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__8x8_reuse_switch_zip_neon)
DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__16x16_reuse_dec_zip_neon)
DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__16x16_reuse_mov_sse2)
DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__16x16_reuse_mov_wasmsimd)
DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__16x16_reuse_mov_zip_neon)
DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__16x16_reuse_switch_sse2)
DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__16x16_reuse_switch_wasmsimd)
DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__16x16_reuse_switch_zip_neon)
DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__32x32_reuse_mov_avx2)
DECLARE_X8_TRANSPOSEC_UKERNEL_FUNCTION(xnn_x8_transposec_ukernel__32x32_reuse_switch_avx2)

#ifdef __cplusplus
}  // extern "C"
#endif
17,178
69.987603
94
h
XNNPACK
XNNPACK-master/src/xnnpack/unaligned.h
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>


XNN_INLINE static uint16_t unaligned_load_u16(const void* address) {
  typedef XNN_UNALIGNED uint16_t xnn_unaligned_uint16_t;
  return *((const xnn_unaligned_uint16_t*) address);
}

XNN_INLINE static float unaligned_load_f32(const void* address) {
  typedef XNN_UNALIGNED float xnn_unaligned_float;
  return *((const xnn_unaligned_float*) address);
}

XNN_INLINE static int32_t unaligned_load_s32(const void* address) {
  typedef XNN_UNALIGNED int32_t xnn_unaligned_int32_t;
  return *((const xnn_unaligned_int32_t*) address);
}

XNN_INLINE static uint32_t unaligned_load_u32(const void* address) {
  typedef XNN_UNALIGNED uint32_t xnn_unaligned_uint32_t;
  return *((const xnn_unaligned_uint32_t*) address);
}

XNN_INLINE static float unaligned_indexed_load_f32(const void* address, size_t index) {
  typedef XNN_UNALIGNED float xnn_unaligned_float;
  return ((const xnn_unaligned_float*) address)[index];
}

XNN_INLINE static int32_t unaligned_indexed_load_s32(const void* address, size_t index) {
  typedef XNN_UNALIGNED int32_t xnn_unaligned_int32_t;
  return ((const xnn_unaligned_int32_t*) address)[index];
}

XNN_INLINE static uint32_t unaligned_indexed_load_u32(const void* address, size_t index) {
  typedef XNN_UNALIGNED uint32_t xnn_unaligned_uint32_t;
  return ((const xnn_unaligned_uint32_t*) address)[index];
}

XNN_INLINE static void unaligned_store_u16(void* address, uint16_t value) {
  typedef XNN_UNALIGNED uint16_t xnn_unaligned_uint16_t;
  *((xnn_unaligned_uint16_t*) address) = value;
}

XNN_INLINE static void unaligned_store_f32(void* address, float value) {
  typedef XNN_UNALIGNED float xnn_unaligned_float;
  *((xnn_unaligned_float*) address) = value;
}

XNN_INLINE static void unaligned_store_s32(void* address, int32_t value) {
  typedef XNN_UNALIGNED int32_t xnn_unaligned_int32_t;
  *((xnn_unaligned_int32_t*) address) = value;
}

XNN_INLINE static void unaligned_store_u32(void* address, uint32_t value) {
  typedef XNN_UNALIGNED uint32_t xnn_unaligned_uint32_t;
  *((xnn_unaligned_uint32_t*) address) = value;
}

XNN_INLINE static void unaligned_indexed_store_f32(void* address, size_t index, float value) {
  typedef XNN_UNALIGNED float xnn_unaligned_float;
  ((xnn_unaligned_float*) address)[index] = value;
}

XNN_INLINE static void unaligned_indexed_store_s32(void* address, size_t index, int32_t value) {
  typedef XNN_UNALIGNED int32_t xnn_unaligned_int32_t;
  ((xnn_unaligned_int32_t*) address)[index] = value;
}

XNN_INLINE static void unaligned_indexed_store_u32(void* address, size_t index, uint32_t value) {
  typedef XNN_UNALIGNED uint32_t xnn_unaligned_uint32_t;
  ((xnn_unaligned_uint32_t*) address)[index] = value;
}

XNN_INLINE static void unaligned_indexed_store_u16(void* address, size_t index, uint16_t value) {
  typedef XNN_UNALIGNED uint16_t xnn_unaligned_uint16_t;
  ((xnn_unaligned_uint16_t*) address)[index] = value;
}
3,127
34.545455
97
h
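The helpers in unaligned.h are self-contained, so a short usage sketch can be grounded directly in the declarations above. A minimal example in plain C (the buffer and values are illustrative):

#include <stdint.h>
// #include <xnnpack/unaligned.h>  // provides the helpers shown above

int main(void) {
  uint8_t buffer[16];
  // Store a 32-bit value at an odd offset; a plain *(uint32_t*) write here
  // would be undefined behavior on strict-alignment targets.
  unaligned_store_u32(&buffer[1], UINT32_C(0xDEADBEEF));
  // Read it back through the matching typedef-based load.
  const uint32_t value = unaligned_load_u32(&buffer[1]);
  return value == UINT32_C(0xDEADBEEF) ? 0 : 1;
}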
XNNPACK
XNNPACK-master/src/xnnpack/unpool.h
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>

#ifdef __cplusplus
extern "C" {
#endif


#define DECLARE_X32_UNPOOL_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
      size_t p, \
      size_t c, \
      uint32_t f, \
      const uint32_t* input, \
      const uint32_t* index, \
      uint32_t** output);

DECLARE_X32_UNPOOL_UKERNEL_FUNCTION(xnn_x32_unpool_ukernel__neon)
DECLARE_X32_UNPOOL_UKERNEL_FUNCTION(xnn_x32_unpool_ukernel__scalar)
DECLARE_X32_UNPOOL_UKERNEL_FUNCTION(xnn_x32_unpool_ukernel__sse2)
DECLARE_X32_UNPOOL_UKERNEL_FUNCTION(xnn_x32_unpool_ukernel__wasmsimd)

#ifdef __cplusplus
}  // extern "C"
#endif
1,015
27.222222
72
h
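For reference, each declaration macro above expands to an ordinary prototype; e.g. the scalar variant expands to:

XNN_INTERNAL void xnn_x32_unpool_ukernel__scalar(
    size_t p,
    size_t c,
    uint32_t f,
    const uint32_t* input,
    const uint32_t* index,
    uint32_t** output);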
XNNPACK
XNNPACK-master/src/xnnpack/vadd.h
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>
#include <xnnpack/microparams.h>

#ifdef __cplusplus
extern "C" {
#endif


#define DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
      size_t n, \
      const uint8_t* input_a, \
      const uint8_t* input_b, \
      uint8_t* output, \
      const union xnn_qu8_add_minmax_params* params);

DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__neon_ld64_x8)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__neon_ld64_x16)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__neon_ld64_x32)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__neon_ld128_x16)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__sse2_mul16_ld64_x8)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__sse2_mul16_ld64_x16)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__sse41_mul16_ld64_x8)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__sse41_mul16_ld64_x16)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__avx_mul16_ld64_x8)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__avx_mul16_ld64_x16)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__sse41_mul32_ld32_x8)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__sse41_mul32_ld32_x16)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__avx_mul32_ld32_x8)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__avx_mul32_ld32_x16)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__xop_mul32_ld32_x8)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__xop_mul32_ld32_x16)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__avx2_mul32_ld64_x8)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__avx2_mul32_ld64_x16)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__avx512skx_mul32_ld128_x16)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__avx512skx_mul32_ld128_x32)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__wasmsimd_x8)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__wasmsimd_x16)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__wasmsimd_x32)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__scalar_x1)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__scalar_x2)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__scalar_x4)

DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__neon_ld64_x8)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__neon_ld64_x16)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__neon_ld64_x32)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__neon_ld128_x16)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__sse2_mul16_ld64_x8)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__sse2_mul16_ld64_x16)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__sse41_mul16_ld64_x8)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__sse41_mul16_ld64_x16)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__avx_mul16_ld64_x8)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__avx_mul16_ld64_x16)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__sse41_mul32_ld32_x8)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__sse41_mul32_ld32_x16)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__avx_mul32_ld32_x8)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__avx_mul32_ld32_x16)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__xop_mul32_ld32_x8)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__xop_mul32_ld32_x16)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__avx2_mul32_ld64_x8)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__avx2_mul32_ld64_x16)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__avx512skx_mul32_ld128_x16)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__avx512skx_mul32_ld128_x32)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__wasmsimd_x8)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__wasmsimd_x16)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__wasmsimd_x32)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__scalar_x1)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__scalar_x2)
DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__scalar_x4)

#define DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
      size_t n, \
      const int8_t* input_a, \
      const int8_t* input_b, \
      int8_t* output, \
      const union xnn_qs8_add_minmax_params* params);

DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__neon_ld64_x8)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__neon_ld64_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__neon_ld128_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__neon_ld128_x32)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__sse2_mul16_ld64_x8)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__sse2_mul16_ld64_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__sse2_mul16_ld64_x24)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__sse2_mul16_ld64_x32)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x8)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x24)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x32)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__avx_mul16_ld64_x8)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__avx_mul16_ld64_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__avx_mul16_ld64_x24)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__avx_mul16_ld64_x32)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__sse41_mul32_ld32_x8)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__sse41_mul32_ld32_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__sse41_mul32_ld32_x24)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__sse41_mul32_ld32_x32)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__avx_mul32_ld32_x8)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__avx_mul32_ld32_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__avx_mul32_ld32_x24)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__avx_mul32_ld32_x32)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__xop_mul32_ld32_x8)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__xop_mul32_ld32_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__xop_mul32_ld32_x24)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__xop_mul32_ld32_x32)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x8)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x24)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__avx512skx_mul32_ld128_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__avx512skx_mul32_ld128_x32)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__wasmsimd_x8)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__wasmsimd_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__wasmsimd_x24)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__wasmsimd_x32)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__scalar_x1)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__scalar_x2)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__scalar_x4)

DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x8)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x24)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x32)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__neon_ld128_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__neon_ld128_x32)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__sse2_mul16_ld64_x8)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__sse2_mul16_ld64_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__sse2_mul16_ld64_x24)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__sse2_mul16_ld64_x32)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__sse41_mul16_ld64_x8)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__sse41_mul16_ld64_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__sse41_mul16_ld64_x24)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__sse41_mul16_ld64_x32)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__avx_mul16_ld64_x8)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__avx_mul16_ld64_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__avx_mul16_ld64_x24)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__avx_mul16_ld64_x32)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__sse41_mul32_ld32_x8)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__sse41_mul32_ld32_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__sse41_mul32_ld32_x24)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__sse41_mul32_ld32_x32)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__avx_mul32_ld32_x8)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__avx_mul32_ld32_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__avx_mul32_ld32_x24)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__avx_mul32_ld32_x32)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__xop_mul32_ld32_x8)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__xop_mul32_ld32_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__xop_mul32_ld32_x24)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__xop_mul32_ld32_x32)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x8)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x24)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x32)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__avx512skx_mul32_ld128_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__avx512skx_mul32_ld128_x32)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__wasmsimd_x8)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__wasmsimd_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__wasmsimd_x24)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__wasmsimd_x32)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__scalar_x1)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__scalar_x2)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__scalar_x4)

#ifdef __cplusplus
}  // extern "C"
#endif
13,620
58.480349
97
h
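Each declaration macro expands to an ordinary prototype; the vaddc names presumably denote the variant where one operand is a single broadcast value rather than a full vector (the declarations alone do not spell this out). For example, the scalar QU8 declaration expands to:

XNN_INTERNAL void xnn_qu8_vadd_minmax_ukernel__scalar_x1(
    size_t n,
    const uint8_t* input_a,
    const uint8_t* input_b,
    uint8_t* output,
    const union xnn_qu8_add_minmax_params* params);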
XNNPACK
XNNPACK-master/src/xnnpack/vhswish.h
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>
#include <xnnpack/microparams.h>

#ifdef __cplusplus
extern "C" {
#endif


#define DECLARE_QS8_VHSWISH_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
      size_t n, \
      const int8_t* input, \
      int8_t* output, \
      const union xnn_qs8_hswish_params* params);

DECLARE_QS8_VHSWISH_UKERNEL_FUNCTION(xnn_qs8_vhswish_ukernel__neon_x8)
DECLARE_QS8_VHSWISH_UKERNEL_FUNCTION(xnn_qs8_vhswish_ukernel__neon_x16)
DECLARE_QS8_VHSWISH_UKERNEL_FUNCTION(xnn_qs8_vhswish_ukernel__neon_x32)
DECLARE_QS8_VHSWISH_UKERNEL_FUNCTION(xnn_qs8_vhswish_ukernel__scalar_x1)
DECLARE_QS8_VHSWISH_UKERNEL_FUNCTION(xnn_qs8_vhswish_ukernel__scalar_x2)
DECLARE_QS8_VHSWISH_UKERNEL_FUNCTION(xnn_qs8_vhswish_ukernel__scalar_x4)

#define DECLARE_QU8_VHSWISH_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
      size_t n, \
      const uint8_t* input, \
      uint8_t* output, \
      const union xnn_qu8_hswish_params* params);

DECLARE_QU8_VHSWISH_UKERNEL_FUNCTION(xnn_qu8_vhswish_ukernel__neon_x8)
DECLARE_QU8_VHSWISH_UKERNEL_FUNCTION(xnn_qu8_vhswish_ukernel__neon_x16)
DECLARE_QU8_VHSWISH_UKERNEL_FUNCTION(xnn_qu8_vhswish_ukernel__neon_x32)
DECLARE_QU8_VHSWISH_UKERNEL_FUNCTION(xnn_qu8_vhswish_ukernel__scalar_x1)
DECLARE_QU8_VHSWISH_UKERNEL_FUNCTION(xnn_qu8_vhswish_ukernel__scalar_x2)
DECLARE_QU8_VHSWISH_UKERNEL_FUNCTION(xnn_qu8_vhswish_ukernel__scalar_x4)

#ifdef __cplusplus
}  // extern "C"
#endif
1,901
36.294118
72
h
XNNPACK
XNNPACK-master/src/xnnpack/vlog.h
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>

#ifdef __cplusplus
extern "C" {
#endif


#define DECLARE_U32_VLOG_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
      size_t batch_size, \
      const uint32_t* input, \
      uint32_t input_lshift, \
      uint32_t output_scale, \
      uint16_t* output);

DECLARE_U32_VLOG_UKERNEL_FUNCTION(xnn_u32_vlog_ukernel__scalar_x1)
DECLARE_U32_VLOG_UKERNEL_FUNCTION(xnn_u32_vlog_ukernel__scalar_x2)
DECLARE_U32_VLOG_UKERNEL_FUNCTION(xnn_u32_vlog_ukernel__scalar_x3)
DECLARE_U32_VLOG_UKERNEL_FUNCTION(xnn_u32_vlog_ukernel__scalar_x4)

#ifdef __cplusplus
}  // extern "C"
#endif
945
26.028571
72
h
XNNPACK
XNNPACK-master/src/xnnpack/vlrelu.h
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>
#include <xnnpack/microparams.h>

#ifdef __cplusplus
extern "C" {
#endif


#define DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
      size_t n, \
      const int8_t* input, \
      int8_t* output, \
      const union xnn_qs8_lrelu_params* params);

DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__neon_x8)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__neon_x16)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__neon_x32)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__sse2_x16)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__sse2_x32)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__ssse3_x16)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__ssse3_x32)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__sse41_x8)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__sse41_x16)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__sse41_x32)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__avx_x8)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__avx_x16)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__avx_x32)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__avx2_x16)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__avx2_x32)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__avx2_x64)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__wasmsimd_arm_x16)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__wasmsimd_arm_x32)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__wasmsimd_x86_x8)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__wasmsimd_x86_x16)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__wasmsimd_x86_x32)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__wasmrelaxedsimd_arm_x16)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__wasmrelaxedsimd_arm_x32)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__wasmrelaxedsimd_x86_x8)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__wasmrelaxedsimd_x86_x16)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__wasmrelaxedsimd_x86_x32)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__armsimd32_x4)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__armsimd32_x8)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__scalar_andxor_x1)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__scalar_andxor_x2)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__scalar_andxor_x4)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__scalar_select_x1)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__scalar_select_x2)
DECLARE_QS8_VLRELU_UKERNEL_FUNCTION(xnn_qs8_vlrelu_ukernel__scalar_select_x4)

#define DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
      size_t n, \
      const uint8_t* input, \
      uint8_t* output, \
      const union xnn_qu8_lrelu_params* params);

DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__neon_x8)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__neon_x16)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__neon_x32)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__sse2_x16)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__sse2_x32)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__ssse3_x16)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__ssse3_x32)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__sse41_x8)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__sse41_x16)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__sse41_x32)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__avx_x8)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__avx_x16)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__avx_x32)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__avx2_x16)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__avx2_x32)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__avx2_x64)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__wasmsimd_arm_x16)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__wasmsimd_arm_x32)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__wasmsimd_x86_x8)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__wasmsimd_x86_x16)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__wasmsimd_x86_x32)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__wasmrelaxedsimd_arm_x16)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__wasmrelaxedsimd_arm_x32)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__wasmrelaxedsimd_x86_x8)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__wasmrelaxedsimd_x86_x16)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__wasmrelaxedsimd_x86_x32)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__armsimd32_x4)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__armsimd32_x8)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__scalar_andxor_x1)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__scalar_andxor_x2)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__scalar_andxor_x4)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__scalar_select_x1)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__scalar_select_x2)
DECLARE_QU8_VLRELU_UKERNEL_FUNCTION(xnn_qu8_vlrelu_ukernel__scalar_select_x4)

#ifdef __cplusplus
}  // extern "C"
#endif
6,140
45.522727
84
h
XNNPACK
XNNPACK-master/src/xnnpack/vlshift.h
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>

#ifdef __cplusplus
extern "C" {
#endif


#define DECLARE_I16_VLSHIFT_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
      size_t batch, \
      const uint16_t* input, \
      uint16_t* output, \
      uint32_t shift);

DECLARE_I16_VLSHIFT_UKERNEL_FUNCTION(xnn_i16_vlshift_ukernel__neon_x8)
DECLARE_I16_VLSHIFT_UKERNEL_FUNCTION(xnn_i16_vlshift_ukernel__neon_x16)
DECLARE_I16_VLSHIFT_UKERNEL_FUNCTION(xnn_i16_vlshift_ukernel__neon_x24)
DECLARE_I16_VLSHIFT_UKERNEL_FUNCTION(xnn_i16_vlshift_ukernel__neon_x32)
DECLARE_I16_VLSHIFT_UKERNEL_FUNCTION(xnn_i16_vlshift_ukernel__scalar_x1)
DECLARE_I16_VLSHIFT_UKERNEL_FUNCTION(xnn_i16_vlshift_ukernel__scalar_x2)
DECLARE_I16_VLSHIFT_UKERNEL_FUNCTION(xnn_i16_vlshift_ukernel__scalar_x3)
DECLARE_I16_VLSHIFT_UKERNEL_FUNCTION(xnn_i16_vlshift_ukernel__scalar_x4)

#ifdef __cplusplus
}  // extern "C"
#endif
1,217
30.230769
72
h
XNNPACK
XNNPACK-master/src/xnnpack/vmul.h
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>
#include <xnnpack/microparams.h>

#ifdef __cplusplus
extern "C" {
#endif


#define DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
      size_t n, \
      const uint8_t* input_a, \
      const uint8_t* input_b, \
      uint8_t* output, \
      const union xnn_qu8_mul_minmax_params* params);

DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmul_minmax_fp32_ukernel__neon_ld64_x8)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmul_minmax_fp32_ukernel__neon_ld64_x16)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmul_minmax_fp32_ukernel__neon_ld128_x16)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmul_minmax_rndnu_ukernel__neon_ld64_x8)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmul_minmax_rndnu_ukernel__neon_ld64_x16)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmul_minmax_rndnu_ukernel__neon_ld128_x16)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmul_minmax_fp32_ukernel__neonv8_ld64_x8)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmul_minmax_fp32_ukernel__neonv8_ld64_x16)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmul_minmax_fp32_ukernel__neonv8_ld128_x16)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmul_minmax_fp32_ukernel__sse2_mul16_ld64_x8)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmul_minmax_fp32_ukernel__sse2_mul16_ld64_x16)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmul_minmax_fp32_ukernel__sse41_mul16_ld64_x8)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmul_minmax_fp32_ukernel__sse41_mul16_ld64_x16)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmul_minmax_fp32_ukernel__avx_mul16_ld64_x8)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmul_minmax_fp32_ukernel__avx_mul16_ld64_x16)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmul_minmax_fp32_ukernel__wasmsimd_mul32_ld64_x8)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmul_minmax_fp32_ukernel__wasmsimd_mul32_ld64_x16)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmul_minmax_fp32_ukernel__scalar_x1)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmul_minmax_fp32_ukernel__scalar_x2)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmul_minmax_fp32_ukernel__scalar_x4)

DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmulc_minmax_fp32_ukernel__neon_ld64_x8)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmulc_minmax_fp32_ukernel__neon_ld64_x16)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmulc_minmax_fp32_ukernel__neon_ld128_x16)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmulc_minmax_rndnu_ukernel__neon_ld64_x8)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmulc_minmax_rndnu_ukernel__neon_ld64_x16)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmulc_minmax_rndnu_ukernel__neon_ld128_x16)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmulc_minmax_fp32_ukernel__neonv8_ld64_x8)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmulc_minmax_fp32_ukernel__neonv8_ld64_x16)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmulc_minmax_fp32_ukernel__neonv8_ld128_x16)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmulc_minmax_fp32_ukernel__sse2_mul16_ld64_x8)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmulc_minmax_fp32_ukernel__sse2_mul16_ld64_x16)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmulc_minmax_fp32_ukernel__sse41_mul16_ld64_x8)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmulc_minmax_fp32_ukernel__sse41_mul16_ld64_x16)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmulc_minmax_fp32_ukernel__avx_mul16_ld64_x8)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmulc_minmax_fp32_ukernel__avx_mul16_ld64_x16)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmulc_minmax_fp32_ukernel__wasmsimd_mul32_ld64_x8)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmulc_minmax_fp32_ukernel__wasmsimd_mul32_ld64_x16)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmulc_minmax_fp32_ukernel__scalar_x1)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmulc_minmax_fp32_ukernel__scalar_x2)
DECLARE_QU8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vmulc_minmax_fp32_ukernel__scalar_x4)

#define DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
      size_t n, \
      const int8_t* input_a, \
      const int8_t* input_b, \
      int8_t* output, \
      const union xnn_qs8_mul_minmax_params* params);

DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmul_minmax_fp32_ukernel__neon_ld64_x8)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmul_minmax_fp32_ukernel__neon_ld64_x16)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmul_minmax_fp32_ukernel__neon_ld128_x16)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmul_minmax_rndnu_ukernel__neon_ld64_x8)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmul_minmax_rndnu_ukernel__neon_ld64_x16)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmul_minmax_rndnu_ukernel__neon_ld128_x16)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmul_minmax_fp32_ukernel__neonv8_ld64_x8)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmul_minmax_fp32_ukernel__neonv8_ld64_x16)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmul_minmax_fp32_ukernel__neonv8_ld128_x16)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmul_minmax_fp32_ukernel__sse2_mul16_ld64_x8)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmul_minmax_fp32_ukernel__sse2_mul16_ld64_x16)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmul_minmax_fp32_ukernel__sse41_mul16_ld64_x8)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmul_minmax_fp32_ukernel__sse41_mul16_ld64_x16)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmul_minmax_fp32_ukernel__avx_mul16_ld64_x8)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmul_minmax_fp32_ukernel__avx_mul16_ld64_x16)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmul_minmax_fp32_ukernel__wasmsimd_mul32_ld64_x8)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmul_minmax_fp32_ukernel__wasmsimd_mul32_ld64_x16)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmul_minmax_fp32_ukernel__scalar_x1)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmul_minmax_fp32_ukernel__scalar_x2)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmul_minmax_fp32_ukernel__scalar_x4)

DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmulc_minmax_fp32_ukernel__neon_ld64_x8)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmulc_minmax_fp32_ukernel__neon_ld64_x16)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmulc_minmax_fp32_ukernel__neon_ld128_x16)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmulc_minmax_rndnu_ukernel__neon_ld64_x8)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmulc_minmax_rndnu_ukernel__neon_ld64_x16)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmulc_minmax_rndnu_ukernel__neon_ld128_x16)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmulc_minmax_fp32_ukernel__neonv8_ld64_x8)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmulc_minmax_fp32_ukernel__neonv8_ld64_x16)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmulc_minmax_fp32_ukernel__neonv8_ld128_x16)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmulc_minmax_fp32_ukernel__sse2_mul16_ld64_x8)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmulc_minmax_fp32_ukernel__sse2_mul16_ld64_x16)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmulc_minmax_fp32_ukernel__sse41_mul16_ld64_x8)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmulc_minmax_fp32_ukernel__sse41_mul16_ld64_x16)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmulc_minmax_fp32_ukernel__avx_mul16_ld64_x8)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmulc_minmax_fp32_ukernel__avx_mul16_ld64_x16)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmulc_minmax_fp32_ukernel__wasmsimd_mul32_ld64_x8)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmulc_minmax_fp32_ukernel__wasmsimd_mul32_ld64_x16)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmulc_minmax_fp32_ukernel__scalar_x1)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmulc_minmax_fp32_ukernel__scalar_x2)
DECLARE_QS8_VMUL_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vmulc_minmax_fp32_ukernel__scalar_x4)

#ifdef __cplusplus
}  // extern "C"
#endif
8,668
55.292208
100
h
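The fp32 and rndnu infixes name two different requantization schemes for the 32-bit product. As a rough orientation only, here is a hypothetical standalone reference for one element of the fp32 path; the real kernels read scales, zero points, and clamp limits from the opaque xnn_qu8_mul_minmax_params union rather than taking them as arguments, and the exact rounding details are an assumption here:

#include <math.h>
#include <stdint.h>

// Hedged sketch: dequantize-free integer multiply, then float requantization.
static uint8_t qu8_mul_fp32_ref(uint8_t a, uint8_t b,
                                int32_t a_zero, int32_t b_zero,
                                float scale, int32_t out_zero,
                                uint8_t out_min, uint8_t out_max) {
  const int32_t acc = ((int32_t) a - a_zero) * ((int32_t) b - b_zero);
  int32_t out = (int32_t) lrintf((float) acc * scale) + out_zero;
  if (out < (int32_t) out_min) out = out_min;
  if (out > (int32_t) out_max) out = out_max;
  return (uint8_t) out;
}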
XNNPACK
XNNPACK-master/src/xnnpack/vmulcaddc.h
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>
#include <xnnpack/microparams.h>

#ifdef __cplusplus
extern "C" {
#endif


#define DECLARE_F32_VMULCADDC_MINMAX_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
      size_t m, \
      size_t c, \
      const float* x, \
      size_t x_stride, \
      const float* w, \
      float* y, \
      size_t y_stride, \
      const union xnn_f32_minmax_params* params);

DECLARE_F32_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f32_vmulcaddc_minmax_ukernel_c4__neon_2x)
DECLARE_F32_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f32_vmulcaddc_minmax_ukernel_c8__neon_2x)
DECLARE_F32_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f32_vmulcaddc_minmax_ukernel_c4__neonfma_2x)
DECLARE_F32_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f32_vmulcaddc_minmax_ukernel_c8__neonfma_2x)
DECLARE_F32_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f32_vmulcaddc_minmax_ukernel_c4__sse_2x)
DECLARE_F32_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f32_vmulcaddc_minmax_ukernel_c8__sse_2x)
DECLARE_F32_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f32_vmulcaddc_minmax_ukernel_c4__wasmsimd_arm_2x)
DECLARE_F32_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f32_vmulcaddc_minmax_ukernel_c8__wasmsimd_arm_2x)
DECLARE_F32_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f32_vmulcaddc_minmax_ukernel_c4__wasmsimd_x86_2x)
DECLARE_F32_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f32_vmulcaddc_minmax_ukernel_c8__wasmsimd_x86_2x)
DECLARE_F32_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f32_vmulcaddc_minmax_ukernel_c4__wasmrelaxedsimd_2x)
DECLARE_F32_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f32_vmulcaddc_minmax_ukernel_c8__wasmrelaxedsimd_2x)
DECLARE_F32_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f32_vmulcaddc_minmax_ukernel_c4__wasmrelaxedsimd_fma_2x)
DECLARE_F32_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f32_vmulcaddc_minmax_ukernel_c8__wasmrelaxedsimd_fma_2x)
DECLARE_F32_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f32_vmulcaddc_minmax_ukernel_c1__wasm_2x)
DECLARE_F32_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f32_vmulcaddc_minmax_ukernel_c2__wasm_2x)
DECLARE_F32_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f32_vmulcaddc_minmax_ukernel_c4__wasm_2x)
DECLARE_F32_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f32_vmulcaddc_minmax_ukernel_c1__scalar_2x)
DECLARE_F32_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f32_vmulcaddc_minmax_ukernel_c2__scalar_2x)
DECLARE_F32_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f32_vmulcaddc_minmax_ukernel_c4__scalar_2x)

#define DECLARE_F16_VMULCADDC_MINMAX_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
      size_t m, \
      size_t c, \
      const void* x, \
      size_t x_stride, \
      const void* w, \
      void* y, \
      size_t y_stride, \
      const union xnn_f16_minmax_params* params);

DECLARE_F16_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f16_vmulcaddc_minmax_ukernel_c8__neonfp16arith_2x)
DECLARE_F16_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f16_vmulcaddc_minmax_ukernel_c16__neonfp16arith_2x)
DECLARE_F16_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f16_vmulcaddc_minmax_ukernel_c8__fma3_2x)
DECLARE_F16_VMULCADDC_MINMAX_UKERNEL_FUNCTION(xnn_f16_vmulcaddc_minmax_ukernel_c16__fma3_2x)

#ifdef __cplusplus
}  // extern "C"
#endif
3,971
49.278481
106
h
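As the vmulcaddc name suggests, these kernels presumably multiply each channel by a per-channel constant and add a per-channel constant, then clamp. An illustrative scalar reference for one row, assuming separate scale/bias arrays (the packed layout of w in the real kernels is an assumption here):

// Hedged scalar sketch: y[c] = x[c] * w_scale[c] + w_bias[c], clamped.
static void vmulcaddc_ref(size_t channels, const float* x,
                          const float* w_scale, const float* w_bias,
                          float* y, float min, float max) {
  for (size_t c = 0; c < channels; c++) {
    float v = x[c] * w_scale[c] + w_bias[c];
    v = v < min ? min : v;
    v = v > max ? max : v;
    y[c] = v;
  }
}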
XNNPACK
XNNPACK-master/src/xnnpack/vscaleexpminusmax.h
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>

#ifdef __cplusplus
extern "C" {
#endif


#define DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
      size_t n, \
      const float* input, \
      float* output, \
      float scale, \
      float max);

DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x8)
DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x16)
DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x24)
DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x32)
DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x40)
DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x48)
DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x56)
DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x64)
DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x72)
DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80)
DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x88)
DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x96)
DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x16)
DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x32)
DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x48)
DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x64)
DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x80)
DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x96)
DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x112)
DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x128)
DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x144)
DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x160)
DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x176)
DECLARE_F32_VSCALEEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x192)

#ifdef __cplusplus
} /* extern "C" */
#endif
3,156
57.462963
105
h
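The name and signature strongly suggest output[i] = scale * exp(input[i] - max), the scaled, max-subtracted exponential used in softmax. A hedged scalar reference (the _p5_ suffix indicates the real kernels use a degree-5 polynomial approximation of exp; whether n counts bytes or elements is not stated here, so this sketch treats it as an element count):

#include <math.h>
#include <stddef.h>

// Hedged scalar reference for the vectorized kernels declared above.
static void vscaleexpminusmax_ref(size_t n, const float* input, float* output,
                                  float scale, float max) {
  for (size_t i = 0; i < n; i++) {
    output[i] = scale * expf(input[i] - max);
  }
}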
XNNPACK
XNNPACK-master/src/xnnpack/vscaleextexp.h
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>

#ifdef __cplusplus
extern "C" {
#endif


#define DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
      size_t n, \
      const float* input, \
      float* output, \
      float scale_mantissa, \
      float scale_exponent);

DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx2_p5_x8)
DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx2_p5_x16)
DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx2_p5_x24)
DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx2_p5_x32)
DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx2_p5_x40)
DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx2_p5_x48)
DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx2_p5_x56)
DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx2_p5_x64)
DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx2_p5_x72)
DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx2_p5_x80)
DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx2_p5_x88)
DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx2_p5_x96)
DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x16)
DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x32)
DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x48)
DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x64)
DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x80)
DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x96)
DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x112)
DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x128)
DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x144)
DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x160)
DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x176)
DECLARE_F32_VSCALEEXTEXP_UKERNEL_FUNCTION(xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x192)

#ifdef __cplusplus
} /* extern "C" */
#endif
2,897
52.666667
95
h
XNNPACK
XNNPACK-master/src/xnnpack/vsquareabs.h
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>

#ifdef __cplusplus
extern "C" {
#endif


#define DECLARE_CS16_VSQUAREABS_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
      size_t batch_size, \
      const int16_t* input, \
      uint32_t* output);

DECLARE_CS16_VSQUAREABS_UKERNEL_FUNCTION(xnn_cs16_vsquareabs_ukernel__scalar_x1)
DECLARE_CS16_VSQUAREABS_UKERNEL_FUNCTION(xnn_cs16_vsquareabs_ukernel__scalar_x2)
DECLARE_CS16_VSQUAREABS_UKERNEL_FUNCTION(xnn_cs16_vsquareabs_ukernel__scalar_x3)
DECLARE_CS16_VSQUAREABS_UKERNEL_FUNCTION(xnn_cs16_vsquareabs_ukernel__scalar_x4)
DECLARE_CS16_VSQUAREABS_UKERNEL_FUNCTION(xnn_cs16_vsquareabs_ukernel__neon_mlal_ld128_x4)
DECLARE_CS16_VSQUAREABS_UKERNEL_FUNCTION(xnn_cs16_vsquareabs_ukernel__neon_mlal_ld128_x8)
DECLARE_CS16_VSQUAREABS_UKERNEL_FUNCTION(xnn_cs16_vsquareabs_ukernel__neon_mlal_ld128_x12)
DECLARE_CS16_VSQUAREABS_UKERNEL_FUNCTION(xnn_cs16_vsquareabs_ukernel__neon_mlal_ld128_x16)
DECLARE_CS16_VSQUAREABS_UKERNEL_FUNCTION(xnn_cs16_vsquareabs_ukernel__hexagon_x2)
DECLARE_CS16_VSQUAREABS_UKERNEL_FUNCTION(xnn_cs16_vsquareabs_ukernel__hexagon_x4)
DECLARE_CS16_VSQUAREABS_UKERNEL_FUNCTION(xnn_cs16_vsquareabs_ukernel__hexagon_x6)
DECLARE_CS16_VSQUAREABS_UKERNEL_FUNCTION(xnn_cs16_vsquareabs_ukernel__hexagon_x8)
DECLARE_CS16_VSQUAREABS_UKERNEL_FUNCTION(xnn_cs16_vsquareabs_ukernel__hexagon_x10)
DECLARE_CS16_VSQUAREABS_UKERNEL_FUNCTION(xnn_cs16_vsquareabs_ukernel__hexagon_x12)

#ifdef __cplusplus
}  // extern "C"
#endif
1,781
38.6
90
h
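The cs16 prefix suggests interleaved complex int16 input (re, im pairs) with the squared magnitude re*re + im*im written per element; that layout is an inference from the naming, not spelled out by the declarations. A hedged scalar reference under that assumption:

#include <stddef.h>
#include <stdint.h>

// Hedged sketch: |z|^2 for interleaved complex int16. The two squares are
// summed in uint32 because (-32768)^2 + (-32768)^2 = 2^31 overflows int32.
static void cs16_vsquareabs_ref(size_t batch_size, const int16_t* input,
                                uint32_t* output) {
  for (size_t i = 0; i < batch_size; i++) {
    const int32_t re = input[2 * i];
    const int32_t im = input[2 * i + 1];
    output[i] = (uint32_t) (re * re) + (uint32_t) (im * im);
  }
}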
XNNPACK
XNNPACK-master/src/xnnpack/wasmsimd-gemm-igemm-loadsplat-commons.h
#include <algorithm>
#include <cmath>    // for log2(), used by InnerLoop below
#include <limits>   // for std::numeric_limits, used by InitClampLimit below

#include <wasm_simd128.h>

#include <xnnpack/microparams.h>
#include <xnnpack/post-operation.h>
#include <xnnpack/wasm-assembler.h>

namespace xnnpack {
namespace internal {
class PostOps : public WasmAssembler {
 public:
  using WasmAssembler::WasmAssembler;

  void InitPostOps(const jit_gemm_params* jit_gemm_params, Local& params) {
    InitClampLimit(jit_gemm_params->f32_minmax.min, jit_gemm_params->f32_minmax.max);
    InitNonClampPostOps(jit_gemm_params->post_operations, jit_gemm_params->num_post_operations, params);
  }

  void ApplyPostOps(LocalsArray& values) {
    Clamp(values);
    ApplyNonClampPostOps(values);
  }

 private:
  Local MakeV128Load64Splat(const Local& address, uint32_t offset) {
    return MakeLocal(V128Load64Splat(address, offset));
  }

  void InitClampLimit(float min, float max) {
    clamps_consts_.clamp_min = min != -std::numeric_limits<float>::infinity();
    clamps_consts_.clamp_max = max != +std::numeric_limits<float>::infinity();
    if (clamps_consts_.clamp_min) {
      clamps_consts_.vmin = MakeLocal(V128Const(min));
    }
    if (clamps_consts_.clamp_max) {
      clamps_consts_.vmax = MakeLocal(V128Const(max));
    }
  }

  void InitNonClampPostOps(const xnn_post_operation* ops, size_t num_ops, Local& params) {
    ops_ = ops;
    num_ops_ = num_ops;
    for (size_t i = 0; i < num_ops; i++) {
      switch (ops[i].op_type) {
        case xnn_post_operation_type_hardswish:
          hswish_consts_.vsixth = MakeV128Load64Splat(params, /*offset=*/0);
          hswish_consts_.vthree = MakeV128Load64Splat(params, /*offset=*/2 * sizeof(float));
          hswish_consts_.vsix = MakeV128Load64Splat(params, /*offset=*/4 * sizeof(float));
          hswish_consts_.vzero = MakeLocal(F32x4Splat(F32Const(0)));
          break;
        default:
          XNN_UNREACHABLE;
      }
      params = I32Add(params, I32Const(6 * sizeof(float)));
    }
  }

  void Clamp(Local& value) {
    if (clamps_consts_.clamp_max) {
      value = F32x4Pmin(clamps_consts_.vmax, value);
    }
    if (clamps_consts_.clamp_min) {
      value = F32x4Pmax(clamps_consts_.vmin, value);
    }
  }

  void Clamp(LocalsArray& values) {
    for (auto& value : values) Clamp(value);
  }

  void ApplyNonClampPostOps(Local& v) {
    for (size_t i = 0; i < num_ops_; i++) {
      switch (ops_[i].op_type) {
        case xnn_post_operation_type_hardswish:
          Hswish(v);
          break;
        default:
          XNN_UNREACHABLE;
      }
    }
  }

  void ApplyNonClampPostOps(LocalsArray& vs) {
    for (auto& v : vs) ApplyNonClampPostOps(v);
  }

  void Hswish(Local& v) {
    Local vacc = MakeLocal(F32x4Add(v, hswish_consts_.vthree));
    v = F32x4Mul(v, hswish_consts_.vsixth);
    vacc = F32x4Pmax(vacc, hswish_consts_.vzero);
    vacc = F32x4Pmin(vacc, hswish_consts_.vsix);
    v = F32x4Mul(vacc, v);
  }

  struct HswishConsts {
    Local vsixth;
    Local vsix;
    Local vthree;
    Local vzero;
  };

  struct ClampConsts {
    bool clamp_min{};
    bool clamp_max{};
    Local vmin;
    Local vmax;
  };

  const xnn_post_operation* ops_ = nullptr;
  size_t num_ops_{};
  HswishConsts hswish_consts_;
  ClampConsts clamps_consts_;
};

class GemmIGemmLoadsplatCommons : public PostOps {
 public:
  using PostOps::PostOps;

  void InitAccumulators(LocalsArray& vaccs, const Local& w, size_t offset) {
    vaccs[0] = V128Load(w, offset);
    std::for_each(std::next(std::begin(vaccs)), std::end(vaccs), [&](auto& vacc) { vacc = vaccs[0]; });
  }

  void InnerLoop(LocalsArray& as, LocalsArray& vacc0123, LocalsArray& vacc4567, Local& w, Local& kc,
                 size_t max_mr, size_t loop_unroll_iters) {
    Local k = MakeLocal(kc);
    InnerLoopMainPart(as, vacc0123, vacc4567, w, k, max_mr, loop_unroll_iters);

    const size_t max_iters_left = loop_unroll_iters - 1;
    size_t mask = max_iters_left > 0 ? (1 << static_cast<size_t>(log2(max_iters_left))) : 0;
    if (max_iters_left > 0) {
      If([&] { I32NeZ(k); },
         [&] {
           while (mask > 0) {
             If([&] { I32GeU(k, I32Const(mask * sizeof(float))); },
                [&] { InnerLoopBody(as, vacc0123, vacc4567, w, k, max_mr, mask); });
             mask >>= 1;
           }
         });
    }
  }

 private:
  void InnerLoopBody(LocalsArray& as, LocalsArray& vacc0123, LocalsArray& vacc4567, Local& w, Local& k,
                     size_t max_mr, size_t loop_unroll_iters) {
    for (size_t unrolled_iter = 0; unrolled_iter < loop_unroll_iters; unrolled_iter++) {
      const auto vb0123 = MakeLocal(V128Load(w, /*offset=*/(2 * unrolled_iter) * sizeof(v128_t)));
      const auto vb4567 = MakeLocal(V128Load(w, /*offset=*/(2 * unrolled_iter + 1) * sizeof(v128_t)));
      for (size_t i = 0; i < max_mr; i++) {
        const auto va = MakeLocal(V128Load32Splat(as[i]));
        vacc0123[i] = F32x4Add(vacc0123[i], F32x4Mul(va, vb0123));
        vacc4567[i] = F32x4Add(vacc4567[i], F32x4Mul(va, vb4567));
        as[i] = I32Add(as[i], I32Const(sizeof(float)));
      }
    }
    w = I32Add(w, I32Const(8 * loop_unroll_iters * sizeof(float)));
    k = I32Sub(k, I32Const(loop_unroll_iters * sizeof(float)));
  }

  void InnerLoopMainPart(LocalsArray& as, LocalsArray& vacc0123, LocalsArray& vacc4567, Local& w, Local& k,
                         size_t max_mr, size_t loop_unroll_iters) {
    const auto body = [&] { InnerLoopBody(as, vacc0123, vacc4567, w, k, max_mr, loop_unroll_iters); };
    if (loop_unroll_iters == 1) {
      DoWhile(body, [&] { I32NeZ(k); });
    } else {
      While([&] { I32GeU(k, I32Const(loop_unroll_iters * sizeof(float))); }, body);
    }
  }
};

}  // namespace internal
}  // namespace xnnpack
5,676
31.255682
118
h
XNNPACK
XNNPACK-master/src/xnnpack/window.h
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>

#ifdef __cplusplus
extern "C" {
#endif


#define DECLARE_S16_WINDOW_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
      size_t rows, \
      size_t batch_size, \
      const int16_t* input, \
      const int16_t* weights, \
      int16_t* output, \
      uint32_t shift);

DECLARE_S16_WINDOW_UKERNEL_FUNCTION(xnn_s16_window_ukernel__neon_x8)
DECLARE_S16_WINDOW_UKERNEL_FUNCTION(xnn_s16_window_ukernel__neon_x16)
DECLARE_S16_WINDOW_UKERNEL_FUNCTION(xnn_s16_window_ukernel__neon_x24)
DECLARE_S16_WINDOW_UKERNEL_FUNCTION(xnn_s16_window_ukernel__neon_x32)
DECLARE_S16_WINDOW_UKERNEL_FUNCTION(xnn_s16_window_shift12_ukernel__neon_x8)
DECLARE_S16_WINDOW_UKERNEL_FUNCTION(xnn_s16_window_shift12_ukernel__neon_x16)
DECLARE_S16_WINDOW_UKERNEL_FUNCTION(xnn_s16_window_shift12_ukernel__neon_x24)
DECLARE_S16_WINDOW_UKERNEL_FUNCTION(xnn_s16_window_shift12_ukernel__neon_x32)
DECLARE_S16_WINDOW_UKERNEL_FUNCTION(xnn_s16_window_shift15_ukernel__neon_x8)
DECLARE_S16_WINDOW_UKERNEL_FUNCTION(xnn_s16_window_shift15_ukernel__neon_x16)
DECLARE_S16_WINDOW_UKERNEL_FUNCTION(xnn_s16_window_shift15_ukernel__neon_x24)
DECLARE_S16_WINDOW_UKERNEL_FUNCTION(xnn_s16_window_shift15_ukernel__neon_x32)
DECLARE_S16_WINDOW_UKERNEL_FUNCTION(xnn_s16_window_ukernel__scalar_x1)
DECLARE_S16_WINDOW_UKERNEL_FUNCTION(xnn_s16_window_ukernel__scalar_x2)
DECLARE_S16_WINDOW_UKERNEL_FUNCTION(xnn_s16_window_ukernel__scalar_x3)
DECLARE_S16_WINDOW_UKERNEL_FUNCTION(xnn_s16_window_ukernel__scalar_x4)

#ifdef __cplusplus
}  // extern "C"
#endif
1,930
36.862745
77
h
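A window function of this shape typically multiplies each frame of batch_size samples elementwise by the shared weights and rescales by a right shift (the shift12/shift15 variants bake in fixed shift amounts). A hedged scalar sketch; the saturation-to-int16 behavior is an assumption here, not something the declarations state:

#include <stddef.h>
#include <stdint.h>

// Hedged scalar sketch of the windowing operation suggested by the signature.
static void s16_window_ref(size_t rows, size_t batch_size,
                           const int16_t* input, const int16_t* weights,
                           int16_t* output, uint32_t shift) {
  for (size_t r = 0; r < rows; r++) {
    for (size_t i = 0; i < batch_size; i++) {
      const int32_t acc = (int32_t) input[r * batch_size + i] * (int32_t) weights[i];
      int32_t out = acc >> shift;
      if (out > INT16_MAX) out = INT16_MAX;  // assumed saturation
      if (out < INT16_MIN) out = INT16_MIN;
      output[r * batch_size + i] = (int16_t) out;
    }
  }
}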
XNNPACK
XNNPACK-master/src/xnnpack/zerob.h
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stdint.h>
#include <stddef.h>

#include <xnnpack/common.h>
#include <xnnpack/microparams.h>

#ifdef __cplusplus
extern "C" {
#endif

#define DECLARE_X32_ZEROB_GEMM_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
      size_t groups, \
      size_t channels, \
      uint32_t* packed_weights, \
      size_t channel_tile_stride, \
      size_t channel_subtile_stride, \
      const union xnn_x32_packb_params* params); \

DECLARE_X32_ZEROB_GEMM_UKERNEL_FUNCTION(xnn_x32_zerob_gemm_ukernel_2c1s1r__scalar_float)
DECLARE_X32_ZEROB_GEMM_UKERNEL_FUNCTION(xnn_x32_zerob_gemm_ukernel_2c1s1r__scalar_int)
DECLARE_X32_ZEROB_GEMM_UKERNEL_FUNCTION(xnn_x32_zerob_gemm_ukernel_2c2s1r__scalar_float)
DECLARE_X32_ZEROB_GEMM_UKERNEL_FUNCTION(xnn_x32_zerob_gemm_ukernel_2c2s1r__scalar_int)
DECLARE_X32_ZEROB_GEMM_UKERNEL_FUNCTION(xnn_x32_zerob_gemm_ukernel_4c1s1r__scalar_float)
DECLARE_X32_ZEROB_GEMM_UKERNEL_FUNCTION(xnn_x32_zerob_gemm_ukernel_4c1s1r__scalar_int)
DECLARE_X32_ZEROB_GEMM_UKERNEL_FUNCTION(xnn_x32_zerob_gemm_ukernel_4c4s1r__scalar_float)
DECLARE_X32_ZEROB_GEMM_UKERNEL_FUNCTION(xnn_x32_zerob_gemm_ukernel_4c4s1r__scalar_int)

#ifdef __cplusplus
}  // extern "C"
#endif
1,543
38.589744
88
h
XNNPACK
XNNPACK-master/src/xnnpack/zip.h
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/common.h>

#ifdef __cplusplus
extern "C" {
#endif


#define DECLARE_X8_ZIPC_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
      size_t n, \
      const uint8_t* x, \
      uint8_t* y);

DECLARE_X8_ZIPC_UKERNEL_FUNCTION(xnn_x8_zip_x2_ukernel__neon)
DECLARE_X8_ZIPC_UKERNEL_FUNCTION(xnn_x8_zip_x2_ukernel__scalar)
DECLARE_X8_ZIPC_UKERNEL_FUNCTION(xnn_x8_zip_x2_ukernel__sse2)
DECLARE_X8_ZIPC_UKERNEL_FUNCTION(xnn_x8_zip_x3_ukernel__neon)
DECLARE_X8_ZIPC_UKERNEL_FUNCTION(xnn_x8_zip_x3_ukernel__scalar)
DECLARE_X8_ZIPC_UKERNEL_FUNCTION(xnn_x8_zip_x3_ukernel__sse2)
DECLARE_X8_ZIPC_UKERNEL_FUNCTION(xnn_x8_zip_x4_ukernel__neon)
DECLARE_X8_ZIPC_UKERNEL_FUNCTION(xnn_x8_zip_x4_ukernel__scalar)
DECLARE_X8_ZIPC_UKERNEL_FUNCTION(xnn_x8_zip_x4_ukernel__sse2)

#define DECLARE_X32_ZIPC_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
      size_t n, \
      const uint32_t* x, \
      uint32_t* y);

DECLARE_X32_ZIPC_UKERNEL_FUNCTION(xnn_x32_zip_x2_ukernel__neon)
DECLARE_X32_ZIPC_UKERNEL_FUNCTION(xnn_x32_zip_x2_ukernel__scalar)
DECLARE_X32_ZIPC_UKERNEL_FUNCTION(xnn_x32_zip_x2_ukernel__sse2)
DECLARE_X32_ZIPC_UKERNEL_FUNCTION(xnn_x32_zip_x2_ukernel__wasmsimd)
DECLARE_X32_ZIPC_UKERNEL_FUNCTION(xnn_x32_zip_x3_ukernel__neon)
DECLARE_X32_ZIPC_UKERNEL_FUNCTION(xnn_x32_zip_x3_ukernel__scalar)
DECLARE_X32_ZIPC_UKERNEL_FUNCTION(xnn_x32_zip_x3_ukernel__sse2)
DECLARE_X32_ZIPC_UKERNEL_FUNCTION(xnn_x32_zip_x3_ukernel__wasmsimd)
DECLARE_X32_ZIPC_UKERNEL_FUNCTION(xnn_x32_zip_x4_ukernel__neon)
DECLARE_X32_ZIPC_UKERNEL_FUNCTION(xnn_x32_zip_x4_ukernel__scalar)
DECLARE_X32_ZIPC_UKERNEL_FUNCTION(xnn_x32_zip_x4_ukernel__sse2)
DECLARE_X32_ZIPC_UKERNEL_FUNCTION(xnn_x32_zip_x4_ukernel__wasmsimd)

#define DECLARE_X8_ZIPV_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
      size_t n, \
      size_t m, \
      const uint8_t* x, \
      uint8_t* y);

DECLARE_X8_ZIPV_UKERNEL_FUNCTION(xnn_x8_zip_xm_ukernel__neon)
DECLARE_X8_ZIPV_UKERNEL_FUNCTION(xnn_x8_zip_xm_ukernel__scalar)
DECLARE_X8_ZIPV_UKERNEL_FUNCTION(xnn_x8_zip_xm_ukernel__sse2)

#define DECLARE_X32_ZIPV_UKERNEL_FUNCTION(fn_name) \
  XNN_INTERNAL void fn_name( \
      size_t n, \
      size_t m, \
      const uint32_t* x, \
      uint32_t* y);

DECLARE_X32_ZIPV_UKERNEL_FUNCTION(xnn_x32_zip_xm_ukernel__neon)
DECLARE_X32_ZIPV_UKERNEL_FUNCTION(xnn_x32_zip_xm_ukernel__scalar)
DECLARE_X32_ZIPV_UKERNEL_FUNCTION(xnn_x32_zip_xm_ukernel__sse2)
DECLARE_X32_ZIPV_UKERNEL_FUNCTION(xnn_x32_zip_xm_ukernel__wasmsimd)

#ifdef __cplusplus
}  // extern "C"
#endif
3,247
36.767442
72
h
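The x2/x3/x4 zip kernels presumably interleave that many planes stored back to back in x into y; the exact layout contract is an inference from the naming. A hedged scalar sketch of the x2 case:

#include <stddef.h>
#include <stdint.h>

// Hedged sketch: two n-byte planes stored contiguously in x are interleaved
// into y, i.e. y = x0[0], x1[0], x0[1], x1[1], ...
static void x8_zip_x2_ref(size_t n, const uint8_t* x, uint8_t* y) {
  const uint8_t* x0 = x;
  const uint8_t* x1 = x + n;
  for (size_t i = 0; i < n; i++) {
    y[2 * i] = x0[i];
    y[2 * i + 1] = x1[i];
  }
}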
XNNPACK
XNNPACK-master/src/xx-fill/xx-fill-neon-x64.c
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/fill.h>


void xnn_xx_fill_ukernel__neon_x64(
    size_t rows,
    size_t channels,
    void* output,
    size_t output_stride,
    const uint32_t fill_pattern)
{
  assert(rows != 0);
  assert(channels != 0);

  const size_t output_increment = output_stride - channels;
  const uint8x16_t vfill_pattern = vreinterpretq_u8_u32(vdupq_n_u32(fill_pattern));
  do {
    size_t c = channels;
    for (; c >= 64 * sizeof(uint8_t); c -= 64 * sizeof(uint8_t)) {
      vst1q_u8(output, vfill_pattern);
      output = ((uint8_t*) output + 16);
      vst1q_u8(output, vfill_pattern);
      output = ((uint8_t*) output + 16);
      vst1q_u8(output, vfill_pattern);
      output = ((uint8_t*) output + 16);
      vst1q_u8(output, vfill_pattern);
      output = ((uint8_t*) output + 16);
    }
    for (; c >= 16 * sizeof(uint8_t); c -= 16 * sizeof(uint8_t)) {
      vst1q_u8(output, vfill_pattern);
      output = ((uint8_t*) output + 16);
    }
    if XNN_UNLIKELY(c != 0) {
      if XNN_LIKELY(c & (8 * sizeof(uint8_t))) {
        vst1_u8(output, vget_low_u8(vfill_pattern));
        output = ((uint8_t*) output + 8);
      }
      if XNN_LIKELY(c & (4 * sizeof(uint8_t))) {
        vst1q_lane_u32(output, vreinterpretq_u32_u8(vfill_pattern), 0);
        output = ((uint8_t*) output + 4);
      }
      uint8x8_t vfill_subpattern = vget_low_u8(vfill_pattern);
      if XNN_LIKELY(c & (2 * sizeof(uint8_t))) {
        vst1_lane_u16(output, vreinterpret_u16_u8(vfill_subpattern), 0);
        output = ((uint8_t*) output + 2);
        vfill_subpattern = vext_u8(vfill_subpattern, vfill_subpattern, 2);
      }
      if XNN_LIKELY(c & (1 * sizeof(uint8_t))) {
        vst1_lane_u8(output, vfill_subpattern, 0);
        output = ((uint8_t*) output + 1);
      }
    }
    output = (void*) ((uintptr_t) output + output_increment);
  } while (--rows != 0);
}
2006
34.839286
106
c
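A minimal usage sketch for the NEON fill kernel above: it writes `channels` bytes per row, repeating the little-endian 4-byte `fill_pattern` (restarting at each row), and skips `output_stride - channels` bytes between rows. Both `rows` and `channels` must be nonzero, per the kernel's asserts.

#include <stddef.h>
#include <stdint.h>

void xnn_xx_fill_ukernel__neon_x64(size_t rows, size_t channels, void* output,
                                   size_t output_stride, uint32_t fill_pattern);

/* Fill `rows` rows of `row_bytes` bytes each, rows spaced `row_stride` bytes
   apart (row_stride >= row_bytes), with the repeating 4-byte pattern. */
void fill_image(uint8_t* pixels, size_t rows, size_t row_bytes,
                size_t row_stride, uint32_t pattern) {
  xnn_xx_fill_ukernel__neon_x64(rows, row_bytes, pixels, row_stride, pattern);
}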
XNNPACK
XNNPACK-master/src/xx-fill/xx-fill-scalar-x16.c
// Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/fill.h> #include <xnnpack/unaligned.h> void xnn_xx_fill_ukernel__scalar_x16( size_t rows, size_t channels, void* output, size_t output_stride, const uint32_t fill_pattern) { assert(rows != 0); assert(channels != 0); const size_t output_increment = output_stride - channels; do { uint32_t vfill_pattern = fill_pattern; size_t c = channels; for (; c >= 16 * sizeof(uint8_t); c -= 16 * sizeof(uint8_t)) { unaligned_indexed_store_u32(output, 0, vfill_pattern); unaligned_indexed_store_u32(output, 1, vfill_pattern); unaligned_indexed_store_u32(output, 2, vfill_pattern); unaligned_indexed_store_u32(output, 3, vfill_pattern); output = ((uint8_t*) output + 16); } if XNN_UNLIKELY(c != 0) { if XNN_LIKELY(c & (8 * sizeof(uint8_t))) { unaligned_indexed_store_u32(output, 0, vfill_pattern); unaligned_indexed_store_u32(output, 1, vfill_pattern); output = ((uint8_t*) output + 8); } if XNN_LIKELY(c & (4 * sizeof(uint8_t))) { unaligned_store_u32(output, vfill_pattern); output = ((uint8_t*) output + 4); } if XNN_LIKELY(c & (2 * sizeof(uint8_t))) { unaligned_store_u16(output, (uint16_t) vfill_pattern); vfill_pattern >>= 16; output = ((uint8_t*) output + 2); } if XNN_LIKELY(c & (1 * sizeof(uint8_t))) { *((uint8_t*) output) = (uint8_t) vfill_pattern; output = ((uint8_t*) output + 1); } } output = (void*) ((uintptr_t) output + output_increment); } while (--rows != 0); }
1789
30.403509
72
c
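The scalar kernel above leans on `unaligned_store_u32`/`unaligned_indexed_store_u32` from `<xnnpack/unaligned.h>`, which is not part of this dump. A plausible memcpy-based sketch of such helpers (names prefixed `my_` to mark them as hypothetical):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* memcpy of a known small size compiles to a single unaligned store on
   targets that permit it, without invoking undefined behavior. */
static inline void my_unaligned_store_u32(void* address, uint32_t value) {
  memcpy(address, &value, sizeof(value));
}

static inline void my_unaligned_store_u16(void* address, uint16_t value) {
  memcpy(address, &value, sizeof(value));
}

static inline void my_unaligned_indexed_store_u32(void* address, size_t index,
                                                  uint32_t value) {
  memcpy((uint8_t*) address + index * sizeof(uint32_t), &value, sizeof(value));
}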
XNNPACK
XNNPACK-master/src/xx-fill/xx-fill-sse2-x64.c
// Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <emmintrin.h> #include <xnnpack/fill.h> #include <xnnpack/unaligned.h> void xnn_xx_fill_ukernel__sse2_x64( size_t rows, size_t channels, void* output, size_t output_stride, const uint32_t fill_pattern) { assert(rows != 0); assert(channels != 0); const size_t output_increment = output_stride - channels; const __m128i vfill = _mm_shuffle_epi32(_mm_cvtsi32_si128(fill_pattern), _MM_SHUFFLE(0, 0, 0, 0)); do { size_t c = channels; for (; c >= 64 * sizeof(uint8_t); c -= 64 * sizeof(uint8_t)) { _mm_storeu_si128((__m128i*) output, vfill); _mm_storeu_si128((__m128i*) output + 1, vfill); _mm_storeu_si128((__m128i*) output + 2, vfill); _mm_storeu_si128((__m128i*) output + 3, vfill); output = ((uint8_t*) output + 64); } for (; c >= 16 * sizeof(uint8_t); c -= 16 * sizeof(uint8_t)) { _mm_storeu_si128((__m128i*) output, vfill); output = ((uint8_t*) output + 16); } if XNN_UNLIKELY(c != 0) { if XNN_LIKELY(c & (8 * sizeof(uint8_t))) { _mm_storel_epi64(output, vfill); output = ((uint8_t*) output + 8); } if XNN_LIKELY(c & (4 * sizeof(uint8_t))) { unaligned_store_u32(output, fill_pattern); output = ((uint8_t*) output + 4); } uint32_t vfill_subpattern = fill_pattern; if XNN_LIKELY(c & (2 * sizeof(uint8_t))) { unaligned_store_u16(output, (uint16_t) vfill_subpattern); vfill_subpattern >>= 16; output = ((uint8_t*) output + 2); } if XNN_LIKELY(c & (1 * sizeof(uint8_t))) { *((uint8_t*) output) = (uint8_t) vfill_subpattern; output = ((uint8_t*) output + 1); } } output = (void*) ((uintptr_t) output + output_increment); } while (--rows != 0); }
1972
30.31746
100
c
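The SSE2 kernel above broadcasts `fill_pattern` to all four 32-bit lanes via `_mm_cvtsi32_si128` + `_mm_shuffle_epi32`; with SSE2 intrinsics the same splat can be written more directly:

#include <emmintrin.h>
#include <stdint.h>

/* Equivalent broadcast of a 32-bit pattern into all four lanes. */
static inline __m128i splat_u32(uint32_t pattern) {
  return _mm_set1_epi32((int) pattern);
}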
XNNPACK
XNNPACK-master/src/xx-fill/xx-fill-wasmsimd-x64.c
// Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/fill.h> #include <xnnpack/unaligned.h> void xnn_xx_fill_ukernel__wasmsimd_x64( size_t rows, size_t channels, void* output, size_t output_stride, const uint32_t fill_pattern) { assert(rows != 0); assert(channels != 0); const size_t output_increment = output_stride - channels; const v128_t vfill_pattern = wasm_i32x4_splat(fill_pattern); do { size_t c = channels; for (; c >= 64 * sizeof(uint8_t); c -= 64 * sizeof(uint8_t)) { wasm_v128_store(output, vfill_pattern); wasm_v128_store((uint8_t*) output + 16, vfill_pattern); wasm_v128_store((uint8_t*) output + 32, vfill_pattern); wasm_v128_store((uint8_t*) output + 48, vfill_pattern); output = ((uint8_t*) output + 64); } for (; c >= 16 * sizeof(uint8_t); c -= 16 * sizeof(uint8_t)) { wasm_v128_store(output, vfill_pattern); output = ((uint8_t*) output + 16); } if XNN_UNLIKELY(c != 0) { if XNN_LIKELY(c & (8 * sizeof(uint8_t))) { wasm_v128_store64_lane(output, vfill_pattern, 0); output = ((uint8_t*) output + 8); } if XNN_LIKELY(c & (4 * sizeof(uint8_t))) { unaligned_store_u32(output, fill_pattern); output = ((uint8_t*) output + 4); } uint32_t vfill_subpattern = fill_pattern; if XNN_LIKELY(c & (2 * sizeof(uint8_t))) { unaligned_store_u16(output, (uint16_t) vfill_subpattern); vfill_subpattern >>= 16; output = ((uint8_t*) output + 2); } if XNN_LIKELY(c & (1 * sizeof(uint8_t))) { *((uint8_t*) output) = (uint8_t) vfill_subpattern; output = ((uint8_t*) output + 1); } } output = (void*) ((uintptr_t) output + output_increment); } while (--rows != 0); }
1974
30.349206
72
c
XNNPACK
XNNPACK-master/src/xx-pad/xx-pad-neon.c
// Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/pad.h> void xnn_xx_pad_ukernel__neon( size_t rows, size_t channels, size_t pre_padding, size_t post_padding, const void* input, size_t input_stride, void* output, size_t output_stride, uint32_t fill_pattern) XNN_OOB_READS { const size_t input_increment = input_stride - channels; const size_t output_increment = output_stride - (pre_padding + channels + post_padding); const uint8x16_t vfill_pattern = vreinterpretq_u8_u32(vdupq_n_u32(fill_pattern)); do { // Pre-pad input channels. size_t l = pre_padding; if XNN_LIKELY(l != 0) { for (; l >= 16 * sizeof(uint8_t); l -= 16 * sizeof(uint8_t)) { vst1q_u8(output, vfill_pattern); output = (uint8_t*) output + 16; } if (l & (8 * sizeof(uint8_t))) { vst1_u8(output, vget_low_u8(vfill_pattern)); output = (uint8_t*) output + 8; } if (l & (4 * sizeof(uint8_t))) { vst1q_lane_u32(output, vreinterpretq_u32_u8(vfill_pattern), 0); output = (uint8_t*) output + 4; } uint8x8_t vfill_subpattern = vget_low_u8(vfill_pattern); if (l & (2 * sizeof(uint8_t))) { vst1_lane_u16(output, vreinterpret_u16_u8(vfill_subpattern), 0); output = (uint8_t*) output + 2; vfill_subpattern = vext_u8(vfill_subpattern, vfill_subpattern, 2); } if (l & (1 * sizeof(uint8_t))) { vst1_lane_u8(output, vfill_subpattern, 0); output = (uint8_t*) output + 1; } } // Copy input channels. size_t c = channels; for (; c >= 16 * sizeof(uint8_t); c -= 16 * sizeof(uint8_t)) { const uint8x16_t vdata = vld1q_u8(input); input = (const uint8_t*) input + 16; vst1q_u8(output, vdata); output = (uint8_t*) output + 16; } if XNN_UNLIKELY(c != 0) { uint8x16_t vdata = vld1q_u8(input); input = (const void*) ((uintptr_t) input + c); uint8x8_t vsubdata = vget_low_u8(vdata); if (c & (8 * sizeof(uint8_t))) { vst1_u8(output, vsubdata); output = (uint8_t*) output + 8; vsubdata = vget_high_u8(vdata); } if (c & (4 * sizeof(uint8_t))) { vst1_lane_u32(output, vreinterpret_u32_u8(vsubdata), 0); output = (uint8_t*) output + 4; vsubdata = vext_u8(vsubdata, vsubdata, 4); } if (c & (2 * sizeof(uint8_t))) { vst1_lane_u16(output, vreinterpret_u16_u8(vsubdata), 0); output = (uint8_t*) output + 2; vsubdata = vext_u8(vsubdata, vsubdata, 2); } if (c & (1 * sizeof(uint8_t))) { vst1_lane_u8(output, vsubdata, 0); output = (uint8_t*) output + 1; } } // Post-pad input channels. size_t r = post_padding; if XNN_LIKELY(r != 0) { for (; r >= 16 * sizeof(uint8_t); r -= 16 * sizeof(uint8_t)) { vst1q_u8(output, vfill_pattern); output = (uint8_t*) output + 16; } if (r & (8 * sizeof(uint8_t))) { vst1_u8(output, vget_low_u8(vfill_pattern)); output = (uint8_t*) output + 8; } if (r & (4 * sizeof(uint8_t))) { vst1q_lane_u32(output, vreinterpretq_u32_u8(vfill_pattern), 0); output = (uint8_t*) output + 4; } uint8x8_t vfill_subpattern = vget_low_u8(vfill_pattern); if (r & (2 * sizeof(uint8_t))) { vst1_lane_u16(output, vreinterpret_u16_u8(vfill_subpattern), 0); output = (uint8_t*) output + 2; vfill_subpattern = vext_u8(vfill_subpattern, vfill_subpattern, 2); } if (r & (1 * sizeof(uint8_t))) { vst1_lane_u8(output, vfill_subpattern, 0); output = (uint8_t*) output + 1; } } input = (const uint32_t*) ((uintptr_t) input + input_increment); output = (uint32_t*) ((uintptr_t) output + output_increment); } while (--rows != 0); }
3905
36.557692
104
c
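A minimal usage sketch for the NEON pad kernel above: each output row is `pre_padding` fill bytes, then `channels` bytes copied from the input row, then `post_padding` fill bytes. Note the `XNN_OOB_READS` annotation: the copy path may read up to 15 bytes past the input row, so the input buffer needs slack (the testers allocate `XNN_EXTRA_BYTES` for this).

#include <stddef.h>
#include <stdint.h>

void xnn_xx_pad_ukernel__neon(size_t rows, size_t channels, size_t pre_padding,
                              size_t post_padding, const void* input,
                              size_t input_stride, void* output,
                              size_t output_stride, uint32_t fill_pattern);

/* Pad each row of a tightly packed byte image with `left` fill bytes before
   and `right` fill bytes after the payload. */
void pad_rows(const uint8_t* src, uint8_t* dst, size_t rows, size_t row_bytes,
              size_t left, size_t right, uint32_t fill) {
  xnn_xx_pad_ukernel__neon(rows, row_bytes, left, right,
                           src, /*input_stride=*/row_bytes,
                           dst, /*output_stride=*/left + row_bytes + right,
                           fill);
}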
XNNPACK
XNNPACK-master/src/xx-pad/xx-pad-scalar.c
// Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/pad.h> #include <xnnpack/unaligned.h> void xnn_xx_pad_ukernel__scalar( size_t rows, size_t channels, size_t pre_padding, size_t post_padding, const void* input, size_t input_stride, void* output, size_t output_stride, const uint32_t fill_pattern) XNN_OOB_READS { const size_t input_increment = input_stride - channels; const size_t output_increment = output_stride - (pre_padding + channels + post_padding); do { // Pre-pad input channels. size_t l = pre_padding; if XNN_LIKELY(l != 0) { uint32_t vfill_pattern = fill_pattern; for (; l >= 4 * sizeof(uint8_t); l -= 4 * sizeof(uint8_t)) { unaligned_store_u32(output, vfill_pattern); output = (uint8_t*) output + 4; } if XNN_LIKELY(l & (2 * sizeof(uint8_t))) { unaligned_store_u16(output, (uint16_t) vfill_pattern); vfill_pattern >>= 16; output = (uint8_t*) output + 2; } if XNN_LIKELY(l & (1 * sizeof(uint8_t))) { *((uint8_t*) output) = (uint8_t) vfill_pattern; output = (uint8_t*) output + 1; } } // Copy input channels. size_t c = channels; for (; c >= 16 * sizeof(uint8_t); c -= 16 * sizeof(uint8_t)) { const uint32_t vdata0 = unaligned_indexed_load_u32(input, 0); const uint32_t vdata1 = unaligned_indexed_load_u32(input, 1); const uint32_t vdata2 = unaligned_indexed_load_u32(input, 2); const uint32_t vdata3 = unaligned_indexed_load_u32(input, 3); input = (const uint8_t*) input + 16; unaligned_indexed_store_u32(output, 0, vdata0); unaligned_indexed_store_u32(output, 1, vdata1); unaligned_indexed_store_u32(output, 2, vdata2); unaligned_indexed_store_u32(output, 3, vdata3); output = (uint8_t*) output + 16; } if XNN_UNLIKELY(c != 0) { for (; c >= 4 * sizeof(uint8_t); c -= 4 * sizeof(uint8_t)) { unaligned_store_u32(output, unaligned_load_u32(input)); input = (const uint8_t*) input + 4; output = (uint8_t*) output + 4; } if XNN_UNLIKELY(c != 0) { uint32_t vdata = unaligned_load_u32(input); input = (const void*) ((uintptr_t) input + c); if XNN_LIKELY(c & (2 * sizeof(uint8_t))) { unaligned_store_u16(output, vdata); vdata >>= 16; output = (uint8_t*) output + 2; } if XNN_LIKELY(c & (1 * sizeof(uint8_t))) { *((uint8_t*) output) = (uint8_t) vdata; output = (uint8_t*) output + 1; } } } // Post-pad input channels. size_t r = post_padding; if XNN_LIKELY(r != 0) { uint32_t vfill_pattern = fill_pattern; for (; r >= 4 * sizeof(uint8_t); r -= 4 * sizeof(uint8_t)) { unaligned_store_u32(output, vfill_pattern); output = (uint8_t*) output + 4; } if XNN_LIKELY(r & (2 * sizeof(uint8_t))) { unaligned_store_u16(output, vfill_pattern); vfill_pattern >>= 16; output = (uint8_t*) output + 2; } if XNN_LIKELY(r & (1 * sizeof(uint8_t))) { *((uint8_t*) output) = (uint8_t) vfill_pattern; output = (uint8_t*) output + 1; } } input = (const uint32_t*) ((uintptr_t) input + input_increment); output = (uint32_t*) ((uintptr_t) output + output_increment); } while (--rows != 0); }
3544
32.443396
90
c
XNNPACK
XNNPACK-master/src/xx-pad/xx-pad-sse2.c
// Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <emmintrin.h> #include <xnnpack/pad.h> #include <xnnpack/unaligned.h> void xnn_xx_pad_ukernel__sse2( size_t rows, size_t channels, size_t pre_padding, size_t post_padding, const void* input, size_t input_stride, void* output, size_t output_stride, const uint32_t fill_pattern) XNN_OOB_READS { const size_t input_increment = input_stride - channels; const size_t output_increment = output_stride - (pre_padding + channels + post_padding); const __m128i vfill_pattern = _mm_shuffle_epi32(_mm_cvtsi32_si128((int) fill_pattern), _MM_SHUFFLE(0, 0, 0, 0)); do { // Pre-pad input channels. size_t l = pre_padding; if XNN_LIKELY(l != 0) { for (; l >= 16 * sizeof(uint8_t); l -= 16 * sizeof(uint8_t)) { _mm_storeu_si128((__m128i*) output, vfill_pattern); output = (uint8_t*) output + 16; } if (l & (8 * sizeof(uint8_t))) { _mm_storel_epi64((__m128i*) output, vfill_pattern); output = (uint8_t*) output + 8; } uint32_t vfill_subpattern = fill_pattern; if (l & (4 * sizeof(uint8_t))) { unaligned_store_u32(output, vfill_subpattern); output = (uint8_t*) output + 4; } if (l & (2 * sizeof(uint8_t))) { unaligned_store_u16(output, vfill_subpattern); vfill_subpattern >>= 16; output = (uint8_t*) output + 2; } if (l & (1 * sizeof(uint8_t))) { *((uint8_t*) output) = (uint8_t) vfill_subpattern; output = (uint8_t*) output + 1; } } // Copy input channels. size_t c = channels; for (; c >= 16 * sizeof(uint8_t); c -= 16 * sizeof(uint8_t)) { const __m128i vdata = _mm_loadu_si128((const __m128i*) input); input = (const uint8_t*) input + 16; _mm_storeu_si128((__m128i*) output, vdata); output = (uint8_t*) output + 16; } if XNN_UNLIKELY(c != 0) { __m128i vdata = _mm_loadu_si128((const __m128i*) input); input = (const void*) ((uintptr_t) input + c); if (c & (8 * sizeof(uint8_t))) { _mm_storel_epi64((__m128i*) output, vdata); vdata = _mm_unpackhi_epi64(vdata, vdata); output = (uint8_t*) output + 8; } if (c & (4 * sizeof(uint8_t))) { unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vdata)); vdata = _mm_srli_epi64(vdata, 32); output = (uint8_t*) output + 4; } uint32_t vsubdata = (uint32_t) _mm_cvtsi128_si32(vdata); if (c & (2 * sizeof(uint8_t))) { unaligned_store_u16(output, (uint16_t) vsubdata); vsubdata >>= 16; output = (uint8_t*) output + 2; } if (c & (1 * sizeof(uint8_t))) { *((uint8_t*) output) = (uint8_t) vsubdata; output = (uint8_t*) output + 1; } } // Post-pad input channels. size_t r = post_padding; if XNN_LIKELY(r != 0) { for (; r >= 16 * sizeof(uint8_t); r -= 16 * sizeof(uint8_t)) { _mm_storeu_si128((__m128i*) output, vfill_pattern); output = (uint8_t*) output + 16; } if (r & (8 * sizeof(uint8_t))) { _mm_storel_epi64((__m128i*) output, vfill_pattern); output = (uint8_t*) output + 8; } uint32_t vfill_subpattern = fill_pattern; if (r & (4 * sizeof(uint8_t))) { unaligned_store_u32(output, vfill_subpattern); output = (uint8_t*) output + 4; } if (r & (2 * sizeof(uint8_t))) { unaligned_store_u16(output, (uint16_t) vfill_subpattern); vfill_subpattern >>= 16; output = (uint8_t*) output + 2; } if (r & (1 * sizeof(uint8_t))) { *((uint8_t*) output) = (uint8_t) vfill_subpattern; output = (uint8_t*) output + 1; } } input = (const void*) ((uintptr_t) input + input_increment); output = (void*) ((uintptr_t) output + output_increment); } while (--rows != 0); }
4071
32.377049
114
c
XNNPACK
XNNPACK-master/src/xx-pad/xx-pad-wasmsimd.c
// Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/pad.h> #include <xnnpack/unaligned.h> void xnn_xx_pad_ukernel__wasmsimd( size_t rows, size_t channels, size_t pre_padding, size_t post_padding, const void* input, size_t input_stride, void* output, size_t output_stride, const uint32_t fill_pattern) XNN_OOB_READS { const size_t input_increment = input_stride - channels; const size_t output_increment = output_stride - (pre_padding + channels + post_padding); const v128_t vfill_pattern = wasm_i32x4_splat((int32_t) fill_pattern); do { // Pre-pad input channels. size_t l = pre_padding; if XNN_LIKELY(l != 0) { for (; l >= 16 * sizeof(uint8_t); l -= 16 * sizeof(uint8_t)) { wasm_v128_store(output, vfill_pattern); output = (uint8_t*) output + 16; } if (l & (8 * sizeof(uint8_t))) { wasm_v128_store64_lane(output, vfill_pattern, 0); output = (uint8_t*) output + 8; } if (l & (4 * sizeof(uint8_t))) { unaligned_store_u32(output, fill_pattern); output = (uint8_t*) output + 4; } uint32_t vfill_subpattern = fill_pattern; if (l & (2 * sizeof(uint8_t))) { unaligned_store_u16(output, (uint16_t) vfill_subpattern); vfill_subpattern >>= 16; output = (uint8_t*) output + 2; } if (l & (1 * sizeof(uint8_t))) { *((uint8_t*) output) = (uint8_t) vfill_subpattern; output = (uint8_t*) output + 1; } } // Copy input channels. size_t c = channels; for (; c >= 16 * sizeof(uint8_t); c -= 16 * sizeof(uint8_t)) { const v128_t vdata = wasm_v128_load(input); input = (const uint8_t*) input + 16; wasm_v128_store(output, vdata); output = (uint8_t*) output + 16; } if XNN_UNLIKELY(c != 0) { v128_t vdata = wasm_v128_load(input); input = (const void*) ((uintptr_t) input + c); if (c & (8 * sizeof(uint8_t))) { wasm_v128_store64_lane(output, vdata, 0); vdata = wasm_v64x2_shuffle(vdata, vdata, 1, 1); output = (uint8_t*) output + 8; } if (c & (4 * sizeof(uint8_t))) { wasm_v128_store32_lane(output, vdata, 0); vdata = wasm_u64x2_shr(vdata, 32); output = (uint8_t*) output + 4; } if (c & (2 * sizeof(uint8_t))) { wasm_v128_store16_lane(output, vdata, 0); vdata = wasm_u32x4_shr(vdata, 16); output = (uint8_t*) output + 2; } if (c & (1 * sizeof(uint8_t))) { wasm_v128_store8_lane(output, vdata, 0); output = (uint8_t*) output + 1; } } // Post-pad input channels. size_t r = post_padding; if XNN_LIKELY(r != 0) { for (; r >= 16 * sizeof(uint8_t); r -= 16 * sizeof(uint8_t)) { wasm_v128_store(output, vfill_pattern); output = (uint8_t*) output + 16; } if (r & (8 * sizeof(uint8_t))) { wasm_v128_store64_lane(output, vfill_pattern, 0); output = (uint8_t*) output + 8; } if (r & (4 * sizeof(uint8_t))) { unaligned_store_u32(output, fill_pattern); output = (uint8_t*) output + 4; } uint32_t vfill_subpattern = fill_pattern; if (r & (2 * sizeof(uint8_t))) { unaligned_store_u16(output, (uint16_t) vfill_subpattern); vfill_subpattern >>= 16; output = (uint8_t*) output + 2; } if (r & (1 * sizeof(uint8_t))) { *((uint8_t*) output) = (uint8_t) vfill_subpattern; output = (uint8_t*) output + 1; } } input = (const void*) ((uintptr_t) input + input_increment); output = (void*) ((uintptr_t) output + output_increment); } while (--rows != 0); }
3886
31.123967
90
c
XNNPACK
XNNPACK-master/src/xx-transposev/xx-transposev-1x1-scalar-memcpy.c
// Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <string.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/transpose.h> void xnn_xx_transposev_ukernel__1x1_scalar_memcpy( const void* input, void* output, size_t input_row_stride, size_t output_row_stride, size_t input_element_stride, size_t output_element_stride, size_t element_size, size_t block_width, size_t block_height) { const size_t input_reset = input_element_stride - block_height * input_row_stride; const size_t output_reset = output_row_stride - block_height * output_element_stride; const void* i = (const void*) input; void* o = (void*) output; do { size_t bh = block_height; for (; bh >= 1; bh -= 1) { memcpy(o, i, element_size); i = (const void*) ((uintptr_t) i + input_row_stride); o = (void*) ((uintptr_t) o + output_element_stride); } i = (const void*) ((uintptr_t) i + input_reset); o = (void*) ((uintptr_t) o + output_reset); block_width -= 1; } while (block_width != 0); }
1211
27.186047
87
c
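A usage sketch for the generic memcpy transpose above. The stride roles follow the kernel's loop structure: the inner (block_height) loop advances by `input_row_stride`/`output_element_stride`, the outer (block_width) loop by `input_element_stride`/`output_row_stride`. With element strides equal to the element size and row strides equal to the row pitch, it performs a full row-major matrix transpose:

#include <stddef.h>

void xnn_xx_transposev_ukernel__1x1_scalar_memcpy(
    const void* input, void* output, size_t input_row_stride,
    size_t output_row_stride, size_t input_element_stride,
    size_t output_element_stride, size_t element_size, size_t block_width,
    size_t block_height);

/* Transpose a row-major `height x width` float matrix into a row-major
   `width x height` matrix: out[w * height + h] = in[h * width + w]. */
void transpose_f32(const float* in, float* out, size_t height, size_t width) {
  xnn_xx_transposev_ukernel__1x1_scalar_memcpy(
      in, out,
      /*input_row_stride=*/width * sizeof(float),
      /*output_row_stride=*/height * sizeof(float),
      /*input_element_stride=*/sizeof(float),
      /*output_element_stride=*/sizeof(float),
      /*element_size=*/sizeof(float),
      /*block_width=*/width,
      /*block_height=*/height);
}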
XNNPACK
XNNPACK-master/test/abs-operator-tester.h
// Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cmath> #include <cstddef> #include <cstdlib> #include <memory> #include <random> #include <vector> #include <fp16/fp16.h> #include <xnnpack.h> class AbsOperatorTester { public: inline AbsOperatorTester& channels(size_t channels) { assert(channels != 0); this->channels_ = channels; return *this; } inline size_t channels() const { return this->channels_; } inline AbsOperatorTester& input_stride(size_t input_stride) { assert(input_stride != 0); this->input_stride_ = input_stride; return *this; } inline size_t input_stride() const { if (this->input_stride_ == 0) { return this->channels_; } else { assert(this->input_stride_ >= this->channels_); return this->input_stride_; } } inline AbsOperatorTester& output_stride(size_t output_stride) { assert(output_stride != 0); this->output_stride_ = output_stride; return *this; } inline size_t output_stride() const { if (this->output_stride_ == 0) { return this->channels_; } else { assert(this->output_stride_ >= this->channels_); return this->output_stride_; } } inline AbsOperatorTester& batch_size(size_t batch_size) { assert(batch_size != 0); this->batch_size_ = batch_size; return *this; } inline size_t batch_size() const { return this->batch_size_; } inline AbsOperatorTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void TestF16() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(-1.0f, 1.0f); std::vector<uint16_t> input(XNN_EXTRA_BYTES / sizeof(uint16_t) + (batch_size() - 1) * input_stride() + channels()); std::vector<uint16_t> output((batch_size() - 1) * output_stride() + channels()); std::vector<uint16_t> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); }); std::fill(output.begin(), output.end(), UINT16_C(0x7E00) /* NaN */); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { output_ref[i * channels() + c] = input[i * input_stride() + c] & UINT16_C(0x7FFF); } } // Create, setup, run, and destroy Abs operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t abs_op = nullptr; const xnn_status status = xnn_create_abs_nc_f16( channels(), input_stride(), output_stride(), 0, &abs_op); if (status == xnn_status_unsupported_hardware) { GTEST_SKIP(); } ASSERT_EQ(xnn_status_success, status); ASSERT_NE(nullptr, abs_op); // Smart pointer to automatically delete abs_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_abs_op(abs_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_abs_nc_f16(abs_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_abs_nc_f16(abs_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(abs_op, /*threadpool=*/nullptr)); // Verify results. 
for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_EQ(output_ref[i * channels() + c], output[i * output_stride() + c]) << "at batch " << i << " / " << batch_size() << ", channel " << c << " / " << channels(); } } } } void TestF32() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(-1.0f, 1.0f); std::vector<float> input(XNN_EXTRA_BYTES / sizeof(float) + (batch_size() - 1) * input_stride() + channels()); std::vector<float> output((batch_size() - 1) * output_stride() + channels()); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return f32dist(rng); }); std::fill(output.begin(), output.end(), std::nanf("")); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { output_ref[i * channels() + c] = std::fabs(input[i * input_stride() + c]); } } // Create, setup, run, and destroy Abs operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t abs_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_abs_nc_f32( channels(), input_stride(), output_stride(), 0, &abs_op)); ASSERT_NE(nullptr, abs_op); // Smart pointer to automatically delete abs_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_abs_op(abs_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_abs_nc_f32(abs_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_abs_nc_f32(abs_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(abs_op, /*threadpool=*/nullptr)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_EQ(output_ref[i * channels() + c], output[i * output_stride() + c]) << "at batch " << i << " / " << batch_size() << ", channel " << c << " / " << channels(); } } } } void TestRunF32() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(-1.0f, 1.0f); std::vector<float> input(XNN_EXTRA_BYTES / sizeof(float) + (batch_size() - 1) * input_stride() + channels()); std::vector<float> output((batch_size() - 1) * output_stride() + channels()); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return f32dist(rng); }); std::fill(output.begin(), output.end(), std::nanf("")); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { output_ref[i * channels() + c] = std::fabs(input[i * input_stride() + c]); } } ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); ASSERT_EQ(xnn_status_success, xnn_run_abs_nc_f32( channels(), input_stride(), output_stride(), batch_size(), input.data(), output.data(), 0, nullptr /* thread pool */)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_EQ(output_ref[i * channels() + c], output[i * output_stride() + c]) << "at batch " << i << " / " << batch_size() << ", channel " << c << " / " << channels(); } } } } private: size_t batch_size_{1}; size_t channels_{1}; size_t input_stride_{0}; size_t output_stride_{0}; size_t iterations_{15}; };
7785
32.560345
109
h
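The f16 path of the tester above computes the reference |x| purely in bits, clearing the sign bit of the IEEE half-precision encoding; a standalone sketch:

#include <stdint.h>

/* |x| on the IEEE binary16 encoding: clear bit 15 (the sign bit). Works for
   zeros, subnormals, infinities, and NaN payloads alike. */
static inline uint16_t f16_abs_bits(uint16_t half_bits) {
  return half_bits & UINT16_C(0x7FFF);
}
/* Example: f16_abs_bits(0xC000) == 0x4000, i.e. |-2.0h| == +2.0h. */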
XNNPACK
XNNPACK-master/test/assembler-helpers.h
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <algorithm> // clang-format off #define EXPECT_INSTR(expected, actual) \ EXPECT_EQ(expected, actual) << "expected = 0x" << std::hex << std::setw(8) << std::setfill('0') << expected \ << std::endl << " actual = 0x" << actual; // clang-format on #define CHECK_ENCODING(expected, call) \ a.reset(); \ call; \ EXPECT_EQ(Error::kNoError, a.error()); \ EXPECT_INSTR(expected, *reinterpret_cast<const uint32_t*>(a.start())) #define EXPECT_ERROR(expected, call) \ a.reset(); \ call; \ EXPECT_EQ(expected, a.error()); namespace xnnpack { // Arguments are: input (r0|x0), output (r1|x1), params (r2|x2). typedef void (*JitF32HardswishFn)(float*, float*, void*); // Reference implementation of hardswish constexpr float hardswish(float x) { return x * std::min(std::max(0.0f, (x + 3.0f)), 6.0f) / 6.0f; } } // namespace xnnpack
1249
31.894737
111
h
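Worked values for the `hardswish` reference above, hardswish(x) = x * clamp(x + 3, 0, 6) / 6, rewritten as a plain-C sketch with a few exactly representable check points:

#include <assert.h>

/* Same formula as xnnpack::hardswish, in plain C. */
static float ref_hardswish(float x) {
  float t = x + 3.0f;
  if (t < 0.0f) t = 0.0f;
  if (t > 6.0f) t = 6.0f;
  return x * t / 6.0f;
}

int main(void) {
  assert(ref_hardswish(-4.0f) == 0.0f);  /* x + 3 <= 0: clamped to zero */
  assert(ref_hardswish(0.0f) == 0.0f);
  assert(ref_hardswish(3.0f) == 3.0f);   /* x + 3 == 6: x * 6 / 6 == x */
  assert(ref_hardswish(4.0f) == 4.0f);   /* clamped high */
  return 0;
}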
XNNPACK
XNNPACK-master/test/batch-matrix-multiply-operator-tester.h
// Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cmath> #include <cstddef> #include <cstdlib> #include <limits> #include <memory> #include <random> #include <vector> #include <xnnpack.h> #include <xnnpack/aligned-allocator.h> #include <xnnpack/common.h> class BatchMatMulOperatorTester { public: inline BatchMatMulOperatorTester& m(size_t m) { assert(m >= 1); this->m_ = m; return *this; } inline size_t m() const { return this->m_; } inline BatchMatMulOperatorTester& k(size_t k) { assert(k >= 1); this->k_ = k; return *this; } inline size_t k() const { return this->k_; } inline BatchMatMulOperatorTester& n(size_t n) { assert(n >= 1); this->n_ = n; return *this; } inline size_t n() const { return this->n_; } inline BatchMatMulOperatorTester& batch_size(size_t batch_size) { assert(batch_size >= 1); this->batch_size_ = batch_size; return *this; } inline size_t batch_size() const { return this->batch_size_; } inline BatchMatMulOperatorTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void TestF32() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(0.1f, 1.0f); std::vector<float> lhs(XNN_EXTRA_BYTES / sizeof(float) + batch_size() * m() * k()); std::vector<float> rhs(XNN_EXTRA_BYTES / sizeof(float) + batch_size() * k() * n()); std::vector<float> output(batch_size() * m() * n()); std::vector<float> output_ref(batch_size() * m() * n()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(lhs.begin(), lhs.end(), [&]() { return f32dist(rng); }); std::generate(rhs.begin(), rhs.end(), [&]() { return f32dist(rng); }); std::fill(output.begin(), output.end(), nanf("")); // lhs is B*M*K, rhs is B*N*K // Compute reference results. std::fill(output_ref.begin(), output_ref.end(), 0.0f); for (size_t b = 0; b < batch_size(); b++) { for (size_t mi = 0; mi < m(); mi++) { for (size_t ni = 0; ni < n(); ni++) { for (size_t ki = 0; ki < k(); ki++) { output_ref[b * m() * n() + mi * n() + ni] += lhs[b * m() * k() + mi * k() + ki] * rhs[b * k() * n() + ni * k() + ki]; } } } } // Create, setup, run, and destroy Fully Connected operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t batch_matrix_multiply_op = nullptr; const xnn_status status = xnn_create_batch_matrix_multiply_nc_f32(/*flags=*/0, &batch_matrix_multiply_op); if (status == xnn_status_unsupported_hardware) { GTEST_SKIP(); } ASSERT_EQ(xnn_status_success, status); ASSERT_NE(nullptr, batch_matrix_multiply_op); // Smart pointer to automatically delete batch_matrix_multiply_op. 
std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_batch_matrix_multiply_op( batch_matrix_multiply_op, xnn_delete_operator); size_t workspace_size = 0; size_t workspace_alignment = 0; ASSERT_EQ( xnn_status_success, xnn_reshape_batch_matrix_multiply_nc_f32( batch_matrix_multiply_op, batch_size(), m(), k(), n(), &workspace_size, &workspace_alignment, /*threadpool=*/nullptr)); ASSERT_NE(workspace_size, 0); ASSERT_LE(workspace_alignment, XNN_ALLOCATION_ALIGNMENT); std::vector<char, AlignedAllocator<char, XNN_ALLOCATION_ALIGNMENT>> workspace(workspace_size); ASSERT_EQ(xnn_status_success, xnn_setup_batch_matrix_multiply_nc_f32( batch_matrix_multiply_op, workspace.data(), lhs.data(), rhs.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(batch_matrix_multiply_op, nullptr /* thread pool */)); VerifyF32(output, output_ref); } } void VerifyF32(const std::vector<float>& output, const std::vector<float>& output_ref) const { // Verify results. for (size_t bi = 0; bi < batch_size(); bi++) { for (size_t mi = 0; mi < m(); mi++) { for (size_t ni = 0; ni < n(); ni++) { EXPECT_NEAR( output_ref[bi * m() * n() + mi * n() + ni], output[bi * m() * n() + mi * n() + ni], 1.0e-4f * std::abs(output_ref[bi * m() * n() + mi * n() + ni])) << "batch = " << bi << " / " << batch_size() << ", m = " << mi << " / " << m() << ", n = " << ni << " / " << n(); } } } } private: // TODO(zhin): support flags for transpose lhs and rhs. size_t m_{1}; size_t k_{1}; size_t n_{1}; size_t batch_size_{1}; size_t iterations_{1}; };
5122
29.313609
117
h
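The tester above notes that `lhs` is [B, M, K] and `rhs` is [B, N, K] (the right-hand side is stored transposed), so output[b, m, n] = sum over k of lhs[b, m, k] * rhs[b, n, k]. A standalone scalar reference matching its verification loops:

#include <stddef.h>

static void batch_matmul_reference(size_t B, size_t M, size_t K, size_t N,
                                   const float* lhs, const float* rhs,
                                   float* out) {
  for (size_t b = 0; b < B; b++) {
    for (size_t m = 0; m < M; m++) {
      for (size_t n = 0; n < N; n++) {
        float acc = 0.0f;
        for (size_t k = 0; k < K; k++) {
          acc += lhs[(b * M + m) * K + k] * rhs[(b * N + n) * K + k];
        }
        out[(b * M + m) * N + n] = acc;
      }
    }
  }
}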
XNNPACK
XNNPACK-master/test/bfly4-microkernel-tester.h
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cmath> #include <cstddef> #include <cstdlib> #include <random> #include <vector> #include <xnnpack.h> #include <xnnpack/aligned-allocator.h> #include <xnnpack/math.h> #include <xnnpack/params.h> // twiddle table for bfly4 for fft size 192 (complex numbers) // Even numbers are numpy.floor(0.5 + 32767 * numpy.cos(-2*pi*numpy.linspace(0, 255, num=256) / 256)).astype(numpy.int16).tolist() // Odd numbers are numpy.floor(0.5 + 32767 * numpy.sin(-2*pi*numpy.linspace(0, 255, num=256) / 256)).astype(numpy.int16).tolist() static const int16_t xnn_reference_table_fft256_twiddle[384] = { 32767, 0, 32757, -804, 32728, -1608, 32678, -2410, 32609, -3212, 32521, -4011, 32412, -4808, 32285, -5602, 32137, -6393, 31971, -7179, 31785, -7962, 31580, -8739, 31356, -9512, 31113,-10278, 30852,-11039, 30571,-11793, 30273,-12539, 29956,-13279, 29621,-14010, 29268,-14732, 28898,-15446, 28510,-16151, 28105,-16846, 27683,-17530, 27245,-18204, 26790,-18868, 26319,-19519, 25832,-20159, 25329,-20787, 24811,-21403, 24279,-22005, 23731,-22594, 23170,-23170, 22594,-23731, 22005,-24279, 21403,-24811, 20787,-25329, 20159,-25832, 19519,-26319, 18868,-26790, 18204,-27245, 17530,-27683, 16846,-28105, 16151,-28510, 15446,-28898, 14732,-29268, 14010,-29621, 13279,-29956, 12539,-30273, 11793,-30571, 11039,-30852, 10278,-31113, 9512,-31356, 8739,-31580, 7962,-31785, 7179,-31971, 6393,-32137, 5602,-32285, 4808,-32412, 4011,-32521, 3212,-32609, 2410,-32678, 1608,-32728, 804,-32757, 0,-32767, -804,-32757, -1608,-32728, -2410,-32678, -3212,-32609, -4011,-32521, -4808,-32412, -5602,-32285, -6393,-32137, -7179,-31971, -7962,-31785, -8739,-31580, -9512,-31356, -10278,-31113, -11039,-30852, -11793,-30571, -12539,-30273, -13279,-29956, -14010,-29621, -14732,-29268, -15446,-28898, -16151,-28510, -16846,-28105, -17530,-27683, -18204,-27245, -18868,-26790, -19519,-26319, -20159,-25832, -20787,-25329, -21403,-24811, -22005,-24279, -22594,-23731, -23170,-23170, -23731,-22594, -24279,-22005, -24811,-21403, -25329,-20787, -25832,-20159, -26319,-19519, -26790,-18868, -27245,-18204, -27683,-17530, -28105,-16846, -28510,-16151, -28898,-15446, -29268,-14732, -29621,-14010, -29956,-13279, -30273,-12539, -30571,-11793, -30852,-11039, -31113,-10278, -31356, -9512, -31580, -8739, -31785, -7962, -31971, -7179, -32137, -6393, -32285, -5602, -32412, -4808, -32521, -4011, -32609, -3212, -32678, -2410, -32728, -1608, -32757, -804, -32767, 0, -32757, 804, -32728, 1608, -32678, 2410, -32609, 3212, -32521, 4011, -32412, 4808, -32285, 5602, -32137, 6393, -31971, 7179, -31785, 7962, -31580, 8739, -31356, 9512, -31113, 10278, -30852, 11039, -30571, 11793, -30273, 12539, -29956, 13279, -29621, 14010, -29268, 14732, -28898, 15446, -28510, 16151, -28105, 16846, -27683, 17530, -27245, 18204, -26790, 18868, -26319, 19519, -25832, 20159, -25329, 20787, -24811, 21403, -24279, 22005, -23731, 22594, -23170, 23170, -22594, 23731, -22005, 24279, -21403, 24811, -20787, 25329, -20159, 25832, -19519, 26319, -18868, 26790, -18204, 27245, -17530, 27683, -16846, 28105, -16151, 28510, -15446, 28898, -14732, 29268, -14010, 29621, -13279, 29956, -12539, 30273, -11793, 30571, -11039, 30852, -10278, 31113, -9512, 31356, -8739, 31580, -7962, 31785, -7179, 31971, -6393, 32137, -5602, 32285, -4808, 32412, -4011, 32521, -3212, 32609, -2410, 
32678, -1608, 32728, -804, 32757 }; static void xnn_cs16_bfly4_reference( size_t batch, size_t samples, int16_t* data, const int16_t* twiddle, size_t stride) { assert(batch != 0); assert(samples != 0); assert(data != NULL); assert(stride != 0); assert(twiddle != NULL); int16_t* data0 = data; int16_t* data1 = data + samples * 2; int16_t* data2 = data + samples * 4; int16_t* data3 = data + samples * 6; for (size_t n = 0; n < batch; ++n) { const int16_t* tw1 = twiddle; const int16_t* tw2 = twiddle; const int16_t* tw3 = twiddle; for (size_t m = 0; m < samples; ++m) { int32_t vout0_r = (int32_t) data0[0]; int32_t vout0_i = (int32_t) data0[1]; int32_t vout1_r = (int32_t) data1[0]; int32_t vout1_i = (int32_t) data1[1]; int32_t vout2_r = (int32_t) data2[0]; int32_t vout2_i = (int32_t) data2[1]; int32_t vout3_r = (int32_t) data3[0]; int32_t vout3_i = (int32_t) data3[1]; const int32_t tw1_r = (const int32_t) tw1[0]; const int32_t tw1_i = (const int32_t) tw1[1]; const int32_t tw2_r = (const int32_t) tw2[0]; const int32_t tw2_i = (const int32_t) tw2[1]; const int32_t tw3_r = (const int32_t) tw3[0]; const int32_t tw3_i = (const int32_t) tw3[1]; // Note 32767 / 4 = 8191. Should be 8192. vout0_r = (vout0_r * 8191 + 16384) >> 15; vout0_i = (vout0_i * 8191 + 16384) >> 15; vout1_r = (vout1_r * 8191 + 16384) >> 15; vout1_i = (vout1_i * 8191 + 16384) >> 15; vout2_r = (vout2_r * 8191 + 16384) >> 15; vout2_i = (vout2_i * 8191 + 16384) >> 15; vout3_r = (vout3_r * 8191 + 16384) >> 15; vout3_i = (vout3_i * 8191 + 16384) >> 15; const int32_t vtmp0_r = math_asr_s32(vout1_r * tw1_r - vout1_i * tw1_i + 16384, 15); const int32_t vtmp0_i = math_asr_s32(vout1_r * tw1_i + vout1_i * tw1_r + 16384, 15); const int32_t vtmp1_r = math_asr_s32(vout2_r * tw2_r - vout2_i * tw2_i + 16384, 15); const int32_t vtmp1_i = math_asr_s32(vout2_r * tw2_i + vout2_i * tw2_r + 16384, 15); const int32_t vtmp2_r = math_asr_s32(vout3_r * tw3_r - vout3_i * tw3_i + 16384, 15); const int32_t vtmp2_i = math_asr_s32(vout3_r * tw3_i + vout3_i * tw3_r + 16384, 15); const int32_t vtmp5_r = vout0_r - vtmp1_r; const int32_t vtmp5_i = vout0_i - vtmp1_i; vout0_r += vtmp1_r; vout0_i += vtmp1_i; const int32_t vtmp3_r = vtmp0_r + vtmp2_r; const int32_t vtmp3_i = vtmp0_i + vtmp2_i; const int32_t vtmp4_r = vtmp0_r - vtmp2_r; const int32_t vtmp4_i = vtmp0_i - vtmp2_i; vout2_r = vout0_r - vtmp3_r; vout2_i = vout0_i - vtmp3_i; tw1 += stride * 2; tw2 += stride * 4; tw3 += stride * 6; vout0_r += vtmp3_r; vout0_i += vtmp3_i; vout1_r = vtmp5_r + vtmp4_i; vout1_i = vtmp5_i - vtmp4_r; vout3_r = vtmp5_r - vtmp4_i; vout3_i = vtmp5_i + vtmp4_r; data0[0] = (int16_t) vout0_r; data0[1] = (int16_t) vout0_i; data1[0] = (int16_t) vout1_r; data1[1] = (int16_t) vout1_i; data2[0] = (int16_t) vout2_r; data2[1] = (int16_t) vout2_i; data3[0] = (int16_t) vout3_r; data3[1] = (int16_t) vout3_i; data0 += 2; data1 += 2; data2 += 2; data3 += 2; } data0 += samples * 6; data1 += samples * 6; data2 += samples * 6; data3 += samples * 6; } while(--batch != 0); } class BFly4MicrokernelTester { public: inline BFly4MicrokernelTester& batch(size_t batch) { assert(batch != 0); this->batch_ = batch; return *this; } inline size_t batch() const { return this->batch_; } inline BFly4MicrokernelTester& samples(size_t samples) { assert(samples != 0); this->samples_ = samples; return *this; } inline size_t samples() const { return this->samples_; } inline BFly4MicrokernelTester& stride(uint32_t stride) { this->stride_ = stride; return *this; } inline uint32_t stride() const { return this->stride_; } inline 
BFly4MicrokernelTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void Test(xnn_cs16_bfly4_ukernel_fn bfly4) const { std::random_device random_device; auto rng = std::mt19937(random_device()); auto i16rng = std::bind(std::uniform_int_distribution<int16_t>(), std::ref(rng)); const size_t fft_size = samples() * stride() * 4; // 4 for bfly4. // 256 complex numbers = fft_size * 2 = 512 std::vector<int16_t> y(fft_size * 2); std::vector<int16_t> y_ref(fft_size * 2); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(y.begin(), y.end(), std::ref(i16rng)); y_ref = y; // Compute reference results. xnn_cs16_bfly4_reference(batch(), samples(), y_ref.data(), xnn_reference_table_fft256_twiddle, stride()); // Call optimized micro-kernel. bfly4(batch(), samples() * sizeof(int16_t) * 2, y.data(), xnn_reference_table_fft256_twiddle, stride() * sizeof(int16_t) * 2); // Verify results. for (size_t n = 0; n < fft_size * 2; n++) { EXPECT_EQ(y[n], y_ref[n]) << "at sample " << n << " / " << fft_size << "\nsamples " << samples() << "\nstride " << stride(); } } } private: size_t batch_{1}; size_t samples_{1}; uint32_t stride_{1}; size_t iterations_{15}; };
9320
35.69685
132
h
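The bfly4 reference above works in Q15 fixed point: int16 twiddles in [-32767, 32767] represent [-1, 1), and every product is rounded to nearest by adding 16384 (= 1 << 14) before an arithmetic shift right by 15. A sketch of that primitive:

#include <stdint.h>

/* Rounding Q15 multiply as used for the twiddle products. The tester routes
   the shift through math_asr_s32 because `>>` on a negative signed value is
   implementation-defined in C; plain `>>` is shown here only for brevity. */
static inline int32_t q15_mul_round(int32_t a, int32_t b) {
  return (a * b + 16384) >> 15;
}
/* Example: q15_mul_round(16384, 16384) == 8192, i.e. 0.5 * 0.5 == 0.25. */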
XNNPACK
XNNPACK-master/test/channel-shuffle-operator-tester.h
// Copyright (c) Facebook, Inc. and its affiliates. // All rights reserved. // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cstddef> #include <cstdlib> #include <limits> #include <random> #include <vector> #include <xnnpack.h> class ChannelShuffleOperatorTester { public: inline ChannelShuffleOperatorTester& groups(size_t groups) { assert(groups != 0); this->groups_ = groups; return *this; } inline size_t groups() const { return this->groups_; } inline ChannelShuffleOperatorTester& group_channels(size_t group_channels) { assert(group_channels != 0); this->group_channels_ = group_channels; return *this; } inline size_t group_channels() const { return this->group_channels_; } inline size_t channels() const { return groups() * group_channels(); } inline ChannelShuffleOperatorTester& input_stride(size_t input_stride) { assert(input_stride != 0); this->input_stride_ = input_stride; return *this; } inline size_t input_stride() const { if (this->input_stride_ == 0) { return channels(); } else { assert(this->input_stride_ >= channels()); return this->input_stride_; } } inline ChannelShuffleOperatorTester& output_stride(size_t output_stride) { assert(output_stride != 0); this->output_stride_ = output_stride; return *this; } inline size_t output_stride() const { if (this->output_stride_ == 0) { return channels(); } else { assert(this->output_stride_ >= channels()); return this->output_stride_; } } inline ChannelShuffleOperatorTester& batch_size(size_t batch_size) { assert(batch_size != 0); this->batch_size_ = batch_size; return *this; } inline size_t batch_size() const { return this->batch_size_; } inline ChannelShuffleOperatorTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void TestX8() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_int_distribution<int32_t> u8dist( std::numeric_limits<uint8_t>::min(), std::numeric_limits<uint8_t>::max()); std::vector<uint8_t> input(XNN_EXTRA_BYTES / sizeof(uint8_t) + (batch_size() - 1) * input_stride() + channels()); std::vector<uint8_t> output((batch_size() - 1) * output_stride() + channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return u8dist(rng); }); std::fill(output.begin(), output.end(), UINT8_C(0xA5)); // Create, setup, run, and destroy Channel Shuffle operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t channel_shuffle_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_channel_shuffle_nc_x8( groups(), group_channels(), input_stride(), output_stride(), 0, &channel_shuffle_op)); ASSERT_NE(nullptr, channel_shuffle_op); // Smart pointer to automatically delete channel_shuffle_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_channel_shuffle_op(channel_shuffle_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_channel_shuffle_nc_x8( channel_shuffle_op, batch_size(), nullptr /* thread pool */)); ASSERT_EQ(xnn_status_success, xnn_setup_channel_shuffle_nc_x8( channel_shuffle_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(channel_shuffle_op, nullptr /* thread pool */)); // Verify results. 
for (size_t i = 0; i < batch_size(); i++) { for (size_t g = 0; g < groups(); g++) { for (size_t c = 0; c < group_channels(); c++) { ASSERT_EQ(int32_t(input[i * input_stride() + g * group_channels() + c]), int32_t(output[i * output_stride() + c * groups() + g])) << "batch index " << i << ", group " << g << ", channel " << c; } } } } } void TestX32() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_int_distribution<uint32_t> u32dist; std::vector<uint32_t> input(XNN_EXTRA_BYTES / sizeof(uint32_t) + (batch_size() - 1) * input_stride() + channels()); std::vector<uint32_t> output((batch_size() - 1) * output_stride() + channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return u32dist(rng); }); std::fill(output.begin(), output.end(), UINT32_C(0xDEADBEAF)); // Create, setup, run, and destroy Channel Shuffle operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t channel_shuffle_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_channel_shuffle_nc_x32( groups(), group_channels(), input_stride(), output_stride(), 0, &channel_shuffle_op)); ASSERT_NE(nullptr, channel_shuffle_op); // Smart pointer to automatically delete channel_shuffle_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_channel_shuffle_op(channel_shuffle_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_channel_shuffle_nc_x32( channel_shuffle_op, batch_size(), nullptr /* thread pool */)); ASSERT_EQ(xnn_status_success, xnn_setup_channel_shuffle_nc_x32( channel_shuffle_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(channel_shuffle_op, nullptr /* thread pool */)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t g = 0; g < groups(); g++) { for (size_t c = 0; c < group_channels(); c++) { ASSERT_EQ(input[i * input_stride() + g * group_channels() + c], output[i * output_stride() + c * groups() + g]) << "batch index " << i << ", group " << g << ", channel " << c; } } } } } private: size_t groups_{1}; size_t group_channels_{1}; size_t batch_size_{1}; size_t input_stride_{0}; size_t output_stride_{0}; size_t iterations_{15}; };
6689
30.556604
133
h
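The verification loops above define the shuffle: within each row, the [groups, group_channels] layout is transposed to [group_channels, groups]. A standalone scalar reference:

#include <stddef.h>
#include <stdint.h>

/* output[c * groups + g] = input[g * group_channels + c] */
static void channel_shuffle_reference(size_t groups, size_t group_channels,
                                      const uint8_t* input, uint8_t* output) {
  for (size_t g = 0; g < groups; g++) {
    for (size_t c = 0; c < group_channels; c++) {
      output[c * groups + g] = input[g * group_channels + c];
    }
  }
}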
XNNPACK
XNNPACK-master/test/convolution-test-helpers.h
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <cstdint> #include <cstddef> #include <vector> namespace xnnpack { void compute_convolution_qs8_reference_results( size_t batch_size, size_t output_height, size_t output_width, size_t input_height, size_t input_width, size_t input_padding_top, size_t input_padding_right, size_t input_padding_bottom, size_t input_padding_left, size_t kernel_height, size_t kernel_width, size_t subsampling_height, size_t subsampling_width, size_t dilation_height, size_t dilation_width, size_t groups, size_t group_input_channels, size_t group_output_channels, size_t input_channel_stride, int8_t input_zero_point, const std::vector<int8_t>& input, const std::vector<int8_t>& filter, std::vector<int32_t>& accumulators, bool has_bias, const std::vector<int32_t>& bias); void compute_convolution_qs8_reference_results( size_t batch_size, size_t output_height, size_t output_width, size_t input_height, size_t input_width, size_t input_padding_top, size_t input_padding_right, size_t input_padding_bottom, size_t input_padding_left, size_t kernel_height, size_t kernel_width, size_t subsampling_height, size_t subsampling_width, size_t dilation_height, size_t dilation_width, size_t groups, size_t group_input_channels, size_t group_output_channels, int8_t input_zero_point, const std::vector<int8_t>& input, const std::vector<int8_t>& filter, std::vector<int32_t>& accumulators, bool has_bias, const std::vector<int32_t>& bias); void compute_convolution_qu8_reference_results( size_t batch_size, size_t output_height, size_t output_width, size_t input_height, size_t input_width, size_t input_padding_top, size_t input_padding_right, size_t input_padding_bottom, size_t input_padding_left, size_t kernel_height, size_t kernel_width, size_t subsampling_height, size_t subsampling_width, size_t dilation_height, size_t dilation_width, size_t groups, size_t group_input_channels, size_t group_output_channels, uint8_t input_zero_point, uint8_t kernel_zero_point, const std::vector<uint8_t>& input, const std::vector<uint8_t>& filter, std::vector<int32_t>& accumulators, bool has_bias, const std::vector<int32_t>& bias); void compute_convolution_qu8_reference_results( size_t batch_size, size_t output_height, size_t output_width, size_t input_height, size_t input_width, size_t input_padding_top, size_t input_padding_right, size_t input_padding_bottom, size_t input_padding_left, size_t kernel_height, size_t kernel_width, size_t subsampling_height, size_t subsampling_width, size_t dilation_height, size_t dilation_width, size_t groups, size_t group_input_channels, size_t group_output_channels, size_t input_channel_stride, uint8_t input_zero_point, uint8_t kernel_zero_point, const std::vector<uint8_t>& input, const std::vector<uint8_t>& filter, std::vector<int32_t>& accumulators, bool has_bias, const std::vector<int32_t>& bias); void compute_depthwise_convolution_qs8_reference_results( size_t batch_size, size_t output_height, size_t output_width, size_t input_height, size_t input_width, size_t input_padding_top, size_t input_padding_right, size_t input_padding_bottom, size_t input_padding_left, size_t kernel_height, size_t kernel_width, size_t subsampling_height, size_t subsampling_width, size_t dilation_height, size_t dilation_width, size_t input_channels, size_t depth_multiplier, size_t input_channel_stride, int8_t input_zero_point, const std::vector<int8_t>& input, 
const std::vector<int8_t>& filter, std::vector<int32_t>& accumulators, bool has_bias, const std::vector<int32_t>& bias); void compute_depthwise_convolution_qs8_reference_results( size_t batch_size, size_t output_height, size_t output_width, size_t input_height, size_t input_width, size_t input_padding_top, size_t input_padding_right, size_t input_padding_bottom, size_t input_padding_left, size_t kernel_height, size_t kernel_width, size_t subsampling_height, size_t subsampling_width, size_t dilation_height, size_t dilation_width, size_t input_channels, size_t depth_multiplier, int8_t input_zero_point, const std::vector<int8_t>& input, const std::vector<int8_t>& filter, std::vector<int32_t>& accumulators, bool has_bias, const std::vector<int32_t>& bias); void compute_depthwise_convolution_qu8_reference_results( size_t batch_size, size_t output_height, size_t output_width, size_t input_height, size_t input_width, size_t input_padding_top, size_t input_padding_right, size_t input_padding_bottom, size_t input_padding_left, size_t kernel_height, size_t kernel_width, size_t subsampling_height, size_t subsampling_width, size_t dilation_height, size_t dilation_width, size_t input_channels, size_t depth_multiplier, size_t input_channel_stride, uint8_t input_zero_point, uint8_t kernel_zero_point, const std::vector<uint8_t>& input, const std::vector<uint8_t>& filter, std::vector<int32_t>& accumulators, bool has_bias, const std::vector<int32_t>& bias); void compute_depthwise_convolution_qu8_reference_results( size_t batch_size, size_t output_height, size_t output_width, size_t input_height, size_t input_width, size_t input_padding_top, size_t input_padding_right, size_t input_padding_bottom, size_t input_padding_left, size_t kernel_height, size_t kernel_width, size_t subsampling_height, size_t subsampling_width, size_t dilation_height, size_t dilation_width, size_t input_channels, size_t depth_multiplier, uint8_t input_zero_point, uint8_t kernel_zero_point, const std::vector<uint8_t>& input, const std::vector<uint8_t>& filter, std::vector<int32_t>& accumulators, bool has_bias, const std::vector<int32_t>& bias); } // namespace xnnpack
6507
27.924444
72
h
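This file shows only the declarations of the reference helpers; a sketch of the per-tap accumulation they presumably perform, assuming the standard asymmetric-quantization convention (both zero points subtracted for QU8; the QS8 overloads take no kernel zero point, implying symmetric filters):

#include <stdint.h>

/* One multiply-accumulate tap for the QU8 reference (hypothetical: inferred
   from the parameter lists above, not from an implementation). */
static inline int32_t qu8_tap(uint8_t input_value, uint8_t input_zero_point,
                              uint8_t filter_value, uint8_t kernel_zero_point) {
  return ((int32_t) input_value - (int32_t) input_zero_point) *
         ((int32_t) filter_value - (int32_t) kernel_zero_point);
}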
XNNPACK
XNNPACK-master/test/copy-operator-tester.h
// Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cstddef> #include <cstdlib> #include <limits> #include <random> #include <vector> #include <xnnpack.h> class CopyOperatorTester { public: inline CopyOperatorTester& channels(size_t channels) { assert(channels != 0); this->channels_ = channels; return *this; } inline size_t channels() const { return this->channels_; } inline CopyOperatorTester& input_stride(size_t input_stride) { assert(input_stride != 0); this->input_stride_ = input_stride; return *this; } inline size_t input_stride() const { if (this->input_stride_ == 0) { return this->channels_; } else { assert(this->input_stride_ >= this->channels_); return this->input_stride_; } } inline CopyOperatorTester& output_stride(size_t output_stride) { assert(output_stride != 0); this->output_stride_ = output_stride; return *this; } inline size_t output_stride() const { if (this->output_stride_ == 0) { return this->channels_; } else { assert(this->output_stride_ >= this->channels_); return this->output_stride_; } } inline CopyOperatorTester& batch_size(size_t batch_size) { assert(batch_size != 0); this->batch_size_ = batch_size; return *this; } inline size_t batch_size() const { return this->batch_size_; } inline CopyOperatorTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void TestX8() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_int_distribution<uint32_t> u8dist( std::numeric_limits<uint8_t>::min(), std::numeric_limits<uint8_t>::max()); std::vector<uint8_t> input(XNN_EXTRA_BYTES / sizeof(uint8_t) + (batch_size() - 1) * input_stride() + channels()); std::vector<uint8_t> output((batch_size() - 1) * output_stride() + channels()); std::vector<uint8_t> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return u8dist(rng); }); std::fill(output.begin(), output.end(), UINT8_C(0xFA)); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { output_ref[i * channels() + c] = input[i * input_stride() + c]; } } // Create, setup, run, and destroy Copy operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t copy_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_copy_nc_x8( channels(), input_stride(), output_stride(), 0, &copy_op)); ASSERT_NE(nullptr, copy_op); // Smart pointer to automatically delete copy_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_copy_op(copy_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_copy_nc_x8(copy_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_copy_nc_x8(copy_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(copy_op, /*threadpool=*/nullptr)); // Verify results. 
for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_EQ(output_ref[i * channels() + c], output[i * output_stride() + c]) << "at batch " << i << " / " << batch_size() << ", channel = " << c << " / " << channels(); } } } } void TestX16() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_int_distribution<uint16_t> u16dist; std::vector<uint16_t> input(XNN_EXTRA_BYTES / sizeof(uint16_t) + (batch_size() - 1) * input_stride() + channels()); std::vector<uint16_t> output((batch_size() - 1) * output_stride() + channels()); std::vector<uint16_t> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return u16dist(rng); }); std::fill(output.begin(), output.end(), UINT16_C(0xDEAD)); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { output_ref[i * channels() + c] = input[i * input_stride() + c]; } } // Create, setup, run, and destroy Copy operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t copy_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_copy_nc_x16( channels(), input_stride(), output_stride(), 0, &copy_op)); ASSERT_NE(nullptr, copy_op); // Smart pointer to automatically delete copy_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_copy_op(copy_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_copy_nc_x16(copy_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_copy_nc_x16(copy_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(copy_op, /*threadpool=*/nullptr)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_EQ(output_ref[i * channels() + c], output[i * output_stride() + c]) << "at batch " << i << " / " << batch_size() << ", channel = " << c << " / " << channels(); } } } } void TestX32() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_int_distribution<uint32_t> u32dist; std::vector<uint32_t> input(XNN_EXTRA_BYTES / sizeof(uint32_t) + (batch_size() - 1) * input_stride() + channels()); std::vector<uint32_t> output((batch_size() - 1) * output_stride() + channels()); std::vector<uint32_t> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return u32dist(rng); }); std::fill(output.begin(), output.end(), UINT32_C(0xDEADBEEF)); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { output_ref[i * channels() + c] = input[i * input_stride() + c]; } } // Create, setup, run, and destroy Copy operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t copy_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_copy_nc_x32( channels(), input_stride(), output_stride(), 0, &copy_op)); ASSERT_NE(nullptr, copy_op); // Smart pointer to automatically delete copy_op. 
std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_copy_op(copy_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_copy_nc_x32(copy_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_copy_nc_x32(copy_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(copy_op, /*threadpool=*/nullptr)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_EQ(output_ref[i * channels() + c], output[i * output_stride() + c]) << "at batch " << i << " / " << batch_size() << ", channel = " << c << " / " << channels(); } } } } void TestRunX32() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_int_distribution<uint32_t> u32dist; std::vector<uint32_t> input(XNN_EXTRA_BYTES / sizeof(uint32_t) + (batch_size() - 1) * input_stride() + channels()); std::vector<uint32_t> output((batch_size() - 1) * output_stride() + channels()); std::vector<uint32_t> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return u32dist(rng); }); std::fill(output.begin(), output.end(), UINT32_C(0xDEADBEEF)); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { output_ref[i * channels() + c] = input[i * input_stride() + c]; } } // Create, setup, run, and destroy Copy operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); ASSERT_EQ(xnn_status_success, xnn_run_copy_nc_x32( channels(), input_stride(), output_stride(), batch_size(), input.data(), output.data(), 0, nullptr /* thread pool */)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_EQ(output_ref[i * channels() + c], output[i * output_stride() + c]) << "at batch " << i << " / " << batch_size() << ", channel = " << c << " / " << channels(); } } } } private: size_t batch_size_{1}; size_t channels_{1}; size_t input_stride_{0}; size_t output_stride_{0}; size_t iterations_{15}; };
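A minimal usage sketch for the tester above (the gtest case name is hypothetical; it assumes this header is included in a test binary and simply exercises the fluent setters the class exposes):

TEST(COPY_NC_X8, small_batch_with_strides) {
  CopyOperatorTester()
    .batch_size(3)
    .channels(17)
    .input_stride(23)   // must be >= channels(), per the accessor's assert
    .output_stride(19)  // must be >= channels(), per the accessor's assert
    .iterations(3)
    .TestX8();
}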
9,776
34.423913
111
h
XNNPACK
XNNPACK-master/test/elu-operator-tester.h
// Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cmath> #include <cstddef> #include <cstdlib> #include <random> #include <vector> #include <fp16/fp16.h> #include <xnnpack.h> class ELUOperatorTester { public: inline ELUOperatorTester& channels(size_t channels) { assert(channels != 0); this->channels_ = channels; return *this; } inline size_t channels() const { return this->channels_; } inline ELUOperatorTester& input_stride(size_t input_stride) { assert(input_stride != 0); this->input_stride_ = input_stride; return *this; } inline size_t input_stride() const { if (this->input_stride_ == 0) { return this->channels_; } else { assert(this->input_stride_ >= this->channels_); return this->input_stride_; } } inline ELUOperatorTester& output_stride(size_t output_stride) { assert(output_stride != 0); this->output_stride_ = output_stride; return *this; } inline size_t output_stride() const { if (this->output_stride_ == 0) { return this->channels_; } else { assert(this->output_stride_ >= this->channels_); return this->output_stride_; } } inline ELUOperatorTester& batch_size(size_t batch_size) { assert(batch_size != 0); this->batch_size_ = batch_size; return *this; } inline size_t batch_size() const { return this->batch_size_; } inline ELUOperatorTester& alpha(float alpha) { assert(alpha > 0.0f); assert(alpha < 1.0f); this->alpha_ = alpha; return *this; } inline float alpha() const { return this->alpha_; } inline ELUOperatorTester& input_scale(float input_scale) { assert(input_scale > 0.0f); assert(std::isnormal(input_scale)); this->input_scale_ = input_scale; return *this; } inline float input_scale() const { return this->input_scale_; } inline ELUOperatorTester& input_zero_point(uint8_t input_zero_point) { this->input_zero_point_ = input_zero_point; return *this; } inline uint8_t input_zero_point() const { return this->input_zero_point_; } inline ELUOperatorTester& output_scale(float output_scale) { assert(output_scale > 0.0f); assert(std::isnormal(output_scale)); this->output_scale_ = output_scale; return *this; } inline float output_scale() const { return this->output_scale_; } inline ELUOperatorTester& output_zero_point(uint8_t output_zero_point) { this->output_zero_point_ = output_zero_point; return *this; } inline uint8_t output_zero_point() const { return this->output_zero_point_; } inline ELUOperatorTester& qmin(uint8_t qmin) { this->qmin_ = qmin; return *this; } inline uint8_t qmin() const { return this->qmin_; } inline ELUOperatorTester& qmax(uint8_t qmax) { this->qmax_ = qmax; return *this; } inline uint8_t qmax() const { return this->qmax_; } inline ELUOperatorTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void TestF16() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(-25.0f, 25.0f); std::vector<uint16_t> input((batch_size() - 1) * input_stride() + channels() + XNN_EXTRA_BYTES / sizeof(uint16_t)); std::vector<uint16_t> output((batch_size() - 1) * output_stride() + channels()); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); }); 
std::fill(output.begin(), output.end(), UINT16_C(0x7E00) /* NaN */); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { const float x = fp16_ieee_to_fp32_value(input[i * input_stride() + c]); output_ref[i * channels() + c] = std::signbit(x) ? std::expm1(x) * alpha() : x; } } // Create, setup, run, and destroy ELU operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t elu_op = nullptr; const xnn_status status = xnn_create_elu_nc_f16( channels(), input_stride(), output_stride(), alpha(), 0, &elu_op); if (status == xnn_status_unsupported_hardware) { GTEST_SKIP(); } ASSERT_EQ(xnn_status_success, status); ASSERT_NE(nullptr, elu_op); // Smart pointer to automatically delete elu_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_elu_op(elu_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_elu_nc_f16(elu_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_elu_nc_f16(elu_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(elu_op, /*threadpool=*/nullptr)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { ASSERT_NEAR( fp16_ieee_to_fp32_value(output[i * output_stride() + c]), output_ref[i * channels() + c], std::max(1.0e-4f, std::abs(output_ref[i * channels() + c]) * 5.0e-3f)); } } } } void TestF32() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(-20.0f, 20.0f); std::vector<float> input(XNN_EXTRA_BYTES / sizeof(float) + (batch_size() - 1) * input_stride() + channels()); std::vector<float> output((batch_size() - 1) * output_stride() + channels()); std::vector<double> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return f32dist(rng); }); std::fill(output.begin(), output.end(), std::nanf("")); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { const double x = double(input[i * input_stride() + c]); output_ref[i * channels() + c] = std::signbit(x) ? std::expm1(x) * alpha() : x; } } // Create, setup, run, and destroy ELU operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t elu_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_elu_nc_f32( channels(), input_stride(), output_stride(), alpha(), 0, &elu_op)); ASSERT_NE(nullptr, elu_op); // Smart pointer to automatically delete elu_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_elu_op(elu_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_elu_nc_f32(elu_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_elu_nc_f32(elu_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(elu_op, /*threadpool=*/nullptr)); // Verify results. 
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t c = 0; c < channels(); c++) {
          ASSERT_NEAR(output[i * output_stride() + c],
                      output_ref[i * channels() + c],
                      std::abs(output_ref[i * channels() + c]) * 1.0e-5)
            << "at batch " << i << " / " << batch_size() << ", channel " << c << " / " << channels()
            << ", input " << input[i * input_stride() + c] << ", alpha " << alpha();
        }
      }
    }
  }

  void TestQS8() const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    std::uniform_int_distribution<int32_t> i8dist(
      std::numeric_limits<int8_t>::min(), std::numeric_limits<int8_t>::max());

    std::vector<int8_t> input((batch_size() - 1) * input_stride() + channels() + XNN_EXTRA_BYTES / sizeof(int8_t));
    std::vector<int8_t> output((batch_size() - 1) * output_stride() + channels());
    std::vector<float> output_ref(batch_size() * channels());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), [&]() { return i8dist(rng); });
      std::fill(output.begin(), output.end(), INT8_C(0xA5));

      // Compute reference results.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t c = 0; c < channels(); c++) {
          const float x = input_scale() * (int32_t(input[i * input_stride() + c]) - int32_t(input_zero_point() - 0x80));
          const float elu_x = std::signbit(x) ? alpha() * std::expm1(x) : x;
          const float scaled_elu_x = elu_x / output_scale();
          float y = scaled_elu_x;
          y = std::min<float>(y, int32_t(qmax() - 0x80) - int32_t(output_zero_point() - 0x80));
          y = std::max<float>(y, int32_t(qmin() - 0x80) - int32_t(output_zero_point() - 0x80));
          output_ref[i * channels() + c] = y + int32_t(output_zero_point() - 0x80);
        }
      }

      // Create, setup, run, and destroy ELU operator.
      ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
      xnn_operator_t elu_op = nullptr;

      ASSERT_EQ(xnn_status_success,
        xnn_create_elu_nc_qs8(
          channels(), input_stride(), output_stride(),
          alpha(),
          int8_t(input_zero_point() - 0x80), input_scale(),
          int8_t(output_zero_point() - 0x80), output_scale(),
          int8_t(qmin() - 0x80), int8_t(qmax() - 0x80),
          0, &elu_op));
      ASSERT_NE(nullptr, elu_op);

      // Smart pointer to automatically delete elu_op.
      std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_elu_op(elu_op, xnn_delete_operator);

      ASSERT_EQ(xnn_status_success, xnn_reshape_elu_nc_qs8(elu_op, batch_size(), /*threadpool=*/nullptr));
      ASSERT_EQ(xnn_status_success, xnn_setup_elu_nc_qs8(elu_op, input.data(), output.data()));
      ASSERT_EQ(xnn_status_success, xnn_run_operator(elu_op, /*threadpool=*/nullptr));

      // Verify results.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t c = 0; c < channels(); c++) {
          EXPECT_NEAR(float(int32_t(output[i * output_stride() + c])), output_ref[i * channels() + c], 0.6f);
        }
      }
    }
  }

  void TestRunF32() const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    std::uniform_real_distribution<float> f32dist(-20.0f, 20.0f);

    std::vector<float> input(XNN_EXTRA_BYTES / sizeof(float) + (batch_size() - 1) * input_stride() + channels());
    std::vector<float> output((batch_size() - 1) * output_stride() + channels());
    std::vector<double> output_ref(batch_size() * channels());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), [&]() { return f32dist(rng); });
      std::fill(output.begin(), output.end(), std::nanf(""));

      // Compute reference results.
for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { const double x = double(input[i * input_stride() + c]); output_ref[i * channels() + c] = std::signbit(x) ? std::expm1(x) * alpha() : x; } } // Create, setup, run, and destroy ELU operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); ASSERT_EQ(xnn_status_success, xnn_run_elu_nc_f32( channels(), input_stride(), output_stride(), batch_size(), input.data(), output.data(), alpha(), 0, nullptr /* thread pool */)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { ASSERT_NEAR(output[i * output_stride() + c], output_ref[i * channels() + c], std::abs(output_ref[i * channels() + c]) * 1.0e-5) << "at batch " << i << " / " << batch_size() << ", channel " << c << " / " << channels() << ", input " << input[i * input_stride() + c] << ", alpha " << alpha(); } } } } private: size_t batch_size_{1}; size_t channels_{1}; size_t input_stride_{0}; size_t output_stride_{0}; float alpha_{0.5f}; float input_scale_{0.75f}; uint8_t input_zero_point_{121}; float output_scale_{0.75f}; uint8_t output_zero_point_{121}; uint8_t qmin_{0}; uint8_t qmax_{255}; size_t iterations_{15}; };
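A usage sketch for the tester above (hypothetical test name). Note that alpha() asserts 0 < alpha < 1, so the swept values stay inside that range:

TEST(ELU_NC_F32, varying_alpha) {
  for (float alpha : {0.1f, 0.3f, 0.7f}) {
    ELUOperatorTester()
      .batch_size(2)
      .channels(32)
      .alpha(alpha)
      .iterations(3)
      .TestF32();
  }
}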
12,948
33.530667
119
h
XNNPACK
XNNPACK-master/test/fftr-microkernel-tester.h
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cmath> #include <cstddef> #include <cstdlib> #include <random> #include <vector> #include <xnnpack.h> #include <xnnpack/aligned-allocator.h> #include <xnnpack/microfnptr.h> #include <xnnpack/math.h> static const int16_t xnn_reference_table_fftr_twiddle[256] = { -402,-32765, -804,-32757, -1206,-32745, -1608,-32728, -2009,-32705, -2410,-32678, -2811,-32646, -3212,-32609, -3612,-32567, -4011,-32521, -4410,-32469, -4808,-32412, -5205,-32351, -5602,-32285, -5998,-32213, -6393,-32137, -6786,-32057, -7179,-31971, -7571,-31880, -7962,-31785, -8351,-31685, -8739,-31580, -9126,-31470, -9512,-31356, -9896,-31237, -10278,-31113, -10659,-30985, -11039,-30852, -11417,-30714, -11793,-30571, -12167,-30424, -12539,-30273, -12910,-30117, -13279,-29956, -13645,-29791, -14010,-29621, -14372,-29447, -14732,-29268, -15090,-29085, -15446,-28898, -15800,-28706, -16151,-28510, -16499,-28310, -16846,-28105, -17189,-27896, -17530,-27683, -17869,-27466, -18204,-27245, -18537,-27019, -18868,-26790, -19195,-26556, -19519,-26319, -19841,-26077, -20159,-25832, -20475,-25582, -20787,-25329, -21096,-25072, -21403,-24811, -21705,-24547, -22005,-24279, -22301,-24007, -22594,-23731, -22884,-23452, -23170,-23170, -23452,-22884, -23731,-22594, -24007,-22301, -24279,-22005, -24547,-21705, -24811,-21403, -25072,-21096, -25329,-20787, -25582,-20475, -25832,-20159, -26077,-19841, -26319,-19519, -26556,-19195, -26790,-18868, -27019,-18537, -27245,-18204, -27466,-17869, -27683,-17530, -27896,-17189, -28105,-16846, -28310,-16499, -28510,-16151, -28706,-15800, -28898,-15446, -29085,-15090, -29268,-14732, -29447,-14372, -29621,-14010, -29791,-13645, -29956,-13279, -30117,-12910, -30273,-12539, -30424,-12167, -30571,-11793, -30714,-11417, -30852,-11039, -30985,-10659, -31113,-10278, -31237, -9896, -31356, -9512, -31470, -9126, -31580, -8739, -31685, -8351, -31785, -7962, -31880, -7571, -31971, -7179, -32057, -6786, -32137, -6393, -32213, -5998, -32285, -5602, -32351, -5205, -32412, -4808, -32469, -4410, -32521, -4011, -32567, -3612, -32609, -3212, -32646, -2811, -32678, -2410, -32705, -2009, -32728, -1608, -32745, -1206, -32757, -804, -32765, -402, -32767, 0, }; static void xnn_cs16_fftr_reference( size_t samples, const int16_t* input, int16_t* output, const int16_t* twiddle) { assert(samples >= 2); assert(samples % 2 == 0); assert(input != NULL); assert(output != NULL); assert(twiddle != NULL); const int16_t* il = input; const int16_t* ir = input + samples * 2; int32_t vdcr = (int32_t) il[0]; int32_t vdci = (int32_t) il[1]; il += 2; vdcr = math_asr_s32(vdcr * 16383 + 16384, 15); vdci = math_asr_s32(vdci * 16383 + 16384, 15); int16_t* outl = output; int16_t* outr = output + samples * 2; outl[0] = vdcr + vdci; outl[1] = 0; outl += 2; outr[0] = vdcr - vdci; outr[1] = 0; samples >>= 1; do { int32_t vilr = (int32_t) il[0]; int32_t vili = (int32_t) il[1]; il += 2; ir -= 2; int32_t virr = (int32_t) ir[0]; int32_t viri = (int32_t) ir[1]; const int32_t vtwr = twiddle[0]; const int32_t vtwi = twiddle[1]; twiddle += 2; vilr = math_asr_s32(vilr * 16383 + 16384, 15); vili = math_asr_s32(vili * 16383 + 16384, 15); virr = math_asr_s32(virr * 16383 + 16384, 15); viri = math_asr_s32(viri * 16383 + 16384, 15); const int16_t vacc1r = vilr + virr; const int16_t vacc1i = vili - viri; const int16_t vacc2r = 
vilr - virr; const int16_t vacc2i = vili + viri; const int32_t vaccr = math_asr_s32(vacc2r * vtwr - vacc2i * vtwi + 16384, 15); const int32_t vacci = math_asr_s32(vacc2r * vtwi + vacc2i * vtwr + 16384, 15); outl[0] = math_asr_s32(vacc1r + vaccr, 1); outl[1] = math_asr_s32(vacc1i + vacci, 1); outl += 2; outr -= 2; outr[0] = math_asr_s32(vacc1r - vaccr, 1); outr[1] = math_asr_s32(vacci - vacc1i, 1); } while (--samples != 0); } class FftrMicrokernelTester { public: inline FftrMicrokernelTester& samples(size_t samples) { assert(samples != 0); this->samples_ = samples; return *this; } inline size_t samples() const { return this->samples_; } inline FftrMicrokernelTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void Test(xnn_cs16_fftr_ukernel_fn fftr) const { std::random_device random_device; auto rng = std::mt19937(random_device()); auto i16rng = std::bind(std::uniform_int_distribution<int16_t>(), std::ref(rng)); const size_t sample_size = samples() * 2 + 2; std::vector<int16_t> y(sample_size); std::vector<int16_t> y_ref(sample_size); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(y.begin(), y.end(), std::ref(i16rng)); std::copy(y.begin(), y.end(), y_ref.begin()); // Compute reference results. xnn_cs16_fftr_reference(samples(), y_ref.data(), y_ref.data(), xnn_reference_table_fftr_twiddle); // Call optimized micro-kernel. fftr(samples(), y.data(), xnn_reference_table_fftr_twiddle); // Verify results. for (size_t n = 0; n < sample_size; n++) { EXPECT_EQ(y[n], y_ref[n]) << "at sample " << n << " / " << sample_size; } } } private: size_t samples_{256}; size_t iterations_{15}; };
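Because xnn_reference_table_fftr_twiddle holds 128 complex twiddle factors and the reference loop consumes samples()/2 of them, this tester is meant to run with samples() == 256 (its default). A usage sketch (the kernel symbol is assumed to be one of XNNPACK's scalar cs16-fftr microkernels; treat the exact name as an assumption):

TEST(CS16_FFTR__SCALAR_X1, samples_256) {
  FftrMicrokernelTester()
    .samples(256)
    .iterations(3)
    .Test(xnn_cs16_fftr_ukernel__scalar_x1);
}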
5,723
31.896552
103
h
XNNPACK
XNNPACK-master/test/fill-microkernel-tester.h
// Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <array> #include <algorithm> #include <cassert> #include <cstddef> #include <cstdlib> #include <functional> #include <random> #include <vector> #include <xnnpack.h> #include <xnnpack/microfnptr.h> class FillMicrokernelTester { public: inline FillMicrokernelTester& rows(size_t rows) { assert(rows != 0); this->rows_ = rows; return *this; } inline size_t rows() const { return this->rows_; } inline FillMicrokernelTester& channels(size_t channels) { assert(channels != 0); this->channels_ = channels; return *this; } inline size_t channels() const { return this->channels_; } inline FillMicrokernelTester& output_stride(size_t output_stride) { assert(output_stride != 0); this->output_stride_ = output_stride; return *this; } inline size_t output_stride() const { if (this->output_stride_ == 0) { return channels(); } else { return this->output_stride_; } } inline FillMicrokernelTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void Test(xnn_fill_ukernel_fn fill) const { ASSERT_GE(output_stride(), channels()); std::random_device random_device; auto rng = std::mt19937(random_device()); auto u8rng = std::bind(std::uniform_int_distribution<uint32_t>(0, std::numeric_limits<uint8_t>::max()), rng); std::vector<uint8_t> output((rows() - 1) * output_stride() + channels()); std::vector<uint8_t> output_copy(output.size()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(output.begin(), output.end(), std::ref(u8rng)); std::copy(output.cbegin(), output.cend(), output_copy.begin()); std::array<uint8_t, 4> fill_pattern; std::generate(fill_pattern.begin(), fill_pattern.end(), std::ref(u8rng)); uint32_t fill_value = 0; memcpy(&fill_value, fill_pattern.data(), sizeof(fill_value)); // Call optimized micro-kernel. fill( rows(), channels() * sizeof(uint8_t), output.data(), output_stride() * sizeof(uint8_t), fill_value); // Verify results. for (size_t i = 0; i < rows(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_EQ(uint32_t(output[i * output_stride() + c]), uint32_t(fill_pattern[c % fill_pattern.size()])) << "at row " << i << " / " << rows() << ", channel " << c << " / " << channels() << ", fill value 0x" << std::hex << std::setw(8) << std::setfill('0') << fill_value << ", output value 0x" << std::hex << std::setw(8) << std::setfill('0') << output[i * output_stride() + c]; } } for (size_t i = 0; i + 1 < rows(); i++) { for (size_t c = channels(); c < output_stride(); c++) { EXPECT_EQ(uint32_t(output[i * output_stride() + c]), uint32_t(output_copy[i * output_stride() + c])) << "at row " << i << " / " << rows() << ", channel " << c << " / " << channels() << ", original value 0x" << std::hex << std::setw(8) << std::setfill('0') << output_copy[i * output_stride() + c] << ", output value 0x" << std::hex << std::setw(8) << std::setfill('0') << output[i * output_stride() + c]; } } } } private: size_t rows_{1}; size_t channels_{1}; size_t output_stride_{0}; size_t iterations_{15}; };
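A usage sketch for the tester above (hypothetical test name; the kernel symbol is assumed to be XNNPACK's scalar fill microkernel). The tester also checks that bytes between channels() and output_stride() are left untouched, which is why a stride larger than the channel count is interesting:

TEST(XX_FILL__SCALAR_X16, rows_with_output_stride) {
  FillMicrokernelTester()
    .rows(3)
    .channels(37)
    .output_stride(41)
    .Test(xnn_xx_fill_ukernel__scalar_x16);
}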
3,708
29.401639
119
h
XNNPACK
XNNPACK-master/test/filterbank-accumulate-microkernel-tester.h
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cmath> #include <cstddef> #include <cstdlib> #include <numeric> #include <random> #include <vector> #include <xnnpack.h> #include <xnnpack/aligned-allocator.h> #include <xnnpack/microfnptr.h> class FilterbankAccumulateMicrokernelTester { public: inline FilterbankAccumulateMicrokernelTester& rows(size_t rows) { assert(rows != 0); this->rows_ = rows; return *this; } inline size_t rows() const { return this->rows_; } inline FilterbankAccumulateMicrokernelTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void Test(xnn_u32_filterbank_accumulate_ukernel_fn filterbank_accumulate) const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_int_distribution<int32_t> u8dist(1, 10); std::uniform_int_distribution<uint16_t> u16dist; std::uniform_int_distribution<uint32_t> u32dist; std::vector<uint8_t> filterbank_widths(rows() + 1); std::vector<uint64_t> output(rows()); std::vector<uint64_t> output_ref(rows()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(filterbank_widths.begin(), filterbank_widths.end(), [&] { return u8dist(rng); }); const size_t num_channels = std::accumulate(filterbank_widths.cbegin(), filterbank_widths.cend(), 0); std::vector<uint32_t> input(num_channels); std::vector<uint16_t> weights(num_channels * 2); std::generate(input.begin(), input.end(), [&] { return u32dist(rng); }); std::generate(weights.begin(), weights.end(), [&] { return u16dist(rng); }); std::fill(output.begin(), output.end(), UINT64_C(0xCAFEB0BADEADBEAF)); uint64_t weight_accumulator = 0; uint64_t unweight_accumulator = 0; size_t i = 0; for (size_t m = 0; m <= rows(); m++) { const size_t weight_width = filterbank_widths[m]; for (size_t n = 0; n < weight_width; n++) { weight_accumulator += uint64_t(input[i]) * uint64_t(weights[i * 2]); unweight_accumulator += uint64_t(input[i]) * uint64_t(weights[i * 2 + 1]); i += 1; } if (m != 0) { output_ref[m - 1] = weight_accumulator; } weight_accumulator = unweight_accumulator; unweight_accumulator = 0; } // Call optimized micro-kernel. filterbank_accumulate(rows(), input.data(), filterbank_widths.data(), weights.data(), output.data()); // Verify results. for (size_t m = 0; m < rows(); m++) { EXPECT_EQ(output[m], output_ref[m]) << "at row " << m << " / " << rows(); } } } private: size_t rows_{1}; size_t iterations_{15}; };
3,029
29.918367
107
h
XNNPACK
XNNPACK-master/test/filterbank-subtract-microkernel-tester.h
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <gtest/gtest.h>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <numeric>
#include <random>
#include <vector>

#include <xnnpack.h>
#include <xnnpack/aligned-allocator.h>
#include <xnnpack/microfnptr.h>

class FilterbankSubtractMicrokernelTester {
 public:
  inline FilterbankSubtractMicrokernelTester& batch(size_t batch) {
    assert(batch != 0);
    this->batch_ = batch;
    return *this;
  }

  inline size_t batch() const {
    return this->batch_;
  }

  inline FilterbankSubtractMicrokernelTester& inplace(bool inplace) {
    this->inplace_ = inplace;
    return *this;
  }

  inline bool inplace() const {
    return this->inplace_;
  }

  inline FilterbankSubtractMicrokernelTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

  void Test(xnn_u32_filterbank_subtract_ukernel_fn filterbank_subtract) const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto u32rng = std::bind(std::uniform_int_distribution<uint32_t>(), std::ref(rng));

    const uint32_t smoothing = 655;
    const uint32_t alternate_smoothing = 655;
    const uint32_t one_minus_smoothing = 15729;
    const uint32_t alternate_one_minus_smoothing = 15729;
    const uint32_t min_signal_remaining = 819;
    const uint32_t smoothing_bits = 0;
    const uint32_t spectral_subtraction_bits = 14;

    std::vector<uint32_t, AlignedAllocator<uint32_t, 64>> x(batch() + XNN_EXTRA_BYTES / sizeof(uint32_t));
    std::vector<uint32_t, AlignedAllocator<uint32_t, 64>> noise(batch() + XNN_EXTRA_BYTES / sizeof(uint32_t));
    std::vector<uint32_t, AlignedAllocator<uint32_t, 64>> noise_ref(batch() + XNN_EXTRA_BYTES / sizeof(uint32_t));
    std::vector<uint32_t, AlignedAllocator<uint32_t, 64>> y(batch() + (inplace() ? XNN_EXTRA_BYTES / sizeof(uint32_t) : 0));
    std::vector<uint32_t, AlignedAllocator<uint32_t, 64>> y_ref(batch());
    const uint32_t* x_data = inplace() ? y.data() : x.data();

    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(x.begin(), x.end(), std::ref(u32rng));
      std::iota(noise.begin(), noise.end(), 0);
      std::iota(noise_ref.begin(), noise_ref.end(), 0);
      std::generate(y.begin(), y.end(), std::ref(u32rng));
      std::generate(y_ref.begin(), y_ref.end(), std::ref(u32rng));

      for (size_t n = 0; n < batch(); n += 2) {
        const uint32_t vinput0 = x_data[n + 0];
        const uint32_t vinput1 = x_data[n + 1];
        uint32_t vnoise_estimate0 = noise_ref[n + 0];
        uint32_t vnoise_estimate1 = noise_ref[n + 1];

        // Scale up signal for smoothing filter computation.
        const uint32_t vsignal_scaled_up0 = vinput0 << smoothing_bits;
        const uint32_t vsignal_scaled_up1 = vinput1 << smoothing_bits;
        vnoise_estimate0 = (((uint64_t) (vsignal_scaled_up0) * smoothing) +
                            ((uint64_t) (vnoise_estimate0) * one_minus_smoothing)) >> spectral_subtraction_bits;
        vnoise_estimate1 = (((uint64_t) (vsignal_scaled_up1) * alternate_smoothing) +
                            ((uint64_t) (vnoise_estimate1) * alternate_one_minus_smoothing)) >> spectral_subtraction_bits;
        noise_ref[n + 0] = vnoise_estimate0;
        noise_ref[n + 1] = vnoise_estimate1;

        // Make sure that we can't get a negative value for the signal - estimate.
const uint32_t estimate_scaled_up0 = std::min(vnoise_estimate0, vsignal_scaled_up0); const uint32_t estimate_scaled_up1 = std::min(vnoise_estimate1, vsignal_scaled_up1); const uint32_t vsubtracted0 = (vsignal_scaled_up0 - estimate_scaled_up0) >> smoothing_bits; const uint32_t vsubtracted1 = (vsignal_scaled_up1 - estimate_scaled_up1) >> smoothing_bits; const uint32_t vfloor0 = ((uint64_t) (vinput0) * min_signal_remaining) >> spectral_subtraction_bits; const uint32_t vfloor1 = ((uint64_t) (vinput1) * min_signal_remaining) >> spectral_subtraction_bits; const uint32_t vout0 = std::max(vsubtracted0, vfloor0); const uint32_t vout1 = std::max(vsubtracted1, vfloor1); y_ref[n + 0] = vout0; y_ref[n + 1] = vout1; } // Call optimized micro-kernel. filterbank_subtract(batch(), x_data, smoothing, alternate_smoothing, one_minus_smoothing, alternate_one_minus_smoothing, min_signal_remaining, smoothing_bits, spectral_subtraction_bits, noise.data(), y.data()); // Verify results. for (size_t n = 0; n < batch(); n++) { EXPECT_EQ(y[n], y_ref[n]) << "at n " << n << " / " << batch(); EXPECT_EQ(noise[n], noise_ref[n]) << "at n " << n << " / " << batch(); } } } private: size_t batch_{48}; bool inplace_{false}; size_t iterations_{15}; };
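Since the reference loop above consumes two elements per step, batch() must stay even. A usage sketch (hypothetical test name; the kernel symbol is assumed to be XNNPACK's scalar x2 variant):

TEST(U32_FILTERBANK_SUBTRACT__SCALAR_X2, inplace) {
  FilterbankSubtractMicrokernelTester()
    .batch(24)
    .inplace(true)
    .Test(xnn_u32_filterbank_subtract_ukernel__scalar_x2);
}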
5,103
36.529412
124
h
XNNPACK
XNNPACK-master/test/gemm-microkernel-tester.h
// Copyright (c) Facebook, Inc. and its affiliates. // All rights reserved. // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <cstddef> #include <cstdint> #include <vector> #include <xnnpack/microfnptr.h> #include <xnnpack/post-operation.h> #include <xnnpack/requantization.h> class GemmMicrokernelTester { public: inline GemmMicrokernelTester& mr(size_t mr) { this->mr_ = mr; return *this; } inline size_t mr() const { return this->mr_; } inline GemmMicrokernelTester& nr(size_t nr) { this->nr_ = nr; return *this; } inline size_t nr() const { return this->nr_; } inline GemmMicrokernelTester& kr(size_t kr) { this->kr_ = kr; return *this; } inline size_t kr() const { return this->kr_; } inline GemmMicrokernelTester& sr(size_t sr) { this->sr_ = sr; return *this; } inline size_t sr() const { return this->sr_; } inline GemmMicrokernelTester& m(size_t m) { this->m_ = m; return *this; } inline size_t m() const { return this->m_; } inline GemmMicrokernelTester& n(size_t n) { this->n_ = n; return *this; } inline size_t n() const { return this->n_; } inline GemmMicrokernelTester& k(size_t k) { this->k_ = k; return *this; } inline size_t k() const { return this->k_; } inline GemmMicrokernelTester& ks(size_t ks) { this->ks_ = ks; return *this; } inline size_t ks() const { return this->ks_; } inline size_t packed_k() const { return round_up_po2(k(), kr() * sr()); } inline size_t packed_n() const { return round_up(n(), nr()); } inline GemmMicrokernelTester& a_stride(size_t a_stride) { this->a_stride_ = a_stride; return *this; } inline size_t a_stride() const { return this->a_stride_ == 0 ? k() : this->a_stride_; } inline GemmMicrokernelTester& cm_stride(size_t cm_stride) { this->cm_stride_ = cm_stride; return *this; } inline size_t cm_stride() const { return this->cm_stride_ == 0 ? cn_stride() * ((n() - 1) / nr()) + (n() - 1) % nr() + 1 : this->cm_stride_; } inline GemmMicrokernelTester& cn_stride(size_t cn_stride) { this->cn_stride_ = cn_stride; return *this; } inline size_t cn_stride() const { return this->cn_stride_ == 0 ? 
nr() : this->cn_stride_; } inline GemmMicrokernelTester& a_zero_point(uint8_t a_zero_point) { this->a_zero_point_ = a_zero_point; return *this; } inline uint8_t a_zero_point() const { return this->a_zero_point_; } inline GemmMicrokernelTester& b_zero_point(uint8_t b_zero_point) { this->b_zero_point_ = b_zero_point; return *this; } inline uint8_t b_zero_point() const { return this->b_zero_point_; } inline GemmMicrokernelTester& qc4w_zero_point(int16_t qc4w_zero_point) { this->qc4w_zero_point_ = qc4w_zero_point; return *this; } inline int16_t qc4w_zero_point() const { return this->qc4w_zero_point_; } inline GemmMicrokernelTester& qmin(uint8_t qmin) { this->qmin_ = qmin; return *this; } inline uint8_t qmin() const { return this->qmin_; } inline GemmMicrokernelTester& qmax(uint8_t qmax) { this->qmax_ = qmax; return *this; } inline uint8_t qmax() const { return this->qmax_; } inline GemmMicrokernelTester& a_offset(size_t a_offset) { this->a_offset_ = a_offset; return *this; } inline size_t a_offset() const { return this->a_offset_; } inline GemmMicrokernelTester& zero_index(size_t zero_index) { this->zero_index_ = zero_index; return *this; } inline size_t zero_index() const { return this->zero_index_; } inline GemmMicrokernelTester& extended_weights(bool extended_weights) { this->extended_weights_ = extended_weights; return *this; } inline bool extended_weights() const { return this->extended_weights_; } inline GemmMicrokernelTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void Test( xnn_qu8_gemm_minmax_ukernel_fn gemm, xnn_init_qu8_conv_minmax_params_fn init_params, xnn_qu8_requantize_fn requantize) const; void Test( xnn_qu8_igemm_minmax_ukernel_fn igemm, xnn_init_qu8_conv_minmax_params_fn init_params, xnn_qu8_requantize_fn requantize); void Test( xnn_qs8_qc8w_gemm_minmax_ukernel_fn gemm, xnn_init_qs8_qc8w_conv_minmax_params_fn init_params, xnn_qs8_requantize_fn requantize) const; void Test( xnn_qs8_qc8w_igemm_minmax_ukernel_fn igemm, xnn_init_qs8_qc8w_conv_minmax_params_fn init_params, xnn_qs8_requantize_fn requantize) const; void Test( xnn_qs8_gemm_minmax_ukernel_fn gemm, xnn_init_qs8_conv_minmax_params_fn init_params, xnn_qs8_requantize_fn requantize) const; void Test( xnn_qd8_f32_qs8w_gemm_ukernel_fn gemm, xnn_init_f32_minmax_params_fn init_params) const; void Test( xnn_qs8_igemm_minmax_ukernel_fn igemm, xnn_init_qs8_conv_minmax_params_fn init_params, xnn_qs8_requantize_fn requantize) const; void Test(xnn_bf16_gemm_minmax_ukernel_fn gemm_minmax, xnn_init_bf16_minmax_params_fn init_params) const; void Test(xnn_f16_gemm_minmax_ukernel_fn gemm_minmax, xnn_init_f16_minmax_params_fn init_params) const; void Test(xnn_f16_igemm_minmax_ukernel_fn igemm_minmax, xnn_init_f16_minmax_params_fn init_params) const; void Test(xnn_f32_ppmm_minmax_ukernel_fn ppmm_minmax, xnn_init_f32_minmax_params_fn init_params) const; void Test(xnn_f32_gemm_ukernel_fn gemm) const; void Test(xnn_f32_gemm_relu_ukernel_fn gemm_relu) const; void Test(xnn_f32_gemm_minmax_ukernel_fn gemm_minmax, xnn_init_f32_minmax_params_fn init_params) const; void Test(xnn_f32_qc4w_gemm_minmax_ukernel_fn gemm_minmax, xnn_init_f32_qc4w_minmax_params_fn init_params) const; void Test(xnn_f32_qc8w_gemm_ukernel_fn gemm) const; void Test(xnn_f32_qc8w_gemm_relu_ukernel_fn gemm_relu) const; void Test(xnn_f32_qc8w_gemm_minmax_ukernel_fn gemm_minmax, xnn_init_f32_minmax_params_fn init_params) const; void Test(xnn_f32_gemminc_minmax_ukernel_fn 
gemminc, xnn_init_f32_minmax_params_fn init_params) const; void Test(xnn_f32_igemm_ukernel_fn igemm) const; void Test(xnn_f32_igemm_relu_ukernel_fn igemm_relu) const; void Test(xnn_f32_igemm_minmax_ukernel_fn igemm_minmax, xnn_init_f32_minmax_params_fn init_params) const; #if XNN_PLATFORM_JIT void Test( xnn_jit_gemm_code_generator_fn gemm_generator, xnn_init_f16_minmax_params_fn init_params) const; void Test( xnn_jit_igemm_code_generator_fn igemm_generator, xnn_init_f16_minmax_params_fn init_params) const; void Test( xnn_jit_gemm_code_generator_fn gemm_generator, xnn_init_f32_minmax_params_fn init_params) const; void Test( xnn_jit_igemm_code_generator_fn igemm_generator, xnn_init_f32_minmax_params_fn init_params) const; void Test( xnn_jit_gemm_code_generator_fn gemm_generator, xnn_init_qs8_qc8w_conv_minmax_params_fn init_params, xnn_qs8_requantize_fn requantize) const; void Test( xnn_jit_igemm_code_generator_fn igemm_generator, xnn_init_qs8_qc8w_conv_minmax_params_fn init_params, xnn_qs8_requantize_fn requantize) const; void Test( xnn_jit_gemm_code_generator_fn gemm_generator, xnn_init_qs8_conv_minmax_params_fn init_params, xnn_qs8_requantize_fn requantize) const; void Test( xnn_jit_igemm_code_generator_fn igemm_generator, xnn_init_qs8_conv_minmax_params_fn init_params, xnn_qs8_requantize_fn requantize) const; void Test( xnn_jit_gemm_code_generator_fn gemm_generator, xnn_init_f32_minmax_params_fn init_params, const std::vector<xnn_post_operation>& fused_operators) const; void Test( xnn_jit_igemm_code_generator_fn gemm_generator, xnn_init_f32_minmax_params_fn init_params, const std::vector<xnn_post_operation>& fused_operators) const; // Test that JIT generated code matches assembly. void Test( xnn_jit_gemm_code_generator_fn gemm_generator, xnn_init_f16_minmax_params_fn init_params, xnn_f16_gemm_minmax_ukernel_fn gemm_minmax) const; void Test( xnn_jit_igemm_code_generator_fn igemm_generator, xnn_init_f16_minmax_params_fn init_params, xnn_f16_igemm_minmax_ukernel_fn igemm_minmax) const; void Test( xnn_jit_gemm_code_generator_fn gemm_generator, xnn_init_f32_minmax_params_fn init_params, xnn_f32_gemm_minmax_ukernel_fn gemm_minmax) const; void Test( xnn_jit_igemm_code_generator_fn igemm_generator, xnn_init_f32_minmax_params_fn init_params, xnn_f32_igemm_minmax_ukernel_fn igemm_minmax) const; #endif // XNN_PLATFORM_JIT private: size_t mr_{1}; size_t nr_{1}; size_t kr_{1}; size_t sr_{1}; size_t m_{1}; size_t n_{1}; size_t k_{1}; size_t ks_{1}; size_t a_stride_{0}; size_t cm_stride_{0}; size_t cn_stride_{0}; uint8_t a_zero_point_{127}; uint8_t b_zero_point_{127}; int16_t qc4w_zero_point_{0}; uint8_t qmin_{0}; uint8_t qmax_{255}; size_t a_offset_{0}; size_t zero_index_{SIZE_MAX}; bool extended_weights_{false}; size_t iterations_{15}; };
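A usage sketch for the f32 minmax path of the tester above (the kernel and params-init symbols are taken from XNNPACK's scalar GEMM microkernels; treat the exact names as assumptions). The mr/nr/kr/sr tile parameters must match the microkernel being tested:

TEST(F32_GEMM_MINMAX_1X4__SCALAR, k_eq_1) {
  GemmMicrokernelTester()
    .mr(1).nr(4).kr(1).sr(1)
    .m(1).n(4).k(1)
    .Test(xnn_f32_gemm_minmax_ukernel_1x4__scalar,
          xnn_init_f32_minmax_scalar_params);
}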
9,344
25.398305
115
h
XNNPACK
XNNPACK-master/test/leaky-relu-operator-tester.h
// Copyright (c) Facebook, Inc. and its affiliates. // All rights reserved. // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cmath> #include <cstddef> #include <cstdlib> #include <limits> #include <random> #include <vector> #include <fp16/fp16.h> #include <xnnpack.h> static uint16_t flush_fp16_denormal_to_zero(uint16_t v) { return (v & UINT16_C(0x7C00)) == 0 ? v & UINT16_C(0x8000) : v; }; class LeakyReLUOperatorTester { public: inline LeakyReLUOperatorTester& channels(size_t channels) { assert(channels != 0); this->channels_ = channels; return *this; } inline size_t channels() const { return this->channels_; } inline LeakyReLUOperatorTester& input_stride(size_t input_stride) { assert(input_stride != 0); this->input_stride_ = input_stride; return *this; } inline size_t input_stride() const { if (this->input_stride_ == 0) { return this->channels_; } else { assert(this->input_stride_ >= this->channels_); return this->input_stride_; } } inline LeakyReLUOperatorTester& output_stride(size_t output_stride) { assert(output_stride != 0); this->output_stride_ = output_stride; return *this; } inline size_t output_stride() const { if (this->output_stride_ == 0) { return this->channels_; } else { assert(this->output_stride_ >= this->channels_); return this->output_stride_; } } inline LeakyReLUOperatorTester& batch_size(size_t batch_size) { assert(batch_size != 0); this->batch_size_ = batch_size; return *this; } inline size_t batch_size() const { return this->batch_size_; } inline LeakyReLUOperatorTester& negative_slope(float negative_slope) { assert(std::isnormal(negative_slope)); this->negative_slope_ = negative_slope; return *this; } inline float negative_slope() const { return this->negative_slope_; } inline LeakyReLUOperatorTester& input_scale(float input_scale) { assert(input_scale > 0.0f); assert(std::isnormal(input_scale)); this->input_scale_ = input_scale; return *this; } inline float input_scale() const { return this->input_scale_; } inline LeakyReLUOperatorTester& input_zero_point(int16_t input_zero_point) { this->input_zero_point_ = input_zero_point; return *this; } inline int16_t input_zero_point() const { return this->input_zero_point_; } inline LeakyReLUOperatorTester& output_scale(float output_scale) { assert(output_scale > 0.0f); assert(std::isnormal(output_scale)); this->output_scale_ = output_scale; return *this; } inline float output_scale() const { return this->output_scale_; } inline LeakyReLUOperatorTester& output_zero_point(int16_t output_zero_point) { this->output_zero_point_ = output_zero_point; return *this; } inline int16_t output_zero_point() const { return this->output_zero_point_; } inline LeakyReLUOperatorTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void TestF16() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(-1.0f, 1.0f); std::vector<uint16_t> input(XNN_EXTRA_BYTES / sizeof(uint16_t) + (batch_size() - 1) * input_stride() + channels()); std::vector<uint16_t> output((batch_size() - 1) * output_stride() + channels()); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), 
[&]() { return flush_fp16_denormal_to_zero(fp16_ieee_from_fp32_value(f32dist(rng))); }); std::fill(output.begin(), output.end(), UINT16_C(0x7E00) /* NaN */); const uint16_t negative_slope_as_half = fp16_ieee_from_fp32_value(negative_slope()); const float negative_slope_as_float = fp16_ieee_to_fp32_value(negative_slope_as_half); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { const float x = fp16_ieee_to_fp32_value(input[i * input_stride() + c]); const float y = std::signbit(x) ? x * negative_slope_as_float : x; output_ref[i * channels() + c] = y; } } // Create, setup, run, and destroy Leaky ReLU operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t leaky_relu_op = nullptr; const xnn_status status = xnn_create_leaky_relu_nc_f16( channels(), input_stride(), output_stride(), negative_slope(), 0, &leaky_relu_op); if (status == xnn_status_unsupported_hardware) { GTEST_SKIP(); } ASSERT_EQ(xnn_status_success, status); ASSERT_NE(nullptr, leaky_relu_op); // Smart pointer to automatically delete leaky_relu_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_leaky_relu_op(leaky_relu_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_leaky_relu_nc_f16(leaky_relu_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_leaky_relu_nc_f16(leaky_relu_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(leaky_relu_op, /*threadpool=*/nullptr)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { ASSERT_NEAR( fp16_ieee_to_fp32_value(output[i * output_stride() + c]), output_ref[i * channels() + c], std::max(2.0e-4f, std::abs(output_ref[i * channels() + c]) * 1.0e-3f)) << "at position " << i << " / " << batch_size() << ", channel " << c << " / " << channels(); } } } } void TestF32() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(-1.0f, 1.0f); std::vector<float> input(XNN_EXTRA_BYTES / sizeof(float) + (batch_size() - 1) * input_stride() + channels()); std::vector<float> output((batch_size() - 1) * output_stride() + channels()); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return f32dist(rng); }); std::fill(output.begin(), output.end(), std::nanf("")); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { const float x = input[i * input_stride() + c]; const float y = std::signbit(x) ? x * negative_slope() : x; output_ref[i * channels() + c] = y; } } // Create, setup, run, and destroy Leaky ReLU operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t leaky_relu_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_leaky_relu_nc_f32( channels(), input_stride(), output_stride(), negative_slope(), 0, &leaky_relu_op)); ASSERT_NE(nullptr, leaky_relu_op); // Smart pointer to automatically delete leaky_relu_op. 
std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_leaky_relu_op(leaky_relu_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_leaky_relu_nc_f32(leaky_relu_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_leaky_relu_nc_f32(leaky_relu_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(leaky_relu_op, /*threadpool=*/nullptr)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_EQ(output[i * output_stride() + c], output_ref[i * channels() + c]) << "at batch " << i << " / " << batch_size() << ", channel " << c << " / " << channels() << ", input " << input[i * input_stride() + c] << ", negative slope " << negative_slope(); } } } } void TestQS8() const { ASSERT_GE(input_zero_point(), std::numeric_limits<int8_t>::min()); ASSERT_LE(input_zero_point(), std::numeric_limits<int8_t>::max()); ASSERT_GE(output_zero_point(), std::numeric_limits<int8_t>::min()); ASSERT_LE(output_zero_point(), std::numeric_limits<int8_t>::max()); std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_int_distribution<int32_t> i8dist( std::numeric_limits<int8_t>::min(), std::numeric_limits<int8_t>::max()); std::vector<int8_t> input(XNN_EXTRA_BYTES / sizeof(int8_t) + (batch_size() - 1) * input_stride() + channels()); std::vector<int8_t> output((batch_size() - 1) * output_stride() + channels()); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return i8dist(rng); }); std::fill(output.begin(), output.end(), INT8_C(0xA5)); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { const float x = input_scale() * (int32_t(input[i * input_stride() + c]) - input_zero_point()); float y = (x < 0.0f ? x * negative_slope() : x) / output_scale() + float(output_zero_point()); y = std::max<float>(y, std::numeric_limits<int8_t>::min()); y = std::min<float>(y, std::numeric_limits<int8_t>::max()); output_ref[i * channels() + c] = y; } } // Create, setup, run, and destroy Leaky ReLU operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t leaky_relu_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_leaky_relu_nc_qs8( channels(), input_stride(), output_stride(), negative_slope(), input_zero_point(), input_scale(), output_zero_point(), output_scale(), 0, &leaky_relu_op)); ASSERT_NE(nullptr, leaky_relu_op); // Smart pointer to automatically delete leaky_relu_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_leaky_relu_op(leaky_relu_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_leaky_relu_nc_qs8(leaky_relu_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_leaky_relu_nc_qs8(leaky_relu_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(leaky_relu_op, /*threadpool=*/nullptr)); // Verify results. 
for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_NEAR(float(int32_t(output[i * output_stride() + c])), output_ref[i * channels() + c], 0.9f) << "at batch " << i << " / " << batch_size() << ", channel " << c << " / " << channels() << ", input " << int32_t(input[i * input_stride() + c]) << ", input zero point " << input_zero_point() << ", output zero point " << output_zero_point() << ", positive input-to-output ratio " << (input_scale() / output_scale()) << ", negative input-to-output ratio " << (input_scale() / output_scale() * negative_slope()); } } } } void TestQU8() const { ASSERT_GE(input_zero_point(), std::numeric_limits<uint8_t>::min()); ASSERT_LE(input_zero_point(), std::numeric_limits<uint8_t>::max()); ASSERT_GE(output_zero_point(), std::numeric_limits<uint8_t>::min()); ASSERT_LE(output_zero_point(), std::numeric_limits<uint8_t>::max()); std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_int_distribution<int32_t> u8dist( std::numeric_limits<uint8_t>::min(), std::numeric_limits<uint8_t>::max()); std::vector<uint8_t> input(XNN_EXTRA_BYTES / sizeof(uint8_t) + (batch_size() - 1) * input_stride() + channels()); std::vector<uint8_t> output((batch_size() - 1) * output_stride() + channels()); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return u8dist(rng); }); std::fill(output.begin(), output.end(), UINT8_C(0xA5)); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { const float x = input_scale() * (int32_t(input[i * input_stride() + c]) - input_zero_point()); float y = (x < 0.0f ? x * negative_slope() : x) / output_scale() + float(output_zero_point()); y = std::max<float>(y, std::numeric_limits<uint8_t>::min()); y = std::min<float>(y, std::numeric_limits<uint8_t>::max()); output_ref[i * channels() + c] = y; } } // Create, setup, run, and destroy Leaky ReLU operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t leaky_relu_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_leaky_relu_nc_qu8( channels(), input_stride(), output_stride(), negative_slope(), input_zero_point(), input_scale(), output_zero_point(), output_scale(), 0, &leaky_relu_op)); ASSERT_NE(nullptr, leaky_relu_op); // Smart pointer to automatically delete leaky_relu_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_leaky_relu_op(leaky_relu_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_leaky_relu_nc_qu8(leaky_relu_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_leaky_relu_nc_qu8(leaky_relu_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(leaky_relu_op, /*threadpool=*/nullptr)); // Verify results. 
for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_NEAR(float(int32_t(output[i * output_stride() + c])), output_ref[i * channels() + c], 0.9f) << "at batch " << i << " / " << batch_size() << ", channel " << c << " / " << channels() << ", input " << int32_t(input[i * input_stride() + c]) << ", input zero point " << input_zero_point() << ", output zero point " << output_zero_point() << ", positive input-to-output ratio " << (input_scale() / output_scale()) << ", negative input-to-output ratio " << (input_scale() / output_scale() * negative_slope()); } } } } void TestRunF32() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(-1.0f, 1.0f); std::vector<float> input(XNN_EXTRA_BYTES / sizeof(float) + (batch_size() - 1) * input_stride() + channels()); std::vector<float> output((batch_size() - 1) * output_stride() + channels()); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return f32dist(rng); }); std::fill(output.begin(), output.end(), std::nanf("")); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { const float x = input[i * input_stride() + c]; const float y = std::signbit(x) ? x * negative_slope() : x; output_ref[i * channels() + c] = y; } } // Create, setup, run, and destroy Leaky ReLU operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); ASSERT_EQ(xnn_status_success, xnn_run_leaky_relu_nc_f32( channels(), input_stride(), output_stride(), batch_size(), input.data(), output.data(), negative_slope(), 0, nullptr /* thread pool */)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_EQ(output[i * output_stride() + c], output_ref[i * channels() + c]) << "at batch " << i << " / " << batch_size() << ", channel " << c << " / " << channels() << ", input " << input[i * input_stride() + c] << ", negative slope " << negative_slope(); } } } } private: size_t batch_size_{1}; size_t channels_{1}; size_t input_stride_{0}; size_t output_stride_{0}; float negative_slope_{0.3f}; float output_scale_{0.75f}; int16_t output_zero_point_{53}; float input_scale_{1.25f}; int16_t input_zero_point_{41}; size_t iterations_{15}; };
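A usage sketch for the tester above (hypothetical test name). Unlike the ELU tester's alpha, negative_slope() only asserts that the value is a normal float, so slopes above 1 are also valid:

TEST(LEAKY_RELU_NC_F32, negative_slope_sweep) {
  for (float slope : {0.01f, 0.3f, 1.7f}) {
    LeakyReLUOperatorTester()
      .batch_size(2)
      .channels(19)
      .negative_slope(slope)
      .iterations(3)
      .TestF32();
  }
}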
17,305
38.692661
123
h
XNNPACK
XNNPACK-master/test/lut-microkernel-tester.h
// Copyright (c) Facebook, Inc. and its affiliates. // All rights reserved. // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <array> #include <cassert> #include <cstddef> #include <cstdlib> #include <functional> #include <limits> #include <random> #include <vector> #include <xnnpack.h> #include <xnnpack/microfnptr.h> class LUTMicrokernelTester { public: inline LUTMicrokernelTester& batch_size(size_t batch_size) { assert(batch_size != 0); this->batch_size_ = batch_size; return *this; } inline size_t batch_size() const { return this->batch_size_; } inline LUTMicrokernelTester& inplace(bool inplace) { this->inplace_ = inplace; return *this; } inline bool inplace() const { return this->inplace_; } inline LUTMicrokernelTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void Test(xnn_x8_lut_ukernel_fn lut) const { std::random_device random_device; auto rng = std::mt19937(random_device()); auto u8rng = std::bind( std::uniform_int_distribution<uint32_t>(0, std::numeric_limits<uint8_t>::max()), std::ref(rng)); std::vector<uint8_t> x(batch_size() + XNN_EXTRA_BYTES / sizeof(uint8_t)); XNN_ALIGN(64) std::array<uint8_t, 256> t; std::vector<uint8_t> y(batch_size() + (inplace() ? XNN_EXTRA_BYTES / sizeof(uint8_t) : 0)); std::vector<uint8_t> y_ref(batch_size()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(x.begin(), x.end(), std::ref(u8rng)); std::generate(t.begin(), t.end(), std::ref(u8rng)); if (inplace()) { std::generate(y.begin(), y.end(), std::ref(u8rng)); } else { std::fill(y.begin(), y.end(), 0xA5); } const uint8_t* x_data = x.data(); if (inplace()) { std::copy(y.cbegin(), y.cend(), x.begin()); x_data = y.data(); } // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { y_ref[i] = t[x_data[i]]; } // Call optimized micro-kernel. lut(batch_size(), x_data, y.data(), t.data()); // Verify results. for (size_t i = 0; i < batch_size(); i++) { EXPECT_EQ(uint32_t(y_ref[i]), uint32_t(y[i])) << "at position " << i << " / " << batch_size() << ", input " << uint32_t(x[i]); } } } private: size_t batch_size_{1}; bool inplace_{false}; size_t iterations_{15}; };
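A usage sketch for the tester above (hypothetical test name; the kernel symbol is assumed to be one of XNNPACK's scalar x8-lut microkernels). The inplace(true) path makes the kernel read and write the same buffer:

TEST(X8_LUT__SCALAR_X4, batch_div_4_inplace) {
  LUTMicrokernelTester()
    .batch_size(32)
    .inplace(true)
    .Test(xnn_x8_lut_ukernel__scalar_x4);
}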
2,714
25.359223
102
h
XNNPACK
XNNPACK-master/test/lut-norm-microkernel-tester.h
// Copyright (c) Facebook, Inc. and its affiliates. // All rights reserved. // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cstddef> #include <cstdlib> #include <functional> #include <limits> #include <random> #include <vector> #include <xnnpack/microfnptr.h> class LUTNormMicrokernelTester { public: inline LUTNormMicrokernelTester& n(size_t n) { assert(n != 0); this->n_ = n; return *this; } inline size_t n() const { return this->n_; } inline LUTNormMicrokernelTester& inplace(bool inplace) { this->inplace_ = inplace; return *this; } inline bool inplace() const { return this->inplace_; } inline LUTNormMicrokernelTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void Test(xnn_u8_lut32norm_ukernel_fn lutnorm) const { std::random_device random_device; auto rng = std::mt19937(random_device()); auto u8rng = std::bind(std::uniform_int_distribution<uint32_t>(0, std::numeric_limits<uint8_t>::max()), rng); auto u32rng = std::bind( std::uniform_int_distribution<uint32_t>(1, std::numeric_limits<uint32_t>::max() / (257 * n())), rng); std::vector<uint8_t> x(n()); std::vector<uint32_t> t(256); std::vector<uint8_t> y(n()); std::vector<float> y_ref(n()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(x.begin(), x.end(), std::ref(u8rng)); std::generate(t.begin(), t.end(), std::ref(u32rng)); if (inplace()) { std::generate(y.begin(), y.end(), std::ref(u8rng)); } else { std::fill(y.begin(), y.end(), 0xA5); } const uint8_t* x_data = inplace() ? y.data() : x.data(); // Compute reference results. uint32_t sum = 0; for (size_t i = 0; i < n(); i++) { sum += t[x_data[i]]; } for (size_t i = 0; i < n(); i++) { y_ref[i] = 256.0f * float(t[x_data[i]]) / float(sum); y_ref[i] = std::min(y_ref[i], 255.0f); } // Call optimized micro-kernel. lutnorm(n(), x_data, t.data(), y.data()); // Verify results. for (size_t i = 0; i < n(); i++) { EXPECT_NEAR(y_ref[i], float(y[i]), 0.5f) << "at position " << i << ", n = " << n() << ", sum = " << sum; } } } private: size_t n_{1}; bool inplace_{false}; size_t iterations_{15}; };
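A usage sketch for the tester above (hypothetical test name; the kernel symbol is assumed to be XNNPACK's scalar u8-lut32norm microkernel):

TEST(U8_LUT32NORM__SCALAR, n_gt_1) {
  LUTNormMicrokernelTester()
    .n(37)
    .Test(xnn_u8_lut32norm_ukernel__scalar);
}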
2,662
24.854369
113
h
XNNPACK
XNNPACK-master/test/mock-allocator.h
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <cstring>  // For memcpy.

#include <xnnpack/allocator.h>

#include <gmock/gmock.h>

namespace xnnpack {

class MockAllocator : public xnn_allocator {
 public:
  MockAllocator() {
    // Set up calls to perform actual memory alloc/realloc/free by delegating to
    // xnn_default_allocator.
    ON_CALL(*this, allocate).WillByDefault([](void* context, size_t size) {
      return xnn_default_allocator.allocate(context, size);
    });
    ON_CALL(*this, reallocate)
        .WillByDefault([](void* context, void* pointer, size_t size) {
          return xnn_default_allocator.reallocate(context, pointer, size);
        });
    ON_CALL(*this, deallocate)
        .WillByDefault([](void* context, void* pointer) {
          return xnn_default_allocator.deallocate(context, pointer);
        });
    ON_CALL(*this, aligned_allocate)
        .WillByDefault([](void* context, size_t alignment, size_t size) {
          return xnn_default_allocator.aligned_allocate(context, alignment, size);
        });
    ON_CALL(*this, aligned_deallocate)
        .WillByDefault([](void* context, void* pointer) {
          return xnn_default_allocator.aligned_deallocate(context, pointer);
        });
  }

  MOCK_METHOD(void*, allocate, (void* context, size_t size));
  MOCK_METHOD(void*, reallocate, (void* context, void* pointer, size_t size));
  MOCK_METHOD(void, deallocate, (void* context, void* pointer));
  MOCK_METHOD(void*, aligned_allocate, (void* context, size_t alignment, size_t size));
  MOCK_METHOD(void, aligned_deallocate, (void* context, void* pointer));
};

static MockAllocator* mock_allocator_;

const struct xnn_allocator mock_allocator_wrapper_ = {
  /*context=*/nullptr,
  /*allocate=*/[](void* context, size_t size) -> void* {
    return mock_allocator_->allocate(context, size);
  },
  /*reallocate=*/[](void* context, void* pointer, size_t size) -> void* {
    return mock_allocator_->reallocate(context, pointer, size);
  },
  /*deallocate=*/[](void* context, void* pointer) -> void {
    return mock_allocator_->deallocate(context, pointer);
  },
  /*aligned_allocate=*/[](void* context, size_t alignment, size_t size) -> void* {
    return mock_allocator_->aligned_allocate(context, alignment, size);
  },
  /*aligned_deallocate=*/[](void* context, void* pointer) -> void {
    // Delegate to the mock, like the other callbacks, so that expectations on
    // aligned_deallocate are observed as well.
    return mock_allocator_->aligned_deallocate(context, pointer);
  },
};

/// Replaces the memory allocator with the given mock.
/// The allocator must be restored as soon as the lifetime of the mock ends.
inline void SetUpMockAllocator(MockAllocator* mock_allocator) {
  mock_allocator_ = mock_allocator;
  memcpy(&xnn_params.allocator, &mock_allocator_wrapper_, sizeof(struct xnn_allocator));
}

/// Restores the default XNNPACK memory allocator.
inline void RestoreDefaultAllocator(MockAllocator* mock_allocator) {
  memcpy(&xnn_params.allocator, &xnn_default_allocator, sizeof(struct xnn_allocator));
}

}  // namespace xnnpack
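// --- Illustrative usage (added sketch; not part of the original header) ---
// Expectations are set on the mock before exercising code that allocates
// through xnn_params.allocator; the default allocator is restored afterwards.
// NiceMock suppresses "uninteresting call" warnings for the delegated defaults.
TEST(MockAllocatorExample, ObservesAllocations) {
  testing::NiceMock<xnnpack::MockAllocator> mock_allocator;
  xnnpack::SetUpMockAllocator(&mock_allocator);
  EXPECT_CALL(mock_allocator, allocate(testing::_, testing::_))
      .Times(testing::AtLeast(1));
  // ... call the XNNPACK API under test here ...
  xnnpack::RestoreDefaultAllocator(&mock_allocator);
}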
3,229
35.292135
80
h
XNNPACK
XNNPACK-master/test/negate-operator-tester.h
// Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cstddef> #include <cstdlib> #include <random> #include <vector> #include <fp16/fp16.h> #include <xnnpack.h> class NegateOperatorTester { public: inline NegateOperatorTester& channels(size_t channels) { assert(channels != 0); this->channels_ = channels; return *this; } inline size_t channels() const { return this->channels_; } inline NegateOperatorTester& input_stride(size_t input_stride) { assert(input_stride != 0); this->input_stride_ = input_stride; return *this; } inline size_t input_stride() const { if (this->input_stride_ == 0) { return this->channels_; } else { assert(this->input_stride_ >= this->channels_); return this->input_stride_; } } inline NegateOperatorTester& output_stride(size_t output_stride) { assert(output_stride != 0); this->output_stride_ = output_stride; return *this; } inline size_t output_stride() const { if (this->output_stride_ == 0) { return this->channels_; } else { assert(this->output_stride_ >= this->channels_); return this->output_stride_; } } inline NegateOperatorTester& batch_size(size_t batch_size) { assert(batch_size != 0); this->batch_size_ = batch_size; return *this; } inline size_t batch_size() const { return this->batch_size_; } inline NegateOperatorTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void TestF16() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(-1.0f, 1.0f); std::vector<uint16_t> input(XNN_EXTRA_BYTES / sizeof(uint16_t) + (batch_size() - 1) * input_stride() + channels()); std::vector<uint16_t> output((batch_size() - 1) * output_stride() + channels()); std::vector<uint16_t> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); }); std::fill(output.begin(), output.end(), UINT16_C(0x7E00) /* NaN */); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { output_ref[i * channels() + c] = input[i * input_stride() + c] ^ UINT16_C(0x8000); } } // Create, setup, run, and destroy Negate operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t negate_op = nullptr; const xnn_status status = xnn_create_negate_nc_f16( channels(), input_stride(), output_stride(), 0, &negate_op); if (status == xnn_status_unsupported_hardware) { GTEST_SKIP(); } ASSERT_EQ(xnn_status_success, status); ASSERT_NE(nullptr, negate_op); // Smart pointer to automatically delete negate_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_negate_op(negate_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_negate_nc_f16(negate_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_negate_nc_f16(negate_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(negate_op, /*threadpool=*/nullptr)); // Verify results. 
for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_EQ(output_ref[i * channels() + c], output[i * output_stride() + c]) << "at batch " << i << " / " << batch_size() << ", channel " << c << " / " << channels(); } } } } void TestF32() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(-1.0f, 1.0f); std::vector<float> input(XNN_EXTRA_BYTES / sizeof(float) + (batch_size() - 1) * input_stride() + channels()); std::vector<float> output((batch_size() - 1) * output_stride() + channels()); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return f32dist(rng); }); std::fill(output.begin(), output.end(), std::nanf("")); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { output_ref[i * channels() + c] = -input[i * input_stride() + c]; } } // Create, setup, run, and destroy Negate operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t negate_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_negate_nc_f32( channels(), input_stride(), output_stride(), 0, &negate_op)); ASSERT_NE(nullptr, negate_op); // Smart pointer to automatically delete negate_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_negate_op(negate_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_negate_nc_f32(negate_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_negate_nc_f32(negate_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(negate_op, /*threadpool=*/nullptr)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_EQ(output_ref[i * channels() + c], output[i * output_stride() + c]) << "at batch " << i << " / " << batch_size() << ", channel " << c << " / " << channels(); } } } } void TestRunF32() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(-1.0f, 1.0f); std::vector<float> input(XNN_EXTRA_BYTES / sizeof(float) + (batch_size() - 1) * input_stride() + channels()); std::vector<float> output((batch_size() - 1) * output_stride() + channels()); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return f32dist(rng); }); std::fill(output.begin(), output.end(), std::nanf("")); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { output_ref[i * channels() + c] = -input[i * input_stride() + c]; } } // Initialize and run Negate operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); ASSERT_EQ(xnn_status_success, xnn_run_negate_nc_f32( channels(), input_stride(), output_stride(), batch_size(), input.data(), output.data(), 0, nullptr /* thread pool */)); // Verify results. 
for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_EQ(output_ref[i * channels() + c], output[i * output_stride() + c]) << "at batch " << i << " / " << batch_size() << ", channel " << c << " / " << channels(); } } } } private: size_t batch_size_{1}; size_t channels_{1}; size_t input_stride_{0}; size_t output_stride_{0}; size_t iterations_{15}; };
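// --- Illustrative usage (added sketch; not part of the original header) ---
// Operator-level testers take shapes rather than ukernel pointers; strides
// larger than the channel count exercise the non-contiguous paths.
TEST(NEGATE_NC_F32, strided_batch) {
  for (size_t channels = 1; channels <= 31; channels += 5) {
    NegateOperatorTester()
      .batch_size(3)
      .channels(channels)
      .input_stride(37)
      .output_stride(41)
      .TestF32();
  }
}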
7,892
33.168831
115
h
XNNPACK
XNNPACK-master/test/pack-microkernel-tester.h
// Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cstddef> #include <cstdlib> #include <functional> #include <random> #include <vector> #include <xnnpack.h> #include <xnnpack/aligned-allocator.h> #include <xnnpack/microfnptr.h> class PackMicrokernelTester { public: inline PackMicrokernelTester& mr(size_t mr) { assert(mr != 0); this->mr_ = mr; return *this; } inline size_t mr() const { return this->mr_; } inline PackMicrokernelTester& m(size_t m) { assert(m != 0); this->m_ = m; return *this; } inline size_t m() const { return this->m_; } inline PackMicrokernelTester& k(size_t k) { assert(k != 0); this->k_ = k; return *this; } inline size_t k() const { return this->k_; } inline PackMicrokernelTester& x_stride(size_t x_stride) { assert(x_stride != 0); this->x_stride_ = x_stride; return *this; } inline size_t x_stride() const { if (this->x_stride_ == 0) { return k(); } else { assert(this->x_stride_ >= k()); return this->x_stride_; } } inline PackMicrokernelTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void Test(xnn_x32_packx_ukernel_fn packx) const { std::random_device random_device; auto rng = std::mt19937(random_device()); auto u32rng = std::bind(std::uniform_int_distribution<uint32_t>(), rng); const uint32_t c = u32rng(); std::vector<uint32_t> x(k() + (m() - 1) * x_stride() + XNN_EXTRA_BYTES / sizeof(uint32_t)); std::vector<uint32_t, AlignedAllocator<uint32_t, 64>> y(mr() * k()); std::vector<uint32_t> y_ref(mr() * k()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(x.begin(), x.end(), std::ref(u32rng)); std::generate(y.begin(), y.end(), std::ref(u32rng)); // Compute reference results. std::fill(y_ref.begin(), y_ref.end(), c); for (size_t i = 0; i < mr(); i++) { for (size_t j = 0; j < k(); j++) { y_ref[j * mr() + i] = x[std::min(i, m() - 1) * x_stride() + j]; } } // Call optimized micro-kernel. packx( m(), k(), x.data(), x_stride() * sizeof(uint32_t), y.data()); // Verify results. for (size_t i = 0; i < mr(); i++) { for (size_t j = 0; j < k(); j++) { EXPECT_EQ(y_ref[j * mr() + i], y[j * mr() + i]) << "at pixel = " << i << ", channel = " << j << ", " << "m = " << m() << ", k = " << k(); } } } } private: size_t mr_{1}; size_t m_{1}; size_t k_{1}; size_t x_stride_{0}; size_t iterations_{1}; };
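// --- Illustrative usage (added sketch; not part of the original header) ---
// m < mr exercises the row-replication path in the reference computation
// above; the 4x scalar packx kernel name follows XNNPACK's naming scheme.
TEST(X32_PACKX_4X__SCALAR, m_lt_mr) {
  for (size_t m = 1; m <= 4; m++) {
    PackMicrokernelTester()
      .mr(4)
      .m(m)
      .k(7)
      .Test(xnn_x32_packx_ukernel_4x__scalar);
  }
}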
2,952
22.814516
95
h
XNNPACK
XNNPACK-master/test/packb-microkernel-tester.h
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cstddef> #include <cstdlib> #include <numeric> #include <vector> #include <xnnpack.h> #include <xnnpack/aligned-allocator.h> #include <xnnpack/math.h> #include <xnnpack/microfnptr.h> #include <xnnpack/pack.h> // Reference bias packing function for f32. static void f32_packb_reference( size_t groups, size_t channels, size_t kernel_tile, size_t channel_tile, size_t channel_subtile, size_t channel_round, const float* weights, const float* bias, float* out, size_t per_tile_extra_bytes, size_t per_subtile_extra_bytes) { assert(groups > 0); // Group loop. do { // Channel tile loop. size_t c = round_up_po2(channels, channel_round); size_t tiled_c = round_down_po2(c, channel_tile); size_t cr_block_start = 0; for (; cr_block_start < tiled_c; cr_block_start += channel_tile) { const size_t cr_block_size = min(channels - cr_block_start, channel_tile); if (bias != nullptr) { for (size_t i = 0; i < cr_block_size; i++) { *out++ = bias[cr_block_start + i]; } } else { size_t i = cr_block_size; do { *out++ = 0.0f; } while (--i != 0); } out += channel_tile - cr_block_size; out += kernel_tile * channel_tile; out += per_tile_extra_bytes; } // Channel subtile loop. for (; cr_block_start < c; cr_block_start += channel_subtile) { const size_t cr_block_size = min(channels - cr_block_start, channel_subtile); if (bias != nullptr) { for (size_t i = 0; i < cr_block_size; i++) { *out++ = bias[cr_block_start + i]; } } else { size_t i = cr_block_size; do { *out++ = 0.0f; } while (--i != 0); } out += channel_subtile - cr_block_size; out += kernel_tile * channel_subtile; out += per_subtile_extra_bytes; } if (bias != nullptr) { bias += channels; } } while (--groups > 0); } class PackBMicrokernelTester { public: inline PackBMicrokernelTester& groups(size_t groups) { this->groups_ = groups; return *this; } inline size_t groups() const { return this->groups_; } inline PackBMicrokernelTester& channel_tile(size_t channel_tile) { this->channel_tile_ = channel_tile; return *this; } inline size_t channel_tile() const { return this->channel_tile_; } inline PackBMicrokernelTester& channel_subtile(size_t channel_subtile) { this->channel_subtile_ = channel_subtile; return *this; } inline size_t channel_subtile() const { return this->channel_subtile_; } inline PackBMicrokernelTester& channel_round(size_t channel_round) { this->channel_round_ = channel_round; return *this; } inline size_t channel_round() const { return this->channel_round_; } inline PackBMicrokernelTester& channels(size_t channels) { assert(channels != 0); this->channels_ = channels; return *this; } inline size_t channels() const { return this->channels_; } inline size_t packed_channels() const { return round_up(channels(), channel_subtile()); } inline PackBMicrokernelTester& kernel_tile(size_t kernel_tile) { this->kernel_tile_ = kernel_tile; return *this; } inline size_t kernel_tile() const { return this->kernel_tile_; } void Test(xnn_x32_packb_gemm_ukernel_fn packb) const { std::vector<uint32_t> weights(groups() * channels() * kernel_tile()); std::vector<uint32_t> bias(groups() * channels()); std::vector<uint32_t, AlignedAllocator<uint32_t, 64>> packed_w( groups() * (packed_channels() * kernel_tile() + packed_channels())); std::vector<uint32_t> packed_w_ref(groups() * (packed_channels() * kernel_tile() + 
                                                   packed_channels()));

    std::fill(weights.begin(), weights.end(), 0xDEADBEEF);
    std::iota(bias.begin(), bias.end(), UINT32_C(0x80000000));
    std::fill(packed_w.begin(), packed_w.end(), UINT32_C(0x12345678));
    std::fill(packed_w_ref.begin(), packed_w_ref.end(), UINT32_C(0xDEADBEEF));

    // Compute reference results.
    f32_packb_reference(
      groups(),
      channels(),
      kernel_tile(),
      channel_tile(),
      channel_subtile(),
      channel_round(),
      reinterpret_cast<const float*>(weights.data()),
      reinterpret_cast<const float*>(bias.data()),
      reinterpret_cast<float*>(packed_w_ref.data()),
      /*per_tile_extra_bytes=*/0,
      /*per_subtile_extra_bytes=*/0);

    // Call optimized micro-kernel.
    packb(
      groups(),
      channels(),
      bias.data(),
      packed_w.data(),
      /*channel_tile_stride=*/sizeof(float) * (kernel_tile() * channel_tile() + channel_tile()),
      /*channel_subtile_stride=*/sizeof(float) * (kernel_tile() * channel_subtile() + channel_subtile()),
      nullptr);

    // Verify results.
    for (size_t i = 0; i < packed_w.size(); i++) {
      if (packed_w_ref[i] != UINT32_C(0xDEADBEEF)) {
        // Bias positions written by the reference packing must match exactly.
        EXPECT_EQ(packed_w[i], packed_w_ref[i])
            << "at position " << i << " / " << packed_w.size()
            << ", channels " << channels()
            << ", kernel tile " << kernel_tile()
            << ", groups " << groups();
      } else {
        // Weight positions are not written by the bias-packing kernel, so the
        // canary fill value must be intact.
        EXPECT_EQ(packed_w[i], 0x12345678)
            << "at position " << i << " / " << packed_w.size()
            << ", channels " << channels()
            << ", kernel tile " << kernel_tile()
            << ", groups " << groups();
      }
    }
  }

  void Test(xnn_x32_zerob_gemm_ukernel_fn zerob) const {
    std::vector<uint32_t> weights(groups() * channels() * kernel_tile());
    std::vector<uint32_t, AlignedAllocator<uint32_t, 64>> packed_w(
        groups() * (packed_channels() * kernel_tile() + packed_channels()));
    std::vector<uint32_t> packed_w_ref(groups() * (packed_channels() * kernel_tile() +
                                                   packed_channels()));

    std::fill(weights.begin(), weights.end(), 0xDEADBEEF);
    std::fill(packed_w.begin(), packed_w.end(), UINT32_C(0x12345678));
    std::fill(packed_w_ref.begin(), packed_w_ref.end(), UINT32_C(0xDEADBEEF));

    // Compute reference results.
    f32_packb_reference(
      groups(),
      channels(),
      kernel_tile(),
      channel_tile(),
      channel_subtile(),
      channel_round(),
      reinterpret_cast<const float*>(weights.data()),
      nullptr,
      reinterpret_cast<float*>(packed_w_ref.data()),
      /*per_tile_extra_bytes=*/0,
      /*per_subtile_extra_bytes=*/0);

    // Call optimized micro-kernel.
    zerob(
      groups(),
      channels(),
      packed_w.data(),
      /*channel_tile_stride=*/sizeof(float) * (kernel_tile() * channel_tile() + channel_tile()),
      /*channel_subtile_stride=*/sizeof(float) * (kernel_tile() * channel_subtile() + channel_subtile()),
      nullptr);

    // Verify results.
    for (size_t i = 0; i < packed_w.size(); i++) {
      if (packed_w_ref[i] != UINT32_C(0xDEADBEEF)) {
        // Bias positions must match the reference packing, which zeroes them
        // when no bias is supplied.
        EXPECT_EQ(packed_w[i], packed_w_ref[i])
            << "at position " << i << " / " << packed_w.size()
            << ", channels " << channels()
            << ", kernel tile " << kernel_tile();
        EXPECT_EQ(packed_w[i], 0u)
            << "at position " << i << " / " << packed_w.size()
            << ", channels " << channels()
            << ", kernel tile " << kernel_tile();
      } else {
        // Weight positions are not written by the zero-bias kernel, so the
        // canary fill value must be intact.
        EXPECT_EQ(packed_w[i], 0x12345678)
            << "at position " << i << " / " << packed_w.size()
            << ", channels " << channels()
            << ", kernel tile " << kernel_tile();
      }
    }
  }

 private:
  size_t groups_{1};
  size_t channels_{1};
  size_t channel_tile_{1};
  size_t channel_subtile_{1};
  size_t channel_round_{1};
  size_t kernel_tile_{1};
};
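// --- Illustrative usage (added sketch; not part of the original header) ---
// The tile parameters mirror the kernel-name suffix (2c1s1r: channel_tile 2,
// channel_subtile 1, channel_round 1).
TEST(X32_PACKB_GEMM_2C1S1R__SCALAR_FLOAT, channels_gt_tile) {
  for (size_t channels = 1; channels <= 5; channels++) {
    PackBMicrokernelTester()
      .channel_tile(2)
      .channel_subtile(1)
      .channel_round(1)
      .channels(channels)
      .kernel_tile(3)
      .Test(xnn_x32_packb_gemm_ukernel_2c1s1r__scalar_float);
  }
}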
8,146
33.965665
118
h
XNNPACK
XNNPACK-master/test/pad-microkernel-tester.h
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <gtest/gtest.h>

#include <array>
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <iomanip>
#include <limits>
#include <random>
#include <vector>

#include <xnnpack.h>
#include <xnnpack/microfnptr.h>


class PadMicrokernelTester {
 public:
  inline PadMicrokernelTester& rows(size_t rows) {
    assert(rows != 0);
    this->rows_ = rows;
    return *this;
  }

  inline size_t rows() const {
    return this->rows_;
  }

  inline PadMicrokernelTester& input_channels(size_t input_channels) {
    assert(input_channels != 0);
    this->input_channels_ = input_channels;
    return *this;
  }

  inline size_t input_channels() const {
    return this->input_channels_;
  }

  inline PadMicrokernelTester& pre_padding(size_t pre_padding) {
    this->pre_padding_ = pre_padding;
    return *this;
  }

  inline size_t pre_padding() const {
    return this->pre_padding_;
  }

  inline PadMicrokernelTester& post_padding(size_t post_padding) {
    this->post_padding_ = post_padding;
    return *this;
  }

  inline size_t post_padding() const {
    return this->post_padding_;
  }

  inline size_t output_channels() const {
    return pre_padding() + input_channels() + post_padding();
  }

  inline PadMicrokernelTester& input_stride(size_t input_stride) {
    assert(input_stride != 0);
    this->input_stride_ = input_stride;
    return *this;
  }

  inline size_t input_stride() const {
    if (this->input_stride_ == 0) {
      return input_channels();
    } else {
      assert(this->input_stride_ >= input_channels());
      return this->input_stride_;
    }
  }

  inline PadMicrokernelTester& output_stride(size_t output_stride) {
    assert(output_stride != 0);
    this->output_stride_ = output_stride;
    return *this;
  }

  inline size_t output_stride() const {
    if (this->output_stride_ == 0) {
      return pre_padding() + input_channels() + post_padding();
    } else {
      assert(this->output_stride_ >= pre_padding() + input_channels() + post_padding());
      return this->output_stride_;
    }
  }

  inline PadMicrokernelTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

  void Test(xnn_pad_ukernel_fn pad) const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto u8rng = std::bind(std::uniform_int_distribution<uint32_t>(0, std::numeric_limits<uint8_t>::max()), rng);

    std::vector<uint8_t> input(input_channels() + (rows() - 1) * input_stride() + XNN_EXTRA_BYTES / sizeof(uint8_t));
    std::vector<uint8_t> output((pre_padding() + input_channels() + post_padding()) + (rows() - 1) * output_stride());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(u8rng));
      std::generate(output.begin(), output.end(), std::ref(u8rng));

      std::array<uint8_t, 4> fill_pattern;
      std::generate(fill_pattern.begin(), fill_pattern.end(), std::ref(u8rng));
      uint32_t fill_value = 0;
      memcpy(&fill_value, fill_pattern.data(), sizeof(fill_value));

      // Call optimized micro-kernel.
      pad(
        rows(),
        input_channels() * sizeof(uint8_t),
        pre_padding() * sizeof(uint8_t),
        post_padding() * sizeof(uint8_t),
        input.data(), input_stride() * sizeof(uint8_t),
        output.data(), output_stride() * sizeof(uint8_t),
        fill_value);

      // Verify results.
      for (size_t i = 0; i < rows(); i++) {
        for (size_t l = 0; l < pre_padding(); l++) {
          ASSERT_EQ(
              uint32_t(output[i * output_stride() + l]),
              uint32_t(fill_pattern[l % fill_pattern.size()]))
            << "at row " << i << " / " << rows()
            << ", channel " << l << " / " << output_channels()
            << " (" << pre_padding() << " + " << input_channels() << " + " << post_padding() << ")"
            << ", fill value 0x" << std::hex << std::setw(8) << std::setfill('0') << fill_value
            << ", output value 0x" << std::hex << std::setw(2) << std::setfill('0')
            << uint32_t(output[i * output_stride() + l]);
        }
        for (size_t c = 0; c < input_channels(); c++) {
          ASSERT_EQ(
              uint32_t(output[i * output_stride() + pre_padding() + c]),
              uint32_t(input[i * input_stride() + c]))
            << "at row " << i << " / " << rows()
            << ", channel " << pre_padding() + c << " / " << output_channels()
            << " (" << pre_padding() << " + " << input_channels() << " + " << post_padding() << ")"
            << ", fill value 0x" << std::hex << std::setw(8) << std::setfill('0') << fill_value
            << ", output value 0x" << std::hex << std::setw(2) << std::setfill('0')
            << uint32_t(output[i * output_stride() + pre_padding() + c]);
        }
        for (size_t r = 0; r < post_padding(); r++) {
          ASSERT_EQ(
              uint32_t(output[i * output_stride() + pre_padding() + input_channels() + r]),
              uint32_t(fill_pattern[r % fill_pattern.size()]))
            << "at row " << i << " / " << rows()
            << ", channel " << pre_padding() + input_channels() + r << " / " << output_channels()
            << " (" << pre_padding() << " + " << input_channels() << " + " << post_padding() << ")"
            << ", fill value 0x" << std::hex << std::setw(8) << std::setfill('0') << fill_value
            << ", output value 0x" << std::hex << std::setw(2) << std::setfill('0')
            << uint32_t(output[i * output_stride() + pre_padding() + input_channels() + r]);
        }
      }
    }
  }

 private:
  size_t rows_{1};
  size_t input_channels_{1};
  size_t pre_padding_{0};
  size_t post_padding_{0};
  size_t input_stride_{0};
  size_t output_stride_{0};
  size_t iterations_{15};
};
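// --- Illustrative usage (added sketch; not part of the original header) ---
// The pad ukernel operates on raw bytes, so one tester covers all element
// types; the kernel name below is an assumption for illustration.
TEST(XX_PAD__SCALAR, pre_and_post_padding) {
  for (size_t padding = 1; padding <= 8; padding++) {
    PadMicrokernelTester()
      .rows(2)
      .input_channels(5)
      .pre_padding(padding)
      .post_padding(padding)
      .Test(xnn_xx_pad_ukernel__scalar);
  }
}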
5,972
32.9375
118
h
XNNPACK
XNNPACK-master/test/prelu-microkernel-tester.h
// Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cmath> #include <cstddef> #include <cstdlib> #include <random> #include <vector> #include <fp16/fp16.h> #include <xnnpack.h> #include <xnnpack/aligned-allocator.h> #include <xnnpack/microfnptr.h> class PReLUMicrokernelTester { public: inline PReLUMicrokernelTester& rows(size_t rows) { assert(rows != 0); this->rows_ = rows; return *this; } inline size_t rows() const { return this->rows_; } inline PReLUMicrokernelTester& channels(size_t channels) { assert(channels != 0); this->channels_ = channels; return *this; } inline size_t channels() const { return this->channels_; } inline PReLUMicrokernelTester& input_stride(size_t input_stride) { assert(input_stride != 0); this->input_stride_ = input_stride; return *this; } inline size_t input_stride() const { if (this->input_stride_ == 0) { return channels(); } else { assert(this->input_stride_ >= channels()); return this->input_stride_; } } inline PReLUMicrokernelTester& output_stride(size_t output_stride) { assert(output_stride != 0); this->output_stride_ = output_stride; return *this; } inline size_t output_stride() const { if (this->output_stride_ == 0) { return channels(); } else { assert(this->output_stride_ >= channels()); return this->output_stride_; } } inline PReLUMicrokernelTester& inplace(bool inplace) { this->inplace_ = inplace; return *this; } inline bool inplace() const { return this->inplace_; } inline PReLUMicrokernelTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void Test(xnn_f16_prelu_ukernel_fn prelu) const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(-1.0f, 1.0f); std::uniform_real_distribution<float> w32dist(0.25f, 0.75f); std::vector<uint16_t> x(channels() + (rows() - 1) * input_stride() + XNN_EXTRA_BYTES / sizeof(uint16_t)); std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> w(channels() + XNN_EXTRA_BYTES / sizeof(uint16_t)); std::vector<uint16_t> y(channels() + (rows() - 1) * output_stride() + XNN_EXTRA_BYTES / sizeof(uint16_t)); std::vector<float> y_ref(channels() * rows()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(x.begin(), x.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); }); std::generate(w.begin(), w.end(), [&]() { return fp16_ieee_from_fp32_value(w32dist(rng)); }); if (inplace()) { std::generate(y.begin(), y.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); }); } else { std::fill(y.begin(), y.end(), UINT16_C(0x7E00) /* NaN */); } const uint16_t* x_data = inplace() ? y.data() : x.data(); // Compute reference results, without clamping. for (size_t n = 0; n < rows(); n++) { for (size_t c = 0; c < channels(); c++) { const float x_value = fp16_ieee_to_fp32_value(x_data[n * input_stride() + c]); y_ref[n * channels() + c] = std::signbit(x_value) ? fp16_ieee_to_fp32_value(fp16_ieee_from_fp32_value(x_value * fp16_ieee_to_fp32_value(w[c]))) : x_value; } } // Call optimized micro-kernel. prelu(rows(), channels() * sizeof(uint16_t), x_data, input_stride() * sizeof(uint16_t), w.data(), y.data(), output_stride() * sizeof(uint16_t)); // Verify results. 
for (size_t n = 0; n < rows(); n++) { for (size_t c = 0; c < channels(); c++) { EXPECT_EQ(fp16_ieee_to_fp32_value(y[n * output_stride() + c]), y_ref[n * channels() + c]) << "at row " << n << " / " << rows() << ", channel " << c << " / " << channels(); } } } } void Test(xnn_f32_prelu_ukernel_fn prelu) const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(-1.0f, 1.0f); std::uniform_real_distribution<float> w32dist(0.25f, 0.75f); std::vector<float> x(channels() + (rows() - 1) * input_stride() + XNN_EXTRA_BYTES / sizeof(float)); std::vector<float, AlignedAllocator<float, 64>> w(channels() + XNN_EXTRA_BYTES / sizeof(float)); std::vector<float> y(channels() + (rows() - 1) * output_stride() + XNN_EXTRA_BYTES / sizeof(float)); std::vector<float> y_ref(channels() * rows()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(x.begin(), x.end(), [&]() { return f32dist(rng); }); std::generate(w.begin(), w.end(), [&]() { return w32dist(rng); }); if (inplace()) { std::generate(y.begin(), y.end(), [&]() { return f32dist(rng); }); } else { std::fill(y.begin(), y.end(), nanf("")); } const float* x_data = inplace() ? y.data() : x.data(); // Compute reference results, without clamping. for (size_t n = 0; n < rows(); n++) { for (size_t c = 0; c < channels(); c++) { const float x_value = x_data[n * input_stride() + c]; y_ref[n * channels() + c] = std::signbit(x_value) ? x_value * w[c] : x_value; } } // Call optimized micro-kernel. prelu(rows(), channels() * sizeof(float), x_data, input_stride() * sizeof(float), w.data(), y.data(), output_stride() * sizeof(float)); // Verify results. for (size_t n = 0; n < rows(); n++) { for (size_t c = 0; c < channels(); c++) { EXPECT_EQ(y[n * output_stride() + c], y_ref[n * channels() + c]) << "at row " << n << " / " << rows() << ", channel " << c << " / " << channels(); } } } } private: size_t rows_{1}; size_t channels_{1}; size_t input_stride_{0}; size_t output_stride_{0}; bool inplace_{false}; size_t iterations_{15}; };
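// --- Illustrative usage (added sketch; not part of the original header) ---
// Sweeping rows and channels past the kernel's 2x4 tile (assumed from the
// kernel name) covers both the main tile and the remainder paths.
TEST(F32_PRELU__SCALAR_2X4, rows_and_channels) {
  for (size_t rows = 1; rows <= 5; rows++) {
    for (size_t channels = 1; channels <= 9; channels++) {
      PReLUMicrokernelTester()
        .rows(rows)
        .channels(channels)
        .Test(xnn_f32_prelu_ukernel__scalar_2x4);
    }
  }
}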
6,301
31.484536
116
h
XNNPACK
XNNPACK-master/test/prelu-operator-tester.h
// Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <fp16/fp16.h> #include <algorithm> #include <cmath> #include <cstddef> #include <cstdlib> #include <functional> #include <random> #include <vector> #include <xnnpack.h> #include <xnnpack/cache.h> class PReLUOperatorTester { public: enum class WeightsType { Default, FP32, }; inline PReLUOperatorTester& batch_size(size_t batch_size) { assert(batch_size != 0); this->batch_size_ = batch_size; return *this; } inline size_t batch_size() const { return this->batch_size_; } inline PReLUOperatorTester& channels(size_t channels) { assert(channels != 0); this->channels_ = channels; return *this; } inline size_t channels() const { return this->channels_; } inline PReLUOperatorTester& x_stride(size_t x_stride) { assert(x_stride != 0); this->x_stride_ = x_stride; return *this; } inline size_t x_stride() const { if (this->x_stride_ == 0) { return this->channels_; } else { assert(this->x_stride_ >= this->channels_); return this->x_stride_; } } inline PReLUOperatorTester& y_stride(size_t y_stride) { assert(y_stride != 0); this->y_stride_ = y_stride; return *this; } inline size_t y_stride() const { if (this->y_stride_ == 0) { return this->channels_; } else { assert(this->y_stride_ >= this->channels_); return this->y_stride_; } } inline PReLUOperatorTester& weights_type(WeightsType weights_type) { this->weights_type_ = weights_type; return *this; } inline WeightsType weights_type() const { return this->weights_type_; } inline PReLUOperatorTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } inline PReLUOperatorTester& use_weights_cache(bool use_weights_cache) { this->use_weights_cache_ = use_weights_cache; return *this; } inline bool use_weights_cache() const { return this->use_weights_cache_; } void TestF16() const { switch (weights_type()) { case WeightsType::Default: break; case WeightsType::FP32: break; default: GTEST_FAIL() << "unexpected weights type"; } std::random_device random_device; auto rng = std::mt19937(random_device()); auto f32irng = std::uniform_real_distribution<float>(-1.0f, 1.0f); auto f32wrng = std::uniform_real_distribution<float>(0.25f, 0.75f); std::vector<uint16_t> x((batch_size() - 1) * x_stride() + channels() + XNN_EXTRA_BYTES / sizeof(uint16_t)); std::vector<uint16_t> w(channels()); std::vector<float> w_as_float(channels()); std::vector<uint16_t> y((batch_size() - 1) * y_stride() + channels() + XNN_EXTRA_BYTES / sizeof(uint16_t)); std::vector<float> y_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(x.begin(), x.end(), [&] { return fp16_ieee_from_fp32_value(f32irng(rng)); }); std::generate(w.begin(), w.end(), [&] { return fp16_ieee_from_fp32_value(f32wrng(rng)); }); std::transform(w.cbegin(), w.cend(), w_as_float.begin(), fp16_ieee_to_fp32_value); std::fill(y.begin(), y.end(), UINT16_C(0x7E00) /* NaN */); // Compute reference results, without clamping. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { const float x_value = fp16_ieee_to_fp32_value(x[i * x_stride() + c]); const float w_value = w_as_float[c]; y_ref[i * channels() + c] = std::signbit(x_value) ? x_value * w_value : x_value; } } // Create, setup, run, and destroy PReLU operator. 
ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t prelu_op = nullptr; xnn_weights_cache weights_cache; std::unique_ptr<xnn_weights_cache, decltype(&xnn_release_weights_cache)> auto_weights_cache( nullptr, xnn_release_weights_cache); if (use_weights_cache()) { xnn_init_weights_cache(&weights_cache); auto_weights_cache.reset(&weights_cache); } const void* negative_slope_data = w.data(); if (weights_type() == WeightsType::FP32) { negative_slope_data = w_as_float.data(); } uint32_t flags = 0; if (weights_type() == WeightsType::FP32) { flags |= XNN_FLAG_FP32_STATIC_WEIGHTS; } ASSERT_EQ(xnn_status_success, xnn_create_prelu_nc_f16( channels(), x_stride(), y_stride(), negative_slope_data, flags, /*code_cache=*/nullptr, auto_weights_cache.get(), &prelu_op)); ASSERT_NE(nullptr, prelu_op); if (use_weights_cache()) { ASSERT_EQ(xnn_status_success, xnn_finalize_weights_cache(&weights_cache, xnn_weights_cache_finalization_kind_soft)); } // Smart pointer to automatically delete prelu_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_prelu_op(prelu_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_prelu_nc_f16( prelu_op, batch_size(), nullptr /* thread pool */)); ASSERT_EQ(xnn_status_success, xnn_setup_prelu_nc_f16( prelu_op, x.data(), y.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(prelu_op, nullptr /* thread pool */)); VerifyF16(y, y_ref); if (use_weights_cache()) { xnn_operator_t prelu_op2 = nullptr; const size_t old_weights_cache_size = weights_cache.cache.weights.size; ASSERT_EQ(xnn_status_success, xnn_create_prelu_nc_f16( channels(), x_stride(), y_stride(), negative_slope_data, flags, /*code_cache=*/nullptr, auto_weights_cache.get(), &prelu_op2)); ASSERT_NE(nullptr, prelu_op2); // Smart pointer to automatically delete prelu_op2. 
std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_prelu_op(prelu_op2, xnn_delete_operator); std::vector<uint16_t> y2(y.size(), UINT16_C(0x7E00) /* NaN */); ASSERT_EQ(xnn_status_success, xnn_reshape_prelu_nc_f16( prelu_op2, batch_size(), nullptr /* thread pool */)); ASSERT_EQ(xnn_status_success, xnn_setup_prelu_nc_f16( prelu_op2, x.data(), y2.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(prelu_op2, nullptr /* thread pool */)); VerifyF16(y2, y_ref); VerifyWeightsCache(weights_cache, old_weights_cache_size); } } } void VerifyF16(const std::vector<uint16_t>& y, const std::vector<float>& y_ref) const { for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { ASSERT_NEAR( fp16_ieee_to_fp32_value(y[i * y_stride() + c]), y_ref[i * channels() + c], std::max(1.0e-4f, std::abs(y_ref[i * channels() + c]) * 1.0e-3f)) << "at position " << i << " / " << batch_size() << ", channel " << c << " / " << channels(); } } } void TestF32() const { ASSERT_EQ(weights_type(), WeightsType::Default); std::random_device random_device; auto rng = std::mt19937(random_device()); auto f32irng = std::uniform_real_distribution<float>(-1.0f, 1.0f); auto f32wrng = std::uniform_real_distribution<float>(0.25f, 0.75f); std::vector<float> x((batch_size() - 1) * x_stride() + channels() + XNN_EXTRA_BYTES / sizeof(float)); std::vector<float> w(channels()); std::vector<float> y((batch_size() - 1) * y_stride() + channels() + XNN_EXTRA_BYTES / sizeof(float)); std::vector<float> y_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(x.begin(), x.end(), [&] { return f32irng(rng);} ); std::generate(w.begin(), w.end(), [&] { return f32wrng(rng);} ); std::fill(y.begin(), y.end(), nanf("")); // Compute reference results, without clamping. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { y_ref[i * channels() + c] = std::signbit(x[i * x_stride() + c]) ? x[i * x_stride() + c] * w[c] : x[i * x_stride() + c]; } } // Create, setup, run, and destroy PReLU operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t prelu_op = nullptr; xnn_weights_cache weights_cache; std::unique_ptr<xnn_weights_cache, decltype(&xnn_release_weights_cache)> auto_weights_cache( nullptr, xnn_release_weights_cache); if (use_weights_cache()) { xnn_init_weights_cache(&weights_cache); auto_weights_cache.reset(&weights_cache); } ASSERT_EQ(xnn_status_success, xnn_create_prelu_nc_f32( channels(), x_stride(), y_stride(), w.data(), 0, /*code_cache=*/nullptr, auto_weights_cache.get(), &prelu_op)); ASSERT_NE(nullptr, prelu_op); if (use_weights_cache()) { ASSERT_EQ(xnn_status_success, xnn_finalize_weights_cache(&weights_cache, xnn_weights_cache_finalization_kind_soft)); } // Smart pointer to automatically delete prelu_op. 
      std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_prelu_op(prelu_op, xnn_delete_operator);

      ASSERT_EQ(xnn_status_success,
        xnn_reshape_prelu_nc_f32(
          prelu_op,
          batch_size(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_setup_prelu_nc_f32(
          prelu_op,
          x.data(), y.data()));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(prelu_op, nullptr /* thread pool */));

      VerifyF32(y, y_ref);

      if (use_weights_cache()) {
        xnn_operator_t prelu_op2 = nullptr;
        const size_t old_weights_cache_size = weights_cache.cache.weights.size;
        ASSERT_EQ(xnn_status_success,
                  xnn_create_prelu_nc_f32(
                      channels(), x_stride(), y_stride(), w.data(), 0,
                      /*code_cache=*/nullptr, auto_weights_cache.get(), &prelu_op2));
        ASSERT_NE(nullptr, prelu_op2);

        // Smart pointer to automatically delete prelu_op2.
        std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_prelu_op(prelu_op2, xnn_delete_operator);
        std::vector<float> y2(y.size(), nanf(""));

        ASSERT_EQ(xnn_status_success,
          xnn_reshape_prelu_nc_f32(
            prelu_op2,
            batch_size(),
            nullptr /* thread pool */));

        ASSERT_EQ(xnn_status_success,
          xnn_setup_prelu_nc_f32(
            prelu_op2,
            x.data(), y2.data()));

        ASSERT_EQ(xnn_status_success,
          xnn_run_operator(prelu_op2, nullptr /* thread pool */));

        VerifyF32(y2, y_ref);
        VerifyWeightsCache(weights_cache, old_weights_cache_size);
      }
    }
  }

  void VerifyF32(const std::vector<float>& y, const std::vector<float>& y_ref) const {
    for (size_t i = 0; i < batch_size(); i++) {
      for (size_t c = 0; c < channels(); c++) {
        ASSERT_NEAR(
            y[i * y_stride() + c],
            y_ref[i * channels() + c],
            std::max(1.0e-6f, std::abs(y_ref[i * channels() + c]) * 1.0e-6f))
          << "at position " << i << " / " << batch_size()
          << ", channel " << c << " / " << channels();
      }
    }
  }

  void VerifyWeightsCache(const xnn_weights_cache& weights_cache, size_t old_size) const {
    ASSERT_EQ(weights_cache.cache.hits, 1);
    // Ensure that we did not write more weights to the cache because it was a cache hit.
    ASSERT_EQ(old_size, weights_cache.cache.weights.size);
  };

 private:
  size_t batch_size_{1};
  size_t channels_{1};
  size_t x_stride_{0};
  size_t y_stride_{0};
  WeightsType weights_type_{WeightsType::Default};
  bool use_weights_cache_{false};
  size_t iterations_{15};
};
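// --- Illustrative usage (added sketch; not part of the original header) ---
// Enabling use_weights_cache makes the tester create the operator twice and
// check for a cache hit via VerifyWeightsCache.
TEST(PRELU_NC_F32, weights_cache) {
  PReLUOperatorTester()
    .batch_size(7)
    .channels(19)
    .use_weights_cache(true)
    .TestF32();
}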
12,472
32.986376
129
h
XNNPACK
XNNPACK-master/test/raddexpminusmax-microkernel-tester.h
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <gtest/gtest.h>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>

#include <xnnpack.h>
#include <xnnpack/microfnptr.h>


class RAddExpMinusMaxMicrokernelTester {
 public:
  inline RAddExpMinusMaxMicrokernelTester& elements(size_t elements) {
    assert(elements != 0);
    this->elements_ = elements;
    return *this;
  }

  inline size_t elements() const {
    return this->elements_;
  }

  inline RAddExpMinusMaxMicrokernelTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

  void Test(xnn_f32_raddexpminusmax_ukernel_fn raddexpminusmax) const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    // Choose such range that expf(x[i]) overflows, but expf(x[i] - x_max) doesn't.
    // However, the range is still narrow enough that double-precision exp doesn't overflow.
    auto f32rng = std::bind(std::uniform_real_distribution<float>(90.0f, 100.0f), rng);

    std::vector<float> x(elements() + XNN_EXTRA_BYTES / sizeof(float));
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(x.begin(), x.end(), std::ref(f32rng));

      // Compute reference results.
      double sum_ref = 0.0;
      const float x_max = *std::max_element(x.begin(), x.begin() + elements());
      for (size_t i = 0; i < elements(); i++) {
        sum_ref += exp(x[i] - x_max);
      }

      // Call optimized micro-kernel.
      float sum = std::nanf("");
      raddexpminusmax(elements() * sizeof(float), x.data(), &sum, x_max);

      // Verify results.
      ASSERT_NEAR(sum_ref, double(sum), std::abs(sum_ref) * 1.0e-6)
        << "elements = " << elements() << ", x_max = " << x_max;
    }
  }

 private:
  size_t elements_{1};
  size_t iterations_{15};
};
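// --- Illustrative usage (added sketch; not part of the original header) ---
// raddexpminusmax kernels are ISA-specific; a real test guards with the
// harness's TEST_REQUIRES_* macros (from <xnnpack/isa-checks.h>). The AVX2
// kernel name is an assumption for illustration.
TEST(F32_RADDEXPMINUSMAX__AVX2_P5_X64, elements_eq_64) {
  TEST_REQUIRES_X86_AVX2;
  RAddExpMinusMaxMicrokernelTester()
    .elements(64)
    .Test(xnn_f32_raddexpminusmax_ukernel__avx2_p5_x64);
}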
2,117
27.24
92
h
XNNPACK
XNNPACK-master/test/raddextexp-microkernel-tester.h
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <gtest/gtest.h>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>

#include <xnnpack.h>
#include <xnnpack/microfnptr.h>


class RAddExtExpMicrokernelTester {
 public:
  inline RAddExtExpMicrokernelTester& elements(size_t elements) {
    assert(elements != 0);
    this->elements_ = elements;
    return *this;
  }

  inline size_t elements() const {
    return this->elements_;
  }

  inline RAddExtExpMicrokernelTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

  void Test(xnn_f32_raddextexp_ukernel_fn raddextexp) const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    // Choose such range that expf(x[i]) overflows, but double-precision exp doesn't overflow.
    auto f32rng = std::bind(std::uniform_real_distribution<float>(90.0f, 100.0f), rng);

    std::vector<float> x(elements() + XNN_EXTRA_BYTES / sizeof(float));
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(x.begin(), x.end(), std::ref(f32rng));

      // Compute reference results.
      double sum_ref = 0.0;
      for (size_t i = 0; i < elements(); i++) {
        sum_ref += exp(double(x[i]));
      }

      // Call optimized micro-kernel.
      float sum[2] = { std::nanf(""), std::nanf("") };
      raddextexp(elements() * sizeof(float), x.data(), sum);

      // Verify results.
      ASSERT_NEAR(sum_ref, exp2(double(sum[1])) * double(sum[0]), std::abs(sum_ref) * 1.0e-6)
        << "elements = " << elements() << ", y:value = " << sum[0] << ", y:exponent = " << sum[1];
    }
  }

 private:
  size_t elements_{1};
  size_t iterations_{15};
};
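// --- Illustrative usage (added sketch; not part of the original header) ---
// Like raddexpminusmax, these kernels are ISA-specific; the AVX2 kernel name
// and the TEST_REQUIRES_X86_AVX2 guard are assumptions for illustration.
TEST(F32_RADDEXTEXP__AVX2_P5_X64, elements_eq_64) {
  TEST_REQUIRES_X86_AVX2;
  RAddExtExpMicrokernelTester()
    .elements(64)
    .Test(xnn_f32_raddextexp_ukernel__avx2_p5_x64);
}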
1,989
26.260274
98
h
XNNPACK
XNNPACK-master/test/raddstoreexpminusmax-microkernel-tester.h
// Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cstddef> #include <cstdlib> #include <random> #include <vector> #include <fp16/fp16.h> #include <xnnpack.h> #include <xnnpack/microfnptr.h> #include <xnnpack/microparams-init.h> class RAddStoreExpMinusMaxMicrokernelTester { public: inline RAddStoreExpMinusMaxMicrokernelTester& elements(size_t elements) { assert(elements != 0); this->elements_ = elements; return *this; } inline size_t elements() const { return this->elements_; } inline RAddStoreExpMinusMaxMicrokernelTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void Test(xnn_f16_raddstoreexpminusmax_ukernel_fn raddstoreexpminusmax, xnn_init_f16_expminus_params_fn init_params) const { std::random_device random_device; auto rng = std::mt19937(random_device()); // Choose such range that exph(x[i]) overflows, but exph(x[i] - x_max) doesn't. // However, the range is still narrow enough that double-precision exp doesn't overflow. std::uniform_real_distribution<float> f32dist(15.0f, 20.0f); std::vector<uint16_t> x(elements() + XNN_EXTRA_BYTES / sizeof(uint16_t)); std::vector<uint16_t> y(elements()); std::vector<float> y_ref(elements()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(x.begin(), x.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); }); std::fill(y.begin(), y.end(), UINT16_C(0x7E00) /* NaN */); // Compute reference results. float sum_ref = 0.0f; float x_max_as_float = -std::numeric_limits<float>::infinity(); for (size_t i = 0; i < elements(); i++) { x_max_as_float = std::max(x_max_as_float, fp16_ieee_to_fp32_value(x[i])); } const uint16_t x_max_as_half = fp16_ieee_from_fp32_value(x_max_as_float); for (size_t i = 0; i < elements(); i++) { const float y_ref_value = exp(fp16_ieee_to_fp32_value(x[i]) - x_max_as_float); y_ref[i] = y_ref_value; sum_ref += y_ref_value; } // Call optimized micro-kernel. uint16_t sum = UINT16_C(0x7E00) /* NaN */; xnn_f16_expminus_params params; init_params(&params); raddstoreexpminusmax(elements() * sizeof(uint16_t), x.data(), &x_max_as_half, y.data(), &sum, &params); // Verify results. for (size_t i = 0; i < elements(); i++) { EXPECT_NEAR(y_ref[i], fp16_ieee_to_fp32_value(y[i]), std::abs(y_ref[i]) * 5.0e-3f) << "element " << i << " / " << elements() << ", x_max " << x_max_as_float; } ASSERT_NEAR(sum_ref, fp16_ieee_to_fp32_value(sum), std::abs(sum_ref) * 5.0e-3f) << "batch " << elements() << ", x_max " << x_max_as_float; } } void Test(xnn_f32_raddstoreexpminusmax_ukernel_fn raddstoreexpminusmax, xnn_init_f32_expminus_params_fn init_params) const { std::random_device random_device; auto rng = std::mt19937(random_device()); // Choose such range that expf(x[i]) overflows, but expf(x[i] - x_max) doesn't. // However, the range is still narrow enough that double-precision exp doesn't overflow. std::uniform_real_distribution<float> f32dist(90.0f, 100.0f); std::vector<float> x(elements() + XNN_EXTRA_BYTES / sizeof(float)); std::vector<float> y(elements()); std::vector<double> y_ref(elements()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(x.begin(), x.end(), [&]() { return f32dist(rng); }); std::fill(y.begin(), y.end(), std::nanf("")); // Compute reference results. 
double sum_ref = 0.0f; const float x_max = *std::max_element(x.begin(), x.begin() + elements()); for (size_t i = 0; i < elements(); i++) { const double y_ref_value = exp(double(x[i]) - double(x_max)); y_ref[i] = y_ref_value; sum_ref += y_ref_value; } // Call optimized micro-kernel. float sum = std::nanf(""); xnn_f32_expminus_params params; init_params(&params); raddstoreexpminusmax(elements() * sizeof(float), x.data(), &x_max, y.data(), &sum, &params); // Verify results. for (size_t i = 0; i < elements(); i++) { EXPECT_NEAR(y_ref[i], double(y[i]), std::abs(y_ref[i]) * 1.0e-6) << "element " << i << " / " << elements() << ", x_max " << x_max; } ASSERT_NEAR(sum_ref, double(sum), std::abs(sum_ref) * 1.0e-6) << "batch " << elements() << ", x_max " << x_max; } } private: size_t elements_{1}; size_t iterations_{15}; };
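// --- Illustrative usage (added sketch; not part of the original header) ---
// Kernels taking a params struct are paired with their init function; the
// scalar rr2-p5 names below are assumptions based on XNNPACK's naming scheme.
TEST(F32_RADDSTOREEXPMINUSMAX__SCALAR_RR2_P5_X1, elements) {
  for (size_t elements = 1; elements <= 10; elements++) {
    RAddStoreExpMinusMaxMicrokernelTester()
      .elements(elements)
      .Test(xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x1,
            xnn_init_f32_expminus_scalar_rr2_p5_params);
  }
}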
4,827
35.854962
126
h
XNNPACK
XNNPACK-master/test/reduce-microkernel-tester.h
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <gtest/gtest.h>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <tuple>
#include <vector>

#include <fp16/fp16.h>

#include <xnnpack.h>
#include <xnnpack/microfnptr.h>
#include <xnnpack/microparams-init.h>

class ReduceMicrokernelTester {
  using FloatIt = std::vector<float>::iterator;

 public:
  enum class OpType {
    Max,
    Min,
    MinMax,
  };

  inline ReduceMicrokernelTester& batch_size(size_t batch_size) {
    assert(batch_size != 0);
    this->batch_size_ = batch_size;
    return *this;
  }

  inline size_t batch_size() const {
    return this->batch_size_;
  }

  inline ReduceMicrokernelTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

  void Test(xnn_f32_reduce_ukernel_fn reduce, OpType op_type, xnn_init_f32_default_params_fn init_params = nullptr) const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    std::uniform_real_distribution<float> f32dist(-1.0f, 1.0f);

    std::vector<float> input(batch_size() + XNN_EXTRA_BYTES / sizeof(float));
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), [&]() { return f32dist(rng); });

      // Compute reference results.
      FloatIt min, max;
      std::tie(min, max) = std::minmax_element(input.begin(), input.begin() + batch_size());

      // Prepare parameters.
      xnn_f32_default_params params;
      if (init_params != nullptr) {
        init_params(&params);
      }

      // Call optimized micro-kernel.
      float output[2] = {std::nanf(""), std::nanf("")};
      reduce(batch_size() * sizeof(float), input.data(), output, init_params != nullptr ? &params : nullptr);

      // Verify results.
      switch (op_type) {
        case OpType::Max:
          EXPECT_EQ(output[0], *max)
            << "with batch " << batch_size();
          break;
        case OpType::Min:
          EXPECT_EQ(output[0], *min)
            << "with batch " << batch_size();
          break;
        case OpType::MinMax:
          EXPECT_EQ(output[0], *min)
            << "with batch " << batch_size();
          EXPECT_EQ(output[1], *max)
            << "with batch " << batch_size();
          break;
      }
    }
  }

 private:
  size_t batch_size_{1};
  size_t iterations_{15};
};
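// --- Illustrative usage (added sketch; not part of the original header) ---
// One kernel can be checked as Min, Max, or MinMax depending on what it
// writes to output[0..1]; the rminmax kernel name is an assumption.
TEST(F32_RMINMAX__SCALAR_X1, batch) {
  for (size_t batch_size = 1; batch_size <= 16; batch_size++) {
    ReduceMicrokernelTester()
      .batch_size(batch_size)
      .Test(xnn_f32_rminmax_ukernel__scalar_x1, ReduceMicrokernelTester::OpType::MinMax);
  }
}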
2,657
25.058824
123
h
XNNPACK
XNNPACK-master/test/reduce-normalization-tester.h
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <gtest/gtest.h>

#include <cassert>
#include <cstddef>
#include <vector>

#include <xnnpack.h>
#include <xnnpack/normalization.h>

class ReduceNormalizationTester {
 public:
  inline ReduceNormalizationTester& axes(const std::vector<size_t>& axes) {
    assert(axes.size() <= XNN_MAX_TENSOR_DIMS);
    this->axes_ = axes;
    return *this;
  }

  inline const std::vector<size_t>& axes() const {
    return this->axes_;
  }

  inline ReduceNormalizationTester& shape(const std::vector<size_t>& shape) {
    assert(shape.size() <= XNN_MAX_TENSOR_DIMS);
    this->shape_ = shape;
    return *this;
  }

  inline const std::vector<size_t>& shape() const {
    return this->shape_;
  }

  inline ReduceNormalizationTester& expected_axes(const std::vector<size_t>& expected_axes) {
    assert(expected_axes.size() <= XNN_MAX_TENSOR_DIMS);
    this->expected_axes_ = expected_axes;
    return *this;
  }

  inline const std::vector<size_t>& expected_axes() const {
    return this->expected_axes_;
  }

  inline ReduceNormalizationTester& expected_shape(const std::vector<size_t>& expected_shape) {
    assert(expected_shape.size() <= XNN_MAX_TENSOR_DIMS);
    this->expected_shape_ = expected_shape;
    return *this;
  }

  inline const std::vector<size_t>& expected_shape() const {
    return this->expected_shape_;
  }

  void Test() const {
    std::vector<size_t> input_dims{shape()};
    size_t num_input_dims = input_dims.size();
    std::vector<size_t> reduction_axes{axes()};
    size_t num_reduction_axes = reduction_axes.size();

    xnn_normalize_reduction(
      &num_reduction_axes, reduction_axes.data(), &num_input_dims, input_dims.data());

    ASSERT_EQ(num_reduction_axes, expected_axes().size());
    for (size_t i = 0; i < num_reduction_axes; i++) {
      ASSERT_EQ(expected_axes()[i], reduction_axes[i]) << " at index " << i;
    }

    ASSERT_EQ(num_input_dims, expected_shape().size());
    for (size_t i = 0; i < num_input_dims; i++) {
      ASSERT_EQ(expected_shape()[i], input_dims[i]) << " at index " << i;
    }
  }

 private:
  std::vector<size_t> axes_;
  std::vector<size_t> shape_;
  std::vector<size_t> expected_axes_;
  std::vector<size_t> expected_shape_;
};
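// --- Illustrative usage (added sketch; not part of the original header) ---
// Expected values below assume the canonical behavior of
// xnn_normalize_reduction: adjacent reduction axes fuse and the matching
// dimensions are flattened; values are chosen for illustration only.
TEST(REDUCE_NORMALIZATION, adjacent_axes_fuse) {
  ReduceNormalizationTester()
    .shape({2, 3, 4, 5})
    .axes({1, 2})
    .expected_shape({2, 12, 5})
    .expected_axes({1})
    .Test();
}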
2,340
26.541176
95
h
XNNPACK
XNNPACK-master/test/rmax-microkernel-tester.h
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <gtest/gtest.h>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <limits>
#include <random>
#include <vector>

#include <fp16/fp16.h>

#include <xnnpack.h>
#include <xnnpack/microfnptr.h>


class RMaxMicrokernelTester {
 public:
  inline RMaxMicrokernelTester& n(size_t n) {
    assert(n != 0);
    this->n_ = n;
    return *this;
  }

  inline size_t n() const {
    return this->n_;
  }

  inline RMaxMicrokernelTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

  void Test(xnn_f16_rmax_ukernel_fn rmax) const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    std::uniform_real_distribution<float> f32dist;

    std::vector<uint16_t> x(n() + XNN_EXTRA_BYTES / sizeof(uint16_t));
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(x.begin(), x.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); });

      // Compute reference results.
      float y_ref = -std::numeric_limits<float>::infinity();
      for (size_t i = 0; i < n(); i++) {
        y_ref = std::max(y_ref, fp16_ieee_to_fp32_value(x[i]));
      }

      // Call optimized micro-kernel.
      uint16_t y = UINT16_C(0x7E00) /* NaN */;
      rmax(n() * sizeof(uint16_t), x.data(), &y);

      // Verify results.
      ASSERT_EQ(fp16_ieee_to_fp32_value(y), y_ref)
        << "batch " << n() << " y = " << y;
    }
  }

  void Test(xnn_f32_rmax_ukernel_fn rmax) const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    std::uniform_real_distribution<float> f32dist;

    std::vector<float> x(n());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(x.begin(), x.end(), [&]() { return f32dist(rng); });

      // Compute reference results.
      float y_ref = -std::numeric_limits<float>::infinity();
      for (size_t i = 0; i < n(); i++) {
        y_ref = std::max(y_ref, x[i]);
      }

      // Call optimized micro-kernel.
      float y = std::nanf("");
      rmax(n() * sizeof(float), x.data(), &y);

      // Verify results.
      ASSERT_EQ(y_ref, y)
        << "batch " << n();
    }
  }

  void Test(xnn_u8_rmax_ukernel_fn rmax) const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    std::uniform_int_distribution<int32_t> u8dist(
      std::numeric_limits<uint8_t>::min(), std::numeric_limits<uint8_t>::max());

    std::vector<uint8_t> x(n());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(x.begin(), x.end(), [&]() { return u8dist(rng); });

      // Compute reference results.
      uint8_t y_ref = 0;
      for (size_t i = 0; i < n(); i++) {
        y_ref = std::max(y_ref, x[i]);
      }

      // Call optimized micro-kernel.
      uint8_t y = u8dist(rng);
      rmax(n() * sizeof(uint8_t), x.data(), &y);

      // Verify results.
      ASSERT_EQ(int32_t(y_ref), int32_t(y))
        << "batch " << n();
    }
  }

 private:
  size_t n_{1};
  size_t iterations_{15};
};
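// --- Illustrative usage (added sketch; not part of the original header) ---
// The same tester covers f16, f32, and u8 rmax kernels through overloads of
// Test(); the scalar f32 kernel name follows XNNPACK's naming scheme.
TEST(F32_RMAX__SCALAR, n_gt_1) {
  for (size_t n = 2; n <= 16; n++) {
    RMaxMicrokernelTester()
      .n(n)
      .Test(xnn_f32_rmax_ukernel__scalar);
  }
}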
3,431
25.8125
99
h
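A short sketch of how RMaxMicrokernelTester is typically driven from a gtest case. The kernel symbol xnn_f32_rmax_ukernel__scalar is an assumption about the repository's naming scheme and is presumed to be declared by a kernel header included by the test file.

TEST(F32_RMAX__SCALAR, batch_gt_1) {
  // Sweep several batch sizes through a scalar max-reduction kernel.
  for (size_t n = 2; n <= 16; n++) {
    RMaxMicrokernelTester()
        .n(n)
        .Test(xnn_f32_rmax_ukernel__scalar);
  }
}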
XNNPACK
XNNPACK-master/test/rmaxabs-microkernel-tester.h
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cmath> #include <cstddef> #include <cstdlib> #include <functional> #include <random> #include <vector> #include <xnnpack.h> #include <xnnpack/aligned-allocator.h> #include <xnnpack/microfnptr.h> class RMaxAbsMicrokernelTester { public: inline RMaxAbsMicrokernelTester& batch(size_t batch) { assert(batch != 0); this->batch_ = batch; return *this; } inline size_t batch() const { return this->batch_; } inline RMaxAbsMicrokernelTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void Test(xnn_s16_rmaxabs_ukernel_fn rmaxabs) const { std::random_device random_device; auto rng = std::mt19937(random_device()); auto i16rng = std::bind(std::uniform_int_distribution<int16_t>(), std::ref(rng)); std::vector<int16_t> input(batch() + XNN_EXTRA_BYTES / sizeof(int16_t)); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), std::ref(i16rng)); // Compute reference results. int32_t output_ref = 0; for (size_t n = 0; n < batch(); n++) { const int32_t input_value = static_cast<int32_t>(input[n]); const int32_t abs_value = std::abs(input_value); output_ref = std::max(output_ref, abs_value); } // Call optimized micro-kernel. uint16_t output = UINT16_C(0xDEAD); rmaxabs(batch() * sizeof(int16_t), input.data(), &output); // Verify results. ASSERT_EQ(static_cast<int32_t>(output), output_ref) << "batch " << batch(); } } private: size_t batch_{1}; size_t iterations_{15}; };
1,939
24.194805
85
h
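A sketch of a typical gtest case for RMaxAbsMicrokernelTester; the kernel symbol xnn_s16_rmaxabs_ukernel__scalar_x1 is an assumption about the naming scheme.

TEST(S16_RMAXABS__SCALAR_X1, batch_gt_1) {
  // Exercise batches larger than the kernel's unroll factor.
  for (size_t batch = 2; batch < 8; batch++) {
    RMaxAbsMicrokernelTester()
        .batch(batch)
        .Test(xnn_s16_rmaxabs_ukernel__scalar_x1);
  }
}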
XNNPACK
XNNPACK-master/test/rope-operator-tester.h
// Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <cassert> #include <cstddef> #include <cstdlib> #include <algorithm> #include <cmath> #include <limits> #include <memory> #include <random> #include <vector> #include <fp16/fp16.h> #include <xnnpack.h> #include <xnnpack/cache.h> class RoPEOperatorTester { public: enum class WeightsType { Default, FP32, }; inline RoPEOperatorTester& channels(size_t channels) { assert(channels >= 1); this->channels_ = channels; return *this; } inline size_t channels() const { return this->channels_; } inline RoPEOperatorTester& heads(size_t heads) { assert(heads >= 1); this->heads_ = heads; return *this; } inline size_t heads() const { return this->heads_; } inline RoPEOperatorTester& tokens(size_t tokens) { assert(tokens >= 1); this->tokens_ = tokens; return *this; } inline size_t tokens() const { return this->tokens_; } inline RoPEOperatorTester& batch_size(size_t batch_size) { assert(batch_size >= 1); this->batch_size_ = batch_size; return *this; } inline size_t batch_size() const { return this->batch_size_; } inline RoPEOperatorTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void TestF32() const { ASSERT_EQ(channels() % 2, 0); std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32rdist(1.0f, 10.0f); std::uniform_real_distribution<float> f32idist(0.01f, 0.1f); std::vector<float> input(XNN_EXTRA_BYTES / sizeof(float) + batch_size() * tokens() * heads() * channels()); std::vector<float> weights(XNN_EXTRA_BYTES / sizeof(float) + tokens() * channels()); std::vector<float> output(batch_size() * tokens() * heads() * channels()); std::vector<double> output_ref(batch_size() * tokens() * heads() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { for (size_t n = 0; n < batch_size(); n++) { for (size_t t = 0; t < tokens(); t++) { for (size_t h = 0; h < heads(); h++) { std::generate_n(input.begin() + ((n * tokens() + t) * heads() + h) * channels(), channels() / 2, [&]() { return f32rdist(rng); }); std::generate_n(input.begin() + (((n * tokens() + t) * heads() + h) * channels() + channels() / 2), channels() / 2, [&]() { return f32idist(rng); }); } } } for (size_t t = 0; t < tokens(); t++) { std::generate_n(weights.begin() + t * channels(), channels() / 2, [&]() { return f32rdist(rng); }); std::generate_n(weights.begin() + (t * channels() + channels() / 2), channels() / 2, [&]() { return f32idist(rng); }); } std::fill(output.begin(), output.end(), std::nanf("")); std::fill(output_ref.begin(), output_ref.end(), std::nan("")); // Compute reference results for (size_t n = 0; n < batch_size(); n++) { for (size_t t = 0; t < tokens(); t++) { for (size_t h = 0; h < heads(); h++) { for (size_t c = 0; c < channels() / 2; c++) { output_ref[((n * tokens() + t) * heads() + h) * channels() + c] = double(input[((n * tokens() + t) * heads() + h) * channels() + c]) * double(weights[t * channels() + c]) - double(input[((n * tokens() + t) * heads() + h) * channels() + (c + channels() / 2)]) * double(weights[t * channels() + (c + channels() / 2)]); output_ref[((n * tokens() + t) * heads() + h) * channels() + (c + channels() / 2)] = double(input[((n * tokens() + t) * heads() + h) * channels() + c]) * double(weights[t * channels() + (c + 
channels() / 2)]) + double(input[((n * tokens() + t) * heads() + h) * channels() + (c + channels() / 2)]) * double(weights[t * channels() + c]); } } } } // Create, setup, run, and destroy RoPE operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t rope_op = nullptr; const xnn_status status = xnn_create_rope_nthc_f32( tokens(), channels(), weights.data(), /*flags=*/0, &rope_op); if (status == xnn_status_unsupported_hardware) { GTEST_SKIP(); } ASSERT_EQ(xnn_status_success, status); ASSERT_NE(nullptr, rope_op); // Smart pointer to automatically delete rope_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_rope_op(rope_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_rope_nthc_f32( rope_op, batch_size(), tokens(), heads(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_rope_nthc_f32( rope_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(rope_op, /*threadpool=*/nullptr)); // Verify results. for (size_t n = 0; n < batch_size(); n++) { for (size_t t = 0; t < tokens(); t++) { for (size_t h = 0; h < heads(); h++) { for (size_t c = 0; c < channels(); c++) { ASSERT_NEAR(output_ref[((n * tokens() + t) * heads() + h) * channels() + c], output[((n * tokens() + t) * heads() + h) * channels() + c], 1.0e-4 * std::abs(output_ref[((n * tokens() + t) * heads() + h) * channels() + c])) << "batch " << n << " / " << batch_size() << ", token " << t << " / " << tokens() << ", head " << h << " / " << heads() << ", channel " << c << " / " << channels(); } } } } } } private: size_t channels_{1}; size_t heads_{1}; size_t tokens_{1}; size_t batch_size_{1}; size_t iterations_{3}; };
6,445
31.887755
111
h
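A sketch of how RoPEOperatorTester might be exercised. Note that channels() must be even, since TestF32 treats the first half of each channel vector as real components and the second half as imaginary components.

TEST(ROPE_NTHC_F32, multiple_heads_and_tokens) {
  RoPEOperatorTester()
      .batch_size(2)
      .tokens(5)
      .heads(3)
      .channels(16)  // must be even: real half followed by imaginary half
      .TestF32();
}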
XNNPACK
XNNPACK-master/test/rsum-microkernel-tester.h
// Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cmath> #include <cstddef> #include <cstdlib> #include <functional> #include <numeric> #include <random> #include <vector> #include <fp16/fp16.h> #include <xnnpack.h> #include <xnnpack/microfnptr.h> #include <xnnpack/microparams-init.h> class RSumMicrokernelTester { public: inline RSumMicrokernelTester& batch_size(size_t batch_size) { assert(batch_size != 0); this->batch_size_ = batch_size; return *this; } inline size_t batch_size() const { return this->batch_size_; } inline RSumMicrokernelTester& scale(float scale) { this->scale_ = scale; return *this; } inline float scale() const { return this->scale_; } inline RSumMicrokernelTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void Test(xnn_f16_rsum_ukernel_fn rsum, xnn_init_f16_scale_params_fn init_params) const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(0.01f, 1.0f); std::vector<uint16_t> input(batch_size() + XNN_EXTRA_BYTES / sizeof(uint16_t)); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); }); // Compute reference results. float output_ref = 0.0f; for (size_t i = 0; i < batch_size(); i++) { output_ref += fp16_ieee_to_fp32_value(input[i]); } output_ref *= scale(); // Prepare parameters. xnn_f16_scale_params params; init_params(&params, fp16_ieee_from_fp32_value(scale())); // Call optimized micro-kernel. uint16_t output = UINT16_C(0x7E00); /* NaN */ rsum(batch_size() * sizeof(uint16_t), input.data(), &output, &params); // Verify results. EXPECT_NEAR(fp16_ieee_to_fp32_value(output), output_ref, std::abs(output_ref) * 2.0e-3f) << "with batch " << batch_size() << ", scale " << scale(); } } void Test(xnn_f16_f32acc_rsum_ukernel_fn rsum, xnn_init_f16_f32acc_scale_params_fn init_params) const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(0.01f, 1.0f); std::vector<uint16_t> input(batch_size() + XNN_EXTRA_BYTES / sizeof(uint16_t)); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); }); // Compute reference results. float output_ref = 0.0f; for (size_t i = 0; i < batch_size(); i++) { output_ref += fp16_ieee_to_fp32_value(input[i]); } output_ref *= scale(); // Prepare parameters. xnn_f16_f32acc_scale_params params; init_params(&params, scale()); // Call optimized micro-kernel. uint16_t output = UINT16_C(0x7E00); /* NaN */ rsum(batch_size() * sizeof(uint16_t), input.data(), &output, &params); // Verify results.
EXPECT_NEAR(fp16_ieee_to_fp32_value(output), output_ref, std::abs(output_ref) * 1.0e-3f) << "with batch " << batch_size() << ", scale " << scale(); } } void Test(xnn_f32_rsum_ukernel_fn rsum, xnn_init_f32_scale_params_fn init_params) const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(0.01f, 1.0f); std::vector<float> input(batch_size() + XNN_EXTRA_BYTES / sizeof(float)); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return f32dist(rng); }); // Compute reference results. const double output_ref = std::accumulate(input.begin(), input.begin() + batch_size(), 0.0) * double(scale()); // Prepare parameters. xnn_f32_scale_params params; init_params(&params, scale()); // Call optimized micro-kernel. float output = std::nanf(""); rsum(batch_size() * sizeof(float), input.data(), &output, &params); // Verify results. EXPECT_NEAR(output, output_ref, std::abs(output_ref) * 1.0e-6f) << "with batch " << batch_size() << ", scale " << scale(); } } private: size_t batch_size_{1}; float scale_{1.0f}; size_t iterations_{15}; };
4,658
30.693878
116
h
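A sketch of a gtest case driving RSumMicrokernelTester; the kernel and init-params symbols below are assumptions about the repository's naming.

TEST(F32_RSUM__SCALAR_X1, scale) {
  // Check that the scale parameter is applied to the reduced sum.
  for (float scale = 0.3f; scale < 5.0f; scale *= 3.0f) {
    RSumMicrokernelTester()
        .batch_size(7)
        .scale(scale)
        .Test(xnn_f32_rsum_ukernel__scalar_x1, xnn_init_f32_scale_scalar_params);
  }
}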
XNNPACK
XNNPACK-master/test/runtime-tester.h
// Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cstddef> #include <cstdlib> #include <cstring> #include <memory> #include <vector> #include <xnnpack.h> #include <xnnpack/subgraph.h> #include "subgraph-tester.h" namespace xnnpack { class RuntimeTester : public SubgraphTester { public: using SubgraphTester::SubgraphTester; template<typename T> inline std::vector<T> RunWithFusion() { Run(); std::vector<char>& tensor = this->external_tensors_.at(this->output_id_); std::vector<T> output(tensor.size() / sizeof(T)); std::memcpy(output.data(), tensor.data(), tensor.size()); return output; } template<typename T> inline std::vector<T> RunWithoutFusion() { Run(XNN_FLAG_NO_OPERATOR_FUSION); std::vector<char>& tensor = this->external_tensors_.at(this->output_id_); std::vector<T> output(tensor.size() / sizeof(T)); std::memcpy(output.data(), tensor.data(), tensor.size()); return output; } void CreateRuntime(uint32_t flags = 0) { xnn_runtime_t runtime = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_runtime_v3(this->subgraph_.get(), nullptr, nullptr, flags, &runtime)); ASSERT_NE(nullptr, runtime); runtime_.reset(runtime); } void SetupRuntime() { std::vector<xnn_external_value> externals; for (auto it = this->external_tensors_.begin(); it != this->external_tensors_.end(); ++it) { if (it->first == this->output_id_) { // Scramble output tensor. std::fill(it->second.begin(), it->second.end(), 0xA8); } externals.push_back(xnn_external_value{it->first, it->second.data()}); } ASSERT_EQ(xnn_status_success, xnn_setup_runtime(Runtime(), externals.size(), externals.data())); externals_ = externals; } size_t NumOperators() { size_t count = 0; for (size_t i = 0; i < runtime_->num_ops; i++) { if (runtime_->opdata[i].operator_objects[0] != nullptr) { count++; } } return count; } xnn_runtime_t Runtime() const { return runtime_.get(); } private: void Run(uint32_t flags = 0) { CreateRuntime(flags); SetupRuntime(); ASSERT_EQ(xnn_status_success, xnn_setup_runtime(Runtime(), externals_.size(), externals_.data())); ASSERT_EQ(xnn_status_success, xnn_invoke_runtime(Runtime())); } std::unique_ptr<xnn_runtime, decltype(&xnn_delete_runtime)> runtime_{nullptr, xnn_delete_runtime}; std::vector<xnn_external_value> externals_; }; } // namespace xnnpack
2,669
28.021739
115
h
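A rough sketch of the intended call pattern for RuntimeTester: build a graph through the inherited SubgraphTester API (defined in subgraph-tester.h, not shown here), then check that fused and unfused execution agree. The constructor argument and the elided builder calls are assumptions about that base class.

TEST(RUNTIME_FUSION, fused_matches_unfused) {
  xnnpack::RuntimeTester tester(/*external_value_ids=*/3);  // hypothetical ctor argument
  // ... define tensors and nodes via the SubgraphTester base class ...
  std::vector<float> fused = tester.RunWithFusion<float>();
  std::vector<float> unfused = tester.RunWithoutFusion<float>();
  ASSERT_EQ(unfused, fused);  // operator fusion must not change the results
}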
XNNPACK
XNNPACK-master/test/sigmoid-operator-tester.h
// Copyright (c) Facebook, Inc. and its affiliates. // All rights reserved. // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cmath> #include <cstddef> #include <cstdlib> #include <limits> #include <memory> #include <random> #include <vector> #include <fp16/fp16.h> #include <xnnpack.h> class SigmoidOperatorTester { public: inline SigmoidOperatorTester& channels(size_t channels) { assert(channels != 0); this->channels_ = channels; return *this; } inline size_t channels() const { return this->channels_; } inline SigmoidOperatorTester& input_stride(size_t input_stride) { assert(input_stride != 0); this->input_stride_ = input_stride; return *this; } inline size_t input_stride() const { if (this->input_stride_ == 0) { return this->channels_; } else { assert(this->input_stride_ >= this->channels_); return this->input_stride_; } } inline SigmoidOperatorTester& output_stride(size_t output_stride) { assert(output_stride != 0); this->output_stride_ = output_stride; return *this; } inline size_t output_stride() const { if (this->output_stride_ == 0) { return this->channels_; } else { assert(this->output_stride_ >= this->channels_); return this->output_stride_; } } inline SigmoidOperatorTester& batch_size(size_t batch_size) { assert(batch_size != 0); this->batch_size_ = batch_size; return *this; } inline size_t batch_size() const { return this->batch_size_; } inline SigmoidOperatorTester& input_scale(float input_scale) { assert(input_scale > 0.0f); assert(std::isnormal(input_scale)); this->input_scale_ = input_scale; return *this; } inline float input_scale() const { return this->input_scale_; } inline SigmoidOperatorTester& input_zero_point(uint8_t input_zero_point) { this->input_zero_point_ = input_zero_point; return *this; } inline uint8_t input_zero_point() const { return this->input_zero_point_; } inline float output_scale() const { return 1.0f / 256.0f; } inline uint8_t output_zero_point() const { return 0; } inline SigmoidOperatorTester& qmin(uint8_t qmin) { this->qmin_ = qmin; return *this; } inline uint8_t qmin() const { return this->qmin_; } inline SigmoidOperatorTester& qmax(uint8_t qmax) { this->qmax_ = qmax; return *this; } inline uint8_t qmax() const { return this->qmax_; } inline SigmoidOperatorTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void TestF16() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(-25.0f, 25.0f); std::vector<uint16_t> input((batch_size() - 1) * input_stride() + channels() + XNN_EXTRA_BYTES / sizeof(uint16_t)); std::vector<uint16_t> output((batch_size() - 1) * output_stride() + channels()); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); }); std::fill(output.begin(), output.end(), UINT16_C(0x7E00) /* NaN */); // Compute reference results. 
for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { const float x = fp16_ieee_to_fp32_value(input[i * input_stride() + c]); const float exp_x = std::exp(x); const float sigmoid_x = exp_x / (1.0 + exp_x); output_ref[i * channels() + c] = sigmoid_x; } } // Create, setup, run, and destroy Sigmoid operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t sigmoid_op = nullptr; const xnn_status status = xnn_create_sigmoid_nc_f16( channels(), input_stride(), output_stride(), 0, &sigmoid_op); if (status == xnn_status_unsupported_hardware) { GTEST_SKIP(); } ASSERT_EQ(xnn_status_success, status); ASSERT_NE(nullptr, sigmoid_op); // Smart pointer to automatically delete sigmoid_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_sigmoid_op(sigmoid_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_sigmoid_nc_f16(sigmoid_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_sigmoid_nc_f16(sigmoid_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(sigmoid_op, /*threadpool=*/nullptr)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { ASSERT_NEAR( fp16_ieee_to_fp32_value(output[i * output_stride() + c]), output_ref[i * channels() + c], std::max(1.0e-4f, std::abs(output_ref[i * channels() + c]) * 5.0e-3f)); } } } } void TestF32() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(-25.0f, 25.0f); std::vector<float> input((batch_size() - 1) * input_stride() + channels() + XNN_EXTRA_BYTES / sizeof(float)); std::vector<float> output((batch_size() - 1) * output_stride() + channels()); std::vector<double> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return f32dist(rng); }); std::fill(output.begin(), output.end(), std::nanf("")); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { const double x = input[i * input_stride() + c]; const double exp_x = std::exp(x); const double sigmoid_x = exp_x / (1.0 + exp_x); output_ref[i * channels() + c] = sigmoid_x; } } // Create, setup, run, and destroy Sigmoid operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t sigmoid_op = nullptr; xnn_status status = xnn_create_sigmoid_nc_f32( channels(), input_stride(), output_stride(), 0, &sigmoid_op); ASSERT_EQ(xnn_status_success, status); ASSERT_NE(nullptr, sigmoid_op); // Smart pointer to automatically delete sigmoid_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_sigmoid_op(sigmoid_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_sigmoid_nc_f32(sigmoid_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_sigmoid_nc_f32(sigmoid_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(sigmoid_op, /*threadpool=*/nullptr)); // Verify results. 
for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { ASSERT_NEAR( output[i * output_stride() + c], output_ref[i * channels() + c], 5.0e-6); } } } } void TestQS8() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_int_distribution<int32_t> i8dist( std::numeric_limits<int8_t>::min(), std::numeric_limits<int8_t>::max()); std::vector<int8_t> input((batch_size() - 1) * input_stride() + channels() + XNN_EXTRA_BYTES / sizeof(int8_t)); std::vector<int8_t> output((batch_size() - 1) * output_stride() + channels()); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return i8dist(rng); }); std::fill(output.begin(), output.end(), INT8_C(0xA5)); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { const float x = input_scale() * (int32_t(input[i * input_stride() + c]) - int32_t(input_zero_point() - 0x80)); const float sigmoid_x = 1.0f / (1.0f + std::exp(-x)); const float scaled_sigmoid_x = sigmoid_x / output_scale(); float y = scaled_sigmoid_x; y = std::min<float>(y, int32_t(qmax() - 0x80) - int32_t(output_zero_point() - 0x80)); y = std::max<float>(y, int32_t(qmin() - 0x80) - int32_t(output_zero_point() - 0x80)); output_ref[i * channels() + c] = y + int32_t(output_zero_point() - 0x80); } } // Create, setup, run, and destroy Sigmoid operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t sigmoid_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_sigmoid_nc_qs8( channels(), input_stride(), output_stride(), int8_t(input_zero_point() - 0x80), input_scale(), int8_t(output_zero_point() - 0x80), output_scale(), int8_t(qmin() - 0x80), int8_t(qmax() - 0x80), 0, &sigmoid_op)); ASSERT_NE(nullptr, sigmoid_op); // Smart pointer to automatically delete sigmoid_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_sigmoid_op(sigmoid_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_sigmoid_nc_qs8(sigmoid_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_sigmoid_nc_qs8(sigmoid_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(sigmoid_op, /*threadpool=*/nullptr)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_NEAR(float(int32_t(output[i * output_stride() + c])), output_ref[i * channels() + c], 0.6f); } } } } void TestQU8() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_int_distribution<int32_t> u8dist( std::numeric_limits<uint8_t>::min(), std::numeric_limits<uint8_t>::max()); std::vector<uint8_t> input((batch_size() - 1) * input_stride() + channels() + XNN_EXTRA_BYTES / sizeof(uint8_t)); std::vector<uint8_t> output((batch_size() - 1) * output_stride() + channels()); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return u8dist(rng); }); std::fill(output.begin(), output.end(), UINT8_C(0xA5)); // Compute reference results. 
for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { const float x = input_scale() * (int32_t(input[i * input_stride() + c]) - int32_t(input_zero_point())); const float sigmoid_x = 1.0f / (1.0f + std::exp(-x)); const float scaled_sigmoid_x = sigmoid_x / output_scale(); float y = scaled_sigmoid_x; y = std::min<float>(y, int32_t(qmax()) - int32_t(output_zero_point())); y = std::max<float>(y, int32_t(qmin()) - int32_t(output_zero_point())); output_ref[i * channels() + c] = y + int32_t(output_zero_point()); } } // Create, setup, run, and destroy Sigmoid operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t sigmoid_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_sigmoid_nc_qu8( channels(), input_stride(), output_stride(), input_zero_point(), input_scale(), output_zero_point(), output_scale(), qmin(), qmax(), 0, &sigmoid_op)); ASSERT_NE(nullptr, sigmoid_op); // Smart pointer to automatically delete sigmoid_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_sigmoid_op(sigmoid_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_sigmoid_nc_qu8(sigmoid_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_sigmoid_nc_qu8(sigmoid_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(sigmoid_op, /*threadpool=*/nullptr)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_NEAR(float(int32_t(output[i * output_stride() + c])), output_ref[i * channels() + c], 0.6f); } } } } void TestRunF32() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(-25.0f, 25.0f); std::vector<float> input((batch_size() - 1) * input_stride() + channels() + XNN_EXTRA_BYTES / sizeof(float)); std::vector<float> output((batch_size() - 1) * output_stride() + channels()); std::vector<double> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return f32dist(rng); }); std::fill(output.begin(), output.end(), std::nanf("")); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { const double x = input[i * input_stride() + c]; const double exp_x = std::exp(x); const double sigmoid_x = exp_x / (1.0 + exp_x); output_ref[i * channels() + c] = sigmoid_x; } } // Initialize and run Sigmoid operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); ASSERT_EQ(xnn_status_success, xnn_run_sigmoid_nc_f32( channels(), input_stride(), output_stride(), batch_size(), input.data(), output.data(), 0, nullptr /* thread pool */)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { ASSERT_NEAR( output[i * output_stride() + c], output_ref[i * channels() + c], 5.0e-6); } } } } private: size_t batch_size_{1}; size_t channels_{1}; size_t input_stride_{0}; size_t output_stride_{0}; float input_scale_{0.75f}; uint8_t input_zero_point_{121}; uint8_t qmin_{0}; uint8_t qmax_{255}; size_t iterations_{15}; };
14,906
35.270073
119
h
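A sketch of a strided-batch case for SigmoidOperatorTester; the stride values are arbitrary as long as they are at least channels().

TEST(SIGMOID_NC_F32, strided_batch) {
  SigmoidOperatorTester()
      .batch_size(3)
      .channels(100)
      .input_stride(129)   // strides may exceed channels(): rows are padded
      .output_stride(117)
      .iterations(3)
      .TestF32();
}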
XNNPACK
XNNPACK-master/test/slice-normalization-tester.h
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <algorithm> #include <cassert> #include <cstddef> #include <vector> #include <xnnpack.h> #include <xnnpack/normalization.h> #include <gtest/gtest.h> class SliceNormalizationTester { public: SliceNormalizationTester() : expected_offsets_(XNN_MAX_TENSOR_DIMS, 0), expected_input_shape_(XNN_MAX_TENSOR_DIMS, 1), expected_output_shape_(XNN_MAX_TENSOR_DIMS, 1) {} inline SliceNormalizationTester& input_shape( const std::vector<size_t> input_shape) { assert(input_shape.size() <= XNN_MAX_TENSOR_DIMS); this->input_shape_ = input_shape; return *this; } inline std::vector<size_t> input_shape() { return input_shape_; } inline size_t num_dims() const { return input_shape_.size(); } inline SliceNormalizationTester& offsets(const std::vector<size_t> offsets) { assert(offsets.size() <= XNN_MAX_TENSOR_DIMS); this->offsets_ = offsets; return *this; } inline std::vector<size_t> offsets() { return offsets_; } inline SliceNormalizationTester& sizes(const std::vector<size_t> sizes) { assert(sizes.size() <= XNN_MAX_TENSOR_DIMS); this->sizes_ = sizes; return *this; } inline std::vector<size_t> sizes() { return sizes_; } inline SliceNormalizationTester& expected_offsets( const std::vector<size_t> expected_offsets) { assert(expected_offsets.size() <= XNN_MAX_TENSOR_DIMS); std::copy(expected_offsets.begin(), expected_offsets.end(), this->expected_offsets_.end() - expected_offsets.size()); return *this; } inline std::vector<size_t> expected_offsets() { return expected_offsets_; } inline SliceNormalizationTester& expected_input_shape( const std::vector<size_t> expected_input_shape) { assert(expected_input_shape.size() <= XNN_MAX_TENSOR_DIMS); std::copy(expected_input_shape.begin(), expected_input_shape.end(), this->expected_input_shape_.end() - expected_input_shape.size()); return *this; } inline std::vector<size_t> expected_input_shape() { return expected_input_shape_; } inline SliceNormalizationTester& expected_output_shape( const std::vector<size_t> expected_output_shape) { assert(expected_output_shape.size() <= XNN_MAX_TENSOR_DIMS); std::copy( expected_output_shape.begin(), expected_output_shape.end(), this->expected_output_shape_.end() - expected_output_shape.size()); expected_num_normalized_dims_ = expected_output_shape.size(); return *this; } inline std::vector<size_t> expected_output_shape() { return expected_output_shape_; } void Test() { std::vector<size_t> actual_normalized_offsets(XNN_MAX_TENSOR_DIMS); std::vector<size_t> actual_normalized_input_shape(XNN_MAX_TENSOR_DIMS); std::vector<size_t> actual_normalized_output_shape(XNN_MAX_TENSOR_DIMS); size_t actual_num_normalized_dims; xnn_normalize_slice(num_dims(), offsets().data(), sizes().data(), input_shape().data(), actual_normalized_offsets.data(), actual_normalized_input_shape.data(), actual_normalized_output_shape.data(), &actual_num_normalized_dims); EXPECT_EQ(expected_num_normalized_dims_, actual_num_normalized_dims); EXPECT_EQ(expected_offsets(), actual_normalized_offsets); EXPECT_EQ(expected_input_shape(), actual_normalized_input_shape); EXPECT_EQ(expected_output_shape(), actual_normalized_output_shape); } private: std::vector<size_t> input_shape_; std::vector<size_t> offsets_; std::vector<size_t> sizes_; std::vector<size_t> expected_offsets_{XNN_MAX_TENSOR_DIMS, 0}; std::vector<size_t> expected_input_shape_; std::vector<size_t> expected_output_shape_; size_t 
expected_num_normalized_dims_{0}; };
3,969
33.824561
79
h
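A minimal sketch for SliceNormalizationTester. The expected values encode an assumption that fully-sliced inner dimensions fold into the outer one (a 5x3x4 input flattening to 60 elements, offset 1 scaling to 12); they are illustrative, not verified against xnn_normalize_slice.

TEST(SLICE_NORMALIZATION, inner_dims_fold) {
  SliceNormalizationTester()
      .input_shape({5, 3, 4})
      .offsets({1, 0, 0})
      .sizes({3, 3, 4})
      .expected_offsets({12})
      .expected_input_shape({60})
      .expected_output_shape({36})
      .Test();
}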
XNNPACK
XNNPACK-master/test/slice-operator-tester.h
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <algorithm> #include <array> #include <cassert> #include <cstddef> #include <cstdlib> #include <functional> #include <initializer_list> #include <memory> #include <numeric> #include <vector> #include <gtest/gtest.h> #include <xnnpack.h> class SliceOperatorTester { public: inline SliceOperatorTester& input_shape(std::initializer_list<size_t> input_shape) { assert(input_shape.size() <= XNN_MAX_TENSOR_DIMS); input_shape_ = std::vector<size_t>(input_shape); return *this; } inline const std::vector<size_t>& input_shape() const { return input_shape_; } inline size_t input_dim(size_t i) const { return i < input_shape_.size() ? input_shape_[i] : 1; } inline size_t num_dims() const { return input_shape_.size(); } inline size_t num_input_elements() const { return std::accumulate( input_shape_.cbegin(), input_shape_.cend(), size_t(1), std::multiplies<size_t>()); } inline SliceOperatorTester& offsets(std::initializer_list<size_t> offsets) { assert(offsets.size() <= XNN_MAX_TENSOR_DIMS); offsets_ = std::vector<size_t>(offsets); return *this; } inline const std::vector<size_t>& offsets() const { return offsets_; } inline size_t offset(size_t i) const { return i < offsets_.size() ? offsets_[i] : 0; } inline size_t num_offsets() const { return offsets_.size(); } inline SliceOperatorTester& sizes(std::initializer_list<size_t> sizes) { assert(sizes.size() <= XNN_MAX_TENSOR_DIMS); sizes_ = std::vector<size_t>(sizes); return *this; } inline const std::vector<size_t>& sizes() const { return sizes_; } inline size_t size(size_t i) const { return i < sizes_.size() ? sizes_[i] : 1; } inline size_t num_sizes() const { return sizes_.size(); } inline size_t output_dim(size_t i) const { return size(i); } inline size_t num_output_elements() const { size_t elements = 1; for (size_t i = 0; i < num_dims(); i++) { elements *= output_dim(i); } return elements; } inline SliceOperatorTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void TestX8() const { ASSERT_EQ(num_dims(), num_offsets()); ASSERT_EQ(num_dims(), num_sizes()); // Compute generalized shapes.
std::array<size_t, XNN_MAX_TENSOR_DIMS> input_dims; std::array<size_t, XNN_MAX_TENSOR_DIMS> input_offsets; std::array<size_t, XNN_MAX_TENSOR_DIMS> output_sizes; std::array<size_t, XNN_MAX_TENSOR_DIMS> output_dims; std::fill(input_dims.begin(), input_dims.end(), 1); std::fill(input_offsets.begin(), input_offsets.end(), 0); std::fill(output_sizes.begin(), output_sizes.end(), 0); std::fill(output_dims.begin(), output_dims.end(), 1); for (size_t i = 0; i < num_dims(); i++) { input_dims[XNN_MAX_TENSOR_DIMS - num_dims() + i] = input_dim(i); input_offsets[XNN_MAX_TENSOR_DIMS - num_dims() + i] = offset(i); output_sizes[XNN_MAX_TENSOR_DIMS - num_dims() + i] = size(i); output_dims[XNN_MAX_TENSOR_DIMS - num_dims() + i] = output_dim(i); } std::vector<uint8_t> input(XNN_EXTRA_BYTES / sizeof(uint8_t) + num_input_elements()); std::vector<uint8_t> output(num_output_elements()); std::vector<uint8_t> output_ref(num_output_elements()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::iota(input.begin(), input.end(), UINT8_C(0)); std::fill(output.begin(), output.end(), UINT8_C(0xAA)); ComputeReference(input_dims, output_dims, input_offsets, input, output_ref); // Create, setup, run, and destroy a Slice operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t slice_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_slice_nd_x8( 0, &slice_op)); ASSERT_NE(nullptr, slice_op); // Smart pointer to automatically delete slice_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_slice_op(slice_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_slice_nd_x8( slice_op, num_dims(), input_shape().data(), offsets().data(), sizes().data(), nullptr /* thread pool */)); ASSERT_EQ(xnn_status_success, xnn_setup_slice_nd_x8( slice_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(slice_op, nullptr /* thread pool */)); ASSERT_EQ(output, output_ref); } } void TestX16() const { ASSERT_EQ(num_dims(), num_offsets()); ASSERT_EQ(num_dims(), num_sizes()); // Compute generalized shapes. std::array<size_t, XNN_MAX_TENSOR_DIMS> input_dims; std::array<size_t, XNN_MAX_TENSOR_DIMS> input_offsets; std::array<size_t, XNN_MAX_TENSOR_DIMS> output_sizes; std::array<size_t, XNN_MAX_TENSOR_DIMS> output_dims; std::fill(input_dims.begin(), input_dims.end(), 1); std::fill(input_offsets.begin(), input_offsets.end(), 0); std::fill(output_sizes.begin(), output_sizes.end(), 0); std::fill(output_dims.begin(), output_dims.end(), 1); for (size_t i = 0; i < num_dims(); i++) { input_dims[XNN_MAX_TENSOR_DIMS - num_dims() + i] = input_dim(i); input_offsets[XNN_MAX_TENSOR_DIMS - num_dims() + i] = offset(i); output_sizes[XNN_MAX_TENSOR_DIMS - num_dims() + i] = size(i); output_dims[XNN_MAX_TENSOR_DIMS - num_dims() + i] = output_dim(i); } std::vector<uint16_t> input(XNN_EXTRA_BYTES / sizeof(uint16_t) + num_input_elements()); std::vector<uint16_t> output(num_output_elements()); std::vector<uint16_t> output_ref(num_output_elements()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::iota(input.begin(), input.end(), UINT16_C(0)); std::fill(output.begin(), output.end(), UINT16_C(0xDEAD)); ComputeReference(input_dims, output_dims, input_offsets, input, output_ref); // Create, setup, run, and destroy a Slice operator.
ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t slice_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_slice_nd_x16( 0, &slice_op)); ASSERT_NE(nullptr, slice_op); // Smart pointer to automatically delete slice_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_slice_op(slice_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_slice_nd_x16( slice_op, num_dims(), input_shape().data(), offsets().data(), sizes().data(), nullptr /* thread pool */)); ASSERT_EQ(xnn_status_success, xnn_setup_slice_nd_x16( slice_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(slice_op, nullptr /* thread pool */)); ASSERT_EQ(output, output_ref); } } void TestX32() const { ASSERT_EQ(num_dims(), num_offsets()); ASSERT_EQ(num_dims(), num_sizes()); // Compute generalized shapes. std::array<size_t, XNN_MAX_TENSOR_DIMS> input_dims; std::array<size_t, XNN_MAX_TENSOR_DIMS> input_offsets; std::array<size_t, XNN_MAX_TENSOR_DIMS> output_sizes; std::array<size_t, XNN_MAX_TENSOR_DIMS> output_dims; std::fill(input_dims.begin(), input_dims.end(), 1); std::fill(input_offsets.begin(), input_offsets.end(), 0); std::fill(output_sizes.begin(), output_sizes.end(), 0); std::fill(output_dims.begin(), output_dims.end(), 1); for (size_t i = 0; i < num_dims(); i++) { input_dims[XNN_MAX_TENSOR_DIMS - num_dims() + i] = input_dim(i); input_offsets[XNN_MAX_TENSOR_DIMS - num_dims() + i] = offset(i); output_sizes[XNN_MAX_TENSOR_DIMS - num_dims() + i] = size(i); output_dims[XNN_MAX_TENSOR_DIMS - num_dims() + i] = output_dim(i); } std::vector<uint32_t> input(XNN_EXTRA_BYTES / sizeof(uint32_t) + num_input_elements()); std::vector<uint32_t> output(num_output_elements()); std::vector<uint32_t> output_ref(num_output_elements()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::iota(input.begin(), input.end(), UINT32_C(0)); std::fill(output.begin(), output.end(), UINT32_C(0xDEADBEEF)); ComputeReference(input_dims, output_dims, input_offsets, input, output_ref); // Create, setup, run, and destroy a Slice operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t slice_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_slice_nd_x32( 0, &slice_op)); ASSERT_NE(nullptr, slice_op); // Smart pointer to automatically delete slice_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_slice_op(slice_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_slice_nd_x32( slice_op, num_dims(), input_shape().data(), offsets().data(), sizes().data(), nullptr /* thread pool */)); ASSERT_EQ(xnn_status_success, xnn_setup_slice_nd_x32( slice_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(slice_op, nullptr /* thread pool */)); ASSERT_EQ(output, output_ref); } } void TestRunX32() const { ASSERT_EQ(num_dims(), num_offsets()); ASSERT_EQ(num_dims(), num_sizes()); // Compute generalized shapes.
std::array<size_t, XNN_MAX_TENSOR_DIMS> input_dims; std::array<size_t, XNN_MAX_TENSOR_DIMS> input_offsets; std::array<size_t, XNN_MAX_TENSOR_DIMS> output_sizes; std::array<size_t, XNN_MAX_TENSOR_DIMS> output_dims; std::fill(input_dims.begin(), input_dims.end(), 1); std::fill(input_offsets.begin(), input_offsets.end(), 0); std::fill(output_sizes.begin(), output_sizes.end(), 0); std::fill(output_dims.begin(), output_dims.end(), 1); for (size_t i = 0; i < num_dims(); i++) { input_dims[XNN_MAX_TENSOR_DIMS - num_dims() + i] = input_dim(i); input_offsets[XNN_MAX_TENSOR_DIMS - num_dims() + i] = offset(i); output_sizes[XNN_MAX_TENSOR_DIMS - num_dims() + i] = size(i); output_dims[XNN_MAX_TENSOR_DIMS - num_dims() + i] = output_dim(i); } std::vector<uint32_t> input(XNN_EXTRA_BYTES / sizeof(uint32_t) + num_input_elements()); std::vector<uint32_t> output(num_output_elements()); std::vector<uint32_t> output_ref(num_output_elements()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::iota(input.begin(), input.end(), UINT32_C(0)); std::fill(output.begin(), output.end(), UINT32_C(0xDEADBEEF)); ComputeReference(input_dims, output_dims, input_offsets, input, output_ref); // Initialize and run the Slice operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); ASSERT_EQ(xnn_status_success, xnn_run_slice_nd_x32( num_dims(), input_shape().data(), offsets().data(), sizes().data(), input.data(), output.data(), 0, nullptr /* thread pool */)); ASSERT_EQ(output, output_ref); } } private: template <typename T> void ComputeReference( const std::array<size_t, XNN_MAX_TENSOR_DIMS> input_dims, const std::array<size_t, XNN_MAX_TENSOR_DIMS> output_dims, const std::array<size_t, XNN_MAX_TENSOR_DIMS> input_offsets, const std::vector<T>& input, std::vector<T>& output) const { for (size_t i = 0; i < output_dims[0]; i++) { for (size_t j = 0; j < output_dims[1]; j++) { for (size_t k = 0; k < output_dims[2]; k++) { for (size_t l = 0; l < output_dims[3]; l++) { for (size_t m = 0; m < output_dims[4]; m++) { for (size_t n = 0; n < output_dims[5]; n++) { const size_t output_index = ((((i * output_dims[1] + j) * output_dims[2] + k) * output_dims[3] + l) * output_dims[4] + m) * output_dims[5] + n; const size_t input_index = (((((input_offsets[0] + i) * input_dims[1] + (input_offsets[1] + j)) * input_dims[2] + (input_offsets[2] + k)) * input_dims[3] + (input_offsets[3] + l)) * input_dims[4] + (input_offsets[4] + m)) * input_dims[5] + (input_offsets[5] + n); output[output_index] = input[input_index]; } } } } } } } std::vector<size_t> input_shape_; std::vector<size_t> offsets_; std::vector<size_t> sizes_; size_t iterations_{1}; // Use fewer iterations because we test a lot of dimensions. };
13,267
34.571046
113
h
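A sketch of a 2D slice case through SliceOperatorTester; offsets plus sizes must stay within the input shape.

TEST(SLICE_ND_X32, slice_2d) {
  // Take a 3x4 window starting at (1, 2) out of a 5x7 tensor.
  SliceOperatorTester()
      .input_shape({5, 7})
      .offsets({1, 2})
      .sizes({3, 4})
      .TestX32();
}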
XNNPACK
XNNPACK-master/test/softmax-operator-tester.h
// Copyright (c) Facebook, Inc. and its affiliates. // All rights reserved. // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cmath> #include <cstddef> #include <cstdlib> #include <limits> #include <memory> #include <random> #include <vector> #include <fp16/fp16.h> #include <xnnpack.h> class SoftMaxOperatorTester { public: inline SoftMaxOperatorTester& channels(size_t channels) { assert(channels != 0); this->channels_ = channels; return *this; } inline size_t channels() const { return this->channels_; } inline SoftMaxOperatorTester& input_stride(size_t input_stride) { assert(input_stride != 0); this->input_stride_ = input_stride; return *this; } inline size_t input_stride() const { if (this->input_stride_ == 0) { return this->channels_; } else { assert(this->input_stride_ >= this->channels_); return this->input_stride_; } } inline SoftMaxOperatorTester& output_stride(size_t output_stride) { assert(output_stride != 0); this->output_stride_ = output_stride; return *this; } inline size_t output_stride() const { if (this->output_stride_ == 0) { return this->channels_; } else { assert(this->output_stride_ >= this->channels_); return this->output_stride_; } } inline SoftMaxOperatorTester& batch_size(size_t batch_size) { assert(batch_size != 0); this->batch_size_ = batch_size; return *this; } inline size_t batch_size() const { return this->batch_size_; } inline SoftMaxOperatorTester& input_scale(float input_scale) { assert(input_scale > 0.0f); assert(std::isnormal(input_scale)); this->input_scale_ = input_scale; return *this; } inline float input_scale() const { return this->input_scale_; } inline SoftMaxOperatorTester& input_zero_point(uint8_t input_zero_point) { this->input_zero_point_ = input_zero_point; return *this; } inline uint8_t input_zero_point() const { return this->input_zero_point_; } inline float output_scale() const { return 1.0f / 256.0f; } inline uint8_t output_zero_point() const { return 0; } inline SoftMaxOperatorTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void TestF16() const { std::random_device random_device; auto rng = std::mt19937(random_device()); // Choose such range that exph(x[i]) overflows, but exph(x[i] - x_max) doesn't. // However, the range is still narrow enough that single-precision exp doesn't overflow. std::uniform_real_distribution<float> f32dist(15.0f, 20.0f); std::vector<uint16_t> input((batch_size() - 1) * input_stride() + channels() + XNN_EXTRA_BYTES / sizeof(uint16_t)); std::vector<uint16_t> output((batch_size() - 1) * output_stride() + channels() + XNN_EXTRA_BYTES / sizeof(uint16_t)); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); }); std::fill(output.begin(), output.end(), UINT16_C(0x7E00) /* NaN */); // Compute reference results.
for (size_t i = 0; i < batch_size(); i++) { float sum_exp = 0.0; for (size_t c = 0; c < channels(); c++) { sum_exp += std::exp(fp16_ieee_to_fp32_value(input[i * input_stride() + c])); } for (size_t c = 0; c < channels(); c++) { output_ref[i * channels() + c] = std::exp(fp16_ieee_to_fp32_value(input[i * input_stride() + c])) / sum_exp; } } // Create, setup, run, and destroy SoftMax operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t softmax_op = nullptr; const xnn_status status = xnn_create_softmax_nc_f16( channels(), input_stride(), output_stride(), 0, &softmax_op); if (status == xnn_status_unsupported_hardware) { GTEST_SKIP(); } ASSERT_EQ(xnn_status_success, status); ASSERT_NE(nullptr, softmax_op); // Smart pointer to automatically delete softmax_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_softmax_op(softmax_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_softmax_nc_f16(softmax_op, batch_size(), nullptr /* thread pool */)); ASSERT_EQ(xnn_status_success, xnn_setup_softmax_nc_f16(softmax_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(softmax_op, nullptr /* thread pool */)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { ASSERT_NEAR( fp16_ieee_to_fp32_value(output[i * output_stride() + c]), output_ref[i * channels() + c], std::max(1.0e-4f, std::abs(output_ref[i * channels() + c]) * 5.0e-3f)) << "element " << i << " / " << batch_size() << ", channel " << c << " / " << channels(); } } } } void TestF32() const { std::random_device random_device; auto rng = std::mt19937(random_device()); // Choose such range that expf(x[i]) overflows, but expf(x[i] - x_max) doesn't. // However, the range is still narrow enough that single-precision exp doesn't overflow. std::uniform_real_distribution<float> f32dist(90.0f, 100.0f); std::vector<float> input((batch_size() - 1) * input_stride() + channels() + XNN_EXTRA_BYTES / sizeof(float)); std::vector<float> output((batch_size() - 1) * output_stride() + channels() + XNN_EXTRA_BYTES / sizeof(float)); std::vector<double> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return f32dist(rng); }); std::fill(output.begin(), output.end(), std::nanf("")); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { double sum_exp = 0.0; for (size_t c = 0; c < channels(); c++) { sum_exp += std::exp(double(input[i * input_stride() + c])); } for (size_t c = 0; c < channels(); c++) { output_ref[i * channels() + c] = std::exp(double(input[i * input_stride() + c])) / sum_exp; } } // Create, setup, run, and destroy SoftMax operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t softmax_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_softmax_nc_f32( channels(), input_stride(), output_stride(), 0, &softmax_op)); ASSERT_NE(nullptr, softmax_op); // Smart pointer to automatically delete softmax_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_softmax_op(softmax_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_softmax_nc_f32(softmax_op, batch_size(), nullptr /* thread pool */)); ASSERT_EQ(xnn_status_success, xnn_setup_softmax_nc_f32(softmax_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(softmax_op, nullptr /* thread pool */)); // Verify results. 
for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { ASSERT_NEAR( double(output[i * output_stride() + c]), output_ref[i * channels() + c], output_ref[i * channels() + c] * 1.0e-5) << "element " << i << " / " << batch_size() << ", channel " << c << " / " << channels(); } } } } void TestQU8() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_int_distribution<int32_t> u8dist( std::numeric_limits<uint8_t>::min(), std::numeric_limits<uint8_t>::max()); std::vector<uint8_t> input((batch_size() - 1) * input_stride() + channels()); std::vector<uint8_t> output((batch_size() - 1) * output_stride() + channels()); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return u8dist(rng); }); std::fill(output.begin(), output.end(), UINT8_C(0xA5)); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { const int32_t max_input = *std::max_element( input.data() + i * input_stride(), input.data() + i * input_stride() + channels()); float sum_exp = 0.0f; for (size_t c = 0; c < channels(); c++) { sum_exp += std::exp((int32_t(input[i * input_stride() + c]) - max_input) * input_scale()); } for (size_t c = 0; c < channels(); c++) { output_ref[i * channels() + c] = std::exp((int32_t(input[i * input_stride() + c]) - max_input) * input_scale()) / (sum_exp * output_scale()); output_ref[i * channels() + c] = std::min(output_ref[i * channels() + c], 255.0f); } } // Create, setup, run, and destroy SoftMax operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t softmax_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_softmax_nc_qu8( channels(), input_stride(), output_stride(), input_scale(), output_zero_point(), output_scale(), 0, &softmax_op)); ASSERT_NE(nullptr, softmax_op); // Smart pointer to automatically delete softmax_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_softmax_op(softmax_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_softmax_nc_qu8(softmax_op, batch_size(), nullptr /* thread pool */)); ASSERT_EQ(xnn_status_success, xnn_setup_softmax_nc_qu8(softmax_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(softmax_op, nullptr /* thread pool */)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_NEAR(float(int32_t(output[i * output_stride() + c])), output_ref[i * channels() + c], 0.6f); } } } } private: size_t batch_size_{1}; size_t channels_{1}; size_t input_stride_{0}; size_t output_stride_{0}; float input_scale_{0.176080093}; uint8_t input_zero_point_{121}; size_t iterations_{15}; };
10,931
34.493506
121
h
XNNPACK
XNNPACK-master/test/space-to-depth-operator-tester.h
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cmath> #include <cstddef> #include <cstdlib> #include <functional> #include <memory> #include <numeric> #include <random> #include <vector> #include <xnnpack.h> class SpaceToDepthOperatorTester { public: inline SpaceToDepthOperatorTester& input_size(size_t input_height, size_t input_width) { assert(input_height >= 1); assert(input_width >= 1); this->input_height_ = input_height; this->input_width_ = input_width; return *this; } inline SpaceToDepthOperatorTester& input_height(size_t input_height) { assert(input_height >= 1); this->input_height_ = input_height; return *this; } inline size_t input_height() const { return this->input_height_; } inline SpaceToDepthOperatorTester& input_width(size_t input_width) { assert(input_width >= 1); this->input_width_ = input_width; return *this; } inline size_t input_width() const { return this->input_width_; } inline size_t output_height() const { assert(input_height() % block_size() == 0); return input_height() / block_size(); } inline size_t output_width() const { assert(input_width() % block_size() == 0); return input_width() / block_size(); } inline SpaceToDepthOperatorTester& block_size(size_t block_size) { assert(block_size >= 2); this->block_size_ = block_size; return *this; } inline size_t block_size() const { return this->block_size_; } inline SpaceToDepthOperatorTester& input_channels(size_t input_channels) { assert(input_channels != 0); this->input_channels_ = input_channels; return *this; } inline size_t input_channels() const { return this->input_channels_; } inline size_t output_channels() const { return input_channels() * block_size() * block_size(); } inline SpaceToDepthOperatorTester& batch_size(size_t batch_size) { assert(batch_size != 0); this->batch_size_ = batch_size; return *this; } inline size_t batch_size() const { return this->batch_size_; } inline SpaceToDepthOperatorTester& input_channels_stride(size_t input_channels_stride) { assert(input_channels_stride >= 1); this->input_channels_stride_ = input_channels_stride; return *this; } inline size_t input_channels_stride() const { if (this->input_channels_stride_ == 0) { return input_channels(); } else { assert(this->input_channels_stride_ >= input_channels()); return this->input_channels_stride_; } } inline SpaceToDepthOperatorTester& output_channels_stride(size_t output_channels_stride) { assert(output_channels_stride >= 1); this->output_channels_stride_ = output_channels_stride; return *this; } inline size_t output_channels_stride() const { if (this->output_channels_stride_ == 0) { return output_channels(); } else { assert(this->output_channels_stride_ >= output_channels()); return this->output_channels_stride_; } } inline SpaceToDepthOperatorTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void TestNHWCxX8() const { std::vector<int8_t> input( (batch_size() * input_height() * input_width() - 1) * input_channels_stride() + input_channels() + XNN_EXTRA_BYTES / (sizeof(int8_t))); std::vector<int8_t> output( (batch_size() * output_height() * output_width() - 1) * output_channels_stride() + output_channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::iota(input.begin(), input.end(), 0); std::fill(output.begin(),
output.end(), INT8_C(0xAF)); // Create, setup, run, and destroy Space To Depth operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t space_to_depth_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_space_to_depth_nhwc_x8( input_channels(), input_channels_stride(), output_channels_stride(), block_size(), 0, &space_to_depth_op)); ASSERT_NE(nullptr, space_to_depth_op); // Smart pointer to automatically delete space_to_depth_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_space_to_depth_op(space_to_depth_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_space_to_depth_nhwc_x8( space_to_depth_op, batch_size(), input_height(), input_width(), /*output_height_out=*/nullptr, /*output_width_out=*/nullptr, /*output_channels_out=*/nullptr, nullptr /* thread pool */)); ASSERT_EQ(xnn_status_success, xnn_setup_space_to_depth_nhwc_x8( space_to_depth_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(space_to_depth_op, nullptr /* thread pool */)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t iy = 0; iy < output_height(); iy++) { for (size_t ix = 0; ix < output_width(); ix++) { for (size_t by = 0; by < block_size(); by++) { for (size_t bx = 0; bx < block_size(); bx++) { for (size_t oc = 0; oc < input_channels(); oc++) { const size_t input_index = oc + bx * input_channels_stride() + ix * block_size() * input_channels_stride() + by * output_width() * block_size() * input_channels_stride() + iy * block_size() * output_width() * block_size() * input_channels_stride() + i * output_height() * block_size() * output_width() * block_size() * input_channels_stride(); const size_t output_index = oc + bx * input_channels() + by * input_channels() * block_size() + ix * output_channels_stride() + iy * output_width() * output_channels_stride() + i * output_height() * output_width() * output_channels_stride(); ASSERT_EQ(int32_t(output[output_index]), int32_t(input[input_index])) << "batch: " << i << " / " << batch_size() << ", output x: " << ix << " / " << output_width() << ", output y: " << iy << " / " << output_height() << ", block x: " << bx << " / " << block_size() << ", block y: " << by << " / " << block_size() << ", input channel: " << oc << " / " << input_channels() << ", input stride: " << input_channels_stride() << ", output stride: " << output_channels_stride(); } } } } } } } } void TestNHWCxX16() const { std::vector<int16_t> input( (batch_size() * input_height() * input_width() - 1) * input_channels_stride() + input_channels() + XNN_EXTRA_BYTES / (sizeof(int16_t))); std::vector<int16_t> output( (batch_size() * output_height() * output_width() - 1) * output_channels_stride() + output_channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::iota(input.begin(), input.end(), 0); std::fill(output.begin(), output.end(), INT16_C(0xDEAD)); // Create, setup, run, and destroy Space To Depth operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t space_to_depth_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_space_to_depth_nhwc_x16( input_channels(), input_channels_stride(), output_channels_stride(), block_size(), 0, &space_to_depth_op)); ASSERT_NE(nullptr, space_to_depth_op); // Smart pointer to automatically delete space_to_depth_op.
std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_space_to_depth_op(space_to_depth_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_space_to_depth_nhwc_x16( space_to_depth_op, batch_size(), input_height(), input_width(), /*output_height_out=*/nullptr, /*output_width_out=*/nullptr, /*output_channels_out=*/nullptr, nullptr /* thread pool */)); ASSERT_EQ(xnn_status_success, xnn_setup_space_to_depth_nhwc_x16( space_to_depth_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(space_to_depth_op, nullptr /* thread pool */)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t iy = 0; iy < output_height(); iy++) { for (size_t ix = 0; ix < output_width(); ix++) { for (size_t by = 0; by < block_size(); by++) { for (size_t bx = 0; bx < block_size(); bx++) { for (size_t oc = 0; oc < input_channels(); oc++) { const size_t input_index = oc + bx * input_channels_stride() + ix * block_size() * input_channels_stride() + by * output_width() * block_size() * input_channels_stride() + iy * block_size() * output_width() * block_size() * input_channels_stride() + i * output_height() * block_size() * output_width() * block_size() * input_channels_stride(); const size_t output_index = oc + bx * input_channels() + by * input_channels() * block_size() + ix * output_channels_stride() + iy * output_width() * output_channels_stride() + i * output_height() * output_width() * output_channels_stride(); ASSERT_EQ(int32_t(output[output_index]), int32_t(input[input_index])) << "batch: " << i << " / " << batch_size() << ", output x: " << ix << " / " << output_width() << ", output y: " << iy << " / " << output_height() << ", block x: " << bx << " / " << block_size() << ", block y: " << by << " / " << block_size() << ", input channel: " << oc << " / " << input_channels() << ", input stride: " << input_channels_stride() << ", output stride: " << output_channels_stride(); } } } } } } } } void TestNHWCxX32() const { std::vector<int32_t> input( (batch_size() * input_height() * input_width() - 1) * input_channels_stride() + input_channels() + XNN_EXTRA_BYTES / (sizeof(int32_t))); std::vector<int32_t> output( (batch_size() * output_height() * output_width() - 1) * output_channels_stride() + output_channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::iota(input.begin(), input.end(), 0); std::fill(output.begin(), output.end(), INT32_C(0xDEADBEEF)); // Create, setup, run, and destroy Space To Depth operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t space_to_depth_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_space_to_depth_nhwc_x32( input_channels(), input_channels_stride(), output_channels_stride(), block_size(), 0, &space_to_depth_op)); ASSERT_NE(nullptr, space_to_depth_op); // Smart pointer to automatically delete space_to_depth_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_space_to_depth_op(space_to_depth_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_space_to_depth_nhwc_x32( space_to_depth_op, batch_size(), input_height(), input_width(), /*output_height_out=*/nullptr, /*output_width_out=*/nullptr, /*output_channels_out=*/nullptr, nullptr /* thread pool */)); ASSERT_EQ(xnn_status_success, xnn_setup_space_to_depth_nhwc_x32( space_to_depth_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(space_to_depth_op, nullptr /* thread pool */)); // Verify results.
for (size_t i = 0; i < batch_size(); i++) { for (size_t iy = 0; iy < output_height(); iy++) { for (size_t ix = 0; ix < output_width(); ix++) { for (size_t by = 0; by < block_size(); by++) { for (size_t bx = 0; bx < block_size(); bx++) { for (size_t oc = 0; oc < input_channels(); oc++) { const size_t input_index = oc + bx * input_channels_stride() + ix * block_size() * input_channels_stride() + by * output_width() * block_size() * input_channels_stride() + iy * block_size() * output_width() * block_size() * input_channels_stride() + i * output_height() * block_size() * output_width() * block_size() * input_channels_stride(); const size_t output_index = oc + bx * input_channels() + by * input_channels() * block_size() + ix * output_channels_stride() + iy * output_width() * output_channels_stride() + i * output_height() * output_width() * output_channels_stride(); ASSERT_EQ(int32_t(output[output_index]), int32_t(input[input_index])) << "batch: " << i << " / " << batch_size() << ", output x: " << ix << " / " << output_width() << ", output y: " << iy << " / " << output_height() << ", block x: " << bx << " / " << block_size() << ", block y: " << by << " / " << block_size() << ", input channel: " << oc << " / " << input_channels() << ", input stride: " << input_channels_stride() << ", output stride: " << output_channels_stride(); } } } } } } } } private: size_t input_height_{1}; size_t input_width_{1}; size_t input_channels_{1}; size_t block_size_{2}; size_t batch_size_{1}; size_t input_channels_stride_{0}; size_t output_channels_stride_{0}; size_t iterations_{1}; };
15,104
39.387701
131
h
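For orientation, a minimal sketch of how SpaceToDepthOperatorTester above might be driven from a gtest case; the test name and parameter values are illustrative assumptions, not taken from the source:

// Hypothetical smoke test: 8x6 input with block size 2 yields a 4x3 output
// with 4x the channels; both spatial dimensions must divide by block_size().
TEST(SPACE_TO_DEPTH_NHWC_X32, hypothetical_smoke) {
  SpaceToDepthOperatorTester()
      .input_size(8, 6)
      .block_size(2)
      .input_channels(3)
      .batch_size(2)
      .TestNHWCxX32();
}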
XNNPACK
XNNPACK-master/test/square-operator-tester.h
// Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cmath> #include <cstddef> #include <cstdlib> #include <memory> #include <random> #include <vector> #include <fp16/fp16.h> #include <xnnpack.h> class SquareOperatorTester { public: inline SquareOperatorTester& channels(size_t channels) { assert(channels != 0); this->channels_ = channels; return *this; } inline size_t channels() const { return this->channels_; } inline SquareOperatorTester& input_stride(size_t input_stride) { assert(input_stride != 0); this->input_stride_ = input_stride; return *this; } inline size_t input_stride() const { if (this->input_stride_ == 0) { return this->channels_; } else { assert(this->input_stride_ >= this->channels_); return this->input_stride_; } } inline SquareOperatorTester& output_stride(size_t output_stride) { assert(output_stride != 0); this->output_stride_ = output_stride; return *this; } inline size_t output_stride() const { if (this->output_stride_ == 0) { return this->channels_; } else { assert(this->output_stride_ >= this->channels_); return this->output_stride_; } } inline SquareOperatorTester& batch_size(size_t batch_size) { assert(batch_size != 0); this->batch_size_ = batch_size; return *this; } inline size_t batch_size() const { return this->batch_size_; } inline SquareOperatorTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void TestF16() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(-1.0f, 1.0f); std::vector<uint16_t> input(XNN_EXTRA_BYTES / sizeof(uint16_t) + (batch_size() - 1) * input_stride() + channels()); std::vector<uint16_t> output((batch_size() - 1) * output_stride() + channels()); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); }); std::fill(output.begin(), output.end(), UINT16_C(0x7E00) /* NaN */); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { const float value = fp16_ieee_to_fp32_value(input[i * input_stride() + c]); output_ref[i * channels() + c] = value * value; } } // Create, setup, run, and destroy Square operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t square_op = nullptr; const xnn_status status = xnn_create_square_nc_f16( channels(), input_stride(), output_stride(), 0, &square_op); if (status == xnn_status_unsupported_hardware) { GTEST_SKIP(); } ASSERT_EQ(xnn_status_success, status); ASSERT_NE(nullptr, square_op); // Smart pointer to automatically delete square_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_square_op(square_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_square_nc_f16(square_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_square_nc_f16(square_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(square_op, /*threadpool=*/nullptr)); // Verify results.
for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { ASSERT_NEAR( fp16_ieee_to_fp32_value(output[i * output_stride() + c]), output_ref[i * channels() + c], std::max(1.0e-4f, std::abs(output_ref[i * channels() + c]) * 5.0e-3f)) << "at batch " << i << " / " << batch_size() << ", channel " << c << " / " << channels(); } } } } void TestF32() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(-1.0f, 1.0f); std::vector<float> input(XNN_EXTRA_BYTES / sizeof(float) + (batch_size() - 1) * input_stride() + channels()); std::vector<float> output((batch_size() - 1) * output_stride() + channels()); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return f32dist(rng); }); std::fill(output.begin(), output.end(), std::nanf("")); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { const float value = input[i * input_stride() + c]; output_ref[i * channels() + c] = value * value; } } // Create, setup, run, and destroy Square operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t square_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_square_nc_f32( channels(), input_stride(), output_stride(), 0, &square_op)); ASSERT_NE(nullptr, square_op); // Smart pointer to automatically delete square_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_square_op(square_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_square_nc_f32(square_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_square_nc_f32(square_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(square_op, /*threadpool=*/nullptr)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_EQ(output_ref[i * channels() + c], output[i * output_stride() + c]) << "at batch " << i << " / " << batch_size() << ", channel " << c << " / " << channels(); } } } } void TestRunF32() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(-1.0f, 1.0f); std::vector<float> input(XNN_EXTRA_BYTES / sizeof(float) + (batch_size() - 1) * input_stride() + channels()); std::vector<float> output((batch_size() - 1) * output_stride() + channels()); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return f32dist(rng); }); std::fill(output.begin(), output.end(), std::nanf("")); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { const float value = input[i * input_stride() + c]; output_ref[i * channels() + c] = value * value; } } // Initialize and run Square operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); ASSERT_EQ(xnn_status_success, xnn_run_square_nc_f32( channels(), input_stride(), output_stride(), batch_size(), input.data(), output.data(), 0, nullptr /* thread pool */)); // Verify results. 
for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_EQ(output_ref[i * channels() + c], output[i * output_stride() + c]) << "at batch " << i << " / " << batch_size() << ", channel " << c << " / " << channels(); } } } } private: size_t batch_size_{1}; size_t channels_{1}; size_t input_stride_{0}; size_t output_stride_{0}; size_t iterations_{15}; };
8,169
33.472574
115
h
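A hedged usage sketch for SquareOperatorTester exercising non-contiguous row strides; the test name and values are hypothetical:

// Strides larger than channels() leave untouched padding between rows,
// which the tester's verification loop implicitly checks is never written.
TEST(SQUARE_NC_F32, hypothetical_strided) {
  SquareOperatorTester()
      .batch_size(3)
      .channels(5)
      .input_stride(7)    // must be >= channels()
      .output_stride(11)  // must be >= channels()
      .TestF32();
}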
XNNPACK
XNNPACK-master/test/square-root-operator-tester.h
// Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cmath> #include <cstddef> #include <cstdlib> #include <memory> #include <random> #include <vector> #include <fp16/fp16.h> #include <xnnpack.h> class SquareRootOperatorTester { public: inline SquareRootOperatorTester& channels(size_t channels) { assert(channels != 0); this->channels_ = channels; return *this; } inline size_t channels() const { return this->channels_; } inline SquareRootOperatorTester& input_stride(size_t input_stride) { assert(input_stride != 0); this->input_stride_ = input_stride; return *this; } inline size_t input_stride() const { if (this->input_stride_ == 0) { return this->channels_; } else { assert(this->input_stride_ >= this->channels_); return this->input_stride_; } } inline SquareRootOperatorTester& output_stride(size_t output_stride) { assert(output_stride != 0); this->output_stride_ = output_stride; return *this; } inline size_t output_stride() const { if (this->output_stride_ == 0) { return this->channels_; } else { assert(this->output_stride_ >= this->channels_); return this->output_stride_; } } inline SquareRootOperatorTester& batch_size(size_t batch_size) { assert(batch_size != 0); this->batch_size_ = batch_size; return *this; } inline size_t batch_size() const { return this->batch_size_; } inline SquareRootOperatorTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void TestF16() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(0.1f, 5.0f); std::vector<uint16_t> input(XNN_EXTRA_BYTES / sizeof(uint16_t) + (batch_size() - 1) * input_stride() + channels()); std::vector<uint16_t> output((batch_size() - 1) * output_stride() + channels()); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); }); std::fill(output.begin(), output.end(), UINT16_C(0x7E00) /* NaN */); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { output_ref[i * channels() + c] = std::sqrt(fp16_ieee_to_fp32_value(input[i * input_stride() + c])); } } // Create, setup, run, and destroy Square Root operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t sqrt_op = nullptr; const xnn_status status = xnn_create_square_root_nc_f16( channels(), input_stride(), output_stride(), 0, &sqrt_op); if (status == xnn_status_unsupported_hardware) { GTEST_SKIP(); } ASSERT_EQ(xnn_status_success, status); ASSERT_NE(nullptr, sqrt_op); // Smart pointer to automatically delete sqrt_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_sqrt_op(sqrt_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_square_root_nc_f16(sqrt_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_square_root_nc_f16(sqrt_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(sqrt_op, /*threadpool=*/nullptr)); // Verify results.
for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { ASSERT_NEAR( fp16_ieee_to_fp32_value(output[i * output_stride() + c]), output_ref[i * channels() + c], std::abs(output_ref[i * channels() + c]) * 5.0e-3f) << "at batch " << i << " / " << batch_size() << ", channel " << c << " / " << channels() << ", input " << fp16_ieee_to_fp32_value(input[i * input_stride() + c]); } } } } void TestF32() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(0.0f, 5.0f); std::vector<float> input(XNN_EXTRA_BYTES / sizeof(float) + (batch_size() - 1) * input_stride() + channels()); std::vector<float> output((batch_size() - 1) * output_stride() + channels()); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return f32dist(rng); }); std::fill(output.begin(), output.end(), std::nanf("")); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { output_ref[i * channels() + c] = std::sqrt(input[i * input_stride() + c]); } } // Create, setup, run, and destroy Square Root operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t sqrt_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_square_root_nc_f32( channels(), input_stride(), output_stride(), 0, &sqrt_op)); ASSERT_NE(nullptr, sqrt_op); // Smart pointer to automatically delete sqrt_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_sqrt_op(sqrt_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_square_root_nc_f32(sqrt_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_square_root_nc_f32(sqrt_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(sqrt_op, /*threadpool=*/nullptr)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_EQ(output_ref[i * channels() + c], output[i * output_stride() + c]) << "at batch " << i << " / " << batch_size() << ", channel " << c << " / " << channels() << ", input " << input[i * input_stride() + c]; } } } } void TestRunF32() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(0.0f, 5.0f); std::vector<float> input(XNN_EXTRA_BYTES / sizeof(float) + (batch_size() - 1) * input_stride() + channels()); std::vector<float> output((batch_size() - 1) * output_stride() + channels()); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return f32dist(rng); }); std::fill(output.begin(), output.end(), std::nanf("")); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { output_ref[i * channels() + c] = std::sqrt(input[i * input_stride() + c]); } } // Initialize and run Square Root operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); ASSERT_EQ(xnn_status_success, xnn_run_square_root_nc_f32( channels(), input_stride(), output_stride(), batch_size(), input.data(), output.data(), 0, nullptr /* thread pool */)); // Verify results.
for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_EQ(output_ref[i * channels() + c], output[i * output_stride() + c]) << "at batch " << i << " / " << batch_size() << ", channel " << c << " / " << channels() << ", input " << input[i * input_stride() + c]; } } } } private: size_t batch_size_{1}; size_t channels_{1}; size_t input_stride_{0}; size_t output_stride_{0}; size_t iterations_{15}; };
8,275
33.919831
115
h
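A hedged sketch of driving the single-shot path through TestRunF32, which wraps xnn_run_square_root_nc_f32 instead of the create/reshape/setup sequence; the test name and values are hypothetical:

// The run API creates no persistent operator, so this covers the
// stateless code path with a few randomized iterations.
TEST(SQUARE_ROOT_NC_F32, hypothetical_run_api) {
  SquareRootOperatorTester()
      .batch_size(4)
      .channels(8)
      .iterations(3)
      .TestRunF32();
}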
XNNPACK
XNNPACK-master/test/subgraph-binary-tester.h
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <algorithm> #include <array> #include <functional> #include <limits> #include <memory> #include <numeric> #include <random> #include <vector> #include <xnnpack.h> #include <xnnpack/node-type.h> #include <xnnpack/operator.h> #include <xnnpack/requantization.h> #include <xnnpack/subgraph.h> #include <gtest/gtest.h> template <typename T> class BinaryTest : public ::testing::Test { protected: BinaryTest() { random_device = std::make_unique<std::random_device>(); rng = std::mt19937((*random_device)()); shape_dist = std::uniform_int_distribution<size_t>(0, XNN_MAX_TENSOR_DIMS); dim_dist = std::uniform_int_distribution<size_t>(1, 9); f32dist = std::uniform_real_distribution<float>(0.01f, 1.0f); i8dist = std::uniform_int_distribution<int32_t>(std::numeric_limits<int8_t>::min(), std::numeric_limits<int8_t>::max()); u8dist = std::uniform_int_distribution<int32_t>(std::numeric_limits<uint8_t>::min(), std::numeric_limits<uint8_t>::max()); scale_dist = std::uniform_real_distribution<float>(0.1f, 5.0f); } void SetUp() override { std::vector<size_t> input1_shape = RandomShape(); std::vector<size_t> input2_shape; std::vector<size_t> output_shape; // Create input dimensions. // Create input 2 with an equal or larger number of dimensions. const size_t input2_num_dims = std::uniform_int_distribution<size_t>(input1_shape.size(), XNN_MAX_TENSOR_DIMS)(rng); input2_shape = RandomShape(input2_num_dims); // Ensure that the input dimensions match. std::copy_backward(input1_shape.begin(), input1_shape.end(), input2_shape.end()); // Choose a random dimension to broadcast for each input. const size_t input1_broadcast_dim = std::uniform_int_distribution<size_t>(0, input1_shape.size())(rng); if (input1_broadcast_dim < input1_shape.size()) { input1_shape[input1_broadcast_dim] = 1; } const size_t input2_broadcast_dim = std::uniform_int_distribution<size_t>(0, input2_shape.size())(rng); if (input2_broadcast_dim < input2_shape.size()) { input2_shape[input2_broadcast_dim] = 1; } // Calculate generalized shapes.
std::fill(input1_dims.begin(), input1_dims.end(), 1); std::fill(input2_dims.begin(), input2_dims.end(), 1); std::fill(output_dims.begin(), output_dims.end(), 1); std::copy_backward(input1_shape.cbegin(), input1_shape.cend(), input1_dims.end()); std::copy_backward(input2_shape.cbegin(), input2_shape.cend(), input2_dims.end()); for (size_t i = 0; i < XNN_MAX_TENSOR_DIMS; i++) { if (input1_dims[i] != 1 && input2_dims[i] != 1) { ASSERT_EQ(input1_dims[i], input2_dims[i]) << "i: " << i; } output_dims[i] = std::max(input1_dims[i], input2_dims[i]); } input1 = std::vector<T>(XNN_EXTRA_BYTES / sizeof(T) + NumElements(input1_shape)); input2 = std::vector<T>(XNN_EXTRA_BYTES / sizeof(T) + NumElements(input2_shape)); operator_output = std::vector<T>(NumElements(output_dims)); subgraph_output = std::vector<T>(operator_output.size()); } std::vector<size_t> RandomShape(size_t num_dims) { std::vector<size_t> dims(num_dims); std::generate(dims.begin(), dims.end(), [&] { return dim_dist(rng); }); return dims; } std::vector<size_t> RandomShape() { return RandomShape(shape_dist(rng)); } size_t NumElements(std::vector<size_t>& dims) { return std::accumulate(dims.begin(), dims.end(), size_t(1), std::multiplies<size_t>()); } size_t NumElements(std::array<size_t, XNN_MAX_TENSOR_DIMS>& dims) { return std::accumulate(dims.begin(), dims.end(), size_t(1), std::multiplies<size_t>()); } std::unique_ptr<std::random_device> random_device; std::mt19937 rng; std::uniform_int_distribution<size_t> shape_dist; std::uniform_int_distribution<size_t> dim_dist; std::uniform_real_distribution<float> f32dist; std::uniform_real_distribution<float> scale_dist; std::uniform_int_distribution<int32_t> i8dist; std::uniform_int_distribution<int32_t> u8dist; float output_min = -std::numeric_limits<float>::infinity(); float output_max = std::numeric_limits<float>::infinity(); std::array<size_t, XNN_MAX_TENSOR_DIMS> input1_dims; std::array<size_t, XNN_MAX_TENSOR_DIMS> input2_dims; std::array<size_t, XNN_MAX_TENSOR_DIMS> output_dims; std::vector<T> input1; std::vector<T> input2; std::vector<T> operator_output; std::vector<T> subgraph_output; };
4,621
37.198347
120
h
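Because BinaryTest is a gtest fixture template, concrete tests instantiate it for an element type and inherit the randomized broadcast-compatible shapes prepared in SetUp(). A minimal sketch (the alias and test body are assumptions, not from the source) that checks the broadcast invariant the fixture establishes:

using BinaryTestF32 = BinaryTest<float>;

TEST_F(BinaryTestF32, hypothetical_broadcast_invariant) {
  // SetUp() padded both shapes to XNN_MAX_TENSOR_DIMS and computed
  // output_dims as the element-wise max of the two padded shapes.
  for (size_t i = 0; i < XNN_MAX_TENSOR_DIMS; i++) {
    ASSERT_EQ(output_dims[i], std::max(input1_dims[i], input2_dims[i]));
  }
}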
XNNPACK
XNNPACK-master/test/subgraph-tester.h
// Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <algorithm> #include <cassert> #include <cmath> #include <cstddef> #include <cstdlib> #include <functional> #include <limits> #include <memory> #include <unordered_map> #include <numeric> #include <random> #include <vector> #include <type_traits> #include <xnnpack.h> #include <xnnpack/subgraph.h> #include <gtest/gtest.h> namespace xnnpack { enum class TensorType { kDense, kSparse, }; struct Padding { uint32_t top; uint32_t right; uint32_t bottom; uint32_t left; }; struct HeightWidth { uint32_t height; uint32_t width; }; using Kernel = HeightWidth; using Subsampling = HeightWidth; using Dilation = HeightWidth; using Upsampling = HeightWidth; using Adjustment = HeightWidth; struct ConvolutionParams { Padding padding; Kernel kernel; Subsampling subsampling; Dilation dilation; uint32_t groups; uint32_t group_input_channels; uint32_t group_output_channels; }; struct DeconvolutionParams { Padding padding; Adjustment adjustment; Kernel kernel; Upsampling upsampling; Dilation dilation; uint32_t groups; uint32_t group_input_channels; uint32_t group_output_channels; }; struct DepthwiseConvolutionParams { Padding padding; Kernel kernel; Subsampling subsampling; Dilation dilation; uint32_t depth_multiplier; uint32_t input_channels; }; class SubgraphTester { public: explicit SubgraphTester(uint32_t external_value_ids) { xnn_status status = xnn_initialize(nullptr); EXPECT_EQ(status, xnn_status_success); xnn_subgraph_t subgraph_ptr = nullptr; status = xnn_create_subgraph(external_value_ids, 0 /* flags */, &subgraph_ptr); EXPECT_EQ(status, xnn_status_success); subgraph_.reset(subgraph_ptr); std::random_device random_device; rng_ = std::mt19937(random_device()); } inline SubgraphTester& AddDynamicTensorF32(const std::vector<size_t>& dims, uint32_t external_id, uint32_t flags = 0) { uint32_t id_out = 0; const xnn_status status = xnn_define_tensor_value(subgraph_.get(), xnn_datatype_fp32, dims.size(), dims.data(), nullptr, external_id, flags, &id_out); EXPECT_EQ(status, xnn_status_success); EXPECT_EQ(id_out, external_id); return *this; } inline SubgraphTester& AddStaticTensorF32(const std::vector<size_t>& dims, TensorType tensor_type, uint32_t external_id, uint32_t flags = 0, float* data = nullptr) { if (data == nullptr) { const size_t num_elements = NumElements(dims); static_data_.emplace_back(num_elements * sizeof(float)); data = reinterpret_cast<float*>(static_data_.back().data()); if (tensor_type == TensorType::kDense) { std::generate(data, data + num_elements, [&]() { return f32dist(rng_); }); } else { // Create tensor with 90% sparsity in two steps: // 1. Generate non-zero elements in the beginning of the vector // 2.
Randomize positions of non-zero elements const size_t num_nonzero_elements = num_elements / 10; std::generate(data, data + num_nonzero_elements, [&]() { return f32dist(rng_); }); std::shuffle(data, data + num_elements, rng_); } } uint32_t id_out; const xnn_status status = xnn_define_tensor_value(subgraph_.get(), xnn_datatype_fp32, dims.size(), dims.data(), data, external_id, flags, &id_out); EXPECT_EQ(status, xnn_status_success); EXPECT_EQ(id_out, external_id); return *this; } inline SubgraphTester& AddInputTensorF32(const std::vector<size_t>& dims, uint32_t external_id) { AddDynamicTensorF32(dims, external_id, XNN_VALUE_FLAG_EXTERNAL_INPUT); size_t num_elements = NumElements(dims); auto input = std::vector<char>(num_elements * sizeof(float) + XNN_EXTRA_BYTES * sizeof(char)); float* data = reinterpret_cast<float*>(input.data()); std::generate(data, data + num_elements, [&]() { return f32dist(rng_); }); auto it = external_tensors_.insert({external_id, input}); EXPECT_TRUE(it.second); return *this; } inline SubgraphTester& AddOutputTensorF32(const std::vector<size_t>& dims, uint32_t external_id) { output_id_ = external_id; AddDynamicTensorF32(dims, external_id, XNN_VALUE_FLAG_EXTERNAL_OUTPUT); size_t num_elements = NumElements(dims); auto output = std::vector<char>(num_elements * sizeof(float)); float* data = reinterpret_cast<float*>(output.data()); std::fill(data, data + num_elements, std::nanf("")); auto it = external_tensors_.insert({external_id, output}); EXPECT_TRUE(it.second); return *this; } inline SubgraphTester& AddConcatenate2(size_t axis, uint32_t input1_id, uint32_t input2_id, uint32_t output_id) { const xnn_status status = xnn_define_concatenate2( subgraph_.get(), axis, input1_id, input2_id, output_id, 0 /* flags */); EXPECT_EQ(status, xnn_status_success); return *this; } inline SubgraphTester& AddConstantPad( const size_t *pre_paddings, const size_t *post_paddings, float padding_value, uint32_t input_id, uint32_t output_id) { const xnn_status status = xnn_define_static_constant_pad( subgraph_.get(), pre_paddings, post_paddings, padding_value, input_id, output_id, 0 /* flags */); EXPECT_EQ(status, xnn_status_success); return *this; } inline SubgraphTester& AddConstantPad( const std::vector<size_t>& pre_paddings, const std::vector<size_t>& post_paddings, float padding_value, uint32_t input_id, uint32_t output_id) { const xnn_status status = xnn_define_static_constant_pad( subgraph_.get(), pre_paddings.data(), post_paddings.data(), padding_value, input_id, output_id, /*flags=*/0); EXPECT_EQ(status, xnn_status_success); return *this; } inline SubgraphTester& AddConvolution2D( ConvolutionParams params, uint32_t input_id, uint32_t filter_id, uint32_t bias_id, uint32_t output_id) { const xnn_status status = xnn_define_convolution_2d( subgraph_.get(), params.padding.top, params.padding.right, params.padding.bottom, params.padding.left, params.kernel.height, params.kernel.width, params.subsampling.height, params.subsampling.width, params.dilation.height, params.dilation.width, params.groups, params.group_input_channels, params.group_output_channels, -std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), input_id, filter_id, bias_id, output_id, 0 /* flags */); EXPECT_EQ(status, xnn_status_success); return *this; } inline SubgraphTester& AddCopy(uint32_t input_id, uint32_t output_id) { const xnn_status status = xnn_define_copy( subgraph_.get(), input_id, output_id, 0 /* flags */); EXPECT_EQ(status, xnn_status_success); return *this; } inline SubgraphTester& 
AddDepthwiseConvolution2D( DepthwiseConvolutionParams params, uint32_t input_id, uint32_t filter_id, uint32_t bias_id, uint32_t output_id) { const xnn_status status = xnn_define_depthwise_convolution_2d( subgraph_.get(), params.padding.top, params.padding.right, params.padding.bottom, params.padding.left, params.kernel.height, params.kernel.width, params.subsampling.height, params.subsampling.width, params.dilation.height, params.dilation.width, params.depth_multiplier, params.input_channels, -std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), input_id, filter_id, bias_id, output_id, 0 /* flags */); EXPECT_EQ(status, xnn_status_success); return *this; } inline SubgraphTester& AddAddition(uint32_t input_id1, uint32_t input_id2, uint32_t output_id) { const xnn_status status = xnn_define_add2(subgraph_.get(), -std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), input_id1, input_id2, output_id, 0 /* flags */); EXPECT_EQ(status, xnn_status_success); return *this; } inline SubgraphTester& AddAveragePooling2D( uint32_t input_padding_top, uint32_t input_padding_right, uint32_t input_padding_bottom, uint32_t input_padding_left, uint32_t pooling_height, uint32_t pooling_width, uint32_t stride_height, uint32_t stride_width, uint32_t input_id, uint32_t output_id) { const xnn_status status = xnn_define_average_pooling_2d( subgraph_.get(), input_padding_top, input_padding_right, input_padding_bottom, input_padding_left, pooling_height, pooling_width, stride_height, stride_width, -std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), input_id, output_id, 0 /* flags */); EXPECT_EQ(status, xnn_status_success); return *this; } inline SubgraphTester& AddClamp(float output_min, float output_max, uint32_t input_id, uint32_t output_id) { const xnn_status status = xnn_define_clamp(subgraph_.get(), output_min, output_max, input_id, output_id, 0 /* flags */); EXPECT_EQ(status, xnn_status_success); return *this; } inline SubgraphTester& AddDeconvolution2D( uint32_t input_padding_top, uint32_t input_padding_right, uint32_t input_padding_bottom, uint32_t input_padding_left, uint32_t adjustment_height, uint32_t adjustment_width, uint32_t kernel_height, uint32_t kernel_width, uint32_t upsampling_height, uint32_t upsampling_width, uint32_t dilation_height, uint32_t dilation_width, uint32_t groups, size_t group_input_channels, size_t group_output_channels, uint32_t input_id, uint32_t filter_id, uint32_t bias_id, uint32_t output_id) { const xnn_status status = xnn_define_deconvolution_2d( subgraph_.get(), input_padding_top, input_padding_right, input_padding_bottom, input_padding_left, adjustment_height, adjustment_width, kernel_height, kernel_width, upsampling_height, upsampling_width, dilation_height, dilation_width, groups, group_input_channels, group_output_channels, -std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), input_id, filter_id, bias_id, output_id, 0 /* flags */); EXPECT_EQ(status, xnn_status_success); return *this; } inline SubgraphTester& AddDeconvolution2D( DeconvolutionParams params, uint32_t input_id, uint32_t filter_id, uint32_t bias_id, uint32_t output_id) { const xnn_status status = xnn_define_deconvolution_2d( subgraph_.get(), params.padding.top, params.padding.right, params.padding.bottom, params.padding.left, params.adjustment.height, params.adjustment.width, params.kernel.height, params.kernel.width, params.upsampling.height, params.upsampling.width, params.dilation.height, 
params.dilation.width, params.groups, params.group_input_channels, params.group_output_channels, -std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), input_id, filter_id, bias_id, output_id, 0 /* flags */); EXPECT_EQ(status, xnn_status_success); return *this; } inline SubgraphTester& AddDivide(uint32_t input_id1, uint32_t input_id2, uint32_t output_id) { const xnn_status status = xnn_define_divide(subgraph_.get(), -std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), input_id1, input_id2, output_id, 0 /* flags */); EXPECT_EQ(status, xnn_status_success); return *this; } inline SubgraphTester& AddEvenSplit2(size_t split_dim, uint32_t input_id, uint32_t output1_id, uint32_t output2_id) { const xnn_status status = xnn_define_even_split2( subgraph_.get(), split_dim, input_id, output1_id, output2_id, 0 /* flags */); EXPECT_EQ(status, xnn_status_success); return *this; } inline SubgraphTester& AddFullyConnected( uint32_t input_id, uint32_t filter_id, uint32_t bias_id, uint32_t output_id) { const xnn_status status = xnn_define_fully_connected( subgraph_.get(), -std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), input_id, filter_id, bias_id, output_id, 0 /* flags */); EXPECT_EQ(status, xnn_status_success); return *this; } inline SubgraphTester& AddGlobalAveragePooling(uint32_t input_id, uint32_t output_id) { const xnn_status status = xnn_define_global_average_pooling_2d( subgraph_.get(), -std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), input_id, output_id, 0 /* flags */); EXPECT_EQ(status, xnn_status_success); return *this; } inline SubgraphTester& AddEvenSplit3(uint32_t input_id, uint32_t output_id0, uint32_t output_id1, uint32_t output_id2) { const xnn_status status = xnn_define_even_split3( subgraph_.get(), 0, input_id, output_id0, output_id1, output_id2, 0 /*flags */); EXPECT_EQ(status, xnn_status_success); return *this; } inline SubgraphTester& AddHardSwish(uint32_t input_id, uint32_t output_id) { const xnn_status status = xnn_define_hardswish(subgraph_.get(), input_id, output_id, 0 /* flags */); EXPECT_EQ(status, xnn_status_success); return *this; } inline SubgraphTester& AddLeakyRelu(float negative_slope, uint32_t input_id, uint32_t output_id) { const xnn_status status = xnn_define_leaky_relu(subgraph_.get(), negative_slope, input_id, output_id, 0 /* flags */); EXPECT_EQ(status, xnn_status_success); return *this; } inline SubgraphTester& AddMaxPooling2D( uint32_t input_padding_top, uint32_t input_padding_right, uint32_t input_padding_bottom, uint32_t input_padding_left, uint32_t pooling_height, uint32_t pooling_width, uint32_t stride_height, uint32_t stride_width, uint32_t dilation_height, uint32_t dilation_width, uint32_t input_id, uint32_t output_id) { const xnn_status status = xnn_define_max_pooling_2d( subgraph_.get(), input_padding_top, input_padding_right, input_padding_bottom, input_padding_left, pooling_height, pooling_width, stride_height, stride_width, dilation_height, dilation_width, -std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), input_id, output_id, 0 /* flags */); EXPECT_EQ(status, xnn_status_success); return *this; } inline SubgraphTester& AddMultiply(uint32_t input_id1, uint32_t input_id2, uint32_t output_id) { const xnn_status status = xnn_define_multiply2(subgraph_.get(), -std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), input_id1, input_id2, output_id, 0 /* flags */); EXPECT_EQ(status, 
xnn_status_success); return *this; } inline SubgraphTester& AddPrelu(uint32_t input_id, uint32_t slope_id, uint32_t output_id) { const xnn_status status = xnn_define_prelu(subgraph_.get(), input_id, slope_id, output_id, /*flags=*/0); EXPECT_EQ(status, xnn_status_success); return *this; } inline SubgraphTester& AddSubtract(uint32_t input_id1, uint32_t input_id2, uint32_t output_id) { const xnn_status status = xnn_define_subtract(subgraph_.get(), -std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), input_id1, input_id2, output_id, 0 /* flags */); EXPECT_EQ(status, xnn_status_success); return *this; } inline SubgraphTester& Optimize() { const xnn_status status = xnn_subgraph_optimize(subgraph_.get(), 0 /* flags */); EXPECT_EQ(status, xnn_status_success); return *this; } inline SubgraphTester& RewriteForNchw() { xnn_subgraph_rewrite_for_nchw(subgraph_.get()); return *this; } inline SubgraphTester& RewriteForFp16() { EXPECT_TRUE(xnn_subgraph_rewrite_for_fp16(subgraph_.get())); return *this; } inline SubgraphTester& RewriteForFp16WithFailure() { EXPECT_FALSE(xnn_subgraph_rewrite_for_fp16(subgraph_.get())); return *this; } inline xnn_layout_type GetLayout(uint32_t value_id) const { return subgraph_->values[value_id].layout; } inline const xnn_value* const Value(uint32_t value_id) const { return &subgraph_->values[value_id]; } inline const xnn_node* const Node(uint32_t node_id) const { return &subgraph_->nodes[node_id]; } inline size_t NumNodes() const { return subgraph_->num_nodes; } inline size_t NumValues() const { return subgraph_->num_values; } inline xnn_subgraph* Subgraph() const { return subgraph_.get(); } protected: std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> subgraph_{nullptr, xnn_delete_subgraph}; std::unordered_map<uint32_t, std::vector<char>> external_tensors_; uint32_t output_id_; private: static inline size_t NumElements(const std::vector<size_t>& dims) { return std::accumulate(std::begin(dims), std::end(dims), size_t(1), std::multiplies<size_t>()); } std::vector<std::vector<char>> static_data_; std::mt19937 rng_; std::uniform_real_distribution<float> f32dist = std::uniform_real_distribution<float>(-1.0f, +1.0f); }; } // namespace xnnpack
17,718
35.837838
122
h
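A short sketch of SubgraphTester's fluent API; the node choice, shapes, and ids below are illustrative assumptions only:

// Build a two-value subgraph with a single Clamp node and check that
// optimization leaves exactly one node behind.
TEST(SUBGRAPH, hypothetical_single_clamp) {
  xnnpack::SubgraphTester tester(/*external_value_ids=*/2);
  tester
      .AddInputTensorF32({1, 8, 8, 4}, /*external_id=*/0)
      .AddOutputTensorF32({1, 8, 8, 4}, /*external_id=*/1)
      .AddClamp(/*output_min=*/0.0f, /*output_max=*/6.0f,
                /*input_id=*/0, /*output_id=*/1)
      .Optimize();
  ASSERT_EQ(size_t(1), tester.NumNodes());
}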
XNNPACK
XNNPACK-master/test/subgraph-unary-tester.h
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <algorithm> #include <array> #include <cstdint> #include <cstddef> #include <cstring> #include <functional> #include <limits> #include <memory> #include <numeric> #include <random> #include <vector> #include <xnnpack.h> #include <xnnpack/node-type.h> #include <xnnpack/operator.h> #include <xnnpack/requantization.h> #include <xnnpack/subgraph.h> #include <gtest/gtest.h> template < typename InputType, typename OutputType = InputType, size_t min_dim = 0, size_t max_dim = XNN_MAX_TENSOR_DIMS, bool pad_output = false> class UnaryTest : public ::testing::Test { protected: UnaryTest() { random_device = std::make_unique<std::random_device>(); rng = std::mt19937((*random_device)()); shape_dist = std::uniform_int_distribution<size_t>(min_dim, max_dim); dim_dist = std::uniform_int_distribution<size_t>(1, 9); i8dist = std::uniform_int_distribution<int32_t>(std::numeric_limits<int8_t>::min(), std::numeric_limits<int8_t>::max()); u8dist = std::uniform_int_distribution<int32_t>(std::numeric_limits<uint8_t>::min(), std::numeric_limits<uint8_t>::max()); u32dist = std::uniform_int_distribution<uint32_t>(); scale_dist = std::uniform_real_distribution<float>(0.1f, 10.0f); f32dist = std::uniform_real_distribution<float>(0.01f, 1.0f); dims = RandomShape(); AllocateInputsAndOutputs(); }; void AllocateInputsAndOutputs() { channels = dims.empty() ? 1 : dims.back(); xnn_shape shape = {}; shape.num_dims = dims.size(); memcpy(shape.dim, dims.data(), dims.size() * sizeof(size_t)); batch_size = xnn_shape_multiply_non_channel_dims(&shape); num_output_elements = batch_size * channels; scale = scale_dist(rng); signed_zero_point = i8dist(rng); unsigned_zero_point = u8dist(rng); input = std::vector<InputType>(num_output_elements + XNN_EXTRA_BYTES / sizeof(InputType)); const size_t output_padding = pad_output ? (XNN_EXTRA_BYTES / sizeof(InputType)) : 0; operator_output = std::vector<OutputType>(num_output_elements + output_padding); subgraph_output = std::vector<OutputType>(num_output_elements + output_padding); } std::vector<size_t> RandomShape() { std::vector<size_t> dims(shape_dist(rng)); std::generate(dims.begin(), dims.end(), [&] { return dim_dist(rng); }); return dims; } static size_t NumElements(const std::vector<size_t>& dims) { return std::accumulate(dims.begin(), dims.end(), size_t(1), std::multiplies<size_t>()); } std::unique_ptr<std::random_device> random_device; std::mt19937 rng; std::uniform_int_distribution<size_t> shape_dist; std::uniform_int_distribution<size_t> dim_dist; std::uniform_real_distribution<float> scale_dist; std::uniform_int_distribution<int32_t> i8dist; std::uniform_int_distribution<int32_t> u8dist; std::uniform_int_distribution<uint32_t> u32dist; std::uniform_real_distribution<float> f32dist; std::vector<size_t> dims; uint32_t input_id; uint32_t output_id; size_t channels; size_t batch_size; size_t num_output_elements; std::vector<InputType> input; std::vector<OutputType> operator_output; std::vector<OutputType> subgraph_output; float scale; int32_t signed_zero_point; int32_t unsigned_zero_point; };
3,374
32.088235
119
h
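Like BinaryTest, UnaryTest is a fixture template; a minimal hedged sketch (alias and assertions are assumptions) showing that the constructor's random shape drives all buffer sizing:

using UnaryTestF32 = UnaryTest<float>;

TEST_F(UnaryTestF32, hypothetical_buffer_sizing) {
  // The fixture computed num_output_elements as batch_size * channels
  // and sized both output buffers identically from it.
  ASSERT_EQ(num_output_elements, batch_size * channels);
  ASSERT_EQ(subgraph_output.size(), operator_output.size());
}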
XNNPACK
XNNPACK-master/test/tanh-operator-tester.h
// Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cmath> #include <cstddef> #include <cstdlib> #include <functional> #include <limits> #include <memory> #include <random> #include <vector> #include <fp16/fp16.h> #include <xnnpack.h> class TanhOperatorTester { public: inline TanhOperatorTester& channels(size_t channels) { assert(channels != 0); this->channels_ = channels; return *this; } inline size_t channels() const { return this->channels_; } inline TanhOperatorTester& input_stride(size_t input_stride) { assert(input_stride != 0); this->input_stride_ = input_stride; return *this; } inline size_t input_stride() const { if (this->input_stride_ == 0) { return this->channels_; } else { assert(this->input_stride_ >= this->channels_); return this->input_stride_; } } inline TanhOperatorTester& output_stride(size_t output_stride) { assert(output_stride != 0); this->output_stride_ = output_stride; return *this; } inline size_t output_stride() const { if (this->output_stride_ == 0) { return this->channels_; } else { assert(this->output_stride_ >= this->channels_); return this->output_stride_; } } inline TanhOperatorTester& batch_size(size_t batch_size) { assert(batch_size != 0); this->batch_size_ = batch_size; return *this; } inline size_t batch_size() const { return this->batch_size_; } inline TanhOperatorTester& input_scale(float input_scale) { assert(input_scale > 0.0f); assert(std::isnormal(input_scale)); this->input_scale_ = input_scale; return *this; } inline float input_scale() const { return this->input_scale_; } inline TanhOperatorTester& input_zero_point(uint8_t input_zero_point) { this->input_zero_point_ = input_zero_point; return *this; } inline uint8_t input_zero_point() const { return this->input_zero_point_; } inline float output_scale() const { return 1.0f / 128.0f; } inline uint8_t output_zero_point() const { return 128; } inline TanhOperatorTester& qmin(uint8_t qmin) { this->qmin_ = qmin; return *this; } inline uint8_t qmin() const { return this->qmin_; } inline TanhOperatorTester& qmax(uint8_t qmax) { this->qmax_ = qmax; return *this; } inline uint8_t qmax() const { return this->qmax_; } inline TanhOperatorTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void TestF16() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(-5.0f, 5.0f); std::vector<uint16_t> input((batch_size() - 1) * input_stride() + channels() + XNN_EXTRA_BYTES / sizeof(uint16_t)); std::vector<uint16_t> output((batch_size() - 1) * output_stride() + channels()); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); }); std::fill(output.begin(), output.end(), UINT16_C(0x7E00) /* NaN */); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { const float x = fp16_ieee_to_fp32_value(input[i * input_stride() + c]); output_ref[i * channels() + c] = std::tanh(x); } } // Create, setup, run, and destroy Tanh operator.
ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t tanh_op = nullptr; const xnn_status status = xnn_create_tanh_nc_f16( channels(), input_stride(), output_stride(), 0, &tanh_op); if (status == xnn_status_unsupported_hardware) { GTEST_SKIP(); } ASSERT_EQ(xnn_status_success, status); ASSERT_NE(nullptr, tanh_op); // Smart pointer to automatically delete tanh_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_tanh_op(tanh_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_tanh_nc_f16(tanh_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_tanh_nc_f16(tanh_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(tanh_op, /*threadpool=*/nullptr)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { ASSERT_NEAR( fp16_ieee_to_fp32_value(output[i * output_stride() + c]), output_ref[i * channels() + c], std::max(1.0e-4f, std::abs(output_ref[i * channels() + c]) * 5.0e-3f)); } } } } void TestF32() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(-10.0f, 10.0f); std::vector<float> input((batch_size() - 1) * input_stride() + channels() + XNN_EXTRA_BYTES / sizeof(float)); std::vector<float> output((batch_size() - 1) * output_stride() + channels()); std::vector<double> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return f32dist(rng); }); std::fill(output.begin(), output.end(), std::nanf("")); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { const double x = input[i * input_stride() + c]; output_ref[i * channels() + c] = std::tanh(x); } } // Create, setup, run, and destroy Tanh operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t tanh_op = nullptr; xnn_status status = xnn_create_tanh_nc_f32( channels(), input_stride(), output_stride(), 0, &tanh_op); ASSERT_EQ(xnn_status_success, status); ASSERT_NE(nullptr, tanh_op); // Smart pointer to automatically delete tanh_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_tanh_op(tanh_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_tanh_nc_f32(tanh_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_tanh_nc_f32(tanh_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(tanh_op, /*threadpool=*/nullptr)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { ASSERT_NEAR( output[i * output_stride() + c], output_ref[i * channels() + c], 5.0e-6); } } } } void TestRunF32() const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(-10.0f, 10.0f); std::vector<float> input((batch_size() - 1) * input_stride() + channels() + XNN_EXTRA_BYTES / sizeof(float)); std::vector<float> output((batch_size() - 1) * output_stride() + channels()); std::vector<double> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), [&]() { return f32dist(rng); }); std::fill(output.begin(), output.end(), std::nanf("")); // Compute reference results. 
for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { const double x = input[i * input_stride() + c]; output_ref[i * channels() + c] = std::tanh(x); } } ASSERT_EQ(xnn_status_success, xnn_run_tanh_nc_f32( channels(), input_stride(), output_stride(), batch_size(), input.data(), output.data(), 0, nullptr /* thread pool */)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { ASSERT_NEAR( output[i * output_stride() + c], output_ref[i * channels() + c], 5.0e-6); } } } } void TestQS8() const { std::random_device random_device; auto rng = std::mt19937(random_device()); auto i8rng = std::bind( std::uniform_int_distribution<int32_t>(std::numeric_limits<int8_t>::min(), std::numeric_limits<int8_t>::max()), std::ref(rng)); std::vector<int8_t> input((batch_size() - 1) * input_stride() + channels() + XNN_EXTRA_BYTES / sizeof(int8_t)); std::vector<int8_t> output((batch_size() - 1) * output_stride() + channels()); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), std::ref(i8rng)); std::fill(output.begin(), output.end(), 0xA5); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { const float x = input_scale() * (int32_t(input[i * input_stride() + c]) - int32_t(input_zero_point() - 0x80)); const float tanh_x = std::tanh(x); const float scaled_tanh_x = tanh_x / output_scale(); float y = scaled_tanh_x; y = std::min<float>(y, int32_t(qmax() - 0x80) - int32_t(output_zero_point() - 0x80)); y = std::max<float>(y, int32_t(qmin() - 0x80) - int32_t(output_zero_point() - 0x80)); output_ref[i * channels() + c] = y + int32_t(output_zero_point() - 0x80); } } // Create, setup, run, and destroy Tanh operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t tanh_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_tanh_nc_qs8( channels(), input_stride(), output_stride(), int8_t(input_zero_point() - 0x80), input_scale(), int8_t(output_zero_point() - 0x80), output_scale(), int8_t(qmin() - 0x80), int8_t(qmax() - 0x80), 0, &tanh_op)); ASSERT_NE(nullptr, tanh_op); // Smart pointer to automatically delete tanh_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_tanh_op(tanh_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_tanh_nc_qs8(tanh_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_tanh_nc_qs8(tanh_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(tanh_op, /*threadpool=*/nullptr)); // Verify results.
for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_NEAR(float(int32_t(output[i * output_stride() + c])), output_ref[i * channels() + c], 0.6f); } } } } void TestQU8() const { std::random_device random_device; auto rng = std::mt19937(random_device()); auto u8rng = std::bind(std::uniform_int_distribution<uint32_t>(0, std::numeric_limits<uint8_t>::max()), rng); std::vector<uint8_t> input((batch_size() - 1) * input_stride() + channels() + XNN_EXTRA_BYTES / sizeof(uint8_t)); std::vector<uint8_t> output((batch_size() - 1) * output_stride() + channels()); std::vector<float> output_ref(batch_size() * channels()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(input.begin(), input.end(), std::ref(u8rng)); std::fill(output.begin(), output.end(), 0xA5); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { const float x = input_scale() * (int32_t(input[i * input_stride() + c]) - int32_t(input_zero_point())); const float tanh_x = std::tanh(x); const float scaled_tanh_x = tanh_x / output_scale(); float y = scaled_tanh_x; y = std::min<float>(y, int32_t(qmax()) - int32_t(output_zero_point())); y = std::max<float>(y, int32_t(qmin()) - int32_t(output_zero_point())); output_ref[i * channels() + c] = y + int32_t(output_zero_point()); } } // Create, setup, run, and destroy Tanh operator. ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */)); xnn_operator_t tanh_op = nullptr; ASSERT_EQ(xnn_status_success, xnn_create_tanh_nc_qu8( channels(), input_stride(), output_stride(), input_zero_point(), input_scale(), output_zero_point(), output_scale(), qmin(), qmax(), 0, &tanh_op)); ASSERT_NE(nullptr, tanh_op); // Smart pointer to automatically delete tanh_op. std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_tanh_op(tanh_op, xnn_delete_operator); ASSERT_EQ(xnn_status_success, xnn_reshape_tanh_nc_qu8(tanh_op, batch_size(), /*threadpool=*/nullptr)); ASSERT_EQ(xnn_status_success, xnn_setup_tanh_nc_qu8(tanh_op, input.data(), output.data())); ASSERT_EQ(xnn_status_success, xnn_run_operator(tanh_op, /*threadpool=*/nullptr)); // Verify results. for (size_t i = 0; i < batch_size(); i++) { for (size_t c = 0; c < channels(); c++) { EXPECT_NEAR(float(int32_t(output[i * output_stride() + c])), output_ref[i * channels() + c], 0.6f); } } } } private: size_t batch_size_{1}; size_t channels_{1}; size_t input_stride_{0}; size_t output_stride_{0}; float input_scale_{0.75f}; uint8_t input_zero_point_{121}; uint8_t qmin_{0}; uint8_t qmax_{255}; size_t iterations_{15}; };
14,147
34.547739
119
h
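A hedged usage sketch for TanhOperatorTester's quantized QS8 path; the test name and quantization parameters are illustrative assumptions:

// input_scale and input_zero_point shape the quantized input range;
// qmin/qmax tighten the representable output interval.
TEST(TANH_NC_QS8, hypothetical_quantized) {
  TanhOperatorTester()
      .batch_size(2)
      .channels(16)
      .input_scale(0.5f)
      .input_zero_point(100)
      .qmin(1)
      .qmax(254)
      .TestQS8();
}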
XNNPACK
XNNPACK-master/test/transpose-microkernel-tester.h
// Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <algorithm> #include <cassert> #include <cstddef> #include <cstdlib> #include <cstring> #include <numeric> #include <vector> #include <gtest/gtest.h> #include <xnnpack.h> #include <xnnpack/microfnptr.h> #include <xnnpack/microparams-init.h> #include <xnnpack/params.h> class TransposeMicrokernelTester { public: inline TransposeMicrokernelTester& element_size(size_t element_size) { assert(element_size != 0); this->element_size_ = element_size; return *this; } inline size_t element_size() const { return this->element_size_; } inline TransposeMicrokernelTester& block_height(size_t block_height) { assert(block_height != 0); this->block_height_ = block_height; return *this; } inline size_t block_height() const { return this->block_height_; } inline TransposeMicrokernelTester& block_width(size_t block_width) { assert(block_width != 0); this->block_width_ = block_width; return *this; } inline size_t block_width() const { return this->block_width_; } inline TransposeMicrokernelTester& input_stride(size_t input_stride) { this->input_stride_ = input_stride; return *this; } inline size_t input_stride() const { return this->input_stride_; } inline TransposeMicrokernelTester& output_stride(size_t output_stride) { this->output_stride_ = output_stride; return *this; } inline size_t output_stride() const { return this->output_stride_; } inline TransposeMicrokernelTester& input_element_stride(size_t input_element_stride) { assert(input_element_stride >= element_size_); this->input_element_stride_ = input_element_stride; return *this; } inline size_t input_element_stride() const { if (input_element_stride_ == 0) { return element_size_; } else { return input_element_stride_; } } inline TransposeMicrokernelTester& output_element_stride(size_t output_element_stride) { assert(output_element_stride >= element_size_); this->output_element_stride_ = output_element_stride; return *this; } inline size_t output_element_stride() const { if (output_element_stride_ == 0) { return element_size_; } else { return output_element_stride_; } } inline TransposeMicrokernelTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void Test(xnn_transposev_ukernel_fn transpose) const { std::vector<uint8_t> input(input_stride() * block_height() * input_element_stride() + XNN_EXTRA_BYTES); std::vector<uint8_t> output(output_stride() * block_width() * output_element_stride()); std::iota(input.begin(), input.end(), 0); std::fill(output.begin(), output.end(), UINT8_C(0xA5)); // Call optimized micro-kernel. transpose(input.data(), output.data(), input_stride() * input_element_stride(), output_stride() * output_element_stride(), input_element_stride(), output_element_stride(), element_size(), block_width(), block_height()); // Verify results.
for (size_t c = 0; c < block_width(); c++) { for (size_t r = 0; r < block_height(); r++) { EXPECT_EQ(std::memcmp(&input[input_element_stride() * (c+ r * input_stride())], &output[output_element_stride() * (r + c * output_stride())], element_size()), 0) << "at row " << r << " / " << block_height() << ", at column " << c << " / " << block_width(); } } } void Test(xnn_x64_transposec_ukernel_fn transpose, const xnn_init_x64_transpose_params_fn init_params = nullptr) const { std::vector<uint64_t> input(input_stride() * output_stride() + XNN_EXTRA_BYTES / sizeof(uint64_t)); std::vector<uint64_t> output(input_stride() * output_stride()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::iota(input.begin(), input.end(), 0); std::fill(output.begin(), output.end(), UINT64_C(0xBADC0FFEE0DDF00D)); union xnn_x64_transpose_params params; if (init_params != nullptr) { init_params(&params); } // Call optimized micro-kernel. transpose(input.data(), output.data(), input_stride() * sizeof(uint64_t), output_stride() * sizeof(uint64_t), block_width(), block_height(), &params); // Verify results. for (size_t c = 0; c < block_width(); c++) { for (size_t r = 0; r < block_height(); r++) { EXPECT_EQ(input[c + r * input_stride()], output[r + c * output_stride()]) << "at row " << r << " / " << block_height() << ", at column " << c << " / " << block_width(); } } } } void Test(xnn_x32_transposec_ukernel_fn transpose, const xnn_init_x32_transpose_params_fn init_params = nullptr) const { std::vector<uint32_t> input(input_stride() * output_stride() + XNN_EXTRA_BYTES / sizeof(uint32_t)); std::vector<uint32_t> output(input_stride() * output_stride()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::iota(input.begin(), input.end(), 0); std::fill(output.begin(), output.end(), UINT32_C(0xDEADBEEF)); union xnn_x32_transpose_params params; if (init_params != nullptr) { init_params(&params); } // Call optimized micro-kernel. transpose(input.data(), output.data(), input_stride() * sizeof(uint32_t), output_stride() * sizeof(uint32_t), block_width(), block_height(), &params); // Verify results. for (size_t c = 0; c < block_width(); c++) { for (size_t r = 0; r < block_height(); r++) { EXPECT_EQ(input[c + r * input_stride()], output[r + c * output_stride()]) << "at row " << r << " / " << block_height() << ", at column " << c << " / " << block_width(); } } } } void Test(xnn_x24_transposec_ukernel_fn transpose, const xnn_init_x24_transpose_params_fn init_params = nullptr) const { std::vector<uint8_t> input(input_stride() * output_stride() * element_size() + XNN_EXTRA_BYTES); std::vector<uint8_t> output(input_stride() * output_stride() * element_size()); std::iota(input.begin(), input.end(), 0); std::fill(output.begin(), output.end(), UINT8_C(0xA5)); union xnn_x24_transpose_params params; if (init_params != nullptr) { init_params(&params); } // Call optimized micro-kernel. transpose(input.data(), output.data(), input_stride() * element_size(), output_stride() * element_size(), block_width(), block_height(), &params); // Verify results. 
for (size_t c = 0; c < block_width(); c++) { for (size_t r = 0; r < block_height(); r++) { EXPECT_EQ(std::memcmp(&input[element_size() * (c+ r * input_stride())], &output[element_size() * (r + c * output_stride())], element_size()), 0) << "at row " << r << " / " << block_height() << ", at column " << c << " / " << block_width(); } } } void Test(xnn_x16_transposec_ukernel_fn transpose, const xnn_init_x16_transpose_params_fn init_params = nullptr) const { std::vector<uint16_t> input(input_stride() * output_stride() + XNN_EXTRA_BYTES / sizeof(uint16_t)); std::vector<uint16_t> output(input_stride() * output_stride()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::iota(input.begin(), input.end(), 0); std::fill(output.begin(), output.end(), UINT16_C(0xDEAD)); union xnn_x16_transpose_params params; if (init_params != nullptr) { init_params(&params); } // Call optimized micro-kernel. transpose(input.data(), output.data(), input_stride() * sizeof(uint16_t), output_stride() * sizeof(uint16_t), block_width(), block_height(), &params); // Verify results. for (size_t c = 0; c < block_width(); c++) { for (size_t r = 0; r < block_height(); r++) { ASSERT_EQ(input[c + r * input_stride()], output[r + c * output_stride()]) << "at row " << r << " / " << block_height() << ", at column " << c << " / " << block_width(); } } } } void Test(xnn_x8_transposec_ukernel_fn transpose, const xnn_init_x8_transpose_params_fn init_params = nullptr) const { std::vector<uint8_t> input(input_stride() * output_stride() + XNN_EXTRA_BYTES); std::vector<uint8_t> output(input_stride() * output_stride()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::iota(input.begin(), input.end(), 0); std::fill(output.begin(), output.end(), UINT8_C(0xA5)); union xnn_x8_transpose_params params; if (init_params != nullptr) { init_params(&params); } // Call optimized micro-kernel. transpose(input.data(), output.data(), input_stride() * sizeof(uint8_t), output_stride() * sizeof(uint8_t), block_width(), block_height(), &params); // Verify results. for (size_t c = 0; c < block_width(); c++) { for (size_t r = 0; r < block_height(); r++) { ASSERT_EQ((int)input[c + r * input_stride()], (int)output[r + c * output_stride()]) << "at row " << r << " / " << block_height() << ", at column " << c << " / " << block_width(); } } } } private: size_t element_size_ = 1; size_t input_stride_ = 1; size_t output_stride_ = 1; size_t input_element_stride_ = 0; size_t output_element_stride_ = 0; size_t block_height_ = 1; size_t block_width_ = 1; size_t iterations_ = 15; };
10,378
34.544521
122
h
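A usage sketch for the transpose micro-kernel tester above; the kernel symbol is an illustrative placeholder for whichever xnn_x32_transposec_ukernel_fn is built for the target architecture, not necessarily a symbol the library exports under that exact name:

TEST(X32_TRANSPOSEC, bh2_bw2) {
  TransposeMicrokernelTester()
    .block_height(2)
    .block_width(2)
    .input_stride(2)      // elements per input row; must cover block_width
    .output_stride(2)     // elements per output row; must cover block_height
    .iterations(3)
    .Test(xnn_x32_transposec_ukernel__2x2_scalar_int);  // placeholder symbol
}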
XNNPACK
XNNPACK-master/test/transpose-normalization-tester.h
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <cassert>
#include <cstddef>
#include <vector>

#include <gtest/gtest.h>

#include <xnnpack.h>
#include <xnnpack/normalization.h>

class TransposeNormalizationTester {
 public:
  inline TransposeNormalizationTester& num_dims(size_t num_dims) {
    assert(num_dims != 0);
    this->num_dims_ = num_dims;
    return *this;
  }

  inline size_t num_dims() const { return this->num_dims_; }

  inline TransposeNormalizationTester& element_size(size_t element_size) {
    this->element_size_ = element_size;
    return *this;
  }

  inline size_t element_size() const { return this->element_size_; }

  inline TransposeNormalizationTester& expected_dims(size_t expected_dims) {
    this->expected_dims_ = expected_dims;
    return *this;
  }

  inline size_t expected_dims() const { return this->expected_dims_; }

  inline TransposeNormalizationTester& expected_element_size(size_t expected_element_size) {
    this->expected_element_size_ = expected_element_size;
    return *this;
  }

  inline size_t expected_element_size() const { return this->expected_element_size_; }

  inline TransposeNormalizationTester& shape(const std::vector<size_t> shape) {
    assert(shape.size() <= XNN_MAX_TENSOR_DIMS);
    this->shape_ = shape;
    return *this;
  }

  inline TransposeNormalizationTester& perm(const std::vector<size_t> perm) {
    assert(perm.size() <= XNN_MAX_TENSOR_DIMS);
    this->perm_ = perm;
    return *this;
  }

  inline TransposeNormalizationTester& input_stride(const std::vector<size_t> input_stride) {
    assert(input_stride.size() <= XNN_MAX_TENSOR_DIMS);
    this->input_stride_ = input_stride;
    return *this;
  }

  inline TransposeNormalizationTester& output_stride(const std::vector<size_t> output_stride) {
    assert(output_stride.size() <= XNN_MAX_TENSOR_DIMS);
    this->output_stride_ = output_stride;
    return *this;
  }

  inline TransposeNormalizationTester& expected_shape(const std::vector<size_t> expected_shape) {
    this->expected_shape_ = expected_shape;
    return *this;
  }

  inline const std::vector<size_t>& expected_shape() const { return this->expected_shape_; }

  inline TransposeNormalizationTester& expected_perm(const std::vector<size_t> expected_perm) {
    this->expected_perm_ = expected_perm;
    return *this;
  }

  inline const std::vector<size_t>& expected_perm() const { return this->expected_perm_; }

  inline TransposeNormalizationTester& expected_input_stride(const std::vector<size_t> expected_input_stride) {
    this->expected_input_stride_ = expected_input_stride;
    return *this;
  }

  inline TransposeNormalizationTester& expected_output_stride(const std::vector<size_t> expected_output_stride) {
    this->expected_output_stride_ = expected_output_stride;
    return *this;
  }

  inline const std::vector<size_t>& expected_input_stride() const { return this->expected_input_stride_; }

  inline const std::vector<size_t>& expected_output_stride() const { return this->expected_output_stride_; }

  inline TransposeNormalizationTester& calculate_expected_input_stride() {
    expected_input_stride_.resize(expected_dims());
    expected_input_stride_[expected_dims() - 1] = expected_element_size();
    for (size_t i = expected_dims() - 1; i-- != 0;) {
      expected_input_stride_[i] = expected_input_stride_[i + 1] * expected_shape_[i + 1];
    }
    return *this;
  }

  inline TransposeNormalizationTester& calculate_expected_output_stride() {
    expected_output_stride_.resize(expected_dims());
    expected_output_stride_[expected_dims() - 1] = expected_element_size();
    for (size_t i = expected_dims() - 1; i-- != 0;) {
      expected_output_stride_[i] = expected_output_stride_[i + 1] * expected_shape_[expected_perm_[i + 1]];
    }
    return *this;
  }

  void Test() const {
    size_t actual_element_size;
    size_t actual_normalized_dims;
    std::vector<size_t> actual_normalized_shape(num_dims());
    std::vector<size_t> actual_normalized_perm(num_dims());
    std::vector<size_t> actual_normalized_input_stride(num_dims());
    std::vector<size_t> actual_normalized_output_stride(num_dims());

    xnn_normalize_transpose_permutation(num_dims(), element_size(), perm_.data(), shape_.data(),
                                        input_stride_.empty() ? nullptr : input_stride_.data(),
                                        output_stride_.empty() ? nullptr : output_stride_.data(),
                                        &actual_normalized_dims, &actual_element_size,
                                        actual_normalized_perm.data(), actual_normalized_shape.data(),
                                        actual_normalized_input_stride.data(),
                                        actual_normalized_output_stride.data());

    EXPECT_EQ(expected_element_size(), actual_element_size);
    EXPECT_EQ(expected_dims(), actual_normalized_dims);
    for (size_t i = 0; i < expected_dims(); ++i) {
      EXPECT_EQ(expected_shape()[i], actual_normalized_shape[i]);
      EXPECT_EQ(expected_perm()[i], actual_normalized_perm[i]);
      EXPECT_EQ(expected_input_stride()[i], actual_normalized_input_stride[i]);
      EXPECT_EQ(expected_output_stride()[i], actual_normalized_output_stride[i]);
    }
  }

 private:
  size_t num_dims_;
  size_t element_size_;
  size_t expected_dims_;
  size_t expected_element_size_;
  std::vector<size_t> shape_;
  std::vector<size_t> perm_;
  std::vector<size_t> input_stride_;
  std::vector<size_t> output_stride_;
  std::vector<size_t> expected_shape_;
  std::vector<size_t> expected_perm_;
  std::vector<size_t> expected_input_stride_;
  std::vector<size_t> expected_output_stride_;
};
5,725
35.941935
117
h
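An illustrative sketch of how this tester composes: when a permutation leaves the innermost dimension in place, xnn_normalize_transpose_permutation can fold that dimension into a wider element, so the expected values below assume a 3-D transpose collapsing to 2-D. The concrete numbers are hypothetical test inputs chosen for the example, not values taken from the library's own test suite:

TEST(TRANSPOSE_NORMALIZATION, inner_dim_folded_into_element) {
  TransposeNormalizationTester()
    .num_dims(3)
    .element_size(4)
    .shape({2, 3, 4})
    .perm({1, 0, 2})               // innermost dimension untouched
    .expected_dims(2)
    .expected_element_size(16)     // assumed fold: 4 trailing elements x 4 bytes
    .expected_shape({2, 3})
    .expected_perm({1, 0})
    .calculate_expected_input_stride()
    .calculate_expected_output_stride()
    .Test();
}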
XNNPACK
XNNPACK-master/test/transpose-operator-tester.h
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <memory>
#include <numeric>
#include <vector>

#include <xnnpack.h>

#include <gtest/gtest.h>

inline size_t reference_index(
    const size_t* input_stride,
    const size_t* output_stride,
    const size_t* perm,
    const size_t num_dims,
    size_t pos)
{
  size_t in_pos = 0;
  for (size_t j = 0; j < num_dims; ++j) {
    const size_t idx = pos / output_stride[j];
    pos = pos % output_stride[j];
    in_pos += idx * input_stride[perm[j]];
  }
  return in_pos;
}

class TransposeOperatorTester {
 public:
  inline TransposeOperatorTester& num_dims(size_t num_dims) {
    assert(num_dims != 0);
    this->num_dims_ = num_dims;
    return *this;
  }

  inline size_t num_dims() const { return this->num_dims_; }

  inline TransposeOperatorTester& shape(std::vector<size_t> shape) {
    assert(shape.size() <= XNN_MAX_TENSOR_DIMS);
    this->shape_ = shape;
    return *this;
  }

  inline const std::vector<size_t>& dims() const { return this->shape_; }

  inline TransposeOperatorTester& perm(std::vector<size_t> perm) {
    assert(perm.size() <= XNN_MAX_TENSOR_DIMS);
    this->perm_ = perm;
    return *this;
  }

  inline const std::vector<size_t>& perm() const { return this->perm_; }

  void TestX8() const {
    size_t count = std::accumulate(dims().cbegin(), dims().cend(), size_t{1}, std::multiplies<size_t>());
    std::vector<uint8_t> input(count + XNN_EXTRA_BYTES / sizeof(uint8_t));
    std::vector<uint8_t> output(count);
    std::vector<size_t> input_stride(num_dims(), 1);
    std::vector<size_t> output_stride(num_dims(), 1);
    for (size_t i = num_dims() - 1; i > 0; --i) {
      input_stride[i - 1] = input_stride[i] * shape_[i];
      output_stride[i - 1] = output_stride[i] * shape_[perm()[i]];
    }
    ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
    xnn_operator_t transpose_op = nullptr;
    std::iota(input.begin(), input.end(), 0);
    std::fill(output.begin(), output.end(), UINT8_C(0xA5));

    ASSERT_EQ(xnn_status_success, xnn_create_transpose_nd_x8(0, &transpose_op));
    ASSERT_NE(nullptr, transpose_op);

    // Smart pointer to automatically delete transpose_op.
    std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_transpose_op(transpose_op, xnn_delete_operator);

    ASSERT_EQ(xnn_status_success,
              xnn_reshape_transpose_nd_x8(
                  transpose_op,
                  num_dims(), shape_.data(), perm_.data(),
                  nullptr /* thread pool */));

    ASSERT_EQ(xnn_status_success,
              xnn_setup_transpose_nd_x8(
                  transpose_op,
                  input.data(), output.data()));

    // Run operator.
    ASSERT_EQ(xnn_status_success, xnn_run_operator(transpose_op, nullptr /* thread pool */));

    // Verify results.
    for (size_t i = 0; i < count; ++i) {
      const size_t in_idx = reference_index(input_stride.data(), output_stride.data(), perm_.data(), num_dims(), i);
      ASSERT_EQ(input[in_idx], output[i]);
    }
  }

  void TestRunX8() const {
    const size_t count = std::accumulate(dims().cbegin(), dims().cend(), size_t{1}, std::multiplies<size_t>());
    std::vector<uint8_t> input(count + XNN_EXTRA_BYTES / sizeof(uint8_t));
    std::vector<uint8_t> output(count);
    std::vector<size_t> input_stride(input.size(), 1);
    std::vector<size_t> output_stride(input.size(), 1);
    for (size_t i = num_dims() - 1; i > 0; --i) {
      input_stride[i - 1] = input_stride[i] * shape_[i];
      output_stride[i - 1] = output_stride[i] * shape_[perm()[i]];
    }
    std::iota(input.begin(), input.end(), 0);
    std::fill(output.begin(), output.end(), UINT8_C(0xA5));

    // Call transpose eager API
    ASSERT_EQ(xnn_status_success,
              xnn_run_transpose_nd_x8(
                  input.data(), output.data(),
                  num_dims(), shape_.data(), perm_.data(),
                  0 /* flags */,
                  nullptr /* thread pool */));

    // Verify results.
    for (size_t i = 0; i < count; ++i) {
      const size_t in_idx = reference_index(input_stride.data(), output_stride.data(), perm_.data(), num_dims(), i);
      ASSERT_EQ(input[in_idx], output[i]);
    }
  }

  void TestX16() const {
    size_t count = std::accumulate(dims().cbegin(), dims().cend(), size_t{1}, std::multiplies<size_t>());
    std::vector<uint16_t> input(count + XNN_EXTRA_BYTES / sizeof(uint16_t));
    std::vector<uint16_t> output(count);
    std::vector<size_t> input_stride(num_dims(), 1);
    std::vector<size_t> output_stride(num_dims(), 1);
    for (size_t i = num_dims() - 1; i > 0; --i) {
      input_stride[i - 1] = input_stride[i] * shape_[i];
      output_stride[i - 1] = output_stride[i] * shape_[perm()[i]];
    }
    ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
    xnn_operator_t transpose_op = nullptr;
    std::iota(input.begin(), input.end(), 0);
    std::fill(output.begin(), output.end(), UINT16_C(0xDEAD));

    ASSERT_EQ(xnn_status_success, xnn_create_transpose_nd_x16(0, &transpose_op));
    ASSERT_NE(nullptr, transpose_op);

    // Smart pointer to automatically delete transpose_op.
    std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_transpose_op(transpose_op, xnn_delete_operator);

    ASSERT_EQ(xnn_status_success,
              xnn_reshape_transpose_nd_x16(
                  transpose_op,
                  num_dims(), shape_.data(), perm_.data(),
                  nullptr /* thread pool */));

    ASSERT_EQ(xnn_status_success,
              xnn_setup_transpose_nd_x16(
                  transpose_op,
                  input.data(), output.data()));

    // Run operator.
    ASSERT_EQ(xnn_status_success, xnn_run_operator(transpose_op, nullptr /* thread pool */));

    // Verify results.
    for (size_t i = 0; i < count; ++i) {
      const size_t in_idx = reference_index(input_stride.data(), output_stride.data(), perm_.data(), num_dims(), i);
      ASSERT_EQ(input[in_idx], output[i]);
    }
  }

  void TestRunX16() const {
    const size_t count = std::accumulate(dims().cbegin(), dims().cend(), size_t{1}, std::multiplies<size_t>());
    std::vector<uint16_t> input(count + XNN_EXTRA_BYTES / sizeof(uint16_t));
    std::vector<uint16_t> output(count);
    std::vector<size_t> input_stride(input.size(), 1);
    std::vector<size_t> output_stride(input.size(), 1);
    for (size_t i = num_dims() - 1; i > 0; --i) {
      input_stride[i - 1] = input_stride[i] * shape_[i];
      output_stride[i - 1] = output_stride[i] * shape_[perm()[i]];
    }
    std::iota(input.begin(), input.end(), 0);
    // Note: the canary value must fit in uint16_t; 0xDEADBEEF overflows it.
    std::fill(output.begin(), output.end(), UINT16_C(0xDEAD));

    // Call transpose eager API
    ASSERT_EQ(xnn_status_success,
              xnn_run_transpose_nd_x16(
                  input.data(), output.data(),
                  num_dims(), shape_.data(), perm_.data(),
                  0 /* flags */,
                  nullptr /* thread pool */));

    // Verify results.
    for (size_t i = 0; i < count; ++i) {
      const size_t in_idx = reference_index(input_stride.data(), output_stride.data(), perm_.data(), num_dims(), i);
      ASSERT_EQ(input[in_idx], output[i]);
    }
  }

  void TestX32() const {
    size_t count = std::accumulate(dims().cbegin(), dims().cend(), size_t{1}, std::multiplies<size_t>());
    std::vector<uint32_t> input(count + XNN_EXTRA_BYTES / sizeof(uint32_t));
    std::vector<uint32_t> output(count);
    std::vector<size_t> input_stride(num_dims(), 1);
    std::vector<size_t> output_stride(num_dims(), 1);
    for (size_t i = num_dims() - 1; i > 0; --i) {
      input_stride[i - 1] = input_stride[i] * shape_[i];
      output_stride[i - 1] = output_stride[i] * shape_[perm()[i]];
    }
    ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
    xnn_operator_t transpose_op = nullptr;
    std::iota(input.begin(), input.end(), 0);
    std::fill(output.begin(), output.end(), UINT32_C(0xDEADBEEF));

    ASSERT_EQ(xnn_status_success, xnn_create_transpose_nd_x32(0, &transpose_op));
    ASSERT_NE(nullptr, transpose_op);

    // Smart pointer to automatically delete transpose_op.
    std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_transpose_op(transpose_op, xnn_delete_operator);

    ASSERT_EQ(xnn_status_success,
              xnn_reshape_transpose_nd_x32(
                  transpose_op,
                  num_dims(), shape_.data(), perm_.data(),
                  nullptr /* thread pool */));

    ASSERT_EQ(xnn_status_success,
              xnn_setup_transpose_nd_x32(
                  transpose_op,
                  input.data(), output.data()));

    // Run operator.
    ASSERT_EQ(xnn_status_success, xnn_run_operator(transpose_op, nullptr /* thread pool */));

    // Verify results.
    for (size_t i = 0; i < count; ++i) {
      const size_t in_idx = reference_index(input_stride.data(), output_stride.data(), perm_.data(), num_dims(), i);
      ASSERT_EQ(input[in_idx], output[i]);
    }
  }

  void TestRunX32() const {
    const size_t count = std::accumulate(dims().cbegin(), dims().cend(), size_t{1}, std::multiplies<size_t>());
    std::vector<uint32_t> input(count + XNN_EXTRA_BYTES / sizeof(uint32_t));
    std::vector<uint32_t> output(count);
    std::vector<size_t> input_stride(input.size(), 1);
    std::vector<size_t> output_stride(input.size(), 1);
    for (size_t i = num_dims() - 1; i > 0; --i) {
      input_stride[i - 1] = input_stride[i] * shape_[i];
      output_stride[i - 1] = output_stride[i] * shape_[perm()[i]];
    }
    std::iota(input.begin(), input.end(), 0);
    std::fill(output.begin(), output.end(), UINT32_C(0xDEADBEEF));

    // Call transpose eager API
    ASSERT_EQ(xnn_status_success,
              xnn_run_transpose_nd_x32(
                  input.data(), output.data(),
                  num_dims(), shape_.data(), perm_.data(),
                  0 /* flags */,
                  nullptr /* thread pool */));

    // Verify results.
    for (size_t i = 0; i < count; ++i) {
      const size_t in_idx = reference_index(input_stride.data(), output_stride.data(), perm_.data(), num_dims(), i);
      ASSERT_EQ(input[in_idx], output[i]);
    }
  }

 private:
  size_t num_dims_ = 1;
  std::vector<size_t> shape_;
  std::vector<size_t> perm_;
};
10,573
36.363958
119
h
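A short sketch of driving both paths of the operator-level tester above — the create/reshape/setup path and the eager xnn_run_transpose_nd_* path — with the same shape and permutation (the concrete values are hypothetical):

TEST(TRANSPOSE_ND_X32, rank3_swap_outer_dims) {
  TransposeOperatorTester()
    .num_dims(3)
    .shape({5, 7, 3})
    .perm({1, 0, 2})   // swap the two outer dimensions
    .TestX32();        // operator create/reshape/setup/run path
  TransposeOperatorTester()
    .num_dims(3)
    .shape({5, 7, 3})
    .perm({1, 0, 2})
    .TestRunX32();     // same transpose through the eager API
}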
XNNPACK
XNNPACK-master/test/unpool-microkernel-tester.h
// Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cstddef> #include <cstdlib> #include <functional> #include <random> #include <vector> #include <xnnpack/microfnptr.h> class UnpoolMicrokernelTester { public: inline UnpoolMicrokernelTester& p(size_t p) { assert(p != 0); this->p_ = p; return *this; } inline size_t p() const { return this->p_; } inline UnpoolMicrokernelTester& c(size_t c) { assert(c != 0); this->c_ = c; return *this; } inline size_t c() const { return this->c_; } inline UnpoolMicrokernelTester& f(uint32_t f) { this->f_ = f; return *this; } inline uint32_t f() const { return this->f_; } inline UnpoolMicrokernelTester& y_stride(size_t y_stride) { assert(y_stride != 0); this->y_stride_ = y_stride; return *this; } inline size_t y_stride() const { if (this->y_stride_ == 0) { return c(); } else { assert(this->y_stride_ >= c()); return this->y_stride_; } } inline UnpoolMicrokernelTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void Test(xnn_x32_unpool_ukernel_fn unpool) const { std::random_device random_device; auto rng = std::mt19937(random_device()); auto x_rng = std::bind(std::uniform_int_distribution<uint32_t>(), std::ref(rng)); auto i_rng = std::bind(std::uniform_int_distribution<uint32_t>(0, uint32_t(p() - 1)), std::ref(rng)); std::vector<uint32_t> x(c()); std::vector<uint32_t> i(c()); std::vector<uint32_t> y((p() - 1) * y_stride() + c()); std::vector<uint32_t*> indirect_y(p()); std::vector<uint32_t> y_ref((p() - 1) * y_stride() + c()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(x.begin(), x.end(), std::ref(x_rng)); std::generate(i.begin(), i.end(), std::ref(i_rng)); std::generate(y.begin(), y.end(), std::ref(x_rng)); for (size_t i = 0; i < indirect_y.size(); i++) { indirect_y[i] = y.data() + i * y_stride(); } std::shuffle(indirect_y.begin(), indirect_y.end(), rng); // Compute reference output. std::fill(y_ref.begin(), y_ref.end(), f()); for (size_t k = 0; k < c(); k++) { const uint32_t idx = i[k]; (indirect_y[idx] - y.data() + y_ref.data())[k] = x[k]; } // Call optimized micro-kernel. unpool(p(), c(), f(), x.data(), i.data(), indirect_y.data()); // Verify results. for (size_t i = 0; i < p(); i++) { for (size_t k = 0; k < c(); k++) { EXPECT_EQ(y_ref[i * y_stride() + k], y[i * y_stride() + k]) << "at pixel " << i << ", channel " << k << ", p = " << p() << ", c = " << c(); } } } } private: size_t p_{1}; size_t c_{1}; uint32_t f_{0}; size_t y_stride_{0}; size_t iterations_{15}; };
3,162
24.304
105
h
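A sketch of how the unpool tester would typically be invoked; the kernel symbol is an illustrative placeholder for an xnn_x32_unpool_ukernel_fn implementation:

TEST(X32_UNPOOL, window_9_channels_17) {
  UnpoolMicrokernelTester()
    .p(9)      // e.g. a 3x3 pooling window: nine candidate positions
    .c(17)     // channel count
    .f(0)      // fill value written to non-selected positions
    .Test(xnn_x32_unpool_ukernel__scalar);  // placeholder symbol
}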
XNNPACK
XNNPACK-master/test/vadd-microkernel-tester.h
// Copyright (c) Facebook, Inc. and its affiliates. // All rights reserved. // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cstddef> #include <cstdlib> #include <functional> #include <limits> #include <random> #include <vector> #include <xnnpack.h> #include <xnnpack/microfnptr.h> #include <xnnpack/microparams-init.h> #include <xnnpack/requantization.h> class VAddMicrokernelTester { public: inline VAddMicrokernelTester& batch_size(size_t batch_size) { assert(batch_size != 0); this->batch_size_ = batch_size; return *this; } inline size_t batch_size() const { return this->batch_size_; } inline VAddMicrokernelTester& inplace_a(bool inplace_a) { this->inplace_a_ = inplace_a; return *this; } inline bool inplace_a() const { return this->inplace_a_; } inline VAddMicrokernelTester& inplace_b(bool inplace_b) { this->inplace_b_ = inplace_b; return *this; } inline bool inplace_b() const { return this->inplace_b_; } inline VAddMicrokernelTester& a_scale(float a_scale) { assert(a_scale > 0.0f); assert(std::isnormal(a_scale)); this->a_scale_ = a_scale; return *this; } inline float a_scale() const { return this->a_scale_; } inline VAddMicrokernelTester& a_zero_point(uint8_t a_zero_point) { this->a_zero_point_ = a_zero_point; return *this; } inline uint8_t a_zero_point() const { return this->a_zero_point_; } inline VAddMicrokernelTester& b_scale(float b_scale) { assert(b_scale > 0.0f); assert(std::isnormal(b_scale)); this->b_scale_ = b_scale; return *this; } inline float b_scale() const { return this->b_scale_; } inline VAddMicrokernelTester& b_zero_point(uint8_t b_zero_point) { this->b_zero_point_ = b_zero_point; return *this; } inline uint8_t b_zero_point() const { return this->b_zero_point_; } inline VAddMicrokernelTester& y_scale(float y_scale) { assert(y_scale > 0.0f); assert(std::isnormal(y_scale)); this->y_scale_ = y_scale; return *this; } inline float y_scale() const { return this->y_scale_; } inline VAddMicrokernelTester& y_zero_point(uint8_t y_zero_point) { this->y_zero_point_ = y_zero_point; return *this; } inline uint8_t y_zero_point() const { return this->y_zero_point_; } inline VAddMicrokernelTester& qmin(uint8_t qmin) { this->qmin_ = qmin; return *this; } inline uint8_t qmin() const { return this->qmin_; } inline VAddMicrokernelTester& qmax(uint8_t qmax) { this->qmax_ = qmax; return *this; } inline uint8_t qmax() const { return this->qmax_; } inline VAddMicrokernelTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void Test(xnn_qu8_vadd_minmax_ukernel_fn vadd_minmax, xnn_init_qu8_add_minmax_params_fn init_params) const { std::random_device random_device; auto rng = std::mt19937(random_device()); auto u8rng = std::bind(std::uniform_int_distribution<uint32_t>(0, std::numeric_limits<uint8_t>::max()), rng); std::vector<uint8_t> a(batch_size() + XNN_EXTRA_BYTES / sizeof(uint8_t)); std::vector<uint8_t> b(batch_size() + XNN_EXTRA_BYTES / sizeof(uint8_t)); std::vector<uint8_t> y(batch_size() + (inplace_a() || inplace_b() ? 
XNN_EXTRA_BYTES / sizeof(uint8_t) : 0)); std::vector<float> y_fp(batch_size()); std::vector<uint8_t> y_ref(batch_size()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(a.begin(), a.end(), std::ref(u8rng)); std::generate(b.begin(), b.end(), std::ref(u8rng)); if (inplace_a() || inplace_b()) { std::generate(y.begin(), y.end(), std::ref(u8rng)); } else { std::fill(y.begin(), y.end(), 0xA5); } const uint8_t* a_data = inplace_a() ? y.data() : a.data(); const uint8_t* b_data = inplace_b() ? y.data() : b.data(); // Prepare parameters. xnn_qu8_add_minmax_params quantization_params; init_params( &quantization_params, a_zero_point(), b_zero_point(), y_zero_point(), a_scale() / y_scale(), b_scale() / y_scale(), qmin(), qmax()); xnn_qu8_add_minmax_params scalar_quantization_params; xnn_init_qu8_add_minmax_scalar_params( &scalar_quantization_params, a_zero_point(), b_zero_point(), y_zero_point(), a_scale() / y_scale(), b_scale() / y_scale(), qmin(), qmax()); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { y_fp[i] = float(y_zero_point()) + float(int32_t(a_data[i]) - int32_t(a_zero_point())) * (a_scale() / y_scale()) + float(int32_t(b_data[i]) - int32_t(b_zero_point())) * (b_scale() / y_scale()); y_fp[i] = std::min<float>(y_fp[i], float(qmax())); y_fp[i] = std::max<float>(y_fp[i], float(qmin())); y_ref[i] = xnn_qu8_quantize_add(a_data[i], b_data[i], scalar_quantization_params); } // Call optimized micro-kernel. vadd_minmax(batch_size(), a_data, b_data, y.data(), &quantization_params); // Verify results. for (size_t i = 0; i < batch_size(); i++) { EXPECT_LE(uint32_t(y[i]), uint32_t(qmax())) << "at element " << i << " / " << batch_size(); EXPECT_GE(uint32_t(y[i]), uint32_t(qmin())) << "at element " << i << " / " << batch_size(); EXPECT_NEAR(float(int32_t(y[i])), y_fp[i], 0.6f) << "at element " << i << " / " << batch_size(); EXPECT_EQ(uint32_t(y_ref[i]), uint32_t(y[i])) << "at element " << i << " / " << batch_size(); } } } void Test(xnn_qs8_vadd_minmax_ukernel_fn vadd_minmax, xnn_init_qs8_add_minmax_params_fn init_params) const { std::random_device random_device; auto rng = std::mt19937(random_device()); auto i8rng = std::bind( std::uniform_int_distribution<int32_t>(std::numeric_limits<int8_t>::min(), std::numeric_limits<int8_t>::max()), rng); std::vector<int8_t> a(batch_size() + XNN_EXTRA_BYTES / sizeof(int8_t)); std::vector<int8_t> b(batch_size() + XNN_EXTRA_BYTES / sizeof(int8_t)); std::vector<int8_t> y(batch_size() + (inplace_a() || inplace_b() ? XNN_EXTRA_BYTES / sizeof(int8_t) : 0)); std::vector<float> y_fp(batch_size()); std::vector<int8_t> y_ref(batch_size()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(a.begin(), a.end(), std::ref(i8rng)); std::generate(b.begin(), b.end(), std::ref(i8rng)); if (inplace_a() || inplace_b()) { std::generate(y.begin(), y.end(), std::ref(i8rng)); } else { std::fill(y.begin(), y.end(), 0xA5); } const int8_t* a_data = inplace_a() ? y.data() : a.data(); const int8_t* b_data = inplace_b() ? y.data() : b.data(); // Prepare parameters. 
xnn_qs8_add_minmax_params quantization_params; init_params( &quantization_params, int8_t(a_zero_point() - 0x80), int8_t(b_zero_point() - 0x80), int8_t(y_zero_point() - 0x80), a_scale() / y_scale(), b_scale() / y_scale(), int8_t(qmin() - 0x80), int8_t(qmax() - 0x80)); xnn_qs8_add_minmax_params scalar_quantization_params; xnn_init_qs8_add_minmax_scalar_params( &scalar_quantization_params, int8_t(a_zero_point() - 0x80), int8_t(b_zero_point() - 0x80), int8_t(y_zero_point() - 0x80), a_scale() / y_scale(), b_scale() / y_scale(), int8_t(qmin() - 0x80), int8_t(qmax() - 0x80)); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { y_fp[i] = float(int32_t(y_zero_point() - 0x80)) + float(int32_t(a_data[i]) - int32_t(a_zero_point() - 0x80)) * (a_scale() / y_scale()) + float(int32_t(b_data[i]) - int32_t(b_zero_point() - 0x80)) * (b_scale() / y_scale()); y_fp[i] = std::min<float>(y_fp[i], float(int32_t(qmax() - 0x80))); y_fp[i] = std::max<float>(y_fp[i], float(int32_t(qmin() - 0x80))); y_ref[i] = xnn_qs8_quantize_add(a_data[i], b_data[i], scalar_quantization_params); } // Call optimized micro-kernel. vadd_minmax(batch_size(), a_data, b_data, y.data(), &quantization_params); // Verify results. for (size_t i = 0; i < batch_size(); i++) { EXPECT_LE(int32_t(y[i]), int32_t(qmax() - 0x80)) << "at element " << i << " / " << batch_size(); EXPECT_GE(int32_t(y[i]), int32_t(qmin() - 0x80)) << "at element " << i << " / " << batch_size(); EXPECT_EQ(int32_t(y_ref[i]), int32_t(y[i])) << "at element " << i << " / " << batch_size(); EXPECT_NEAR(float(int32_t(y[i])), y_fp[i], 0.6f) << "at element " << i << " / " << batch_size(); } } } private: size_t batch_size_{1}; bool inplace_a_{false}; bool inplace_b_{false}; float a_scale_{0.75f}; float b_scale_{1.25f}; float y_scale_{0.96875f}; uint8_t a_zero_point_{121}; uint8_t b_zero_point_{127}; uint8_t y_zero_point_{133}; uint8_t qmin_{0}; uint8_t qmax_{255}; size_t iterations_{15}; };
9,407
32.126761
123
h
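A usage sketch for the QU8 path of the tester above. The init function is the scalar-parameter initializer the tester itself already calls for its reference path; the micro-kernel symbol is an illustrative placeholder:

TEST(QU8_VADD_MINMAX, inplace_with_clamping) {
  VAddMicrokernelTester()
    .batch_size(37)
    .inplace_a(true)     // a and y share a buffer
    .a_scale(0.5f)
    .b_scale(2.0f)
    .qmin(16)            // exercise the lower clamp
    .qmax(240)           // exercise the upper clamp
    .Test(xnn_qu8_vadd_minmax_ukernel__scalar_x1,  // placeholder symbol
          xnn_init_qu8_add_minmax_scalar_params);
}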
XNNPACK
XNNPACK-master/test/vaddc-microkernel-tester.h
// Copyright (c) Facebook, Inc. and its affiliates. // All rights reserved. // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cstddef> #include <cstdlib> #include <functional> #include <limits> #include <random> #include <vector> #include <xnnpack.h> #include <xnnpack/microfnptr.h> #include <xnnpack/microparams-init.h> #include <xnnpack/requantization.h> class VAddCMicrokernelTester { public: inline VAddCMicrokernelTester& batch_size(size_t batch_size) { assert(batch_size != 0); this->batch_size_ = batch_size; return *this; } inline size_t batch_size() const { return this->batch_size_; } inline VAddCMicrokernelTester& inplace(bool inplace) { this->inplace_ = inplace; return *this; } inline bool inplace() const { return this->inplace_; } inline VAddCMicrokernelTester& a_scale(float a_scale) { assert(a_scale > 0.0f); assert(std::isnormal(a_scale)); this->a_scale_ = a_scale; return *this; } inline float a_scale() const { return this->a_scale_; } inline VAddCMicrokernelTester& a_zero_point(uint8_t a_zero_point) { this->a_zero_point_ = a_zero_point; return *this; } inline uint8_t a_zero_point() const { return this->a_zero_point_; } inline VAddCMicrokernelTester& b_scale(float b_scale) { assert(b_scale > 0.0f); assert(std::isnormal(b_scale)); this->b_scale_ = b_scale; return *this; } inline float b_scale() const { return this->b_scale_; } inline VAddCMicrokernelTester& b_zero_point(uint8_t b_zero_point) { this->b_zero_point_ = b_zero_point; return *this; } inline uint8_t b_zero_point() const { return this->b_zero_point_; } inline VAddCMicrokernelTester& y_scale(float y_scale) { assert(y_scale > 0.0f); assert(std::isnormal(y_scale)); this->y_scale_ = y_scale; return *this; } inline float y_scale() const { return this->y_scale_; } inline VAddCMicrokernelTester& y_zero_point(uint8_t y_zero_point) { this->y_zero_point_ = y_zero_point; return *this; } inline uint8_t y_zero_point() const { return this->y_zero_point_; } inline VAddCMicrokernelTester& qmin(uint8_t qmin) { this->qmin_ = qmin; return *this; } inline uint8_t qmin() const { return this->qmin_; } inline VAddCMicrokernelTester& qmax(uint8_t qmax) { this->qmax_ = qmax; return *this; } inline uint8_t qmax() const { return this->qmax_; } inline VAddCMicrokernelTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void Test(xnn_qu8_vadd_minmax_ukernel_fn vaddc_minmax, xnn_init_qu8_add_minmax_params_fn init_params) const { std::random_device random_device; auto rng = std::mt19937(random_device()); auto u8rng = std::bind(std::uniform_int_distribution<uint32_t>(0, std::numeric_limits<uint8_t>::max()), rng); std::vector<uint8_t> a(batch_size() + XNN_EXTRA_BYTES / sizeof(uint8_t)); std::vector<uint8_t> y(batch_size() + (inplace() ? XNN_EXTRA_BYTES / sizeof(uint8_t) : 0)); std::vector<float> y_fp(batch_size()); std::vector<uint8_t> y_ref(batch_size()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(a.begin(), a.end(), std::ref(u8rng)); if (inplace()) { std::generate(y.begin(), y.end(), std::ref(u8rng)); } else { std::fill(y.begin(), y.end(), 0xA5); } const uint8_t* a_data = inplace() ? y.data() : a.data(); const uint8_t b = u8rng(); // Prepare parameters. 
xnn_qu8_add_minmax_params quantization_params; init_params( &quantization_params, a_zero_point(), b_zero_point(), y_zero_point(), a_scale() / y_scale(), b_scale() / y_scale(), qmin(), qmax()); xnn_qu8_add_minmax_params scalar_quantization_params; xnn_init_qu8_add_minmax_scalar_params( &scalar_quantization_params, a_zero_point(), b_zero_point(), y_zero_point(), a_scale() / y_scale(), b_scale() / y_scale(), qmin(), qmax()); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { y_fp[i] = float(y_zero_point()) + float(int32_t(a_data[i]) - int32_t(a_zero_point())) * (a_scale() / y_scale()) + float(int32_t(b) - int32_t(b_zero_point())) * (b_scale() / y_scale()); y_fp[i] = std::min<float>(y_fp[i], float(qmax())); y_fp[i] = std::max<float>(y_fp[i], float(qmin())); y_ref[i] = xnn_qu8_quantize_add(a_data[i], b, scalar_quantization_params); } // Call optimized micro-kernel. vaddc_minmax(batch_size(), a_data, &b, y.data(), &quantization_params); // Verify results. for (size_t i = 0; i < batch_size(); i++) { EXPECT_LE(uint32_t(y[i]), uint32_t(qmax())) << "at element " << i << " / " << batch_size(); EXPECT_GE(uint32_t(y[i]), uint32_t(qmin())) << "at element " << i << " / " << batch_size(); EXPECT_NEAR(float(int32_t(y[i])), y_fp[i], 0.6f) << "at element " << i << " / " << batch_size(); EXPECT_EQ(uint32_t(y_ref[i]), uint32_t(y[i])) << "at element " << i << " / " << batch_size(); } } } void Test(xnn_qs8_vadd_minmax_ukernel_fn vaddc_minmax, xnn_init_qs8_add_minmax_params_fn init_params) const { std::random_device random_device; auto rng = std::mt19937(random_device()); auto i8rng = std::bind( std::uniform_int_distribution<int32_t>(std::numeric_limits<int8_t>::min(), std::numeric_limits<int8_t>::max()), rng); std::vector<int8_t> a(batch_size() + XNN_EXTRA_BYTES / sizeof(int8_t)); std::vector<int8_t> y(batch_size() + (inplace() ? XNN_EXTRA_BYTES / sizeof(int8_t) : 0)); std::vector<float> y_fp(batch_size()); std::vector<int8_t> y_ref(batch_size()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(a.begin(), a.end(), std::ref(i8rng)); if (inplace()) { std::generate(y.begin(), y.end(), std::ref(i8rng)); } else { std::fill(y.begin(), y.end(), 0xA5); } const int8_t* a_data = inplace() ? y.data() : a.data(); const int8_t b = i8rng(); // Prepare parameters. xnn_qs8_add_minmax_params quantization_params; init_params( &quantization_params, int8_t(a_zero_point() - 0x80), int8_t(b_zero_point() - 0x80), int8_t(y_zero_point() - 0x80), a_scale() / y_scale(), b_scale() / y_scale(), int8_t(qmin() - 0x80), int8_t(qmax() - 0x80)); xnn_qs8_add_minmax_params scalar_quantization_params; xnn_init_qs8_add_minmax_scalar_params( &scalar_quantization_params, int8_t(a_zero_point() - 0x80), int8_t(b_zero_point() - 0x80), int8_t(y_zero_point() - 0x80), a_scale() / y_scale(), b_scale() / y_scale(), int8_t(qmin() - 0x80), int8_t(qmax() - 0x80)); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { y_fp[i] = float(int32_t(y_zero_point() - 0x80)) + float(int32_t(a_data[i]) - int32_t(a_zero_point() - 0x80)) * (a_scale() / y_scale()) + float(int32_t(b) - int32_t(b_zero_point() - 0x80)) * (b_scale() / y_scale()); y_fp[i] = std::min<float>(y_fp[i], float(int32_t(qmax() - 0x80))); y_fp[i] = std::max<float>(y_fp[i], float(int32_t(qmin() - 0x80))); y_ref[i] = xnn_qs8_quantize_add(a_data[i], b, scalar_quantization_params); } // Call optimized micro-kernel. vaddc_minmax(batch_size(), a_data, &b, y.data(), &quantization_params); // Verify results. 
for (size_t i = 0; i < batch_size(); i++) { EXPECT_LE(int32_t(y[i]), int32_t(qmax() - 0x80)) << "at element " << i << " / " << batch_size(); EXPECT_GE(int32_t(y[i]), int32_t(qmin() - 0x80)) << "at element " << i << " / " << batch_size(); EXPECT_EQ(int32_t(y_ref[i]), int32_t(y[i])) << "at element " << i << " / " << batch_size(); EXPECT_NEAR(float(int32_t(y[i])), y_fp[i], 0.6f) << "at element " << i << " / " << batch_size(); } } } private: size_t batch_size_{1}; bool inplace_{false}; float a_scale_{0.75f}; float b_scale_{1.25f}; float y_scale_{0.96875f}; uint8_t a_zero_point_{121}; uint8_t b_zero_point_{127}; uint8_t y_zero_point_{133}; uint8_t qmin_{0}; uint8_t qmax_{255}; size_t iterations_{15}; };
8,752
31.418519
123
h
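The constant-addend variant is exercised the same way. Note that the tester's Test() takes the xnn_qu8_vadd_minmax_ukernel_fn pointer type, so a vaddc kernel (the symbol below is a placeholder) is passed through the same signature:

TEST(QU8_VADDC_MINMAX, broadcast_addend) {
  VAddCMicrokernelTester()
    .batch_size(37)
    .y_zero_point(100)
    .Test(xnn_qu8_vaddc_minmax_ukernel__scalar_x1,  // placeholder symbol
          xnn_init_qu8_add_minmax_scalar_params);
}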
XNNPACK
XNNPACK-master/test/vbinary-microkernel-tester.h
// Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #pragma once #include <gtest/gtest.h> #include <algorithm> #include <cassert> #include <cstddef> #include <cstdlib> #include <functional> #include <random> #include <vector> #include <fp16/fp16.h> #include <xnnpack.h> #include <xnnpack/microfnptr.h> #include <xnnpack/microparams-init.h> class VBinaryMicrokernelTester { public: enum class OpType { Add, Div, Max, Min, Mul, Sub, SqrDiff, }; inline VBinaryMicrokernelTester& batch_size(size_t batch_size) { assert(batch_size != 0); this->batch_size_ = batch_size; return *this; } inline size_t batch_size() const { return this->batch_size_; } inline VBinaryMicrokernelTester& inplace_a(bool inplace_a) { this->inplace_a_ = inplace_a; return *this; } inline bool inplace_a() const { return this->inplace_a_; } inline VBinaryMicrokernelTester& inplace_b(bool inplace_b) { this->inplace_b_ = inplace_b; return *this; } inline bool inplace_b() const { return this->inplace_b_; } inline VBinaryMicrokernelTester& qmin(uint8_t qmin) { this->qmin_ = qmin; return *this; } inline uint8_t qmin() const { return this->qmin_; } inline VBinaryMicrokernelTester& qmax(uint8_t qmax) { this->qmax_ = qmax; return *this; } inline uint8_t qmax() const { return this->qmax_; } inline VBinaryMicrokernelTester& iterations(size_t iterations) { this->iterations_ = iterations; return *this; } inline size_t iterations() const { return this->iterations_; } void Test(xnn_f16_vbinary_ukernel_fn vbinary, OpType op_type) const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(0.01f, 1.0f); std::vector<uint16_t> a(batch_size() + XNN_EXTRA_BYTES / sizeof(uint16_t)); std::vector<uint16_t> b(batch_size() + XNN_EXTRA_BYTES / sizeof(uint16_t)); std::vector<uint16_t> y(batch_size() + (inplace_a() || inplace_b() ? XNN_EXTRA_BYTES / sizeof(uint16_t) : 0)); std::vector<float> y_ref(batch_size()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(a.begin(), a.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); }); std::generate(b.begin(), b.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); }); if (inplace_a() || inplace_b()) { std::generate(y.begin(), y.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); }); } else { std::fill(y.begin(), y.end(), UINT16_C(0x7E00) /* NaN */); } const uint16_t* a_data = inplace_a() ? y.data() : a.data(); const uint16_t* b_data = inplace_b() ? y.data() : b.data(); // Compute reference results. 
for (size_t i = 0; i < batch_size(); i++) { switch (op_type) { case OpType::Add: y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) + fp16_ieee_to_fp32_value(b_data[i]); break; case OpType::Div: y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) / fp16_ieee_to_fp32_value(b_data[i]); break; case OpType::Max: y_ref[i] = std::max<float>(fp16_ieee_to_fp32_value(a_data[i]), fp16_ieee_to_fp32_value(b_data[i])); break; case OpType::Min: y_ref[i] = std::min<float>(fp16_ieee_to_fp32_value(a_data[i]), fp16_ieee_to_fp32_value(b_data[i])); break; case OpType::Mul: y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) * fp16_ieee_to_fp32_value(b_data[i]); break; case OpType::SqrDiff: { const float diff = fp16_ieee_to_fp32_value(a_data[i]) - fp16_ieee_to_fp32_value(b_data[i]); y_ref[i] = diff * diff; break; } case OpType::Sub: y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) - fp16_ieee_to_fp32_value(b_data[i]); break; } } // Call optimized micro-kernel. vbinary(batch_size() * sizeof(uint16_t), a_data, b_data, y.data(), nullptr); // Verify results. for (size_t i = 0; i < batch_size(); i++) { EXPECT_NEAR(fp16_ieee_to_fp32_value(y[i]), y_ref[i], std::max(1.0e-4f, std::abs(y_ref[i]) * 1.0e-2f)) << "at " << i << " / " << batch_size(); } } } void Test(xnn_f16_vbinary_minmax_ukernel_fn vbinary_minmax, OpType op_type, xnn_init_f16_minmax_params_fn init_params) const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(0.01f, 1.0f); std::vector<uint16_t> a(batch_size() + XNN_EXTRA_BYTES / sizeof(uint16_t)); std::vector<uint16_t> b(batch_size() + XNN_EXTRA_BYTES / sizeof(uint16_t)); std::vector<uint16_t> y(batch_size() + (inplace_a() || inplace_b() ? XNN_EXTRA_BYTES / sizeof(uint16_t) : 0)); std::vector<float> y_ref(batch_size()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(a.begin(), a.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); }); std::generate(b.begin(), b.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); }); if (inplace_a() || inplace_b()) { std::generate(y.begin(), y.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); }); } else { std::fill(y.begin(), y.end(), UINT16_C(0x7E00) /* NaN */); } const uint16_t* a_data = inplace_a() ? y.data() : a.data(); const uint16_t* b_data = inplace_b() ? y.data() : b.data(); // Compute reference results. 
for (size_t i = 0; i < batch_size(); i++) { switch (op_type) { case OpType::Add: y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) + fp16_ieee_to_fp32_value(b_data[i]); break; case OpType::Div: y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) / fp16_ieee_to_fp32_value(b_data[i]); break; case OpType::Max: y_ref[i] = std::max<float>(fp16_ieee_to_fp32_value(a_data[i]), fp16_ieee_to_fp32_value(b_data[i])); break; case OpType::Min: y_ref[i] = std::min<float>(fp16_ieee_to_fp32_value(a_data[i]), fp16_ieee_to_fp32_value(b_data[i])); break; case OpType::Mul: y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) * fp16_ieee_to_fp32_value(b_data[i]); break; case OpType::SqrDiff: { const float diff = fp16_ieee_to_fp32_value(a_data[i]) - fp16_ieee_to_fp32_value(b_data[i]); y_ref[i] = diff * diff; break; } case OpType::Sub: y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) - fp16_ieee_to_fp32_value(b_data[i]); break; } } const float accumulated_min = *std::min_element(y_ref.cbegin(), y_ref.cend()); const float accumulated_max = *std::max_element(y_ref.cbegin(), y_ref.cend()); const float accumulated_range = accumulated_max - accumulated_min; const float y_max = fp16_ieee_to_fp32_value(fp16_ieee_from_fp32_value(accumulated_range > 0.0f ? (accumulated_max - accumulated_range / 255.0f * float(255 - qmax())) : +std::numeric_limits<float>::infinity())); const float y_min = fp16_ieee_to_fp32_value(fp16_ieee_from_fp32_value(accumulated_range > 0.0f ? (accumulated_min + accumulated_range / 255.0f * float(qmin())) : -std::numeric_limits<float>::infinity())); for (size_t i = 0; i < batch_size(); i++) { y_ref[i] = std::max<float>(std::min<float>(y_ref[i], y_max), y_min); } // Prepare parameters. xnn_f16_minmax_params params; init_params(&params, fp16_ieee_from_fp32_value(y_min), fp16_ieee_from_fp32_value(y_max)); // Call optimized micro-kernel. vbinary_minmax(batch_size() * sizeof(uint16_t), a_data, b_data, y.data(), &params); // Verify results. for (size_t i = 0; i < batch_size(); i++) { EXPECT_NEAR(fp16_ieee_to_fp32_value(y[i]), y_ref[i], std::max(1.0e-4f, std::abs(y_ref[i]) * 1.0e-2f)) << "at " << i << " / " << batch_size(); } } } void Test(xnn_f32_vbinary_ukernel_fn vbinary, OpType op_type, xnn_init_f32_default_params_fn init_params = nullptr) const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(0.01f, 1.0f); std::vector<float> a(batch_size() + XNN_EXTRA_BYTES / sizeof(float)); std::vector<float> b(batch_size() + XNN_EXTRA_BYTES / sizeof(float)); std::vector<float> y(batch_size() + (inplace_a() || inplace_b() ? XNN_EXTRA_BYTES / sizeof(float) : 0)); std::vector<float> y_ref(batch_size()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(a.begin(), a.end(), [&]() { return f32dist(rng); }); std::generate(b.begin(), b.end(), [&]() { return f32dist(rng); }); if (inplace_a() || inplace_b()) { std::generate(y.begin(), y.end(), [&]() { return f32dist(rng); }); } else { std::fill(y.begin(), y.end(), nanf("")); } const float* a_data = inplace_a() ? y.data() : a.data(); const float* b_data = inplace_b() ? y.data() : b.data(); // Compute reference results. 
for (size_t i = 0; i < batch_size(); i++) { switch (op_type) { case OpType::Add: y_ref[i] = a_data[i] + b_data[i]; break; case OpType::Div: y_ref[i] = a_data[i] / b_data[i]; break; case OpType::Max: y_ref[i] = std::max<float>(a_data[i], b_data[i]); break; case OpType::Min: y_ref[i] = std::min<float>(a_data[i], b_data[i]); break; case OpType::Mul: y_ref[i] = a_data[i] * b_data[i]; break; case OpType::SqrDiff: { const float diff = a_data[i] - b_data[i]; y_ref[i] = diff * diff; break; } case OpType::Sub: y_ref[i] = a_data[i] - b_data[i]; break; } } // Prepare parameters. xnn_f32_default_params params; if (init_params != nullptr) { init_params(&params); } // Call optimized micro-kernel. vbinary(batch_size() * sizeof(float), a_data, b_data, y.data(), init_params != nullptr ? &params : nullptr); // Verify results. for (size_t i = 0; i < batch_size(); i++) { EXPECT_NEAR(y[i], y_ref[i], std::abs(y_ref[i]) * 1.0e-6f) << "at " << i << " / " << batch_size(); } } } void Test(xnn_f32_vbinary_relu_ukernel_fn vbinary_relu, OpType op_type) const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> lhs_f32dist(-1.0f, 1.0f); // For denominator, avoid 0 so we don't get Infinity as the result. std::uniform_real_distribution<float> rhs_f32dist(0.1, 1.0f); std::vector<float> a(batch_size() + XNN_EXTRA_BYTES / sizeof(float)); std::vector<float> b(batch_size() + XNN_EXTRA_BYTES / sizeof(float)); std::vector<float> y(batch_size() + (inplace_a() || inplace_b() ? XNN_EXTRA_BYTES / sizeof(float) : 0)); std::vector<float> y_ref(batch_size()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(a.begin(), a.end(), [&]() { return lhs_f32dist(rng); }); std::generate(b.begin(), b.end(), [&]() { return rhs_f32dist(rng); }); if (inplace_a() || inplace_b()) { std::generate(y.begin(), y.end(), [&]() { return lhs_f32dist(rng); }); } else { std::fill(y.begin(), y.end(), nanf("")); } const float* a_data = inplace_a() ? y.data() : a.data(); const float* b_data = inplace_b() ? y.data() : b.data(); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { switch (op_type) { case OpType::Add: y_ref[i] = a_data[i] + b_data[i]; break; case OpType::Div: y_ref[i] = a_data[i] / b_data[i]; break; case OpType::Max: y_ref[i] = std::max<float>(a_data[i], b_data[i]); break; case OpType::Min: y_ref[i] = std::min<float>(a_data[i], b_data[i]); break; case OpType::Mul: y_ref[i] = a_data[i] * b_data[i]; break; case OpType::SqrDiff: { const float diff = a_data[i] - b_data[i]; y_ref[i] = diff * diff; break; } case OpType::Sub: y_ref[i] = a_data[i] - b_data[i]; break; } } for (size_t i = 0; i < batch_size(); i++) { y_ref[i] = std::max(y_ref[i], 0.0f); } // Call optimized micro-kernel. vbinary_relu(batch_size() * sizeof(float), a_data, b_data, y.data(), nullptr); // Verify results. 
for (size_t i = 0; i < batch_size(); i++) { EXPECT_GE(y[i], 0.0f) << "at " << i << " / " << batch_size(); EXPECT_NEAR(y[i], y_ref[i], std::abs(y_ref[i]) * 1.0e-6f) << "at " << i << " / " << batch_size(); } } } void Test(xnn_f32_vbinary_minmax_ukernel_fn vbinary_minmax, OpType op_type, xnn_init_f32_minmax_params_fn init_params) const { std::random_device random_device; auto rng = std::mt19937(random_device()); std::uniform_real_distribution<float> f32dist(0.01f, 1.0f); std::vector<float> a(batch_size() + XNN_EXTRA_BYTES / sizeof(float)); std::vector<float> b(batch_size() + XNN_EXTRA_BYTES / sizeof(float)); std::vector<float> y(batch_size() + (inplace_a() || inplace_b() ? XNN_EXTRA_BYTES / sizeof(float) : 0)); std::vector<float> y_ref(batch_size()); for (size_t iteration = 0; iteration < iterations(); iteration++) { std::generate(a.begin(), a.end(), [&]() { return f32dist(rng); }); std::generate(b.begin(), b.end(), [&]() { return f32dist(rng); }); if (inplace_a() || inplace_b()) { std::generate(y.begin(), y.end(), [&]() { return f32dist(rng); }); } else { std::fill(y.begin(), y.end(), nanf("")); } const float* a_data = inplace_a() ? y.data() : a.data(); const float* b_data = inplace_b() ? y.data() : b.data(); // Compute reference results. for (size_t i = 0; i < batch_size(); i++) { switch (op_type) { case OpType::Add: y_ref[i] = a_data[i] + b_data[i]; break; case OpType::Div: y_ref[i] = a_data[i] / b_data[i]; break; case OpType::Max: y_ref[i] = std::max<float>(a_data[i], b_data[i]); break; case OpType::Min: y_ref[i] = std::min<float>(a_data[i], b_data[i]); break; case OpType::Mul: y_ref[i] = a_data[i] * b_data[i]; break; case OpType::SqrDiff: { const float diff = a_data[i] - b_data[i]; y_ref[i] = diff * diff; break; } case OpType::Sub: y_ref[i] = a_data[i] - b_data[i]; break; } } const float accumulated_min = *std::min_element(y_ref.cbegin(), y_ref.cend()); const float accumulated_max = *std::max_element(y_ref.cbegin(), y_ref.cend()); const float accumulated_range = accumulated_max - accumulated_min; const float y_max = accumulated_range > 0.0f ? (accumulated_max - accumulated_range / 255.0f * float(255 - qmax())) : +std::numeric_limits<float>::infinity(); const float y_min = accumulated_range > 0.0f ? (accumulated_min + accumulated_range / 255.0f * float(qmin())) : -std::numeric_limits<float>::infinity(); for (size_t i = 0; i < batch_size(); i++) { y_ref[i] = std::max<float>(std::min<float>(y_ref[i], y_max), y_min); } // Prepare parameters. xnn_f32_minmax_params params; init_params(&params, y_min, y_max); // Call optimized micro-kernel. vbinary_minmax(batch_size() * sizeof(float), a_data, b_data, y.data(), &params); // Verify results. for (size_t i = 0; i < batch_size(); i++) { EXPECT_NEAR(y[i], y_ref[i], std::abs(y_ref[i]) * 1.0e-6f) << "at " << i << " / " << batch_size(); } } } private: size_t batch_size_{1}; bool inplace_a_{false}; bool inplace_b_{false}; uint8_t qmin_{0}; uint8_t qmax_{255}; size_t iterations_{15}; };
16,834
36.245575
128
h
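A sketch for the two-input binary tester above; OpType selects the reference computation, and the kernel symbol is an illustrative placeholder for any xnn_f32_vbinary_ukernel_fn built for the target:

TEST(F32_VMUL, inplace_b) {
  VBinaryMicrokernelTester()
    .batch_size(23)
    .inplace_b(true)    // b and y share a buffer
    .Test(xnn_f32_vmul_ukernel__scalar_x1,  // placeholder symbol
          VBinaryMicrokernelTester::OpType::Mul);
}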
XNNPACK
XNNPACK-master/test/vbinaryc-microkernel-tester.h
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <gtest/gtest.h>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <limits>
#include <random>
#include <vector>

#include <fp16/fp16.h>

#include <xnnpack.h>
#include <xnnpack/microfnptr.h>
#include <xnnpack/microparams-init.h>


class VBinaryCMicrokernelTester {
 public:
  enum class OpType {
    AddC,
    DivC,
    RDivC,
    MaxC,
    MinC,
    MulC,
    SqrDiffC,
    SubC,
    RSubC,
  };

  inline VBinaryCMicrokernelTester& batch_size(size_t batch_size) {
    assert(batch_size != 0);
    this->batch_size_ = batch_size;
    return *this;
  }

  inline size_t batch_size() const {
    return this->batch_size_;
  }

  inline VBinaryCMicrokernelTester& inplace(bool inplace) {
    this->inplace_ = inplace;
    return *this;
  }

  inline bool inplace() const {
    return this->inplace_;
  }

  inline VBinaryCMicrokernelTester& qmin(uint8_t qmin) {
    this->qmin_ = qmin;
    return *this;
  }

  inline uint8_t qmin() const {
    return this->qmin_;
  }

  inline VBinaryCMicrokernelTester& qmax(uint8_t qmax) {
    this->qmax_ = qmax;
    return *this;
  }

  inline uint8_t qmax() const {
    return this->qmax_;
  }

  inline VBinaryCMicrokernelTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

  void Test(xnn_f16_vbinary_ukernel_fn vbinaryc, OpType op_type) const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    std::uniform_real_distribution<float> f32dist(0.01f, 1.0f);

    std::vector<uint16_t> a(batch_size() + XNN_EXTRA_BYTES / sizeof(uint16_t));
    const uint16_t b = fp16_ieee_from_fp32_value(f32dist(rng));
    std::vector<uint16_t> y(batch_size() + (inplace() ? XNN_EXTRA_BYTES / sizeof(uint16_t) : 0));
    std::vector<float> y_ref(batch_size());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(a.begin(), a.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); });
      if (inplace()) {
        std::generate(y.begin(), y.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); });
      } else {
        std::fill(y.begin(), y.end(), UINT16_C(0x7E00) /* NaN */);
      }
      const uint16_t* a_data = inplace() ? y.data() : a.data();

      // Compute reference results.
      for (size_t i = 0; i < batch_size(); i++) {
        switch (op_type) {
          case OpType::AddC:
            y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) + fp16_ieee_to_fp32_value(b);
            break;
          case OpType::DivC:
            y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) / fp16_ieee_to_fp32_value(b);
            break;
          case OpType::RDivC:
            y_ref[i] = fp16_ieee_to_fp32_value(b) / fp16_ieee_to_fp32_value(a_data[i]);
            break;
          case OpType::MaxC:
            y_ref[i] = std::max<float>(fp16_ieee_to_fp32_value(a_data[i]), fp16_ieee_to_fp32_value(b));
            break;
          case OpType::MinC:
            y_ref[i] = std::min<float>(fp16_ieee_to_fp32_value(a_data[i]), fp16_ieee_to_fp32_value(b));
            break;
          case OpType::MulC:
            y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) * fp16_ieee_to_fp32_value(b);
            break;
          case OpType::SqrDiffC: {
            const float diff = fp16_ieee_to_fp32_value(a_data[i]) - fp16_ieee_to_fp32_value(b);
            y_ref[i] = diff * diff;
            break;
          }
          case OpType::SubC:
            y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) - fp16_ieee_to_fp32_value(b);
            break;
          case OpType::RSubC:
            y_ref[i] = fp16_ieee_to_fp32_value(b) - fp16_ieee_to_fp32_value(a_data[i]);
            break;
        }
      }

      // Call optimized micro-kernel.
      vbinaryc(batch_size() * sizeof(uint16_t), a_data, &b, y.data(), nullptr);

      // Verify results. Half precision carries only ~3 decimal digits, hence
      // the mixed absolute (1.0e-4) and relative (1%) tolerance.
      for (size_t i = 0; i < batch_size(); i++) {
        EXPECT_NEAR(fp16_ieee_to_fp32_value(y[i]), y_ref[i], std::max(1.0e-4f, std::abs(y_ref[i]) * 1.0e-2f))
          << "at " << i << " / " << batch_size();
      }
    }
  }

  void Test(xnn_f16_vbinary_minmax_ukernel_fn vbinaryc_minmax, OpType op_type,
            xnn_init_f16_minmax_params_fn init_params) const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    std::uniform_real_distribution<float> f32dist(0.01f, 1.0f);

    std::vector<uint16_t> a(batch_size() + XNN_EXTRA_BYTES / sizeof(uint16_t));
    const uint16_t b = fp16_ieee_from_fp32_value(f32dist(rng));
    std::vector<uint16_t> y(batch_size() + (inplace() ? XNN_EXTRA_BYTES / sizeof(uint16_t) : 0));
    std::vector<float> y_ref(batch_size());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(a.begin(), a.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); });
      if (inplace()) {
        std::generate(y.begin(), y.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); });
      } else {
        std::fill(y.begin(), y.end(), UINT16_C(0x7E00) /* NaN */);
      }
      const uint16_t* a_data = inplace() ? y.data() : a.data();

      // Compute reference results.
      for (size_t i = 0; i < batch_size(); i++) {
        switch (op_type) {
          case OpType::AddC:
            y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) + fp16_ieee_to_fp32_value(b);
            break;
          case OpType::DivC:
            y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) / fp16_ieee_to_fp32_value(b);
            break;
          case OpType::RDivC:
            y_ref[i] = fp16_ieee_to_fp32_value(b) / fp16_ieee_to_fp32_value(a_data[i]);
            break;
          case OpType::MaxC:
            y_ref[i] = std::max<float>(fp16_ieee_to_fp32_value(a_data[i]), fp16_ieee_to_fp32_value(b));
            break;
          case OpType::MinC:
            y_ref[i] = std::min<float>(fp16_ieee_to_fp32_value(a_data[i]), fp16_ieee_to_fp32_value(b));
            break;
          case OpType::MulC:
            y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) * fp16_ieee_to_fp32_value(b);
            break;
          case OpType::SqrDiffC: {
            const float diff = fp16_ieee_to_fp32_value(a_data[i]) - fp16_ieee_to_fp32_value(b);
            y_ref[i] = diff * diff;
            break;
          }
          case OpType::SubC:
            y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) - fp16_ieee_to_fp32_value(b);
            break;
          case OpType::RSubC:
            y_ref[i] = fp16_ieee_to_fp32_value(b) - fp16_ieee_to_fp32_value(a_data[i]);
            break;
        }
      }

      // Derive clamping bounds from the qmin/qmax quantile settings, rounded
      // through half precision so they are exactly representable as fp16.
      const float accumulated_min = *std::min_element(y_ref.cbegin(), y_ref.cend());
      const float accumulated_max = *std::max_element(y_ref.cbegin(), y_ref.cend());
      const float accumulated_range = accumulated_max - accumulated_min;
      const float y_max = fp16_ieee_to_fp32_value(fp16_ieee_from_fp32_value(
        accumulated_range > 0.0f ?
          (accumulated_max - accumulated_range / 255.0f * float(255 - qmax())) :
          +std::numeric_limits<float>::infinity()));
      const float y_min = fp16_ieee_to_fp32_value(fp16_ieee_from_fp32_value(
        accumulated_range > 0.0f ?
          (accumulated_min + accumulated_range / 255.0f * float(qmin())) :
          -std::numeric_limits<float>::infinity()));
      for (size_t i = 0; i < batch_size(); i++) {
        y_ref[i] = std::max<float>(std::min<float>(y_ref[i], y_max), y_min);
      }

      // Prepare parameters.
      xnn_f16_minmax_params params;
      init_params(&params, fp16_ieee_from_fp32_value(y_min), fp16_ieee_from_fp32_value(y_max));

      // Call optimized micro-kernel.
      vbinaryc_minmax(batch_size() * sizeof(uint16_t), a_data, &b, y.data(), &params);

      // Verify results.
      for (size_t i = 0; i < batch_size(); i++) {
        EXPECT_NEAR(fp16_ieee_to_fp32_value(y[i]), y_ref[i], std::max(1.0e-4f, std::abs(y_ref[i]) * 1.0e-2f))
          << "at " << i << " / " << batch_size();
      }
    }
  }

  void Test(xnn_f32_vbinary_ukernel_fn vbinaryc, OpType op_type,
            xnn_init_f32_default_params_fn init_params = nullptr) const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    std::uniform_real_distribution<float> f32dist(0.01f, 1.0f);

    std::vector<float> a(batch_size() + XNN_EXTRA_BYTES / sizeof(float));
    const float b = f32dist(rng);
    std::vector<float> y(batch_size() + (inplace() ? XNN_EXTRA_BYTES / sizeof(float) : 0));
    std::vector<float> y_ref(batch_size());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(a.begin(), a.end(), [&]() { return f32dist(rng); });
      if (inplace()) {
        std::generate(y.begin(), y.end(), [&]() { return f32dist(rng); });
      } else {
        std::fill(y.begin(), y.end(), nanf(""));
      }
      const float* a_data = inplace() ? y.data() : a.data();

      // Compute reference results.
      for (size_t i = 0; i < batch_size(); i++) {
        switch (op_type) {
          case OpType::AddC:
            y_ref[i] = a_data[i] + b;
            break;
          case OpType::DivC:
            y_ref[i] = a_data[i] / b;
            break;
          case OpType::RDivC:
            y_ref[i] = b / a_data[i];
            break;
          case OpType::MaxC:
            y_ref[i] = std::max<float>(a_data[i], b);
            break;
          case OpType::MinC:
            y_ref[i] = std::min<float>(a_data[i], b);
            break;
          case OpType::MulC:
            y_ref[i] = a_data[i] * b;
            break;
          case OpType::SqrDiffC: {
            const float diff = a_data[i] - b;
            y_ref[i] = diff * diff;
            break;
          }
          case OpType::SubC:
            y_ref[i] = a_data[i] - b;
            break;
          case OpType::RSubC:
            y_ref[i] = b - a_data[i];
            break;
        }
      }

      // Prepare parameters.
      xnn_f32_default_params params;
      if (init_params != nullptr) {
        init_params(&params);
      }

      // Call optimized micro-kernel.
      vbinaryc(batch_size() * sizeof(float), a_data, &b, y.data(),
               init_params != nullptr ? &params : nullptr);

      // Verify results.
      for (size_t i = 0; i < batch_size(); i++) {
        EXPECT_NEAR(y[i], y_ref[i], std::abs(y_ref[i]) * 1.0e-6f)
          << "at " << i << " / " << batch_size();
      }
    }
  }

  void Test(xnn_f32_vbinary_relu_ukernel_fn vbinaryc_relu, OpType op_type) const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    std::uniform_real_distribution<float> f32dist(-1.0f, 1.0f);

    std::vector<float> a(batch_size() + XNN_EXTRA_BYTES / sizeof(float));
    const float b = f32dist(rng);
    std::vector<float> y(batch_size() + (inplace() ? XNN_EXTRA_BYTES / sizeof(float) : 0));
    std::vector<float> y_ref(batch_size());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(a.begin(), a.end(), [&]() { return f32dist(rng); });
      if (inplace()) {
        std::generate(y.begin(), y.end(), [&]() { return f32dist(rng); });
      } else {
        std::fill(y.begin(), y.end(), nanf(""));
      }
      const float* a_data = inplace() ? y.data() : a.data();

      // Compute reference results.
      for (size_t i = 0; i < batch_size(); i++) {
        switch (op_type) {
          case OpType::AddC:
            y_ref[i] = a_data[i] + b;
            break;
          case OpType::DivC:
            y_ref[i] = a_data[i] / b;
            break;
          case OpType::RDivC:
            y_ref[i] = b / a_data[i];
            break;
          case OpType::MaxC:
            y_ref[i] = std::max<float>(a_data[i], b);
            break;
          case OpType::MinC:
            y_ref[i] = std::min<float>(a_data[i], b);
            break;
          case OpType::MulC:
            y_ref[i] = a_data[i] * b;
            break;
          case OpType::SqrDiffC: {
            const float diff = a_data[i] - b;
            y_ref[i] = diff * diff;
            break;
          }
          case OpType::SubC:
            y_ref[i] = a_data[i] - b;
            break;
          case OpType::RSubC:
            y_ref[i] = b - a_data[i];
            break;
        }
      }
      for (size_t i = 0; i < batch_size(); i++) {
        y_ref[i] = std::max(y_ref[i], 0.0f);
      }

      // Call optimized micro-kernel.
      vbinaryc_relu(batch_size() * sizeof(float), a_data, &b, y.data(), nullptr);

      // Verify results.
      for (size_t i = 0; i < batch_size(); i++) {
        EXPECT_GE(y[i], 0.0f)
          << "at " << i << " / " << batch_size();
        EXPECT_NEAR(y[i], y_ref[i], std::abs(y_ref[i]) * 1.0e-6f)
          << "at " << i << " / " << batch_size();
      }
    }
  }

  void Test(xnn_f32_vbinary_minmax_ukernel_fn vbinaryc_minmax, OpType op_type,
            xnn_init_f32_minmax_params_fn init_params) const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    std::uniform_real_distribution<float> f32dist;

    std::vector<float> a(batch_size() + XNN_EXTRA_BYTES / sizeof(float));
    const float b = f32dist(rng);
    std::vector<float> y(batch_size() + (inplace() ? XNN_EXTRA_BYTES / sizeof(float) : 0));
    std::vector<float> y_ref(batch_size());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(a.begin(), a.end(), [&]() { return f32dist(rng); });
      if (inplace()) {
        std::generate(y.begin(), y.end(), [&]() { return f32dist(rng); });
      } else {
        std::fill(y.begin(), y.end(), nanf(""));
      }
      const float* a_data = inplace() ? y.data() : a.data();

      // Compute reference results.
      for (size_t i = 0; i < batch_size(); i++) {
        switch (op_type) {
          case OpType::AddC:
            y_ref[i] = a_data[i] + b;
            break;
          case OpType::DivC:
            y_ref[i] = a_data[i] / b;
            break;
          case OpType::RDivC:
            y_ref[i] = b / a_data[i];
            break;
          case OpType::MaxC:
            y_ref[i] = std::max<float>(a_data[i], b);
            break;
          case OpType::MinC:
            y_ref[i] = std::min<float>(a_data[i], b);
            break;
          case OpType::MulC:
            y_ref[i] = a_data[i] * b;
            break;
          case OpType::SqrDiffC: {
            const float diff = a_data[i] - b;
            y_ref[i] = diff * diff;
            break;
          }
          case OpType::SubC:
            y_ref[i] = a_data[i] - b;
            break;
          case OpType::RSubC:
            y_ref[i] = b - a_data[i];
            break;
        }
      }

      // Derive clamping bounds from the qmin/qmax quantile settings.
      const float accumulated_min = *std::min_element(y_ref.cbegin(), y_ref.cend());
      const float accumulated_max = *std::max_element(y_ref.cbegin(), y_ref.cend());
      const float accumulated_range = accumulated_max - accumulated_min;
      const float y_max = accumulated_range > 0.0f ?
        (accumulated_max - accumulated_range / 255.0f * float(255 - qmax())) :
        +std::numeric_limits<float>::infinity();
      const float y_min = accumulated_range > 0.0f ?
        (accumulated_min + accumulated_range / 255.0f * float(qmin())) :
        -std::numeric_limits<float>::infinity();
      for (size_t i = 0; i < batch_size(); i++) {
        y_ref[i] = std::max<float>(std::min<float>(y_ref[i], y_max), y_min);
      }

      // Prepare parameters.
      xnn_f32_minmax_params params;
      init_params(&params, y_min, y_max);

      // Call optimized micro-kernel.
      vbinaryc_minmax(batch_size() * sizeof(float), a_data, &b, y.data(), &params);

      // Verify results.
      for (size_t i = 0; i < batch_size(); i++) {
        EXPECT_NEAR(y[i], y_ref[i], std::abs(y_ref[i]) * 1.0e-6f)
          << "at " << i << " / " << batch_size();
      }
    }
  }

 private:
  size_t batch_size_{1};
  bool inplace_{false};
  uint8_t qmin_{0};
  uint8_t qmax_{255};
  size_t iterations_{15};
};
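// ---------------------------------------------------------------------------
// Usage sketch: a minimal example of driving VBinaryCMicrokernelTester. The
// kernel below is a hypothetical scalar AddC implementation written only for
// illustration -- it is not an XNNPACK micro-kernel -- assuming it matches the
// xnn_f16_vbinary-style signature that the f32 Test() overload expects
// (batch size in bytes, input vector, pointer to the constant operand, output
// vector, optional params).
// ---------------------------------------------------------------------------

static void sketch_f32_vaddc_ukernel(
    size_t batch_bytes,
    const float* a,
    const float* b,
    float* y,
    const union xnn_f32_default_params* /*params*/)
{
  // The batch size is passed in bytes, matching XNNPACK's convention.
  const float bv = *b;
  for (size_t i = 0; i < batch_bytes / sizeof(float); i++) {
    y[i] = a[i] + bv;
  }
}

TEST(VBINARYC_MICROKERNEL_TESTER_SKETCH, addc_small_batches) {
  for (size_t batch_size = 1; batch_size <= 16; batch_size++) {
    VBinaryCMicrokernelTester()
      .batch_size(batch_size)
      .iterations(3)
      .Test(sketch_f32_vaddc_ukernel,
            VBinaryCMicrokernelTester::OpType::AddC);
  }
}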
16,215
34.252174
129
h
XNNPACK
XNNPACK-master/test/vhswish-microkernel-tester.h
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <gtest/gtest.h>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <functional>
#include <limits>
#include <random>
#include <vector>

#include <xnnpack.h>
#include <xnnpack/math.h>
#include <xnnpack/microfnptr.h>
#include <xnnpack/microparams-init.h>

class VHSwishMicrokernelTester {
 public:
  inline VHSwishMicrokernelTester& batch_size(size_t batch_size) {
    assert(batch_size != 0);
    this->batch_size_ = batch_size;
    return *this;
  }

  inline size_t batch_size() const {
    return this->batch_size_;
  }

  inline VHSwishMicrokernelTester& input_scale(float input_scale) {
    assert(input_scale > 0.0f);
    assert(std::isnormal(input_scale));
    this->input_scale_ = input_scale;
    return *this;
  }

  inline float input_scale() const {
    return this->input_scale_;
  }

  inline VHSwishMicrokernelTester& input_zero_point(int16_t input_zero_point) {
    this->input_zero_point_ = input_zero_point;
    return *this;
  }

  inline int16_t input_zero_point() const {
    return this->input_zero_point_;
  }

  inline VHSwishMicrokernelTester& output_scale(float output_scale) {
    assert(output_scale > 0.0f);
    assert(std::isnormal(output_scale));
    this->output_scale_ = output_scale;
    return *this;
  }

  inline float output_scale() const {
    return this->output_scale_;
  }

  inline VHSwishMicrokernelTester& output_zero_point(int16_t output_zero_point) {
    this->output_zero_point_ = output_zero_point;
    return *this;
  }

  inline int16_t output_zero_point() const {
    return this->output_zero_point_;
  }

  inline VHSwishMicrokernelTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

  void Test(xnn_qs8_vhswish_ukernel_fn vhswish, xnn_init_qs8_hswish_params_fn init_params) const {
    ASSERT_GE(input_zero_point(), std::numeric_limits<int8_t>::min());
    ASSERT_LE(input_zero_point(), std::numeric_limits<int8_t>::max());
    ASSERT_GE(output_zero_point(), std::numeric_limits<int8_t>::min());
    ASSERT_LE(output_zero_point(), std::numeric_limits<int8_t>::max());

    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    std::uniform_int_distribution<int32_t> i8dist(
      std::numeric_limits<int8_t>::min(), std::numeric_limits<int8_t>::max());

    std::vector<int8_t> input(batch_size() + XNN_EXTRA_BYTES / sizeof(int8_t));
    std::vector<int8_t> output(batch_size());
    std::vector<int8_t> output_ref(batch_size());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), [&]() { return i8dist(rng); });
      std::fill(output.begin(), output.end(), INT8_C(0xA5));

      union xnn_qs8_hswish_params params;
      init_params(&params, input_zero_point(), output_zero_point(), input_scale(), output_scale());

      // Call optimized micro-kernel.
      vhswish(batch_size() * sizeof(int8_t), input.data(), output.data(), &params);

      // Compute reference results in fixed point:
      // input_scale_div is input_scale/6 in Q8, scale_ratio is
      // input_scale/output_scale in Q8.
      const int32_t input_scale_div = (int32_t) lrintf(256.0f * input_scale() / 6.0f);
      const int32_t scale_ratio = (int32_t) lrintf(256.0f * input_scale() / output_scale());
      for (size_t i = 0; i < batch_size(); i++) {
        const int32_t input_value = int32_t(uint32_t(input_zero_point() - input[i]) << 7);
        int32_t in = input_value * input_scale_div;
        in -= 16384;  // subtract 0.5 in Q15
        in = std::min(in, 0);
        in = std::max(in, -32768);
        const int32_t out = math_asr_s32(input_value * scale_ratio, 15);
        int32_t output_value = math_asr_s32(in * out, 15) + output_zero_point();
        output_value = std::min<int32_t>(output_value, std::numeric_limits<int8_t>::max());
        output_value = std::max<int32_t>(output_value, std::numeric_limits<int8_t>::min());
        output_ref[i] = static_cast<int8_t>(output_value);
      }

      // Verify results.
      for (size_t i = 0; i < batch_size(); i++) {
        EXPECT_EQ(int32_t(output[i]), int32_t(output_ref[i]))
          << "at " << i << " / " << batch_size()
          << ", x[" << i << "] = " << int32_t(input[i]);
      }
    }
  }

  void Test(xnn_qu8_vhswish_ukernel_fn vhswish, xnn_init_qu8_hswish_params_fn init_params) const {
    ASSERT_GE(input_zero_point(), std::numeric_limits<uint8_t>::min());
    ASSERT_LE(input_zero_point(), std::numeric_limits<uint8_t>::max());
    ASSERT_GE(output_zero_point(), std::numeric_limits<uint8_t>::min());
    ASSERT_LE(output_zero_point(), std::numeric_limits<uint8_t>::max());

    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    std::uniform_int_distribution<int32_t> u8dist(
      std::numeric_limits<uint8_t>::min(), std::numeric_limits<uint8_t>::max());

    std::vector<uint8_t> input(batch_size() + XNN_EXTRA_BYTES / sizeof(uint8_t));
    std::vector<uint8_t> output(batch_size());
    std::vector<uint8_t> output_ref(batch_size());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), [&]() { return u8dist(rng); });
      std::fill(output.begin(), output.end(), UINT8_C(0xA5));

      union xnn_qu8_hswish_params params;
      init_params(&params, input_zero_point(), output_zero_point(), input_scale(), output_scale());

      // Call optimized micro-kernel.
      vhswish(batch_size() * sizeof(uint8_t), input.data(), output.data(), &params);

      // Compute reference results.
      const int32_t input_scale_div = (int32_t) lrintf(256.0f * input_scale() / 6.0f);
      const int32_t scale_ratio = (int32_t) lrintf(256.0f * input_scale() / output_scale());
      for (size_t i = 0; i < batch_size(); i++) {
        const int32_t input_value = int32_t(uint32_t(input_zero_point() - input[i]) << 7);
        int32_t in = input_value * input_scale_div;
        in -= 16384;  // subtract 0.5 in Q15
        in = std::min(in, 0);
        in = std::max(in, -32768);
        const int32_t out = math_asr_s32(input_value * scale_ratio, 15);
        int32_t output_value = math_asr_s32(in * out, 15) + output_zero_point();
        output_value = std::min<int32_t>(output_value, std::numeric_limits<uint8_t>::max());
        output_value = std::max<int32_t>(output_value, std::numeric_limits<uint8_t>::min());
        output_ref[i] = static_cast<uint8_t>(output_value);
      }

      // Verify results.
      for (size_t i = 0; i < batch_size(); i++) {
        EXPECT_EQ(int32_t(output[i]), int32_t(output_ref[i]))
          << "at " << i << " / " << batch_size()
          << ", x[" << i << "] = " << int32_t(input[i]);
      }
    }
  }

 private:
  float input_scale_ = 128.0f;
  float output_scale_ = 128.0f;
  int16_t input_zero_point_ = 1;
  int16_t output_zero_point_ = 5;
  size_t batch_size_ = 1;
  size_t iterations_ = 15;
};
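// ---------------------------------------------------------------------------
// Usage sketch: one way a test could drive VHSwishMicrokernelTester across a
// range of input zero points. The kernel and parameter-initialization names
// below (xnn_qs8_vhswish_ukernel__scalar_x1, xnn_init_qs8_hswish_scalar_params)
// are assumed for illustration and would need to match declarations pulled in
// from the XNNPACK sources; only the tester API shown above is taken as given.
// ---------------------------------------------------------------------------

TEST(QS8_VHSWISH__SCALAR_X1_SKETCH, input_zero_point_sweep) {
  // Step through the int8 zero-point range; every value stays within the
  // [-128, 127] bounds that the tester asserts.
  for (int32_t input_zero_point = -128; input_zero_point <= 127; input_zero_point += 51) {
    VHSwishMicrokernelTester()
      .batch_size(7)
      .input_zero_point(static_cast<int16_t>(input_zero_point))
      .input_scale(0.5f)
      .output_scale(0.25f)
      .Test(xnn_qs8_vhswish_ukernel__scalar_x1,   // assumed kernel name
            xnn_init_qs8_hswish_scalar_params);   // assumed init function
  }
}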
7,263
35.686869
99
h