repo
stringlengths
1
152
file
stringlengths
14
221
code
stringlengths
501
25k
file_length
int64
501
25k
avg_line_length
float64
20
99.5
max_line_length
int64
21
134
extension_type
stringclasses
2 values
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8-relu-wasmrelaxedsimd-fma-splat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

// f32 GEMM microkernel with fused ReLU: computes a 5-row x 8-column tile of
// C = A * W using WAsm Relaxed SIMD fused multiply-add.  "splat" variant:
// A is loaded four floats at a time and each lane is broadcast with shuffles.
//
//   mr         number of live rows of A/C (1..5); dead rows alias the row above
//   nc         number of output columns remaining
//   kc         reduction (K) length in BYTES, multiple of sizeof(float)
//   a, a_stride  input matrix A; row stride in bytes
//   w          packed weights: 8 bias floats, then kc/4 panels of 8 floats
//   c, cm_stride, cn_stride  output C; row stride / column-group stride, bytes
//   params     unused — ReLU takes no parameters
void xnn_f32_gemm_relu_ukernel_5x8__wasmrelaxedsimd_fma_splat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Set up one A-row / C-row pointer pair per tile row.  When mr is smaller
  // than 5, surplus row pointers alias the previous row so the arithmetic
  // below stays branch-free; the duplicate stores then hit the same memory.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

  do {
    // Initialize all five rows of accumulators from the packed bias (first
    // 8 floats of the weight panel).
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    w += 8;

    size_t k = kc;
    // Main loop: process 4 K-steps per iteration (one v128 load per A row,
    // then broadcast each of its 4 lanes against the matching weight panel).
    while (k >= 4 * sizeof(float)) {
      const v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      const v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      const v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      const v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      const v128_t va4 = wasm_v128_load(a4);
      a4 += 4;

      // K-step 0: broadcast lane 0 of each A vector.
      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      // Relaxed-SIMD fused multiply-add: vacc += va * vb (single rounding
      // when the engine maps it to a hardware FMA).
      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb0123c0, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb0123c0, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb4567c0, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb4567c0, vacc4x4567);

      // K-step 1: broadcast lane 1.
      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb0123c1, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb0123c1, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb4567c1, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb4567c1, vacc4x4567);

      // K-step 2: broadcast lane 2.
      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb0123c2, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb0123c2, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb4567c2, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb4567c2, vacc4x4567);

      // K-step 3: broadcast lane 3.
      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb0123c3, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb0123c3, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb4567c3, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb4567c3, vacc4x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder loop: 1..3 leftover K-steps, one splat-load per A row each.
    if XNN_UNLIKELY(k != 0) {
      do {
        const v128_t va0 = wasm_v128_load32_splat(a0);
        a0 += 1;
        const v128_t va1 = wasm_v128_load32_splat(a1);
        a1 += 1;
        const v128_t va2 = wasm_v128_load32_splat(a2);
        a2 += 1;
        const v128_t va3 = wasm_v128_load32_splat(a3);
        a3 += 1;
        const v128_t va4 = wasm_v128_load32_splat(a4);
        a4 += 1;

        const v128_t vb0123 = wasm_v128_load(w);
        const v128_t vb4567 = wasm_v128_load(w + 4);
        w += 8;

        vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
        vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
        vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
        vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
        vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
        vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
        vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
        vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
        vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
        vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);

        k -= sizeof(float);
      } while (k != 0);
    }

    // Fused ReLU.  NOTE(review): the INTEGER max against 0 is intentional —
    // negative IEEE-754 floats have the sign bit set and so compare below 0
    // as signed i32, while non-negative floats compare >= 0; this clamps
    // negatives to +0.0 more cheaply than an f32 max.
    const v128_t vzero = wasm_i32x4_const_splat(0);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
    vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
    vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero);
    vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
    vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
    vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
    vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
    vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store for every row; rewind A pointers by kc so the
      // next column group re-reads the same A rows.
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail: store the final 1..7 columns by binary decomposition of nc,
      // shifting surviving lanes down after each partial store.
      if (nc & 4) {
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
12,037
40.653979
82
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8-relu-wasmsimd-loadsplat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

// f32 GEMM microkernel with fused ReLU: computes a 5-row x 8-column tile of
// C = A * W using baseline WAsm SIMD (separate mul + add, no FMA).
// "loadsplat" variant: each K step splat-loads a single scalar per A row;
// there is no K unrolling.
//
//   mr         number of live rows of A/C (1..5); dead rows alias the row above
//   nc         number of output columns remaining
//   kc         reduction (K) length in BYTES, multiple of sizeof(float)
//   a, a_stride  input matrix A; row stride in bytes
//   w          packed weights: 8 bias floats, then kc/4 panels of 8 floats
//   c, cm_stride, cn_stride  output C; row stride / column-group stride, bytes
//   params     unused — ReLU takes no parameters
void xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_loadsplat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Row pointer setup; surplus rows alias the previous row when mr < 5 so
  // the loop body needs no per-row branches.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

  do {
    // Accumulators start from the packed bias (first 8 floats of w).
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    w += 8;

    // One K step per iteration: splat one A scalar per row, multiply by the
    // 8-wide weight panel, accumulate with separate mul/add (two roundings).
    size_t k = kc;
    do {
      const v128_t va0 = wasm_v128_load32_splat(a0);
      a0 += 1;
      const v128_t va1 = wasm_v128_load32_splat(a1);
      a1 += 1;
      const v128_t va2 = wasm_v128_load32_splat(a2);
      a2 += 1;
      const v128_t va3 = wasm_v128_load32_splat(a3);
      a3 += 1;
      const v128_t va4 = wasm_v128_load32_splat(a4);
      a4 += 1;

      const v128_t vb0123 = wasm_v128_load(w);
      const v128_t vb4567 = wasm_v128_load(w + 4);
      w += 8;

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));

      k -= sizeof(float);
    } while (k != 0);

    // Fused ReLU.  NOTE(review): integer max against 0 is intentional —
    // negative floats have the sign bit set (negative as signed i32), so an
    // i32 max with 0 clamps them to +0.0 without an f32 compare.
    const v128_t vzero = wasm_i32x4_const_splat(0);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
    vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
    vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero);
    vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
    vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
    vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
    vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
    vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store per row, then rewind A pointers for the next
      // column group.
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail store of the final 1..7 columns via binary decomposition of nc.
      if (nc & 4) {
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
6,390
31.607143
75
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8-relu-wasmsimd-splat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

// f32 GEMM microkernel with fused ReLU: computes a 5-row x 8-column tile of
// C = A * W using baseline WAsm SIMD (separate mul + add, no FMA).
// "splat" variant: A is loaded four floats at a time and each lane is
// broadcast with shuffles, unrolling the K loop by 4.
//
//   mr         number of live rows of A/C (1..5); dead rows alias the row above
//   nc         number of output columns remaining
//   kc         reduction (K) length in BYTES, multiple of sizeof(float)
//   a, a_stride  input matrix A; row stride in bytes
//   w          packed weights: 8 bias floats, then kc/4 panels of 8 floats
//   c, cm_stride, cn_stride  output C; row stride / column-group stride, bytes
//   params     unused — ReLU takes no parameters
void xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Row pointer setup; surplus rows alias the previous row when mr < 5.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

  do {
    // Accumulators start from the packed bias (first 8 floats of w).
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    w += 8;

    size_t k = kc;
    // Main loop: 4 K-steps per iteration — one v128 load per A row, then
    // broadcast each lane against the matching 8-wide weight panel.
    while (k >= 4 * sizeof(float)) {
      const v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      const v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      const v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      const v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      const v128_t va4 = wasm_v128_load(a4);
      a4 += 4;

      // K-step 0: broadcast lane 0 of each A vector.
      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);

      // K-step 1: broadcast lane 1.
      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);

      // K-step 2: broadcast lane 2.
      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);

      // K-step 3: broadcast lane 3.
      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder loop: 1..3 leftover K-steps, one splat-load per A row each.
    if XNN_UNLIKELY(k != 0) {
      do {
        const v128_t va0 = wasm_v128_load32_splat(a0);
        a0 += 1;
        const v128_t va1 = wasm_v128_load32_splat(a1);
        a1 += 1;
        const v128_t va2 = wasm_v128_load32_splat(a2);
        a2 += 1;
        const v128_t va3 = wasm_v128_load32_splat(a3);
        a3 += 1;
        const v128_t va4 = wasm_v128_load32_splat(a4);
        a4 += 1;

        const v128_t vb0123 = wasm_v128_load(w);
        const v128_t vb4567 = wasm_v128_load(w + 4);
        w += 8;

        vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
        vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
        vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
        vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
        vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
        vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
        vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
        vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
        vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
        vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);

        k -= sizeof(float);
      } while (k != 0);
    }

    // Fused ReLU.  NOTE(review): integer max against 0 is intentional —
    // negative floats have the sign bit set (negative as signed i32), so an
    // i32 max with 0 clamps them to +0.0 without an f32 compare.
    const v128_t vzero = wasm_i32x4_const_splat(0);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
    vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
    vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero);
    vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
    vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
    vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
    vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
    vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store per row, then rewind A pointers by kc for the
      // next column group.
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail store of the final 1..7 columns via binary decomposition of nc.
      if (nc & 4) {
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
11,876
40.096886
79
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8-wasmrelaxedsimd-fma-loadsplat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


// F32 GEMM micro-kernel producing a 5x8 tile of the output, using WAsm
// Relaxed SIMD fused multiply-add. The "loadsplat" variant broadcasts one
// A element per K step with wasm_v128_load32_splat.
//
//   mr        - number of A/C rows actually used (1..5)
//   nc        - number of output columns remaining
//   kc        - reduction dimension, in BYTES (multiple of sizeof(float))
//   a, a_stride       - input matrix A and its row stride in bytes
//   w         - packed weights: 8 bias floats, then 8 floats per K step
//   c, cm_stride, cn_stride - output C, row stride / tile stride in bytes
//   params    - not referenced by this kernel (no min/max clamping here)
void xnn_f32_gemm_ukernel_5x8__wasmrelaxedsimd_fma_loadsplat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Per-row input/output pointers. Rows beyond mr alias the previous row,
  // so loads and stores for them are safe (they just repeat valid work).
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

  do {
    // Initialize all 5 rows of accumulators from the 8 packed bias floats.
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    w += 8;

    // Reduction loop: one A scalar per row per iteration, broadcast to all
    // 4 lanes, FMA'd against 8 packed weights.
    size_t k = kc;
    do {
      const v128_t va0 = wasm_v128_load32_splat(a0);
      a0 += 1;
      const v128_t va1 = wasm_v128_load32_splat(a1);
      a1 += 1;
      const v128_t va2 = wasm_v128_load32_splat(a2);
      a2 += 1;
      const v128_t va3 = wasm_v128_load32_splat(a3);
      a3 += 1;
      const v128_t va4 = wasm_v128_load32_splat(a4);
      a4 += 1;

      const v128_t vb0123 = wasm_v128_load(w);
      const v128_t vb4567 = wasm_v128_load(w + 4);
      w += 8;

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);

      k -= sizeof(float);
    } while (k != 0);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column tile: store every row (highest first) and step each
      // output pointer to the next tile of columns.
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      // Rewind the A pointers (advanced by kc bytes above) for the next tile.
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial tile: store 4, then 2, then 1 remaining columns, shifting
      // surviving lanes down into vaccNx0123 after each store.
      if (nc & 4) {
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
5,857
30.664865
78
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8-wasmrelaxedsimd-fma-splat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


// F32 GEMM micro-kernel producing a 5x8 tile of the output, using WAsm
// Relaxed SIMD fused multiply-add. The "splat" variant loads 4 A elements
// per row at once and broadcasts each lane via shuffles, unrolling the
// reduction loop by 4; a scalar-broadcast loop handles the K remainder.
//
//   mr        - number of A/C rows actually used (1..5)
//   nc        - number of output columns remaining
//   kc        - reduction dimension, in BYTES (multiple of sizeof(float))
//   a, a_stride       - input matrix A and its row stride in bytes
//   w         - packed weights: 8 bias floats, then 8 floats per K step
//   c, cm_stride, cn_stride - output C, row stride / tile stride in bytes
//   params    - not referenced by this kernel (no min/max clamping here)
void xnn_f32_gemm_ukernel_5x8__wasmrelaxedsimd_fma_splat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Per-row input/output pointers. Rows beyond mr alias the previous row,
  // so loads and stores for them are safe (they just repeat valid work).
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

  do {
    // Initialize all 5 rows of accumulators from the 8 packed bias floats.
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    w += 8;

    // Main reduction loop, unrolled 4x: load 4 A floats per row, then for
    // each of the 4 lanes (c0..c3) broadcast it and FMA against 8 weights.
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      const v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      const v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      const v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      const v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      const v128_t va4 = wasm_v128_load(a4);
      a4 += 4;

      // K step 0: lane 0 of each A vector.
      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb0123c0, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb0123c0, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb4567c0, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb4567c0, vacc4x4567);

      // K step 1: lane 1 of each A vector.
      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb0123c1, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb0123c1, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb4567c1, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb4567c1, vacc4x4567);

      // K step 2: lane 2 of each A vector.
      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb0123c2, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb0123c2, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb4567c2, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb4567c2, vacc4x4567);

      // K step 3: lane 3 of each A vector.
      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb0123c3, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb0123c3, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb4567c3, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb4567c3, vacc4x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }

    // Remainder loop: handle kc % 4 floats one at a time via scalar broadcast.
    if XNN_UNLIKELY(k != 0) {
      do {
        const v128_t va0 = wasm_v128_load32_splat(a0);
        a0 += 1;
        const v128_t va1 = wasm_v128_load32_splat(a1);
        a1 += 1;
        const v128_t va2 = wasm_v128_load32_splat(a2);
        a2 += 1;
        const v128_t va3 = wasm_v128_load32_splat(a3);
        a3 += 1;
        const v128_t va4 = wasm_v128_load32_splat(a4);
        a4 += 1;

        const v128_t vb0123 = wasm_v128_load(w);
        const v128_t vb4567 = wasm_v128_load(w + 4);
        w += 8;

        vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
        vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
        vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
        vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
        vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
        vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
        vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
        vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
        vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
        vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);

        k -= sizeof(float);
      } while (k != 0);
    }

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column tile: store every row (highest first) and step each
      // output pointer to the next tile of columns.
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      // Rewind the A pointers (advanced by kc bytes above) for the next tile.
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial tile: store 4, then 2, then 1 remaining columns, shifting
      // surviving lanes down into vaccNx0123 after each store.
      if (nc & 4) {
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
11,463
40.23741
82
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8-wasmsimd-loadsplat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


// F32 GEMM micro-kernel producing a 5x8 tile of the output, using baseline
// WAsm SIMD128 (separate multiply + add, no fused madd). The "loadsplat"
// variant broadcasts one A element per K step with wasm_v128_load32_splat.
//
//   mr        - number of A/C rows actually used (1..5)
//   nc        - number of output columns remaining
//   kc        - reduction dimension, in BYTES (multiple of sizeof(float))
//   a, a_stride       - input matrix A and its row stride in bytes
//   w         - packed weights: 8 bias floats, then 8 floats per K step
//   c, cm_stride, cn_stride - output C, row stride / tile stride in bytes
//   params    - not referenced by this kernel (no min/max clamping here)
void xnn_f32_gemm_ukernel_5x8__wasmsimd_loadsplat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Per-row input/output pointers. Rows beyond mr alias the previous row,
  // so loads and stores for them are safe (they just repeat valid work).
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

  do {
    // Initialize all 5 rows of accumulators from the 8 packed bias floats.
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    w += 8;

    // Reduction loop: one A scalar per row per iteration, broadcast to all
    // 4 lanes, multiplied against 8 packed weights and accumulated.
    size_t k = kc;
    do {
      const v128_t va0 = wasm_v128_load32_splat(a0);
      a0 += 1;
      const v128_t va1 = wasm_v128_load32_splat(a1);
      a1 += 1;
      const v128_t va2 = wasm_v128_load32_splat(a2);
      a2 += 1;
      const v128_t va3 = wasm_v128_load32_splat(a3);
      a3 += 1;
      const v128_t va4 = wasm_v128_load32_splat(a4);
      a4 += 1;

      const v128_t vb0123 = wasm_v128_load(w);
      const v128_t vb4567 = wasm_v128_load(w + 4);
      w += 8;

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));

      k -= sizeof(float);
    } while (k != 0);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column tile: store every row (highest first) and step each
      // output pointer to the next tile of columns.
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      // Rewind the A pointers (advanced by kc bytes above) for the next tile.
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial tile: store 4, then 2, then 1 remaining columns, shifting
      // surviving lanes down into vaccNx0123 after each store.
      if (nc & 4) {
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
5,816
30.443243
76
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8-wasmsimd-splat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


// F32 GEMM micro-kernel producing a 5x8 tile of the output, using baseline
// WAsm SIMD128 (separate multiply + add, no fused madd). The "splat"
// variant loads 4 A elements per row at once and broadcasts each lane via
// shuffles, unrolling the reduction loop by 4; a scalar-broadcast loop
// handles the K remainder.
//
//   mr        - number of A/C rows actually used (1..5)
//   nc        - number of output columns remaining
//   kc        - reduction dimension, in BYTES (multiple of sizeof(float))
//   a, a_stride       - input matrix A and its row stride in bytes
//   w         - packed weights: 8 bias floats, then 8 floats per K step
//   c, cm_stride, cn_stride - output C, row stride / tile stride in bytes
//   params    - not referenced by this kernel (no min/max clamping here)
void xnn_f32_gemm_ukernel_5x8__wasmsimd_splat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Per-row input/output pointers. Rows beyond mr alias the previous row,
  // so loads and stores for them are safe (they just repeat valid work).
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

  do {
    // Initialize all 5 rows of accumulators from the 8 packed bias floats.
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    w += 8;

    // Main reduction loop, unrolled 4x: load 4 A floats per row, then for
    // each of the 4 lanes (c0..c3) broadcast it, multiply by 8 weights and
    // accumulate.
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      const v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      const v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      const v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      const v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      const v128_t va4 = wasm_v128_load(a4);
      a4 += 4;

      // K step 0: lane 0 of each A vector.
      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);

      // K step 1: lane 1 of each A vector.
      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);

      // K step 2: lane 2 of each A vector.
      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);

      // K step 3: lane 3 of each A vector.
      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }

    // Remainder loop: handle kc % 4 floats one at a time via scalar broadcast.
    if XNN_UNLIKELY(k != 0) {
      do {
        const v128_t va0 = wasm_v128_load32_splat(a0);
        a0 += 1;
        const v128_t va1 = wasm_v128_load32_splat(a1);
        a1 += 1;
        const v128_t va2 = wasm_v128_load32_splat(a2);
        a2 += 1;
        const v128_t va3 = wasm_v128_load32_splat(a3);
        a3 += 1;
        const v128_t va4 = wasm_v128_load32_splat(a4);
        a4 += 1;

        const v128_t vb0123 = wasm_v128_load(w);
        const v128_t vb4567 = wasm_v128_load(w + 4);
        w += 8;

        vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
        vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
        vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
        vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
        vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
        vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
        vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
        vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
        vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
        vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);

        k -= sizeof(float);
      } while (k != 0);
    }

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column tile: store every row (highest first) and step each
      // output pointer to the next tile of columns.
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      // Rewind the A pointers (advanced by kc bytes above) for the next tile.
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial tile: store 4, then 2, then 1 remaining columns, shifting
      // surviving lanes down into vaccNx0123 after each store.
      if (nc & 4) {
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
11,302
39.658273
79
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8s4-minmax-sse.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/gemm.h>


// F32 GEMM microkernel (SSE, 5x8 output tile, "s4" shuffled-weights layout).
//
// Computes a 5x8 tile of C = bias + A*B, then clamps every element to
// [params->sse.min, params->sse.max].
//
// Arguments:
//   mr        - rows of A/C to process, 1..5; for mr < 5 the unused row
//               pointers are aliased onto the previous row, so the redundant
//               computation stays in bounds and is simply discarded.
//   nc        - columns of C remaining; consumed 8 per outer iteration.
//   kc        - length of the K dimension in BYTES (multiple of sizeof(float)).
//   a, a_stride          - input rows and their byte stride.
//   w         - packed weights: 8 bias floats, then 32 floats (4 shuffle
//               positions x 8 columns) per 4 K elements.
//   c, cm_stride, cn_stride - output rows; row stride / tile stride in bytes.
//
// The "s4" scheme multiplies whole 4-lane A vectors against pre-shuffled B
// panels and rotates each A register by one lane (_mm_shuffle_ps) between the
// four partial products, instead of broadcasting individual A lanes.
// XNN_OOB_READS: the remainder path may read up to a full vector past the
// logical end of each A row.
void xnn_f32_gemm_minmax_ukernel_5x8s4__sse(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Per-row input/output pointers; rows beyond mr alias the last valid row.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

  do {
    // Initialize all 5x2 accumulators (rows 0-4, columns 0-3 / 4-7) from the
    // packed bias at the head of the weights stream.
    __m128 vacc0x0123 = _mm_load_ps(w + 0);
    __m128 vacc0x4567 = _mm_load_ps(w + 4);
    __m128 vacc1x0123 = vacc0x0123;
    __m128 vacc1x4567 = vacc0x4567;
    __m128 vacc2x0123 = vacc0x0123;
    __m128 vacc2x4567 = vacc0x4567;
    __m128 vacc3x0123 = vacc0x0123;
    __m128 vacc3x4567 = vacc0x4567;
    __m128 vacc4x0123 = vacc0x0123;
    __m128 vacc4x4567 = vacc0x4567;
    w += 8;

    // Main loop: 4 K elements per iteration, 4 shuffle positions (c0..c3).
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      __m128 va0 = _mm_loadu_ps(a0);
      a0 += 4;
      __m128 va1 = _mm_loadu_ps(a1);
      a1 += 4;
      __m128 va2 = _mm_loadu_ps(a2);
      a2 += 4;
      __m128 va3 = _mm_loadu_ps(a3);
      a3 += 4;
      __m128 va4 = _mm_loadu_ps(a4);
      a4 += 4;

      // Shuffle position c0.
      const __m128 vb0123c0 = _mm_load_ps(w + 0);
      const __m128 vb4567c0 = _mm_load_ps(w + 4);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c0));
      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c0));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c0));
      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c0));

      // Rotate each A vector by one lane for the next shuffle position.
      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
      va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));

      // Shuffle position c1.
      const __m128 vb0123c1 = _mm_load_ps(w + 8);
      const __m128 vb4567c1 = _mm_load_ps(w + 12);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c1));
      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c1));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c1));
      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c1));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
      va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));

      // Shuffle position c2.
      const __m128 vb0123c2 = _mm_load_ps(w + 16);
      const __m128 vb4567c2 = _mm_load_ps(w + 20);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c2));
      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c2));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c2));
      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c2));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
      va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));

      // Shuffle position c3.
      const __m128 vb0123c3 = _mm_load_ps(w + 24);
      const __m128 vb4567c3 = _mm_load_ps(w + 28);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c3));
      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c3));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c3));
      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c3));

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder: 1-3 leftover K elements.  A full (possibly out-of-bounds)
    // vector is loaded; lanes where the packed B value is exactly zero (the
    // generator's padding) are masked out of A with andnot, so padded
    // positions contribute nothing.
    if XNN_UNLIKELY(k != 0) {
      __m128 va0 = _mm_loadu_ps(a0);
      a0 = (const float*) ((uintptr_t) a0 + k);
      __m128 va1 = _mm_loadu_ps(a1);
      a1 = (const float*) ((uintptr_t) a1 + k);
      __m128 va2 = _mm_loadu_ps(a2);
      a2 = (const float*) ((uintptr_t) a2 + k);
      __m128 va3 = _mm_loadu_ps(a3);
      a3 = (const float*) ((uintptr_t) a3 + k);
      __m128 va4 = _mm_loadu_ps(a4);
      a4 = (const float*) ((uintptr_t) a4 + k);

      const __m128 vb0123c0 = _mm_load_ps(w + 0);
      const __m128 vb4567c0 = _mm_load_ps(w + 4);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va1), vb0123c0));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va2), vb0123c0));
      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va3), vb0123c0));
      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va4), vb0123c0));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va1), vb4567c0));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va2), vb4567c0));
      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va3), vb4567c0));
      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va4), vb4567c0));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
      va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c1 = _mm_load_ps(w + 8);
      const __m128 vb4567c1 = _mm_load_ps(w + 12);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va1), vb0123c1));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va2), vb0123c1));
      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va3), vb0123c1));
      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va4), vb0123c1));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va1), vb4567c1));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va2), vb4567c1));
      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va3), vb4567c1));
      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va4), vb4567c1));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
      va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c2 = _mm_load_ps(w + 16);
      const __m128 vb4567c2 = _mm_load_ps(w + 20);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va1), vb0123c2));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va2), vb0123c2));
      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va3), vb0123c2));
      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va4), vb0123c2));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), vb4567c2));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va1), vb4567c2));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va2), vb4567c2));
      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va3), vb4567c2));
      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va4), vb4567c2));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
      va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c3 = _mm_load_ps(w + 24);
      const __m128 vb4567c3 = _mm_load_ps(w + 28);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va1), vb0123c3));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va2), vb0123c3));
      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va3), vb0123c3));
      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va4), vb0123c3));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va1), vb4567c3));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va2), vb4567c3));
      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va3), vb4567c3));
      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va4), vb4567c3));

      w += 32;
    }

    // Clamp: upper bound first, then lower bound.
    const __m128 vmax = _mm_load_ps(params->sse.max);
    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
    vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
    vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
    vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
    vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
    vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
    vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
    vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
    vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store; rewind the A pointers for the next column tile.
      _mm_storeu_ps(c4, vacc4x0123);
      _mm_storeu_ps(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm_storeu_ps(c3, vacc3x0123);
      _mm_storeu_ps(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm_storeu_ps(c2, vacc2x0123);
      _mm_storeu_ps(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm_storeu_ps(c1, vacc1x0123);
      _mm_storeu_ps(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm_storeu_ps(c0, vacc0x0123);
      _mm_storeu_ps(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial store of the final 1-7 columns, narrowing 4 -> 2 -> 1.
      if (nc & 4) {
        _mm_storeu_ps(c4, vacc4x0123);
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c4, vacc4x0123);
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c4, vacc4x0123);
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}
16,843
46.852273
126
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8s4-minmax-wasmrelaxedsimd-fma.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


// F32 GEMM microkernel (WebAssembly Relaxed SIMD with FMA, 5x8 tile,
// "s4" shuffled-weights layout).
//
// Computes a 5x8 tile of C = bias + A*B and clamps the result to
// [params->wasmsimd.min, params->wasmsimd.max].  Accumulation uses
// __builtin_wasm_relaxed_madd_f32x4, so whether a fused multiply-add or
// separate multiply+add is used is implementation-defined (relaxed
// semantics); results may differ in the last bit across engines.
//
// Arguments mirror the other 5x8s4 GEMM kernels:
//   mr (1..5), nc, kc in bytes, A rows with a_stride, packed weights w
//   (8 bias floats then 32 floats per 4 K elements), C rows with
//   cm_stride/cn_stride.
void xnn_f32_gemm_minmax_ukernel_5x8s4__wasmrelaxedsimd_fma(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Per-row input/output pointers; rows beyond mr alias the previous row.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

  // Clamp bounds, splatted once for the whole call.
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Initialize all 5x2 accumulators from the packed bias.
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    w += 8;

    // Main loop: 4 K elements per iteration, 4 shuffle positions (c0..c3);
    // the A vectors are rotated one lane between positions.
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      v128_t va4 = wasm_v128_load(a4);
      a4 += 4;

      // Shuffle position c0.
      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c0, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c0, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c0, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c0, vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      // Shuffle position c1.
      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c1, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c1, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c1, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c1, vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      // Shuffle position c2.
      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c2, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c2, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c2, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c2, vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      // Shuffle position c3.
      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c3, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c3, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c3, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c3, vacc4x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder: 1-3 leftover K elements.  Lanes where the packed B value is
    // exactly zero (generator padding) are masked out of A via andnot, so
    // the padded positions contribute nothing to the accumulators.
    if XNN_UNLIKELY(k != 0) {
      v128_t va0 = wasm_v128_load(a0);
      a0 = (const float*) ((uintptr_t) a0 + k);
      v128_t va1 = wasm_v128_load(a1);
      a1 = (const float*) ((uintptr_t) a1 + k);
      v128_t va2 = wasm_v128_load(a2);
      a2 = (const float*) ((uintptr_t) a2 + k);
      v128_t va3 = wasm_v128_load(a3);
      a3 = (const float*) ((uintptr_t) a3 + k);
      v128_t va4 = wasm_v128_load(a4);
      a4 = (const float*) ((uintptr_t) a4 + k);

      const v128_t vzero = wasm_f32x4_const_splat(0.0f);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc4x4567);

      w += 32;
    }

    // Clamp: lower bound first, then upper bound (relaxed min/max: NaN and
    // signed-zero handling is implementation-defined).
    vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
    vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
    vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
    vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
    vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
    vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
    vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
    vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
    vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
    vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);

    vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
    vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
    vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
    vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
    vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
    vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
    vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
    vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
    vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
    vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store; rewind the A pointers for the next column tile.
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial store of the final 1-7 columns, narrowing 4 -> 2 -> 1.
      if (nc & 4) {
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
17,948
48.997214
130
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8s4-minmax-wasmrelaxedsimd.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-s4.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


// F32 GEMM microkernel, MR=5 x NR=8, "s4" variant for WAsm Relaxed SIMD:
// instead of splatting each A element, it loads 4 consecutive A values per row
// and rotates the vector by one lane between the 4 sub-steps, so every A lane
// meets every packed-B column group.
//
// Arguments:
//   mr        - number of A/C rows actually processed (1..5)
//   nc        - number of output columns remaining
//   kc        - reduction (K) size in bytes; multiple of sizeof(float)
//   a         - input matrix A, row stride a_stride bytes
//   w         - packed weights; each NC block of 8 starts with 8 seed values
//               loaded into the accumulators (presumably the bias — layout is
//               produced by the matching packing routine; confirm there)
//   c         - output matrix C, row stride cm_stride, column-block stride cn_stride
//   params    - min/max clamping bounds
void xnn_f32_gemm_minmax_ukernel_5x8s4__wasmrelaxedsimd(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Set up one A read pointer and one C write pointer per row. When mr is
  // smaller than 5, the surplus rows alias the previous row so the kernel can
  // compute them unconditionally and simply overwrite the same memory.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Seed all five rows' accumulators from the first 8 packed-weight values.
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    w += 8;

    // Main loop: consume 4 K-elements per iteration. Each of the 4 sub-steps
    // multiplies the current A vectors against one 8-wide B column group,
    // then rotates A left by one lane (shuffle 1,2,3,0) for the next sub-step.
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      v128_t va4 = wasm_v128_load(a4);
      a4 += 4;

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder (k = 1..3 floats): the full 4-element vectors are still loaded
    // and all 4 sub-steps run, but each A vector is masked with
    // andnot(va, vb == 0): lanes whose packed weight is exactly 0 contribute
    // nothing, so lanes past the end of the row cannot corrupt the result
    // (presumably the packer zero-pads B for the K remainder — confirm there).
    if XNN_UNLIKELY(k != 0) {
      v128_t va0 = wasm_v128_load(a0);
      a0 = (const float*) ((uintptr_t) a0 + k);
      v128_t va1 = wasm_v128_load(a1);
      a1 = (const float*) ((uintptr_t) a1 + k);
      v128_t va2 = wasm_v128_load(a2);
      a2 = (const float*) ((uintptr_t) a2 + k);
      v128_t va3 = wasm_v128_load(a3);
      a3 = (const float*) ((uintptr_t) a3 + k);
      v128_t va4 = wasm_v128_load(a4);
      a4 = (const float*) ((uintptr_t) a4 + k);

      const v128_t vzero = wasm_f32x4_const_splat(0.0f);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567);

      w += 32;
    }

    // Clamp to [min, max] with relaxed-SIMD min/max (NaN behavior is
    // implementation-defined for the relaxed forms, hence this variant's name).
    vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
    vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
    vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
    vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
    vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
    vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
    vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
    vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
    vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
    vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);

    vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
    vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
    vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
    vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
    vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
    vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
    vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
    vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
    vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
    vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store for all rows; rewind A pointers by kc bytes so the
      // next column block re-reads the same rows of A.
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail: emit the remaining 1..7 columns in 4/2/1 chunks, shifting the
      // surviving lanes down after each partial store.
      if (nc & 4) {
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
17,704
48.317549
127
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8s4-minmax-wasmsimd-arm.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-s4.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


// F32 GEMM microkernel, MR=5 x NR=8, "s4" (shift-by-4) variant, baseline WAsm
// SIMD, "arm" flavor: clamps with the fully-specified wasm_f32x4_min/max
// intrinsics. Structure is identical to the relaxed-SIMD sibling in this
// directory; only the clamping intrinsics differ.
//
// Arguments:
//   mr        - number of A/C rows actually processed (1..5)
//   nc        - number of output columns remaining
//   kc        - reduction (K) size in bytes; multiple of sizeof(float)
//   a         - input matrix A, row stride a_stride bytes
//   w         - packed weights; each NC block of 8 starts with 8 seed values
//               loaded into the accumulators (presumably the bias — layout is
//               produced by the matching packing routine; confirm there)
//   c         - output matrix C, row stride cm_stride, column-block stride cn_stride
//   params    - min/max clamping bounds
void xnn_f32_gemm_minmax_ukernel_5x8s4__wasmsimd_arm(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Per-row A/C pointers; rows beyond mr alias the previous row so they can be
  // computed unconditionally without writing out of bounds.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Seed all five rows' accumulators from the first 8 packed-weight values.
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    w += 8;

    // Main loop: 4 K-elements per iteration; rotate A by one lane between the
    // 4 sub-steps (shuffle 1,2,3,0) so each A lane meets each B column group.
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      v128_t va4 = wasm_v128_load(a4);
      a4 += 4;

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // K remainder (1..3 floats): run the same 4 sub-steps, but mask each A
    // vector with andnot(va, vb == 0) so lanes whose packed weight is exactly
    // zero contribute nothing (presumably the packer zero-pads B for the K
    // remainder — confirm against the packing routine).
    if XNN_UNLIKELY(k != 0) {
      v128_t va0 = wasm_v128_load(a0);
      a0 = (const float*) ((uintptr_t) a0 + k);
      v128_t va1 = wasm_v128_load(a1);
      a1 = (const float*) ((uintptr_t) a1 + k);
      v128_t va2 = wasm_v128_load(a2);
      a2 = (const float*) ((uintptr_t) a2 + k);
      v128_t va3 = wasm_v128_load(a3);
      a3 = (const float*) ((uintptr_t) a3 + k);
      v128_t va4 = wasm_v128_load(a4);
      a4 = (const float*) ((uintptr_t) a4 + k);

      const v128_t vzero = wasm_f32x4_const_splat(0.0f);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567);

      w += 32;
    }

    // Clamp to [min, max] with the standard (fully specified) WAsm SIMD
    // f32x4 min/max intrinsics.
    vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
    vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
    vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
    vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
    vacc4x0123 = wasm_f32x4_max(vmin, vacc4x0123);
    vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
    vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
    vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
    vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
    vacc4x4567 = wasm_f32x4_max(vmin, vacc4x4567);

    vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
    vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
    vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
    vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
    vacc4x0123 = wasm_f32x4_min(vmax, vacc4x0123);
    vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
    vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
    vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
    vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
    vacc4x4567 = wasm_f32x4_min(vmax, vacc4x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store; rewind A pointers by kc bytes for the next block.
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Column tail: store remaining 1..7 columns in 4/2/1 chunks, shifting
      // surviving lanes down after each partial store.
      if (nc & 4) {
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
17,341
47.306407
127
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8s4-minmax-wasmsimd-x86.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-s4.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


// F32 GEMM microkernel producing a 5x8 tile of C with min/max output clamping,
// using WASM SIMD in the "s4" layout: whole 4-float A vectors are loaded and
// rotated one lane per sub-iteration (wasm_v32x4_shuffle ... 1, 2, 3, 0)
// instead of splatting individual A elements; each of the 4 sub-iterations
// multiplies against a matching pre-shuffled 8-wide weight panel.
// The "_x86" variant clamps with wasm_f32x4_pmin/pmax, which lower well to
// SSE min/max instructions on x86 targets.
//
//   mr        - number of rows of A/C actually processed (1..5)
//   nc        - number of output columns remaining
//   kc        - length of the reduction dimension, in bytes
//   a, a_stride          - input matrix A and its row stride (bytes)
//   w         - packed weights: 8 bias floats, then groups of 8x4 weights
//   c, cm_stride, cn_stride - output C, row stride and column-tile stride (bytes)
//   params    - output clamping bounds (min/max), wasmsimd layout
void xnn_f32_gemm_minmax_ukernel_5x8s4__wasmsimd_x86(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // One A/C pointer pair per row.  When mr < 5, surplus row pointers alias the
  // previous row: their arithmetic is performed redundantly but the aliased
  // stores land on the same addresses, so no out-of-bounds access occurs.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

  // Output clamping bounds, broadcast across all four lanes.
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Initialize all 5 rows of accumulators from the 8 packed bias floats.
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    w += 8;

    // Main loop: consume 4 floats of K per row per iteration.
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      v128_t va4 = wasm_v128_load(a4);
      a4 += 4;

      // Sub-iteration 0: multiply-accumulate the current lane alignment.
      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567);

      // Rotate each A vector left by one lane for the next sub-iteration.
      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      // Sub-iteration 1.
      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      // Sub-iteration 2.
      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      // Sub-iteration 3 (no trailing rotate needed: fresh A vectors are
      // loaded at the top of the next loop iteration).
      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    if XNN_UNLIKELY(k != 0) {
      // Remainder: 1..3 floats of K are left.  Full 4-float vectors are still
      // loaded, and the a pointers advance by exactly k bytes.  The lanes
      // beyond the valid data are neutralized via the weights below:
      // wasm_v128_andnot(vaN, wasm_f32x4_eq(vb, vzero)) zeroes every A lane
      // whose corresponding weight equals 0.0f, so such lanes contribute
      // exactly +0.0 instead of a possibly-garbage product (the weight
      // packing presumably zero-fills those positions).
      v128_t va0 = wasm_v128_load(a0);
      a0 = (const float*) ((uintptr_t) a0 + k);
      v128_t va1 = wasm_v128_load(a1);
      a1 = (const float*) ((uintptr_t) a1 + k);
      v128_t va2 = wasm_v128_load(a2);
      a2 = (const float*) ((uintptr_t) a2 + k);
      v128_t va3 = wasm_v128_load(a3);
      a3 = (const float*) ((uintptr_t) a3 + k);
      v128_t va4 = wasm_v128_load(a4);
      a4 = (const float*) ((uintptr_t) a4 + k);

      const v128_t vzero = wasm_f32x4_const_splat(0.0f);

      // Sub-iteration 0 (masked).
      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      // Sub-iteration 1 (masked).
      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      // Sub-iteration 2 (masked).
      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      // Sub-iteration 3 (masked).
      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567);

      w += 32;
    }

    // Clamp all accumulators to [min, max] with pseudo-min/max ops.
    vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
    vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
    vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
    vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
    vacc4x0123 = wasm_f32x4_pmax(vmin, vacc4x0123);
    vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
    vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
    vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
    vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
    vacc4x4567 = wasm_f32x4_pmax(vmin, vacc4x4567);

    vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
    vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
    vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
    vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
    vacc4x0123 = wasm_f32x4_pmin(vmax, vacc4x0123);
    vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
    vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
    vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
    vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
    vacc4x4567 = wasm_f32x4_pmin(vmax, vacc4x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-wide store for all rows (highest row first), then advance the
      // c pointers to the next column tile and rewind a by kc to re-read the
      // same A rows for that tile.
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail: store 4, 2, then 1 remaining columns as selected by the bits of
      // nc, shifting surviving lanes down after each partial store.
      if (nc & 4) {
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
17,361
47.362117
127
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8s4-relu-wasmrelaxedsimd-fma.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-s4.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


// F32 GEMM microkernel producing a 5x8 tile of C with ReLU output activation,
// using WASM Relaxed SIMD fused multiply-add (__builtin_wasm_relaxed_madd_f32x4)
// in the "s4" layout: whole 4-float A vectors are loaded and rotated one lane
// per sub-iteration (wasm_v32x4_shuffle ... 1, 2, 3, 0) instead of splatting
// individual A elements, each sub-iteration multiplying against a matching
// pre-shuffled 8-wide weight panel.
//
//   mr        - number of rows of A/C actually processed (1..5)
//   nc        - number of output columns remaining
//   kc        - length of the reduction dimension, in bytes
//   a, a_stride          - input matrix A and its row stride (bytes)
//   w         - packed weights: 8 bias floats, then groups of 8x4 weights
//   c, cm_stride, cn_stride - output C, row stride and column-tile stride (bytes)
//   params    - unused here (ReLU needs no runtime parameters)
void xnn_f32_gemm_relu_ukernel_5x8s4__wasmrelaxedsimd_fma(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Set up one A/C pointer pair per row.  When mr < 5, surplus row pointers
  // alias the previous row: work is duplicated but never out of bounds.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

  do {
    // Initialize all 5 rows of accumulators from the 8 packed bias floats.
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    w += 8;

    // Main loop: consume 4 floats of K per row per iteration.
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      v128_t va4 = wasm_v128_load(a4);
      a4 += 4;

      // Sub-iteration 0: fused multiply-add with the current lane alignment.
      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c0, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c0, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c0, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c0, vacc4x4567);

      // Rotate each A vector left by one lane for the next sub-iteration.
      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      // Sub-iteration 1.
      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c1, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c1, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c1, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c1, vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      // Sub-iteration 2.
      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c2, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c2, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c2, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c2, vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      // Sub-iteration 3 (no trailing rotate: fresh A vectors are loaded at
      // the top of the next loop iteration).
      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c3, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c3, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c3, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c3, vacc4x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    if XNN_UNLIKELY(k != 0) {
      // Remainder: 1..3 floats of K are left.  Full 4-float vectors are still
      // loaded, and the a pointers advance by exactly k bytes.  The lanes
      // past the valid data are neutralized via the weights:
      // wasm_v128_andnot(vaN, wasm_f32x4_eq(vb, vzero)) zeroes every A lane
      // whose corresponding weight equals 0.0f, so such lanes contribute
      // exactly +0.0 instead of a possibly-garbage product (the weight
      // packing presumably zero-fills those positions).
      v128_t va0 = wasm_v128_load(a0);
      a0 = (const float*) ((uintptr_t) a0 + k);
      v128_t va1 = wasm_v128_load(a1);
      a1 = (const float*) ((uintptr_t) a1 + k);
      v128_t va2 = wasm_v128_load(a2);
      a2 = (const float*) ((uintptr_t) a2 + k);
      v128_t va3 = wasm_v128_load(a3);
      a3 = (const float*) ((uintptr_t) a3 + k);
      v128_t va4 = wasm_v128_load(a4);
      a4 = (const float*) ((uintptr_t) a4 + k);

      const v128_t vzero = wasm_f32x4_const_splat(0.0f);

      // Sub-iteration 0 (masked).
      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      // Sub-iteration 1 (masked).
      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      // Sub-iteration 2 (masked).
      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      // Sub-iteration 3 (masked).
      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc4x4567);

      w += 32;
    }

    // ReLU via signed-integer max: any negative IEEE-754 float (including
    // -0.0) has its sign bit set and thus compares below 0 as an int32, while
    // non-negative floats keep their ordering under int32 comparison, so
    // i32x4_max with zero clamps all negative lanes to +0.0.
    const v128_t vzero = wasm_i32x4_const_splat(0);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
    vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
    vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero);
    vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
    vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
    vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
    vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
    vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-wide store for all rows (highest row first), then advance the
      // c pointers to the next column tile and rewind a by kc to re-read the
      // same A rows for that tile.
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail: store 4, 2, then 1 remaining columns as selected by the bits of
      // nc, shifting surviving lanes down after each partial store.
      if (nc & 4) {
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
16,999
47.991354
130
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8s4-relu-wasmsimd.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-s4.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


// f32 GEMM microkernel with fused ReLU for a 5x8 output tile, using the
// WebAssembly SIMD128 instruction set and the "s4" shuffled weight layout:
// each group of 4 K-elements is processed in 4 sub-steps, and the A vectors
// are rotated one lane between sub-steps so every A lane meets every packed
// weight column without per-lane broadcasts.
//
// Arguments follow the standard XNNPACK GEMM contract:
//   mr        - number of A/C rows actually used (1..5)
//   nc        - number of output columns remaining
//   kc        - size of the K dimension in bytes (multiple of sizeof(float))
//   a, a_stride - input matrix A and its row stride in bytes
//   w         - packed weights: 8 bias floats, then 4x8 weight groups
//   c, cm_stride, cn_stride - output matrix C, row stride, column-group stride
//   params    - ReLU parameters (unused: clamping at zero needs no constants)
void xnn_f32_gemm_relu_ukernel_5x8s4__wasmsimd(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Set up per-row input/output pointers. When mr < 5, the pointers of the
  // unused rows alias the previous row, so the tail rows compute redundant
  // (but harmless) results instead of reading/writing out of bounds.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

  do {
    // Initialize all 5 rows of accumulators from the packed bias (first 8
    // floats of w); every row starts from the same bias values.
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    w += 8;

    // Main K loop: consume 4 K-elements per iteration in 4 sub-steps
    // (c0..c3), rotating the A vectors one lane between sub-steps.
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      v128_t va4 = wasm_v128_load(a4);
      a4 += 4;

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567);

      // Rotate A lanes left by one for the next sub-step.
      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder (k < 4 floats): load a full vector but advance the pointers
    // by only k bytes; out-of-range A lanes are neutralized by zeroing any A
    // lane whose corresponding packed weight is zero (the packing pads the
    // weights with zeroes), via andnot with a (vb == 0) mask.
    if XNN_UNLIKELY(k != 0) {
      v128_t va0 = wasm_v128_load(a0);
      a0 = (const float*) ((uintptr_t) a0 + k);
      v128_t va1 = wasm_v128_load(a1);
      a1 = (const float*) ((uintptr_t) a1 + k);
      v128_t va2 = wasm_v128_load(a2);
      a2 = (const float*) ((uintptr_t) a2 + k);
      v128_t va3 = wasm_v128_load(a3);
      a3 = (const float*) ((uintptr_t) a3 + k);
      v128_t va4 = wasm_v128_load(a4);
      a4 = (const float*) ((uintptr_t) a4 + k);

      const v128_t vzero = wasm_f32x4_const_splat(0.0f);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567);

      w += 32;
    }

    // Fused ReLU: integer max against 0 works on float bit patterns because
    // non-negative floats compare like their int encodings and any negative
    // float has an int encoding below zero.
    const v128_t vzero = wasm_i32x4_const_splat(0);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
    vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
    vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero);
    vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
    vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
    vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
    vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
    vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store; rewind the A pointers for the next column group.
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial store: write 4, 2, then 1 column(s) according to the bits of
      // nc, shifting surviving lanes down after each step.
      if (nc & 4) {
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
16,748
47.268012
127
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8s4-wasmrelaxedsimd-fma.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-s4.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


// f32 GEMM microkernel (no activation) for a 5x8 output tile, using the
// WebAssembly Relaxed SIMD fused multiply-add and the "s4" shuffled weight
// layout: each group of 4 K-elements is processed in 4 sub-steps, with the A
// vectors rotated one lane between sub-steps so every A lane meets every
// packed weight column without per-lane broadcasts.
//
// Arguments follow the standard XNNPACK GEMM contract:
//   mr        - number of A/C rows actually used (1..5)
//   nc        - number of output columns remaining
//   kc        - size of the K dimension in bytes (multiple of sizeof(float))
//   a, a_stride - input matrix A and its row stride in bytes
//   w         - packed weights: 8 bias floats, then 4x8 weight groups
//   c, cm_stride, cn_stride - output matrix C, row stride, column-group stride
//   params    - default parameters (unused by this kernel)
void xnn_f32_gemm_ukernel_5x8s4__wasmrelaxedsimd_fma(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Set up per-row input/output pointers. When mr < 5, the pointers of the
  // unused rows alias the previous row, so the tail rows compute redundant
  // (but harmless) results instead of reading/writing out of bounds.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

  do {
    // Initialize all 5 rows of accumulators from the packed bias (first 8
    // floats of w); every row starts from the same bias values.
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    w += 8;

    // Main K loop: consume 4 K-elements per iteration in 4 sub-steps
    // (c0..c3), rotating the A vectors one lane between sub-steps. Each
    // multiply-accumulate is a single relaxed-SIMD fused madd.
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      v128_t va4 = wasm_v128_load(a4);
      a4 += 4;

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c0, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c0, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c0, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c0, vacc4x4567);

      // Rotate A lanes left by one for the next sub-step.
      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c1, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c1, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c1, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c1, vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c2, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c2, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c2, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c2, vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c3, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c3, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c3, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c3, vacc4x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder (k < 4 floats): load a full vector but advance the pointers
    // by only k bytes; out-of-range A lanes are neutralized by zeroing any A
    // lane whose corresponding packed weight is zero (the packing pads the
    // weights with zeroes), via andnot with a (vb == 0) mask.
    if XNN_UNLIKELY(k != 0) {
      v128_t va0 = wasm_v128_load(a0);
      a0 = (const float*) ((uintptr_t) a0 + k);
      v128_t va1 = wasm_v128_load(a1);
      a1 = (const float*) ((uintptr_t) a1 + k);
      v128_t va2 = wasm_v128_load(a2);
      a2 = (const float*) ((uintptr_t) a2 + k);
      v128_t va3 = wasm_v128_load(a3);
      a3 = (const float*) ((uintptr_t) a3 + k);
      v128_t va4 = wasm_v128_load(a4);
      a4 = (const float*) ((uintptr_t) a4 + k);

      const v128_t vzero = wasm_f32x4_const_splat(0.0f);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc4x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc4x4567);

      w += 32;
    }

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store; rewind the A pointers for the next column group.
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial store: write 4, 2, then 1 column(s) according to the bits of
      // nc, shifting surviving lanes down after each step.
      if (nc & 4) {
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
16,425
47.886905
130
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8s4-wasmsimd.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-s4.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


// f32 GEMM micro-kernel producing a 5x8 output tile with WASM SIMD128.
// "s4" variant: within each unrolled 4-element K step, the A vectors are
// rotated one lane at a time (shuffle 1,2,3,0) so each of the four packed
// B panels (c0..c3) is multiplied against a different lane ordering of the
// same A load.
//
//   mr        - number of valid rows of A/C (1..5); pointers for rows beyond
//               mr are aliased to the previous row below, so those stores
//               simply repeat an earlier row's data.
//   nc        - number of output columns remaining.
//   kc        - reduction length in BYTES; must be a multiple of sizeof(float).
//   a, a_stride - input rows and the byte stride between consecutive rows.
//   w         - packed weights: accumulators are seeded from the first
//               8 floats of w, then each 4-step consumes 32 floats
//               (4 panels of 8 columns).
//   c, cm_stride, cn_stride - output tile, row stride, and column-group stride.
//   params    - not referenced by this kernel (no activation applied).
void xnn_f32_gemm_ukernel_5x8s4__wasmsimd(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // One input/output pointer pair per row; rows past mr alias the previous
  // row so the same fully-unrolled code path handles partial tiles.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

  do {
    // Seed all 5 rows' accumulators from the first 8 floats of w.
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    w += 8;

    // Main loop: consume 4 K-elements per iteration. Each of the four
    // sub-steps multiplies the (rotated) A vectors against one B panel.
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      v128_t va4 = wasm_v128_load(a4);
      a4 += 4;

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567);

      // Rotate A lanes left by one before the next panel.
      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder (k < 4 floats): A pointers advance only k bytes, so the tail
    // of each va vector holds lanes past the row's end. Those lanes are
    // suppressed by zeroing any A lane whose matching B value is exactly 0
    // (andnot with the B==0 mask) — presumably the packed weights are
    // zero-padded past kc so out-of-range lanes contribute nothing;
    // NOTE(review): confirm against the weight-packing code.
    if XNN_UNLIKELY(k != 0) {
      v128_t va0 = wasm_v128_load(a0);
      a0 = (const float*) ((uintptr_t) a0 + k);
      v128_t va1 = wasm_v128_load(a1);
      a1 = (const float*) ((uintptr_t) a1 + k);
      v128_t va2 = wasm_v128_load(a2);
      a2 = (const float*) ((uintptr_t) a2 + k);
      v128_t va3 = wasm_v128_load(a3);
      a3 = (const float*) ((uintptr_t) a3 + k);
      v128_t va4 = wasm_v128_load(a4);
      a4 = (const float*) ((uintptr_t) a4 + k);

      const v128_t vzero = wasm_f32x4_const_splat(0.0f);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567);

      w += 32;
    }

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store, highest row first; rewind A pointers by kc so
      // the next column group re-reads the same rows.
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail: write nc (< 8) columns as 4-, 2-, and 1-element pieces,
      // sliding the surviving lanes down after each partial store.
      if (nc & 4) {
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
16,174
47.139881
127
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x16-minmax-avx512f-broadcast.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx512-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>


// f32 GEMM micro-kernel producing a 6x16 output tile with AVX-512F,
// with min/max output clamping.
// "broadcast" strategy: one K element per iteration — each A scalar is
// broadcast across a 512-bit vector and FMA'd against one 16-wide B row.
//
//   mr       - number of valid rows of A/C (1..6); pointers for rows beyond
//              mr are aliased to the previous row below.
//   nc       - number of output columns remaining.
//   kc       - reduction length in BYTES (multiple of sizeof(float)).
//   w        - packed weights: accumulators are seeded from the first
//              16 floats of w, then 16 floats are consumed per K element.
//   params   - supplies scalar.min / scalar.max for the output clamp.
void xnn_f32_gemm_minmax_ukernel_6x16__avx512f_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // One input/output pointer pair per row; rows past mr alias the previous
  // row so the same fully-unrolled code path handles partial tiles.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  do {
    // Seed all 6 rows' accumulators from the first 16 floats of w.
    __m512 vacc0x0123456789ABCDEF = _mm512_load_ps(w);
    __m512 vacc1x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc2x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc3x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc4x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc5x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    w += 16;

    // One K element per iteration: broadcast each row's scalar, FMA against
    // the shared 16-wide B vector.
    size_t k = kc;
    do {
      const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
      w += 16;

      const __m512 va0 = _mm512_set1_ps(*a0);
      vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
      const __m512 va1 = _mm512_set1_ps(*a1);
      vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
      const __m512 va2 = _mm512_set1_ps(*a2);
      vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
      const __m512 va3 = _mm512_set1_ps(*a3);
      vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
      const __m512 va4 = _mm512_set1_ps(*a4);
      vacc4x0123456789ABCDEF = _mm512_fmadd_ps(va4, vb0123456789ABCDEF, vacc4x0123456789ABCDEF);
      const __m512 va5 = _mm512_set1_ps(*a5);
      vacc5x0123456789ABCDEF = _mm512_fmadd_ps(va5, vb0123456789ABCDEF, vacc5x0123456789ABCDEF);

      a0 += 1;
      a1 += 1;
      a2 += 1;
      a3 += 1;
      a4 += 1;
      a5 += 1;

      k -= sizeof(float);
    } while (k != 0);

    // Clamp to [min, max].
    const __m512 vmin = _mm512_set1_ps(params->scalar.min);
    vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF);
    vacc1x0123456789ABCDEF = _mm512_max_ps(vmin, vacc1x0123456789ABCDEF);
    vacc2x0123456789ABCDEF = _mm512_max_ps(vmin, vacc2x0123456789ABCDEF);
    vacc3x0123456789ABCDEF = _mm512_max_ps(vmin, vacc3x0123456789ABCDEF);
    vacc4x0123456789ABCDEF = _mm512_max_ps(vmin, vacc4x0123456789ABCDEF);
    vacc5x0123456789ABCDEF = _mm512_max_ps(vmin, vacc5x0123456789ABCDEF);

    const __m512 vmax = _mm512_set1_ps(params->scalar.max);
    vacc0x0123456789ABCDEF = _mm512_min_ps(vmax, vacc0x0123456789ABCDEF);
    vacc1x0123456789ABCDEF = _mm512_min_ps(vmax, vacc1x0123456789ABCDEF);
    vacc2x0123456789ABCDEF = _mm512_min_ps(vmax, vacc2x0123456789ABCDEF);
    vacc3x0123456789ABCDEF = _mm512_min_ps(vmax, vacc3x0123456789ABCDEF);
    vacc4x0123456789ABCDEF = _mm512_min_ps(vmax, vacc4x0123456789ABCDEF);
    vacc5x0123456789ABCDEF = _mm512_min_ps(vmax, vacc5x0123456789ABCDEF);

    if XNN_LIKELY(nc >= 16) {
      // Full 16-column store, highest row first; rewind A pointers by kc so
      // the next column group re-reads the same rows.
      _mm512_storeu_ps(c5, vacc5x0123456789ABCDEF);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      _mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 16;
    } else {
      if (nc & 15) {
        // Prepare mask for valid 32-bit elements (depends on nc).
        // nc < 16 here, so (1 << nc) - 1 is an in-range nc-bit mask.
        const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));

        _mm512_mask_storeu_ps(c5, vmask, vacc5x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
      }

      nc = 0;
    }
  } while (nc != 0);
}
5,942
35.237805
106
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x2-minmax-aarch64-neonfma-lane-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/MRx2-neon-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/gemm.h>


// f32 GEMM micro-kernel producing a 6x2 output tile with NEON FMA,
// with min/max output clamping. "ld64" variant: each main-loop iteration
// loads 64 bits (2 floats) of A per row and a 128-bit B vector covering
// two K steps.
//
//   mr       - number of valid rows of A/C (1..6); pointers for rows beyond
//              mr are aliased to the previous row below.
//   nc       - number of output columns remaining (1 or a multiple of 2).
//   kc       - reduction length in BYTES (multiple of sizeof(float)).
//   w        - packed weights: accumulators are seeded from the first
//              2 floats of w, then 4 floats per 2-element K step.
//   params   - supplies scalar.min / scalar.max for the output clamp.
void xnn_f32_gemm_minmax_ukernel_6x2__aarch64_neonfma_lane_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // One input/output pointer pair per row; rows past mr alias the previous
  // row so the same fully-unrolled code path handles partial tiles.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  do {
    // Seed all 6 rows' 2-wide accumulators from the first 2 floats of w.
    float32x2_t vacc0x01 = vld1_f32(w); w += 2;
    float32x2_t vacc1x01 = vacc0x01;
    float32x2_t vacc2x01 = vacc0x01;
    float32x2_t vacc3x01 = vacc0x01;
    float32x2_t vacc4x01 = vacc0x01;
    float32x2_t vacc5x01 = vacc0x01;

    // Main loop: 2 K elements per iteration. vb01c01 packs the B columns for
    // both K steps; low half is step 0, high half is step 1.
    size_t k = kc;
    for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
      const float32x2_t va0 = vld1_f32(a0); a0 += 2;
      const float32x2_t va1 = vld1_f32(a1); a1 += 2;
      const float32x2_t va2 = vld1_f32(a2); a2 += 2;
      const float32x2_t va3 = vld1_f32(a3); a3 += 2;
      const float32x2_t va4 = vld1_f32(a4); a4 += 2;
      const float32x2_t va5 = vld1_f32(a5); a5 += 2;

      const float32x4_t vb01c01 = vld1q_f32(w); w += 4;

      const float32x2_t vb01c0 = vget_low_f32(vb01c01);
      const float32x2_t vb01c1 = vget_high_f32(vb01c01);

      // AArch64 has lane-indexed FMA; other targets emulate it with an
      // explicit lane broadcast followed by a plain FMA.
      #if XNN_ARCH_ARM64
        vacc0x01 = vfma_lane_f32(vacc0x01, vb01c0, va0, 0);
        vacc1x01 = vfma_lane_f32(vacc1x01, vb01c0, va1, 0);
        vacc2x01 = vfma_lane_f32(vacc2x01, vb01c0, va2, 0);
        vacc3x01 = vfma_lane_f32(vacc3x01, vb01c0, va3, 0);
        vacc4x01 = vfma_lane_f32(vacc4x01, vb01c0, va4, 0);
        vacc5x01 = vfma_lane_f32(vacc5x01, vb01c0, va5, 0);
      #else
        const float32x2_t va0c0 = vdup_lane_f32(va0, 0);
        const float32x2_t va1c0 = vdup_lane_f32(va1, 0);
        const float32x2_t va2c0 = vdup_lane_f32(va2, 0);
        const float32x2_t va3c0 = vdup_lane_f32(va3, 0);
        const float32x2_t va4c0 = vdup_lane_f32(va4, 0);
        const float32x2_t va5c0 = vdup_lane_f32(va5, 0);

        vacc0x01 = vfma_f32(vacc0x01, va0c0, vb01c0);
        vacc1x01 = vfma_f32(vacc1x01, va1c0, vb01c0);
        vacc2x01 = vfma_f32(vacc2x01, va2c0, vb01c0);
        vacc3x01 = vfma_f32(vacc3x01, va3c0, vb01c0);
        vacc4x01 = vfma_f32(vacc4x01, va4c0, vb01c0);
        vacc5x01 = vfma_f32(vacc5x01, va5c0, vb01c0);
      #endif
      #if XNN_ARCH_ARM64
        vacc0x01 = vfma_lane_f32(vacc0x01, vb01c1, va0, 1);
        vacc1x01 = vfma_lane_f32(vacc1x01, vb01c1, va1, 1);
        vacc2x01 = vfma_lane_f32(vacc2x01, vb01c1, va2, 1);
        vacc3x01 = vfma_lane_f32(vacc3x01, vb01c1, va3, 1);
        vacc4x01 = vfma_lane_f32(vacc4x01, vb01c1, va4, 1);
        vacc5x01 = vfma_lane_f32(vacc5x01, vb01c1, va5, 1);
      #else
        const float32x2_t va0c1 = vdup_lane_f32(va0, 1);
        const float32x2_t va1c1 = vdup_lane_f32(va1, 1);
        const float32x2_t va2c1 = vdup_lane_f32(va2, 1);
        const float32x2_t va3c1 = vdup_lane_f32(va3, 1);
        const float32x2_t va4c1 = vdup_lane_f32(va4, 1);
        const float32x2_t va5c1 = vdup_lane_f32(va5, 1);

        vacc0x01 = vfma_f32(vacc0x01, va0c1, vb01c1);
        vacc1x01 = vfma_f32(vacc1x01, va1c1, vb01c1);
        vacc2x01 = vfma_f32(vacc2x01, va2c1, vb01c1);
        vacc3x01 = vfma_f32(vacc3x01, va3c1, vb01c1);
        vacc4x01 = vfma_f32(vacc4x01, va4c1, vb01c1);
        vacc5x01 = vfma_f32(vacc5x01, va5c1, vb01c1);
      #endif
    }
    // Remainder: a single K element per row, broadcast across both lanes.
    if XNN_UNLIKELY(k != 0) {
      const float32x2_t va0 = vld1_dup_f32(a0); a0 += 1;
      const float32x2_t va1 = vld1_dup_f32(a1); a1 += 1;
      const float32x2_t va2 = vld1_dup_f32(a2); a2 += 1;
      const float32x2_t va3 = vld1_dup_f32(a3); a3 += 1;
      const float32x2_t va4 = vld1_dup_f32(a4); a4 += 1;
      const float32x2_t va5 = vld1_dup_f32(a5); a5 += 1;

      const float32x2_t vb01 = vld1_f32(w); w += 2;

      vacc0x01 = vfma_f32(vacc0x01, va0, vb01);
      vacc1x01 = vfma_f32(vacc1x01, va1, vb01);
      vacc2x01 = vfma_f32(vacc2x01, va2, vb01);
      vacc3x01 = vfma_f32(vacc3x01, va3, vb01);
      vacc4x01 = vfma_f32(vacc4x01, va4, vb01);
      vacc5x01 = vfma_f32(vacc5x01, va5, vb01);
    }

    // Clamp to [min, max].
    const float32x2_t vmax = vld1_dup_f32(&params->scalar.max);
    vacc0x01 = vmin_f32(vacc0x01, vmax);
    vacc1x01 = vmin_f32(vacc1x01, vmax);
    vacc2x01 = vmin_f32(vacc2x01, vmax);
    vacc3x01 = vmin_f32(vacc3x01, vmax);
    vacc4x01 = vmin_f32(vacc4x01, vmax);
    vacc5x01 = vmin_f32(vacc5x01, vmax);

    const float32x2_t vmin = vld1_dup_f32(&params->scalar.min);
    vacc0x01 = vmax_f32(vacc0x01, vmin);
    vacc1x01 = vmax_f32(vacc1x01, vmin);
    vacc2x01 = vmax_f32(vacc2x01, vmin);
    vacc3x01 = vmax_f32(vacc3x01, vmin);
    vacc4x01 = vmax_f32(vacc4x01, vmin);
    vacc5x01 = vmax_f32(vacc5x01, vmin);

    if XNN_LIKELY(nc >= 2) {
      // Full 2-column store; rewind A pointers by kc so the next column
      // group re-reads the same rows.
      vst1_f32(c0, vacc0x01);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);
      vst1_f32(c1, vacc1x01);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      vst1_f32(c2, vacc2x01);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      vst1_f32(c3, vacc3x01);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      vst1_f32(c4, vacc4x01);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      vst1_f32(c5, vacc5x01);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);

      a0 = (const float*) ((uintptr_t) a0 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a5 = (const float*) ((uintptr_t) a5 - kc);

      nc -= 2;
    } else {
      // Tail: exactly one column left.
      assert(nc == 1);
      vst1_lane_f32(c0, vacc0x01, 0);
      vst1_lane_f32(c1, vacc1x01, 0);
      vst1_lane_f32(c2, vacc2x01, 0);
      vst1_lane_f32(c3, vacc3x01, 0);
      vst1_lane_f32(c4, vacc4x01, 0);
      vst1_lane_f32(c5, vacc5x01, 0);

      nc = 0;
    }
  } while (nc != 0);
}
7,271
34.82266
75
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x2-minmax-neon-lane-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/MRx2-neon-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/gemm.h>


// f32 GEMM micro-kernel producing a 6x2 output tile with NEON (vmla, i.e.
// separate multiply-accumulate rather than fused FMA), with min/max output
// clamping. "ld64" variant: each main-loop iteration loads 64 bits
// (2 floats) of A per row and a 128-bit B vector covering two K steps.
//
//   mr       - number of valid rows of A/C (1..6); pointers for rows beyond
//              mr are aliased to the previous row below.
//   nc       - number of output columns remaining (1 or a multiple of 2).
//   kc       - reduction length in BYTES (multiple of sizeof(float)).
//   w        - packed weights: accumulators are seeded from the first
//              2 floats of w, then 4 floats per 2-element K step.
//   params   - supplies scalar.min / scalar.max for the output clamp.
void xnn_f32_gemm_minmax_ukernel_6x2__neon_lane_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // One input/output pointer pair per row; rows past mr alias the previous
  // row so the same fully-unrolled code path handles partial tiles.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  do {
    // Seed all 6 rows' 2-wide accumulators from the first 2 floats of w.
    float32x2_t vacc0x01 = vld1_f32(w); w += 2;
    float32x2_t vacc1x01 = vacc0x01;
    float32x2_t vacc2x01 = vacc0x01;
    float32x2_t vacc3x01 = vacc0x01;
    float32x2_t vacc4x01 = vacc0x01;
    float32x2_t vacc5x01 = vacc0x01;

    // Main loop: 2 K elements per iteration. vb01c01 packs the B columns for
    // both K steps; low half is step 0, high half is step 1.
    size_t k = kc;
    for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
      const float32x2_t va0 = vld1_f32(a0); a0 += 2;
      const float32x2_t va1 = vld1_f32(a1); a1 += 2;
      const float32x2_t va2 = vld1_f32(a2); a2 += 2;
      const float32x2_t va3 = vld1_f32(a3); a3 += 2;
      const float32x2_t va4 = vld1_f32(a4); a4 += 2;
      const float32x2_t va5 = vld1_f32(a5); a5 += 2;

      const float32x4_t vb01c01 = vld1q_f32(w); w += 4;

      const float32x2_t vb01c0 = vget_low_f32(vb01c01);
      const float32x2_t vb01c1 = vget_high_f32(vb01c01);

      vacc0x01 = vmla_lane_f32(vacc0x01, vb01c0, va0, 0);
      vacc1x01 = vmla_lane_f32(vacc1x01, vb01c0, va1, 0);
      vacc2x01 = vmla_lane_f32(vacc2x01, vb01c0, va2, 0);
      vacc3x01 = vmla_lane_f32(vacc3x01, vb01c0, va3, 0);
      vacc4x01 = vmla_lane_f32(vacc4x01, vb01c0, va4, 0);
      vacc5x01 = vmla_lane_f32(vacc5x01, vb01c0, va5, 0);
      vacc0x01 = vmla_lane_f32(vacc0x01, vb01c1, va0, 1);
      vacc1x01 = vmla_lane_f32(vacc1x01, vb01c1, va1, 1);
      vacc2x01 = vmla_lane_f32(vacc2x01, vb01c1, va2, 1);
      vacc3x01 = vmla_lane_f32(vacc3x01, vb01c1, va3, 1);
      vacc4x01 = vmla_lane_f32(vacc4x01, vb01c1, va4, 1);
      vacc5x01 = vmla_lane_f32(vacc5x01, vb01c1, va5, 1);
    }
    // Remainder: a single K element per row, broadcast across both lanes.
    if XNN_UNLIKELY(k != 0) {
      const float32x2_t va0 = vld1_dup_f32(a0); a0 += 1;
      const float32x2_t va1 = vld1_dup_f32(a1); a1 += 1;
      const float32x2_t va2 = vld1_dup_f32(a2); a2 += 1;
      const float32x2_t va3 = vld1_dup_f32(a3); a3 += 1;
      const float32x2_t va4 = vld1_dup_f32(a4); a4 += 1;
      const float32x2_t va5 = vld1_dup_f32(a5); a5 += 1;

      const float32x2_t vb01 = vld1_f32(w); w += 2;

      vacc0x01 = vmla_f32(vacc0x01, va0, vb01);
      vacc1x01 = vmla_f32(vacc1x01, va1, vb01);
      vacc2x01 = vmla_f32(vacc2x01, va2, vb01);
      vacc3x01 = vmla_f32(vacc3x01, va3, vb01);
      vacc4x01 = vmla_f32(vacc4x01, va4, vb01);
      vacc5x01 = vmla_f32(vacc5x01, va5, vb01);
    }

    // Clamp to [min, max].
    const float32x2_t vmax = vld1_dup_f32(&params->scalar.max);
    vacc0x01 = vmin_f32(vacc0x01, vmax);
    vacc1x01 = vmin_f32(vacc1x01, vmax);
    vacc2x01 = vmin_f32(vacc2x01, vmax);
    vacc3x01 = vmin_f32(vacc3x01, vmax);
    vacc4x01 = vmin_f32(vacc4x01, vmax);
    vacc5x01 = vmin_f32(vacc5x01, vmax);

    const float32x2_t vmin = vld1_dup_f32(&params->scalar.min);
    vacc0x01 = vmax_f32(vacc0x01, vmin);
    vacc1x01 = vmax_f32(vacc1x01, vmin);
    vacc2x01 = vmax_f32(vacc2x01, vmin);
    vacc3x01 = vmax_f32(vacc3x01, vmin);
    vacc4x01 = vmax_f32(vacc4x01, vmin);
    vacc5x01 = vmax_f32(vacc5x01, vmin);

    if XNN_LIKELY(nc >= 2) {
      // Full 2-column store; rewind A pointers by kc so the next column
      // group re-reads the same rows.
      vst1_f32(c0, vacc0x01);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);
      vst1_f32(c1, vacc1x01);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      vst1_f32(c2, vacc2x01);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      vst1_f32(c3, vacc3x01);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      vst1_f32(c4, vacc4x01);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      vst1_f32(c5, vacc5x01);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);

      a0 = (const float*) ((uintptr_t) a0 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a5 = (const float*) ((uintptr_t) a5 - kc);

      nc -= 2;
    } else {
      // Tail: exactly one column left.
      assert(nc == 1);
      vst1_lane_f32(c0, vacc0x01, 0);
      vst1_lane_f32(c1, vacc1x01, 0);
      vst1_lane_f32(c2, vacc2x01, 0);
      vst1_lane_f32(c3, vacc3x01, 0);
      vst1_lane_f32(c4, vacc4x01, 0);
      vst1_lane_f32(c5, vacc5x01, 0);

      nc = 0;
    }
  } while (nc != 0);
}
5,804
32.554913
75
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-minmax-aarch64-neonfma-lane-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/neon-ld128.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>

// f32 GEMM micro-kernel computing a 6x8 tile of C with min/max clamping,
// using AArch64 NEON FMA lane-broadcast multiplies and 128-bit (4-float)
// loads from each row of A per main-loop iteration.
//
//   mr        - number of valid rows in this tile (1..6).
//   nc        - number of output columns remaining (processed 8 at a time).
//   kc        - reduction (K) dimension in BYTES; multiple of sizeof(float).
//   a         - input matrix A; a_stride is its row stride in bytes.
//   w         - packed weights: 8 bias floats, then 8-wide column panels of B.
//   c         - output matrix; cm_stride is the row stride, cn_stride the
//               stride between consecutive 8-column groups, both in bytes.
//   params    - scalar min/max clamping bounds.
void xnn_f32_gemm_minmax_ukernel_6x8__aarch64_neonfma_lane_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Set up one A-row pointer and one C-row pointer per tile row. Rows beyond
  // mr alias the previous row so all loads/stores stay in bounds (the
  // duplicated stores hit the same memory and are harmless).
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  do {
    // Initialize all 6 rows of accumulators from the packed bias (first
    // 8 floats of w); every row starts from the same bias values.
    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
    float32x4_t vacc1x0123 = vacc0x0123;
    float32x4_t vacc1x4567 = vacc0x4567;
    float32x4_t vacc2x0123 = vacc0x0123;
    float32x4_t vacc2x4567 = vacc0x4567;
    float32x4_t vacc3x0123 = vacc0x0123;
    float32x4_t vacc3x4567 = vacc0x4567;
    float32x4_t vacc4x0123 = vacc0x0123;
    float32x4_t vacc4x4567 = vacc0x4567;
    float32x4_t vacc5x0123 = vacc0x0123;
    float32x4_t vacc5x4567 = vacc0x4567;

    // Main loop: consume 4 K-elements per iteration. Each row of A
    // contributes 4 scalars (one 128-bit load), broadcast one lane at a
    // time against the matching 8-wide panel of B.
    size_t k = kc;
    for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
      const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
      const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
      const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
      const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
      const float32x4_t va4 = vld1q_f32(a4); a4 += 4;
      const float32x4_t va5 = vld1q_f32(a5); a5 += 4;

      // K-step 0: lane 0 of the low half of each va.
      const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;

      vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, vget_low_f32(va0), 0);
      vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, vget_low_f32(va1), 0);
      vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, vget_low_f32(va2), 0);
      vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, vget_low_f32(va3), 0);
      vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c0, vget_low_f32(va4), 0);
      vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c0, vget_low_f32(va5), 0);
      vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, vget_low_f32(va0), 0);
      vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, vget_low_f32(va1), 0);
      vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, vget_low_f32(va2), 0);
      vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, vget_low_f32(va3), 0);
      vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c0, vget_low_f32(va4), 0);
      vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c0, vget_low_f32(va5), 0);

      // K-step 1: lane 1 of the low half.
      const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;

      vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, vget_low_f32(va0), 1);
      vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, vget_low_f32(va1), 1);
      vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, vget_low_f32(va2), 1);
      vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, vget_low_f32(va3), 1);
      vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c1, vget_low_f32(va4), 1);
      vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c1, vget_low_f32(va5), 1);
      vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, vget_low_f32(va0), 1);
      vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, vget_low_f32(va1), 1);
      vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, vget_low_f32(va2), 1);
      vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, vget_low_f32(va3), 1);
      vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c1, vget_low_f32(va4), 1);
      vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c1, vget_low_f32(va5), 1);

      // K-step 2: lane 0 of the high half.
      const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;

      vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c2, vget_high_f32(va0), 0);
      vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c2, vget_high_f32(va1), 0);
      vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c2, vget_high_f32(va2), 0);
      vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c2, vget_high_f32(va3), 0);
      vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c2, vget_high_f32(va4), 0);
      vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c2, vget_high_f32(va5), 0);
      vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c2, vget_high_f32(va0), 0);
      vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c2, vget_high_f32(va1), 0);
      vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c2, vget_high_f32(va2), 0);
      vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c2, vget_high_f32(va3), 0);
      vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c2, vget_high_f32(va4), 0);
      vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c2, vget_high_f32(va5), 0);

      // K-step 3: lane 1 of the high half.
      const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;

      vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c3, vget_high_f32(va0), 1);
      vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c3, vget_high_f32(va1), 1);
      vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c3, vget_high_f32(va2), 1);
      vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c3, vget_high_f32(va3), 1);
      vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c3, vget_high_f32(va4), 1);
      vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c3, vget_high_f32(va5), 1);
      vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c3, vget_high_f32(va0), 1);
      vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c3, vget_high_f32(va1), 1);
      vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c3, vget_high_f32(va2), 1);
      vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c3, vget_high_f32(va3), 1);
      vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c3, vget_high_f32(va4), 1);
      vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c3, vget_high_f32(va5), 1);
    }
    // Remainder loop: one K-element at a time, broadcasting each A scalar
    // across a full vector.
    if XNN_UNLIKELY(k != 0) {
      do {
        const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
        const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
        const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
        const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
        const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
        const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;

        const float32x4_t vb0123 = vld1q_f32(w); w += 4;
        const float32x4_t vb4567 = vld1q_f32(w); w += 4;

        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
        vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
        vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
        vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
        vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
        vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123);
        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
        vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
        vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
        vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
        vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
        vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567);

        k -= sizeof(float);
      } while (k != 0);
    }
    // Clamp all accumulators to [min, max].
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
    vacc4x0123 = vminq_f32(vacc4x0123, vmax);
    vacc5x0123 = vminq_f32(vacc5x0123, vmax);
    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    vacc4x4567 = vminq_f32(vacc4x4567, vmax);
    vacc5x4567 = vminq_f32(vacc5x4567, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
    vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
    vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
    vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store (rows written in reverse order), then advance
      // the C pointers to the next column group and rewind the A pointers
      // for the next pass over K.
      vst1q_f32(c5, vacc5x0123);
      vst1q_f32(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      vst1q_f32(c4, vacc4x0123);
      vst1q_f32(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      vst1q_f32(c3, vacc3x0123);
      vst1q_f32(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      vst1q_f32(c2, vacc2x0123);
      vst1q_f32(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      vst1q_f32(c1, vacc1x0123);
      vst1q_f32(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      vst1q_f32(c0, vacc0x0123);
      vst1q_f32(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial-tile store: peel off 4, 2, then 1 columns, shifting the
      // remaining results down into the low lanes after each store.
      if (nc & 4) {
        vst1q_f32(c5, vacc5x0123); c5 += 4;
        vst1q_f32(c4, vacc4x0123); c4 += 4;
        vst1q_f32(c3, vacc3x0123); c3 += 4;
        vst1q_f32(c2, vacc2x0123); c2 += 4;
        vst1q_f32(c1, vacc1x0123); c1 += 4;
        vst1q_f32(c0, vacc0x0123); c0 += 4;

        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;
      }
      float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
      float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      if (nc & 2) {
        vst1_f32(c5, vacc5x01); c5 += 2;
        vst1_f32(c4, vacc4x01); c4 += 2;
        vst1_f32(c3, vacc3x01); c3 += 2;
        vst1_f32(c2, vacc2x01); c2 += 2;
        vst1_f32(c1, vacc1x01); c1 += 2;
        vst1_f32(c0, vacc0x01); c0 += 2;

        vacc5x01 = vget_high_f32(vacc5x0123);
        vacc4x01 = vget_high_f32(vacc4x0123);
        vacc3x01 = vget_high_f32(vacc3x0123);
        vacc2x01 = vget_high_f32(vacc2x0123);
        vacc1x01 = vget_high_f32(vacc1x0123);
        vacc0x01 = vget_high_f32(vacc0x0123);
      }
      if (nc & 1) {
        vst1_lane_f32(c5, vacc5x01, 0);
        vst1_lane_f32(c4, vacc4x01, 0);
        vst1_lane_f32(c3, vacc3x01, 0);
        vst1_lane_f32(c2, vacc2x01, 0);
        vst1_lane_f32(c1, vacc1x01, 0);
        vst1_lane_f32(c0, vacc0x01, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
12,269
40.59322
79
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-minmax-aarch64-neonfma-lane-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/neon-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>

// f32 GEMM micro-kernel computing a 6x8 tile of C with min/max clamping,
// using AArch64 NEON FMA lane-broadcast multiplies and 64-bit (2-float)
// loads from each row of A per main-loop iteration.
//
//   mr        - number of valid rows in this tile (1..6).
//   nc        - number of output columns remaining (processed 8 at a time).
//   kc        - reduction (K) dimension in BYTES; multiple of sizeof(float).
//   a         - input matrix A; a_stride is its row stride in bytes.
//   w         - packed weights: 8 bias floats, then 8-wide column panels of B.
//   c         - output matrix; cm_stride is the row stride, cn_stride the
//               stride between consecutive 8-column groups, both in bytes.
//   params    - scalar min/max clamping bounds.
void xnn_f32_gemm_minmax_ukernel_6x8__aarch64_neonfma_lane_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Per-row A/C pointers; rows beyond mr alias the previous row so all
  // accesses stay in bounds.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  do {
    // Initialize the 6 accumulator rows from the packed bias (first 8
    // floats of w).
    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
    float32x4_t vacc1x0123 = vacc0x0123;
    float32x4_t vacc1x4567 = vacc0x4567;
    float32x4_t vacc2x0123 = vacc0x0123;
    float32x4_t vacc2x4567 = vacc0x4567;
    float32x4_t vacc3x0123 = vacc0x0123;
    float32x4_t vacc3x4567 = vacc0x4567;
    float32x4_t vacc4x0123 = vacc0x0123;
    float32x4_t vacc4x4567 = vacc0x4567;
    float32x4_t vacc5x0123 = vacc0x0123;
    float32x4_t vacc5x4567 = vacc0x4567;

    // Main loop: 2 K-elements per iteration. All four B panels are loaded
    // up front, then each A lane is fused-multiply-accumulated against
    // its panel.
    size_t k = kc;
    for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
      const float32x2_t va0 = vld1_f32(a0); a0 += 2;
      const float32x2_t va1 = vld1_f32(a1); a1 += 2;
      const float32x2_t va2 = vld1_f32(a2); a2 += 2;
      const float32x2_t va3 = vld1_f32(a3); a3 += 2;
      const float32x2_t va4 = vld1_f32(a4); a4 += 2;
      const float32x2_t va5 = vld1_f32(a5); a5 += 2;

      const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
      const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;

      // K-step 0: lane 0 of each A pair.
      vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
      vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
      vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
      vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
      vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c0, va4, 0);
      vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c0, va5, 0);
      vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
      vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
      vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
      vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
      vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c0, va4, 0);
      vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c0, va5, 0);
      // K-step 1: lane 1 of each A pair.
      vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
      vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
      vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
      vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
      vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c1, va4, 1);
      vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c1, va5, 1);
      vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
      vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
      vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
      vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
      vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c1, va4, 1);
      vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c1, va5, 1);
    }
    // Remainder: at most one K-element is left (kc is a multiple of
    // sizeof(float)), broadcast across a full vector.
    if XNN_UNLIKELY(k != 0) {
      const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
      const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
      const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
      const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
      const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
      const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;

      const float32x4_t vb0123 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567 = vld1q_f32(w); w += 4;

      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
      vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
      vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123);
      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
      vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
      vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567);
    }
    // Clamp all accumulators to [min, max].
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
    vacc4x0123 = vminq_f32(vacc4x0123, vmax);
    vacc5x0123 = vminq_f32(vacc5x0123, vmax);
    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    vacc4x4567 = vminq_f32(vacc4x4567, vmax);
    vacc5x4567 = vminq_f32(vacc5x4567, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
    vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
    vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
    vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store (rows in reverse order), advance C pointers to
      // the next column group and rewind A pointers for the next pass.
      vst1q_f32(c5, vacc5x0123);
      vst1q_f32(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      vst1q_f32(c4, vacc4x0123);
      vst1q_f32(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      vst1q_f32(c3, vacc3x0123);
      vst1q_f32(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      vst1q_f32(c2, vacc2x0123);
      vst1q_f32(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      vst1q_f32(c1, vacc1x0123);
      vst1q_f32(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      vst1q_f32(c0, vacc0x0123);
      vst1q_f32(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial-tile store: peel off 4, 2, then 1 columns, shifting the
      // remaining results into the low lanes after each store.
      if (nc & 4) {
        vst1q_f32(c5, vacc5x0123); c5 += 4;
        vst1q_f32(c4, vacc4x0123); c4 += 4;
        vst1q_f32(c3, vacc3x0123); c3 += 4;
        vst1q_f32(c2, vacc2x0123); c2 += 4;
        vst1q_f32(c1, vacc1x0123); c1 += 4;
        vst1q_f32(c0, vacc0x0123); c0 += 4;

        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;
      }
      float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
      float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      if (nc & 2) {
        vst1_f32(c5, vacc5x01); c5 += 2;
        vst1_f32(c4, vacc4x01); c4 += 2;
        vst1_f32(c3, vacc3x01); c3 += 2;
        vst1_f32(c2, vacc2x01); c2 += 2;
        vst1_f32(c1, vacc1x01); c1 += 2;
        vst1_f32(c0, vacc0x01); c0 += 2;

        vacc5x01 = vget_high_f32(vacc5x0123);
        vacc4x01 = vget_high_f32(vacc4x0123);
        vacc3x01 = vget_high_f32(vacc3x0123);
        vacc2x01 = vget_high_f32(vacc2x0123);
        vacc1x01 = vget_high_f32(vacc1x0123);
        vacc0x01 = vget_high_f32(vacc0x0123);
      }
      if (nc & 1) {
        vst1_lane_f32(c5, vacc5x01, 0);
        vst1_lane_f32(c4, vacc4x01, 0);
        vst1_lane_f32(c3, vacc3x01, 0);
        vst1_lane_f32(c2, vacc2x01, 0);
        vst1_lane_f32(c1, vacc1x01, 0);
        vst1_lane_f32(c0, vacc0x01, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
9,666
36.761719
75
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-minmax-avx-broadcast.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>

// f32 GEMM micro-kernel computing a 6x8 tile of C with min/max clamping,
// using AVX: each K-step broadcasts one scalar per A row and does a
// separate multiply + add (no fused multiply-add on plain AVX).
//
//   mr        - number of valid rows in this tile (1..6).
//   nc        - number of output columns remaining (processed 8 at a time).
//   kc        - reduction (K) dimension in BYTES; multiple of sizeof(float).
//   a         - input matrix A; a_stride is its row stride in bytes.
//   w         - packed weights: 8 bias floats, then 8-wide column panels of B.
//   c         - output matrix; cm_stride is the row stride, cn_stride the
//               stride between consecutive 8-column groups, both in bytes.
//   params    - broadcast-ready min/max clamping bounds (params->avx).
void xnn_f32_gemm_minmax_ukernel_6x8__avx_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Per-row A/C pointers; rows beyond mr alias the previous row so all
  // accesses stay in bounds.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  do {
    // Initialize the 6 accumulator rows from the packed bias (first 8
    // floats of w).
    __m256 vacc0x01234567 = _mm256_load_ps(w + 0);
    __m256 vacc1x01234567 = vacc0x01234567;
    __m256 vacc2x01234567 = vacc0x01234567;
    __m256 vacc3x01234567 = vacc0x01234567;
    __m256 vacc4x01234567 = vacc0x01234567;
    __m256 vacc5x01234567 = vacc0x01234567;
    w += 8;

    // K loop: one element per iteration; broadcast each A scalar across a
    // 256-bit vector and accumulate with a separate multiply and add.
    size_t k = kc;
    do {
      const __m256 va0 = _mm256_broadcast_ss(a0); a0 += 1;
      const __m256 va1 = _mm256_broadcast_ss(a1); a1 += 1;
      const __m256 va2 = _mm256_broadcast_ss(a2); a2 += 1;
      const __m256 va3 = _mm256_broadcast_ss(a3); a3 += 1;
      const __m256 va4 = _mm256_broadcast_ss(a4); a4 += 1;
      const __m256 va5 = _mm256_broadcast_ss(a5); a5 += 1;

      const __m256 vb01234567 = _mm256_load_ps(w);
      w += 8;

      vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
      vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
      vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
      vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
      vacc4x01234567 = _mm256_add_ps(vacc4x01234567, _mm256_mul_ps(va4, vb01234567));
      vacc5x01234567 = _mm256_add_ps(vacc5x01234567, _mm256_mul_ps(va5, vb01234567));

      k -= sizeof(float);
    } while (k != 0);

    // Clamp all accumulators to [min, max].
    const __m256 vmin = _mm256_load_ps(params->avx.min);
    vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
    vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
    vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
    vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
    vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
    vacc5x01234567 = _mm256_max_ps(vmin, vacc5x01234567);

    const __m256 vmax = _mm256_load_ps(params->avx.max);
    vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
    vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
    vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
    vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
    vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
    vacc5x01234567 = _mm256_min_ps(vmax, vacc5x01234567);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store (rows in reverse order), advance C pointers to
      // the next column group and rewind A pointers for the next pass.
      _mm256_storeu_ps(c5, vacc5x01234567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      _mm256_storeu_ps(c4, vacc4x01234567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm256_storeu_ps(c3, vacc3x01234567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm256_storeu_ps(c2, vacc2x01234567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial-tile store: work on 128-bit halves, peeling off 4, 2,
      // then 1 columns and shifting the remaining results down.
      __m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
      __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
      __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c5, vacc5x0123);
        _mm_storeu_ps(c4, vacc4x0123);
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
        vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
        vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c5, vacc5x0123);
        _mm_storel_pi((__m64*) c4, vacc4x0123);
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c5, vacc5x0123);
        _mm_store_ss(c4, vacc4x0123);
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}
7,085
32.742857
85
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-minmax-fma3-broadcast.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>

// f32 GEMM micro-kernel computing a 6x8 tile of C with min/max clamping,
// using FMA3: each K-step broadcasts one scalar per A row and uses a fused
// multiply-add against an 8-wide panel of B.
//
//   mr        - number of valid rows in this tile (1..6).
//   nc        - number of output columns remaining (processed 8 at a time).
//   kc        - reduction (K) dimension in BYTES; multiple of sizeof(float).
//   a         - input matrix A; a_stride is its row stride in bytes.
//   w         - packed weights: 8 bias floats, then 8-wide column panels of B.
//   c         - output matrix; cm_stride is the row stride, cn_stride the
//               stride between consecutive 8-column groups, both in bytes.
//   params    - broadcast-ready min/max clamping bounds (params->avx).
void xnn_f32_gemm_minmax_ukernel_6x8__fma3_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Per-row A/C pointers; rows beyond mr alias the previous row so all
  // accesses stay in bounds.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  do {
    // Initialize the 6 accumulator rows from the packed bias (first 8
    // floats of w).
    __m256 vacc0x01234567 = _mm256_load_ps(w + 0);
    __m256 vacc1x01234567 = vacc0x01234567;
    __m256 vacc2x01234567 = vacc0x01234567;
    __m256 vacc3x01234567 = vacc0x01234567;
    __m256 vacc4x01234567 = vacc0x01234567;
    __m256 vacc5x01234567 = vacc0x01234567;
    w += 8;

    // K loop: one element per iteration; broadcast each A scalar across a
    // 256-bit vector and fuse the multiply-accumulate.
    size_t k = kc;
    do {
      const __m256 va0 = _mm256_broadcast_ss(a0); a0 += 1;
      const __m256 va1 = _mm256_broadcast_ss(a1); a1 += 1;
      const __m256 va2 = _mm256_broadcast_ss(a2); a2 += 1;
      const __m256 va3 = _mm256_broadcast_ss(a3); a3 += 1;
      const __m256 va4 = _mm256_broadcast_ss(a4); a4 += 1;
      const __m256 va5 = _mm256_broadcast_ss(a5); a5 += 1;

      const __m256 vb01234567 = _mm256_load_ps(w);
      w += 8;

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
      vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
      vacc5x01234567 = _mm256_fmadd_ps(va5, vb01234567, vacc5x01234567);

      k -= sizeof(float);
    } while (k != 0);

    // Clamp all accumulators to [min, max].
    const __m256 vmin = _mm256_load_ps(params->avx.min);
    vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
    vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
    vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
    vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
    vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
    vacc5x01234567 = _mm256_max_ps(vmin, vacc5x01234567);

    const __m256 vmax = _mm256_load_ps(params->avx.max);
    vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
    vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
    vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
    vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
    vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
    vacc5x01234567 = _mm256_min_ps(vmax, vacc5x01234567);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store (rows in reverse order), advance C pointers to
      // the next column group and rewind A pointers for the next pass.
      _mm256_storeu_ps(c5, vacc5x01234567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      _mm256_storeu_ps(c4, vacc4x01234567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm256_storeu_ps(c3, vacc3x01234567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm256_storeu_ps(c2, vacc2x01234567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial-tile store: work on 128-bit halves, peeling off 4, 2,
      // then 1 columns and shifting the remaining results down.
      __m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
      __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
      __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c5, vacc5x0123);
        _mm_storeu_ps(c4, vacc4x0123);
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
        vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
        vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c5, vacc5x0123);
        _mm_storel_pi((__m64*) c4, vacc4x0123);
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c5, vacc5x0123);
        _mm_store_ss(c4, vacc4x0123);
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}
7,008
32.37619
75
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-minmax-neon-dup-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/neon-ld128.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>


// Computes a 6x8 tile of the single-precision product C = A * B and clamps
// every output to [params->scalar.min, params->scalar.max].
//
// NEON "dup / ld128" flavor: each main-loop step loads 4 floats (128 bits)
// per row of A and broadcasts each of the 4 lanes into a full vector with
// vdupq_lane_f32 before the multiply-accumulate.
//
//   mr         - number of valid rows (1..6); pointers for the unused rows
//                alias an earlier row so their accesses stay in bounds
//   nc         - number of output columns still to produce
//   kc         - length of the reduction (K) dimension, in bytes
//   a, a_stride      - input rows, a_stride bytes apart
//   w          - packed weights: per 8-column panel, 8 accumulator
//                initializers followed by the panel's B elements
//   c, cm_stride, cn_stride - output; rows cm_stride bytes apart, the next
//                8-column panel cn_stride bytes from the current one
//   params     - scalar min/max clamping bounds
void xnn_f32_gemm_minmax_ukernel_6x8__neon_dup_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Set up the 6 input/output row pointers.  When mr < 6, out-of-range rows
  // alias the previous row: the redundant work is harmless and avoids
  // branching inside the hot loop.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  do {
    // Initialize all 12 accumulators (6 rows x 2 four-wide column groups)
    // from the leading 8 floats of the packed-weights panel.
    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
    float32x4_t vacc1x0123 = vacc0x0123;
    float32x4_t vacc1x4567 = vacc0x4567;
    float32x4_t vacc2x0123 = vacc0x0123;
    float32x4_t vacc2x4567 = vacc0x4567;
    float32x4_t vacc3x0123 = vacc0x0123;
    float32x4_t vacc3x4567 = vacc0x4567;
    float32x4_t vacc4x0123 = vacc0x0123;
    float32x4_t vacc4x4567 = vacc0x4567;
    float32x4_t vacc5x0123 = vacc0x0123;
    float32x4_t vacc5x4567 = vacc0x4567;

    // Main reduction loop: 4 K-elements per iteration.
    size_t k = kc;
    for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
      const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
      const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
      const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
      const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
      const float32x4_t va4 = vld1q_f32(a4); a4 += 4;
      const float32x4_t va5 = vld1q_f32(a5); a5 += 4;

      // K-element 0: broadcast lane 0 of the low half of each A vector.
      const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
      const float32x4_t va0c0 = vdupq_lane_f32(vget_low_f32(va0), 0);
      const float32x4_t va1c0 = vdupq_lane_f32(vget_low_f32(va1), 0);
      const float32x4_t va2c0 = vdupq_lane_f32(vget_low_f32(va2), 0);
      const float32x4_t va3c0 = vdupq_lane_f32(vget_low_f32(va3), 0);
      const float32x4_t va4c0 = vdupq_lane_f32(vget_low_f32(va4), 0);
      const float32x4_t va5c0 = vdupq_lane_f32(vget_low_f32(va5), 0);
      vacc0x0123 = vmlaq_f32(vacc0x0123, va0c0, vb0123c0);
      vacc1x0123 = vmlaq_f32(vacc1x0123, va1c0, vb0123c0);
      vacc2x0123 = vmlaq_f32(vacc2x0123, va2c0, vb0123c0);
      vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
      vacc4x0123 = vmlaq_f32(vacc4x0123, va4c0, vb0123c0);
      vacc5x0123 = vmlaq_f32(vacc5x0123, va5c0, vb0123c0);
      vacc0x4567 = vmlaq_f32(vacc0x4567, va0c0, vb4567c0);
      vacc1x4567 = vmlaq_f32(vacc1x4567, va1c0, vb4567c0);
      vacc2x4567 = vmlaq_f32(vacc2x4567, va2c0, vb4567c0);
      vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);
      vacc4x4567 = vmlaq_f32(vacc4x4567, va4c0, vb4567c0);
      vacc5x4567 = vmlaq_f32(vacc5x4567, va5c0, vb4567c0);

      // K-element 1: lane 1 of the low half.
      const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
      const float32x4_t va0c1 = vdupq_lane_f32(vget_low_f32(va0), 1);
      const float32x4_t va1c1 = vdupq_lane_f32(vget_low_f32(va1), 1);
      const float32x4_t va2c1 = vdupq_lane_f32(vget_low_f32(va2), 1);
      const float32x4_t va3c1 = vdupq_lane_f32(vget_low_f32(va3), 1);
      const float32x4_t va4c1 = vdupq_lane_f32(vget_low_f32(va4), 1);
      const float32x4_t va5c1 = vdupq_lane_f32(vget_low_f32(va5), 1);
      vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
      vacc1x0123 = vmlaq_f32(vacc1x0123, va1c1, vb0123c1);
      vacc2x0123 = vmlaq_f32(vacc2x0123, va2c1, vb0123c1);
      vacc3x0123 = vmlaq_f32(vacc3x0123, va3c1, vb0123c1);
      vacc4x0123 = vmlaq_f32(vacc4x0123, va4c1, vb0123c1);
      vacc5x0123 = vmlaq_f32(vacc5x0123, va5c1, vb0123c1);
      vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
      vacc1x4567 = vmlaq_f32(vacc1x4567, va1c1, vb4567c1);
      vacc2x4567 = vmlaq_f32(vacc2x4567, va2c1, vb4567c1);
      vacc3x4567 = vmlaq_f32(vacc3x4567, va3c1, vb4567c1);
      vacc4x4567 = vmlaq_f32(vacc4x4567, va4c1, vb4567c1);
      vacc5x4567 = vmlaq_f32(vacc5x4567, va5c1, vb4567c1);

      // K-element 2: lane 0 of the high half.
      const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
      const float32x4_t va0c2 = vdupq_lane_f32(vget_high_f32(va0), 0);
      const float32x4_t va1c2 = vdupq_lane_f32(vget_high_f32(va1), 0);
      const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);
      const float32x4_t va3c2 = vdupq_lane_f32(vget_high_f32(va3), 0);
      const float32x4_t va4c2 = vdupq_lane_f32(vget_high_f32(va4), 0);
      const float32x4_t va5c2 = vdupq_lane_f32(vget_high_f32(va5), 0);
      vacc0x0123 = vmlaq_f32(vacc0x0123, va0c2, vb0123c2);
      vacc1x0123 = vmlaq_f32(vacc1x0123, va1c2, vb0123c2);
      vacc2x0123 = vmlaq_f32(vacc2x0123, va2c2, vb0123c2);
      vacc3x0123 = vmlaq_f32(vacc3x0123, va3c2, vb0123c2);
      vacc4x0123 = vmlaq_f32(vacc4x0123, va4c2, vb0123c2);
      vacc5x0123 = vmlaq_f32(vacc5x0123, va5c2, vb0123c2);
      vacc0x4567 = vmlaq_f32(vacc0x4567, va0c2, vb4567c2);
      vacc1x4567 = vmlaq_f32(vacc1x4567, va1c2, vb4567c2);
      vacc2x4567 = vmlaq_f32(vacc2x4567, va2c2, vb4567c2);
      vacc3x4567 = vmlaq_f32(vacc3x4567, va3c2, vb4567c2);
      vacc4x4567 = vmlaq_f32(vacc4x4567, va4c2, vb4567c2);
      vacc5x4567 = vmlaq_f32(vacc5x4567, va5c2, vb4567c2);

      // K-element 3: lane 1 of the high half.
      const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
      const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);
      const float32x4_t va1c3 = vdupq_lane_f32(vget_high_f32(va1), 1);
      const float32x4_t va2c3 = vdupq_lane_f32(vget_high_f32(va2), 1);
      const float32x4_t va3c3 = vdupq_lane_f32(vget_high_f32(va3), 1);
      const float32x4_t va4c3 = vdupq_lane_f32(vget_high_f32(va4), 1);
      const float32x4_t va5c3 = vdupq_lane_f32(vget_high_f32(va5), 1);
      vacc0x0123 = vmlaq_f32(vacc0x0123, va0c3, vb0123c3);
      vacc1x0123 = vmlaq_f32(vacc1x0123, va1c3, vb0123c3);
      vacc2x0123 = vmlaq_f32(vacc2x0123, va2c3, vb0123c3);
      vacc3x0123 = vmlaq_f32(vacc3x0123, va3c3, vb0123c3);
      vacc4x0123 = vmlaq_f32(vacc4x0123, va4c3, vb0123c3);
      vacc5x0123 = vmlaq_f32(vacc5x0123, va5c3, vb0123c3);
      vacc0x4567 = vmlaq_f32(vacc0x4567, va0c3, vb4567c3);
      vacc1x4567 = vmlaq_f32(vacc1x4567, va1c3, vb4567c3);
      vacc2x4567 = vmlaq_f32(vacc2x4567, va2c3, vb4567c3);
      vacc3x4567 = vmlaq_f32(vacc3x4567, va3c3, vb4567c3);
      vacc4x4567 = vmlaq_f32(vacc4x4567, va4c3, vb4567c3);
      vacc5x4567 = vmlaq_f32(vacc5x4567, va5c3, vb4567c3);
    }
    // Remainder: 1..3 leftover K-elements, one per iteration, with the
    // scalar A value broadcast across a full vector.
    if XNN_UNLIKELY(k != 0) {
      do {
        const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
        const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
        const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
        const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
        const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
        const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;

        const float32x4_t vb0123 = vld1q_f32(w); w += 4;
        const float32x4_t vb4567 = vld1q_f32(w); w += 4;

        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
        vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
        vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123);
        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
        vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
        vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567);

        k -= sizeof(float);
      } while (k != 0);
    }

    // Clamp: upper bound first (vmin), then lower bound (vmax).
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
    vacc4x0123 = vminq_f32(vacc4x0123, vmax);
    vacc5x0123 = vminq_f32(vacc5x0123, vmax);
    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    vacc4x4567 = vminq_f32(vacc4x4567, vmax);
    vacc5x4567 = vminq_f32(vacc5x4567, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
    vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
    vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
    vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column panel: store both halves of every row, advance the
      // output pointers to the next panel, and rewind A for reuse.
      vst1q_f32(c5, vacc5x0123);
      vst1q_f32(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      vst1q_f32(c4, vacc4x0123);
      vst1q_f32(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      vst1q_f32(c3, vacc3x0123);
      vst1q_f32(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      vst1q_f32(c2, vacc2x0123);
      vst1q_f32(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      vst1q_f32(c1, vacc1x0123);
      vst1q_f32(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      vst1q_f32(c0, vacc0x0123);
      vst1q_f32(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial panel of 1..7 columns: store progressively narrower pieces
      // (4, then 2, then 1), shifting the remaining lanes down each time.
      if (nc & 4) {
        vst1q_f32(c5, vacc5x0123); c5 += 4;
        vst1q_f32(c4, vacc4x0123); c4 += 4;
        vst1q_f32(c3, vacc3x0123); c3 += 4;
        vst1q_f32(c2, vacc2x0123); c2 += 4;
        vst1q_f32(c1, vacc1x0123); c1 += 4;
        vst1q_f32(c0, vacc0x0123); c0 += 4;

        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;
      }
      float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
      float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      if (nc & 2) {
        vst1_f32(c5, vacc5x01); c5 += 2;
        vst1_f32(c4, vacc4x01); c4 += 2;
        vst1_f32(c3, vacc3x01); c3 += 2;
        vst1_f32(c2, vacc2x01); c2 += 2;
        vst1_f32(c1, vacc1x01); c1 += 2;
        vst1_f32(c0, vacc0x01); c0 += 2;

        vacc5x01 = vget_high_f32(vacc5x0123);
        vacc4x01 = vget_high_f32(vacc4x0123);
        vacc3x01 = vget_high_f32(vacc3x0123);
        vacc2x01 = vget_high_f32(vacc2x0123);
        vacc1x01 = vget_high_f32(vacc1x0123);
        vacc0x01 = vget_high_f32(vacc0x0123);
      }
      if (nc & 1) {
        vst1_lane_f32(c5, vacc5x01, 0);
        vst1_lane_f32(c4, vacc4x01, 0);
        vst1_lane_f32(c3, vacc3x01, 0);
        vst1_lane_f32(c2, vacc2x01, 0);
        vst1_lane_f32(c1, vacc1x01, 0);
        vst1_lane_f32(c0, vacc0x01, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
12,965
39.645768
75
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-minmax-neon-dup-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/neon-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>


// Computes a 6x8 tile of the single-precision product C = A * B and clamps
// every output to [params->scalar.min, params->scalar.max].
//
// NEON "dup / ld64" flavor: each main-loop step loads 2 floats (64 bits)
// per row of A and broadcasts each lane with vdupq_lane_f32 before the
// multiply-accumulate.
//
//   mr         - number of valid rows (1..6); pointers for the unused rows
//                alias an earlier row so their accesses stay in bounds
//   nc         - number of output columns still to produce
//   kc         - length of the reduction (K) dimension, in bytes
//   a, a_stride      - input rows, a_stride bytes apart
//   w          - packed weights: per 8-column panel, 8 accumulator
//                initializers followed by the panel's B elements
//   c, cm_stride, cn_stride - output; rows cm_stride bytes apart, the next
//                8-column panel cn_stride bytes from the current one
//   params     - scalar min/max clamping bounds
void xnn_f32_gemm_minmax_ukernel_6x8__neon_dup_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Set up the 6 input/output row pointers.  When mr < 6, out-of-range rows
  // alias the previous row so their accesses are redundant but safe.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  do {
    // Initialize all 12 accumulators (6 rows x 2 four-wide column groups)
    // from the leading 8 floats of the packed-weights panel.
    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
    float32x4_t vacc1x0123 = vacc0x0123;
    float32x4_t vacc1x4567 = vacc0x4567;
    float32x4_t vacc2x0123 = vacc0x0123;
    float32x4_t vacc2x4567 = vacc0x4567;
    float32x4_t vacc3x0123 = vacc0x0123;
    float32x4_t vacc3x4567 = vacc0x4567;
    float32x4_t vacc4x0123 = vacc0x0123;
    float32x4_t vacc4x4567 = vacc0x4567;
    float32x4_t vacc5x0123 = vacc0x0123;
    float32x4_t vacc5x4567 = vacc0x4567;

    // Main reduction loop: 2 K-elements per iteration.
    size_t k = kc;
    for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
      const float32x2_t va0 = vld1_f32(a0); a0 += 2;
      const float32x2_t va1 = vld1_f32(a1); a1 += 2;
      const float32x2_t va2 = vld1_f32(a2); a2 += 2;
      const float32x2_t va3 = vld1_f32(a3); a3 += 2;
      const float32x2_t va4 = vld1_f32(a4); a4 += 2;
      const float32x2_t va5 = vld1_f32(a5); a5 += 2;

      const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
      const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;

      // K-element 0: broadcast lane 0 of each 2-float A pair.
      const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
      const float32x4_t va1c0 = vdupq_lane_f32(va1, 0);
      const float32x4_t va2c0 = vdupq_lane_f32(va2, 0);
      const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
      const float32x4_t va4c0 = vdupq_lane_f32(va4, 0);
      const float32x4_t va5c0 = vdupq_lane_f32(va5, 0);
      vacc0x0123 = vmlaq_f32(vacc0x0123, va0c0, vb0123c0);
      vacc1x0123 = vmlaq_f32(vacc1x0123, va1c0, vb0123c0);
      vacc2x0123 = vmlaq_f32(vacc2x0123, va2c0, vb0123c0);
      vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
      vacc4x0123 = vmlaq_f32(vacc4x0123, va4c0, vb0123c0);
      vacc5x0123 = vmlaq_f32(vacc5x0123, va5c0, vb0123c0);
      vacc0x4567 = vmlaq_f32(vacc0x4567, va0c0, vb4567c0);
      vacc1x4567 = vmlaq_f32(vacc1x4567, va1c0, vb4567c0);
      vacc2x4567 = vmlaq_f32(vacc2x4567, va2c0, vb4567c0);
      vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);
      vacc4x4567 = vmlaq_f32(vacc4x4567, va4c0, vb4567c0);
      vacc5x4567 = vmlaq_f32(vacc5x4567, va5c0, vb4567c0);

      // K-element 1: broadcast lane 1.
      const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
      const float32x4_t va1c1 = vdupq_lane_f32(va1, 1);
      const float32x4_t va2c1 = vdupq_lane_f32(va2, 1);
      const float32x4_t va3c1 = vdupq_lane_f32(va3, 1);
      const float32x4_t va4c1 = vdupq_lane_f32(va4, 1);
      const float32x4_t va5c1 = vdupq_lane_f32(va5, 1);
      vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
      vacc1x0123 = vmlaq_f32(vacc1x0123, va1c1, vb0123c1);
      vacc2x0123 = vmlaq_f32(vacc2x0123, va2c1, vb0123c1);
      vacc3x0123 = vmlaq_f32(vacc3x0123, va3c1, vb0123c1);
      vacc4x0123 = vmlaq_f32(vacc4x0123, va4c1, vb0123c1);
      vacc5x0123 = vmlaq_f32(vacc5x0123, va5c1, vb0123c1);
      vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
      vacc1x4567 = vmlaq_f32(vacc1x4567, va1c1, vb4567c1);
      vacc2x4567 = vmlaq_f32(vacc2x4567, va2c1, vb4567c1);
      vacc3x4567 = vmlaq_f32(vacc3x4567, va3c1, vb4567c1);
      vacc4x4567 = vmlaq_f32(vacc4x4567, va4c1, vb4567c1);
      vacc5x4567 = vmlaq_f32(vacc5x4567, va5c1, vb4567c1);
    }
    // Remainder: at most one leftover K-element (kc odd in floats), with the
    // scalar A value broadcast across a full vector.
    if XNN_UNLIKELY(k != 0) {
      const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
      const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
      const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
      const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
      const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
      const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;

      const float32x4_t vb0123 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567 = vld1q_f32(w); w += 4;

      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
      vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
      vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123);
      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
      vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
      vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567);
    }

    // Clamp: upper bound first (vmin), then lower bound (vmax).
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
    vacc4x0123 = vminq_f32(vacc4x0123, vmax);
    vacc5x0123 = vminq_f32(vacc5x0123, vmax);
    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    vacc4x4567 = vminq_f32(vacc4x4567, vmax);
    vacc5x4567 = vminq_f32(vacc5x4567, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
    vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
    vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
    vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column panel: store both halves of every row, advance the
      // output pointers to the next panel, and rewind A for reuse.
      vst1q_f32(c5, vacc5x0123);
      vst1q_f32(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      vst1q_f32(c4, vacc4x0123);
      vst1q_f32(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      vst1q_f32(c3, vacc3x0123);
      vst1q_f32(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      vst1q_f32(c2, vacc2x0123);
      vst1q_f32(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      vst1q_f32(c1, vacc1x0123);
      vst1q_f32(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      vst1q_f32(c0, vacc0x0123);
      vst1q_f32(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial panel of 1..7 columns: store progressively narrower pieces
      // (4, then 2, then 1), shifting the remaining lanes down each time.
      if (nc & 4) {
        vst1q_f32(c5, vacc5x0123); c5 += 4;
        vst1q_f32(c4, vacc4x0123); c4 += 4;
        vst1q_f32(c3, vacc3x0123); c3 += 4;
        vst1q_f32(c2, vacc2x0123); c2 += 4;
        vst1q_f32(c1, vacc1x0123); c1 += 4;
        vst1q_f32(c0, vacc0x0123); c0 += 4;

        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;
      }
      float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
      float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      if (nc & 2) {
        vst1_f32(c5, vacc5x01); c5 += 2;
        vst1_f32(c4, vacc4x01); c4 += 2;
        vst1_f32(c3, vacc3x01); c3 += 2;
        vst1_f32(c2, vacc2x01); c2 += 2;
        vst1_f32(c1, vacc1x01); c1 += 2;
        vst1_f32(c0, vacc0x01); c0 += 2;

        vacc5x01 = vget_high_f32(vacc5x0123);
        vacc4x01 = vget_high_f32(vacc4x0123);
        vacc3x01 = vget_high_f32(vacc3x0123);
        vacc2x01 = vget_high_f32(vacc2x0123);
        vacc1x01 = vget_high_f32(vacc1x0123);
        vacc0x01 = vget_high_f32(vacc0x0123);
      }
      if (nc & 1) {
        vst1_lane_f32(c5, vacc5x01, 0);
        vst1_lane_f32(c4, vacc4x01, 0);
        vst1_lane_f32(c3, vacc3x01, 0);
        vst1_lane_f32(c2, vacc2x01, 0);
        vst1_lane_f32(c1, vacc1x01, 0);
        vst1_lane_f32(c0, vacc0x01, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
10,182
36.996269
75
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-minmax-neon-lane-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/neon-ld128.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>


// Computes a 6x8 tile of the single-precision product C = A * B and clamps
// every output to [params->scalar.min, params->scalar.max].
//
// NEON "lane / ld128" flavor: each main-loop step loads 4 floats (128 bits)
// per row of A and multiplies by individual lanes directly with
// vmlaq_lane_f32 (no explicit broadcast vectors).
//
//   mr         - number of valid rows (1..6); pointers for the unused rows
//                alias an earlier row so their accesses stay in bounds
//   nc         - number of output columns still to produce
//   kc         - length of the reduction (K) dimension, in bytes
//   a, a_stride      - input rows, a_stride bytes apart
//   w          - packed weights: per 8-column panel, 8 accumulator
//                initializers followed by the panel's B elements
//   c, cm_stride, cn_stride - output; rows cm_stride bytes apart, the next
//                8-column panel cn_stride bytes from the current one
//   params     - scalar min/max clamping bounds
void xnn_f32_gemm_minmax_ukernel_6x8__neon_lane_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Set up the 6 input/output row pointers.  When mr < 6, out-of-range rows
  // alias the previous row so their accesses are redundant but safe.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  do {
    // Initialize all 12 accumulators (6 rows x 2 four-wide column groups)
    // from the leading 8 floats of the packed-weights panel.
    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
    float32x4_t vacc1x0123 = vacc0x0123;
    float32x4_t vacc1x4567 = vacc0x4567;
    float32x4_t vacc2x0123 = vacc0x0123;
    float32x4_t vacc2x4567 = vacc0x4567;
    float32x4_t vacc3x0123 = vacc0x0123;
    float32x4_t vacc3x4567 = vacc0x4567;
    float32x4_t vacc4x0123 = vacc0x0123;
    float32x4_t vacc4x4567 = vacc0x4567;
    float32x4_t vacc5x0123 = vacc0x0123;
    float32x4_t vacc5x4567 = vacc0x4567;

    // Main reduction loop: 4 K-elements per iteration, multiplying B panels
    // by successive lanes of the A vectors.
    size_t k = kc;
    for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
      const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
      const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
      const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
      const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
      const float32x4_t va4 = vld1q_f32(a4); a4 += 4;
      const float32x4_t va5 = vld1q_f32(a5); a5 += 4;

      // K-element 0: low half, lane 0.
      const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
      vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, vget_low_f32(va0), 0);
      vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c0, vget_low_f32(va1), 0);
      vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c0, vget_low_f32(va2), 0);
      vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c0, vget_low_f32(va3), 0);
      vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c0, vget_low_f32(va4), 0);
      vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123c0, vget_low_f32(va5), 0);
      vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, vget_low_f32(va0), 0);
      vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c0, vget_low_f32(va1), 0);
      vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c0, vget_low_f32(va2), 0);
      vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c0, vget_low_f32(va3), 0);
      vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c0, vget_low_f32(va4), 0);
      vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567c0, vget_low_f32(va5), 0);

      // K-element 1: low half, lane 1.
      const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
      vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, vget_low_f32(va0), 1);
      vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c1, vget_low_f32(va1), 1);
      vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c1, vget_low_f32(va2), 1);
      vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c1, vget_low_f32(va3), 1);
      vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c1, vget_low_f32(va4), 1);
      vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123c1, vget_low_f32(va5), 1);
      vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, vget_low_f32(va0), 1);
      vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c1, vget_low_f32(va1), 1);
      vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c1, vget_low_f32(va2), 1);
      vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c1, vget_low_f32(va3), 1);
      vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c1, vget_low_f32(va4), 1);
      vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567c1, vget_low_f32(va5), 1);

      // K-element 2: high half, lane 0.
      const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
      vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c2, vget_high_f32(va0), 0);
      vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c2, vget_high_f32(va1), 0);
      vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c2, vget_high_f32(va2), 0);
      vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c2, vget_high_f32(va3), 0);
      vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c2, vget_high_f32(va4), 0);
      vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123c2, vget_high_f32(va5), 0);
      vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c2, vget_high_f32(va0), 0);
      vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c2, vget_high_f32(va1), 0);
      vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c2, vget_high_f32(va2), 0);
      vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c2, vget_high_f32(va3), 0);
      vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c2, vget_high_f32(va4), 0);
      vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567c2, vget_high_f32(va5), 0);

      // K-element 3: high half, lane 1.
      const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
      vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c3, vget_high_f32(va0), 1);
      vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c3, vget_high_f32(va1), 1);
      vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c3, vget_high_f32(va2), 1);
      vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c3, vget_high_f32(va3), 1);
      vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c3, vget_high_f32(va4), 1);
      vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123c3, vget_high_f32(va5), 1);
      vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c3, vget_high_f32(va0), 1);
      vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c3, vget_high_f32(va1), 1);
      vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c3, vget_high_f32(va2), 1);
      vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c3, vget_high_f32(va3), 1);
      vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c3, vget_high_f32(va4), 1);
      vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567c3, vget_high_f32(va5), 1);
    }
    // Remainder: 1..3 leftover K-elements, one per iteration, with the
    // scalar A value broadcast across a full vector.
    if XNN_UNLIKELY(k != 0) {
      do {
        const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
        const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
        const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
        const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
        const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
        const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;

        const float32x4_t vb0123 = vld1q_f32(w); w += 4;
        const float32x4_t vb4567 = vld1q_f32(w); w += 4;

        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
        vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
        vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123);
        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
        vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
        vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567);

        k -= sizeof(float);
      } while (k != 0);
    }

    // Clamp: upper bound first (vmin), then lower bound (vmax).
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
    vacc4x0123 = vminq_f32(vacc4x0123, vmax);
    vacc5x0123 = vminq_f32(vacc5x0123, vmax);
    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    vacc4x4567 = vminq_f32(vacc4x4567, vmax);
    vacc5x4567 = vminq_f32(vacc5x4567, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
    vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
    vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
    vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column panel: store both halves of every row, advance the
      // output pointers to the next panel, and rewind A for reuse.
      vst1q_f32(c5, vacc5x0123);
      vst1q_f32(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      vst1q_f32(c4, vacc4x0123);
      vst1q_f32(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      vst1q_f32(c3, vacc3x0123);
      vst1q_f32(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      vst1q_f32(c2, vacc2x0123);
      vst1q_f32(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      vst1q_f32(c1, vacc1x0123);
      vst1q_f32(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      vst1q_f32(c0, vacc0x0123);
      vst1q_f32(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial panel of 1..7 columns: store progressively narrower pieces
      // (4, then 2, then 1), shifting the remaining lanes down each time.
      if (nc & 4) {
        vst1q_f32(c5, vacc5x0123); c5 += 4;
        vst1q_f32(c4, vacc4x0123); c4 += 4;
        vst1q_f32(c3, vacc3x0123); c3 += 4;
        vst1q_f32(c2, vacc2x0123); c2 += 4;
        vst1q_f32(c1, vacc1x0123); c1 += 4;
        vst1q_f32(c0, vacc0x0123); c0 += 4;

        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;
      }
      float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
      float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      if (nc & 2) {
        vst1_f32(c5, vacc5x01); c5 += 2;
        vst1_f32(c4, vacc4x01); c4 += 2;
        vst1_f32(c3, vacc3x01); c3 += 2;
        vst1_f32(c2, vacc2x01); c2 += 2;
        vst1_f32(c1, vacc1x01); c1 += 2;
        vst1_f32(c0, vacc0x01); c0 += 2;

        vacc5x01 = vget_high_f32(vacc5x0123);
        vacc4x01 = vget_high_f32(vacc4x0123);
        vacc3x01 = vget_high_f32(vacc3x0123);
        vacc2x01 = vget_high_f32(vacc2x0123);
        vacc1x01 = vget_high_f32(vacc1x0123);
        vacc0x01 = vget_high_f32(vacc0x0123);
      }
      if (nc & 1) {
        vst1_lane_f32(c5, vacc5x01, 0);
        vst1_lane_f32(c4, vacc4x01, 0);
        vst1_lane_f32(c3, vacc3x01, 0);
        vst1_lane_f32(c2, vacc2x01, 0);
        vst1_lane_f32(c1, vacc1x01, 0);
        vst1_lane_f32(c0, vacc0x01, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
12,258
40.555932
79
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-minmax-neon-lane-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/neon-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>

// f32 GEMM microkernel producing a 6x8 output tile with min/max clamping:
// C[0:mr, 0:nc] = clamp(A * B + bias, params->scalar.min, params->scalar.max).
//
//   mr        - rows of the tile actually used (1..6); pointers for unused
//               rows are aliased onto lower rows so the full 6-row code path
//               runs unconditionally (the row stored last, c0, is always valid).
//   nc        - output columns; processed 8 at a time, with a 4/2/1-column
//               remainder path for the final group.
//   kc        - reduction length in BYTES (asserted multiple of sizeof(float)).
//   a_stride  - byte stride between consecutive rows of A.
//   w         - packed weights: per 8-column group, 8 bias floats followed by
//               interleaved 8-wide B panels, consumed strictly linearly.
//   cm_stride - byte stride between consecutive rows of C.
//   cn_stride - byte stride between consecutive 8-column groups of C.
//
// "neon_lane_ld64" variant: the main loop loads 64 bits (2 floats) of each A
// row per iteration and multiply-accumulates with vmlaq_lane_f32 per lane.
void xnn_f32_gemm_minmax_ukernel_6x8__neon_lane_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Set up per-row A/C pointers. For mr < 6, out-of-range rows alias the
  // previous row; duplicate stores are overwritten because rows are stored
  // from c5 down to c0.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  do {
    // Initialize all six accumulator rows from the bias at the head of the
    // packed weight stream.
    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
    float32x4_t vacc1x0123 = vacc0x0123;
    float32x4_t vacc1x4567 = vacc0x4567;
    float32x4_t vacc2x0123 = vacc0x0123;
    float32x4_t vacc2x4567 = vacc0x4567;
    float32x4_t vacc3x0123 = vacc0x0123;
    float32x4_t vacc3x4567 = vacc0x4567;
    float32x4_t vacc4x0123 = vacc0x0123;
    float32x4_t vacc4x4567 = vacc0x4567;
    float32x4_t vacc5x0123 = vacc0x0123;
    float32x4_t vacc5x4567 = vacc0x4567;

    // Main reduction loop: two k-steps per iteration (64-bit A loads).
    size_t k = kc;
    for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
      const float32x2_t va0 = vld1_f32(a0); a0 += 2;
      const float32x2_t va1 = vld1_f32(a1); a1 += 2;
      const float32x2_t va2 = vld1_f32(a2); a2 += 2;
      const float32x2_t va3 = vld1_f32(a3); a3 += 2;
      const float32x2_t va4 = vld1_f32(a4); a4 += 2;
      const float32x2_t va5 = vld1_f32(a5); a5 += 2;

      const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
      const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;

      // k-step 0: multiply-accumulate lane 0 of each A pair.
      vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
      vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
      vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
      vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
      vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c0, va4, 0);
      vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123c0, va5, 0);
      vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
      vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
      vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
      vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
      vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c0, va4, 0);
      vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567c0, va5, 0);
      // k-step 1: multiply-accumulate lane 1.
      vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
      vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
      vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
      vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
      vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c1, va4, 1);
      vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123c1, va5, 1);
      vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
      vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
      vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
      vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
      vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c1, va4, 1);
      vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567c1, va5, 1);
    }
    // Remainder: kc with an odd float count leaves exactly one k-step here.
    if XNN_UNLIKELY(k != 0) {
      const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
      const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
      const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
      const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
      const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
      const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;

      const float32x4_t vb0123 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567 = vld1q_f32(w); w += 4;

      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
      vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
      vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123);
      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
      vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
      vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567);
    }

    // Clamp the accumulators: upper bound first, then lower bound.
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
    vacc4x0123 = vminq_f32(vacc4x0123, vmax);
    vacc5x0123 = vminq_f32(vacc5x0123, vmax);
    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    vacc4x4567 = vminq_f32(vacc4x4567, vmax);
    vacc5x4567 = vminq_f32(vacc5x4567, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
    vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
    vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
    vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column group: store all rows (highest row first), advance C
      // pointers to the next column group, and rewind A pointers by kc bytes
      // so the same A rows feed the next group.
      vst1q_f32(c5, vacc5x0123);
      vst1q_f32(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      vst1q_f32(c4, vacc4x0123);
      vst1q_f32(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      vst1q_f32(c3, vacc3x0123);
      vst1q_f32(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      vst1q_f32(c2, vacc2x0123);
      vst1q_f32(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      vst1q_f32(c1, vacc1x0123);
      vst1q_f32(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      vst1q_f32(c0, vacc0x0123);
      vst1q_f32(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Column tail: store 4, then 2, then 1 column(s), shifting the
      // remaining lanes down after each partial store.
      if (nc & 4) {
        vst1q_f32(c5, vacc5x0123); c5 += 4;
        vst1q_f32(c4, vacc4x0123); c4 += 4;
        vst1q_f32(c3, vacc3x0123); c3 += 4;
        vst1q_f32(c2, vacc2x0123); c2 += 4;
        vst1q_f32(c1, vacc1x0123); c1 += 4;
        vst1q_f32(c0, vacc0x0123); c0 += 4;

        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;
      }
      float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
      float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      if (nc & 2) {
        vst1_f32(c5, vacc5x01); c5 += 2;
        vst1_f32(c4, vacc4x01); c4 += 2;
        vst1_f32(c3, vacc3x01); c3 += 2;
        vst1_f32(c2, vacc2x01); c2 += 2;
        vst1_f32(c1, vacc1x01); c1 += 2;
        vst1_f32(c0, vacc0x01); c0 += 2;

        vacc5x01 = vget_high_f32(vacc5x0123);
        vacc4x01 = vget_high_f32(vacc4x0123);
        vacc3x01 = vget_high_f32(vacc3x0123);
        vacc2x01 = vget_high_f32(vacc2x0123);
        vacc1x01 = vget_high_f32(vacc1x0123);
        vacc0x01 = vget_high_f32(vacc0x0123);
      }
      if (nc & 1) {
        vst1_lane_f32(c5, vacc5x01, 0);
        vst1_lane_f32(c4, vacc4x01, 0);
        vst1_lane_f32(c3, vacc3x01, 0);
        vst1_lane_f32(c2, vacc2x01, 0);
        vst1_lane_f32(c1, vacc1x01, 0);
        vst1_lane_f32(c0, vacc0x01, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
9,655
36.71875
75
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-minmax-neonfma-dup-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/neon-ld128.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>

// f32 GEMM microkernel producing a 6x8 output tile with min/max clamping:
// C[0:mr, 0:nc] = clamp(A * B + bias, params->scalar.min, params->scalar.max).
//
//   mr        - rows of the tile actually used (1..6); pointers for unused
//               rows alias lower rows so the full 6-row path runs
//               unconditionally (row c0 is stored last and is always valid).
//   nc        - output columns; processed 8 at a time with a 4/2/1-column
//               remainder path.
//   kc        - reduction length in BYTES (asserted multiple of sizeof(float)).
//   a_stride  - byte stride between rows of A.
//   w         - packed weights: per 8-column group, 8 bias floats followed by
//               interleaved 8-wide B panels, consumed linearly.
//   cm_stride - byte stride between rows of C.
//   cn_stride - byte stride between 8-column groups of C.
//
// "neonfma_dup_ld128" variant: the main loop loads 128 bits (4 floats) of
// each A row, broadcasts individual elements with vdupq_lane_f32, and
// accumulates with fused multiply-add (vfmaq_f32).
void xnn_f32_gemm_minmax_ukernel_6x8__neonfma_dup_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Per-row A/C pointers; rows beyond mr alias the previous row.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  do {
    // Initialize accumulators from the bias at the head of the weights.
    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
    float32x4_t vacc1x0123 = vacc0x0123;
    float32x4_t vacc1x4567 = vacc0x4567;
    float32x4_t vacc2x0123 = vacc0x0123;
    float32x4_t vacc2x4567 = vacc0x4567;
    float32x4_t vacc3x0123 = vacc0x0123;
    float32x4_t vacc3x4567 = vacc0x4567;
    float32x4_t vacc4x0123 = vacc0x0123;
    float32x4_t vacc4x4567 = vacc0x4567;
    float32x4_t vacc5x0123 = vacc0x0123;
    float32x4_t vacc5x4567 = vacc0x4567;

    // Main reduction loop: four k-steps per iteration (128-bit A loads).
    size_t k = kc;
    for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
      const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
      const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
      const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
      const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
      const float32x4_t va4 = vld1q_f32(a4); a4 += 4;
      const float32x4_t va5 = vld1q_f32(a5); a5 += 4;

      // k-step 0: broadcast element 0 (low half, lane 0) of each A vector.
      const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;

      const float32x4_t va0c0 = vdupq_lane_f32(vget_low_f32(va0), 0);
      const float32x4_t va1c0 = vdupq_lane_f32(vget_low_f32(va1), 0);
      const float32x4_t va2c0 = vdupq_lane_f32(vget_low_f32(va2), 0);
      const float32x4_t va3c0 = vdupq_lane_f32(vget_low_f32(va3), 0);
      const float32x4_t va4c0 = vdupq_lane_f32(vget_low_f32(va4), 0);
      const float32x4_t va5c0 = vdupq_lane_f32(vget_low_f32(va5), 0);
      vacc0x0123 = vfmaq_f32(vacc0x0123, va0c0, vb0123c0);
      vacc1x0123 = vfmaq_f32(vacc1x0123, va1c0, vb0123c0);
      vacc2x0123 = vfmaq_f32(vacc2x0123, va2c0, vb0123c0);
      vacc3x0123 = vfmaq_f32(vacc3x0123, va3c0, vb0123c0);
      vacc4x0123 = vfmaq_f32(vacc4x0123, va4c0, vb0123c0);
      vacc5x0123 = vfmaq_f32(vacc5x0123, va5c0, vb0123c0);
      vacc0x4567 = vfmaq_f32(vacc0x4567, va0c0, vb4567c0);
      vacc1x4567 = vfmaq_f32(vacc1x4567, va1c0, vb4567c0);
      vacc2x4567 = vfmaq_f32(vacc2x4567, va2c0, vb4567c0);
      vacc3x4567 = vfmaq_f32(vacc3x4567, va3c0, vb4567c0);
      vacc4x4567 = vfmaq_f32(vacc4x4567, va4c0, vb4567c0);
      vacc5x4567 = vfmaq_f32(vacc5x4567, va5c0, vb4567c0);
      // k-step 1: broadcast element 1 (low half, lane 1).
      const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;

      const float32x4_t va0c1 = vdupq_lane_f32(vget_low_f32(va0), 1);
      const float32x4_t va1c1 = vdupq_lane_f32(vget_low_f32(va1), 1);
      const float32x4_t va2c1 = vdupq_lane_f32(vget_low_f32(va2), 1);
      const float32x4_t va3c1 = vdupq_lane_f32(vget_low_f32(va3), 1);
      const float32x4_t va4c1 = vdupq_lane_f32(vget_low_f32(va4), 1);
      const float32x4_t va5c1 = vdupq_lane_f32(vget_low_f32(va5), 1);
      vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
      vacc1x0123 = vfmaq_f32(vacc1x0123, va1c1, vb0123c1);
      vacc2x0123 = vfmaq_f32(vacc2x0123, va2c1, vb0123c1);
      vacc3x0123 = vfmaq_f32(vacc3x0123, va3c1, vb0123c1);
      vacc4x0123 = vfmaq_f32(vacc4x0123, va4c1, vb0123c1);
      vacc5x0123 = vfmaq_f32(vacc5x0123, va5c1, vb0123c1);
      vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
      vacc1x4567 = vfmaq_f32(vacc1x4567, va1c1, vb4567c1);
      vacc2x4567 = vfmaq_f32(vacc2x4567, va2c1, vb4567c1);
      vacc3x4567 = vfmaq_f32(vacc3x4567, va3c1, vb4567c1);
      vacc4x4567 = vfmaq_f32(vacc4x4567, va4c1, vb4567c1);
      vacc5x4567 = vfmaq_f32(vacc5x4567, va5c1, vb4567c1);
      // k-step 2: broadcast element 2 (high half, lane 0).
      const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;

      const float32x4_t va0c2 = vdupq_lane_f32(vget_high_f32(va0), 0);
      const float32x4_t va1c2 = vdupq_lane_f32(vget_high_f32(va1), 0);
      const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);
      const float32x4_t va3c2 = vdupq_lane_f32(vget_high_f32(va3), 0);
      const float32x4_t va4c2 = vdupq_lane_f32(vget_high_f32(va4), 0);
      const float32x4_t va5c2 = vdupq_lane_f32(vget_high_f32(va5), 0);
      vacc0x0123 = vfmaq_f32(vacc0x0123, va0c2, vb0123c2);
      vacc1x0123 = vfmaq_f32(vacc1x0123, va1c2, vb0123c2);
      vacc2x0123 = vfmaq_f32(vacc2x0123, va2c2, vb0123c2);
      vacc3x0123 = vfmaq_f32(vacc3x0123, va3c2, vb0123c2);
      vacc4x0123 = vfmaq_f32(vacc4x0123, va4c2, vb0123c2);
      vacc5x0123 = vfmaq_f32(vacc5x0123, va5c2, vb0123c2);
      vacc0x4567 = vfmaq_f32(vacc0x4567, va0c2, vb4567c2);
      vacc1x4567 = vfmaq_f32(vacc1x4567, va1c2, vb4567c2);
      vacc2x4567 = vfmaq_f32(vacc2x4567, va2c2, vb4567c2);
      vacc3x4567 = vfmaq_f32(vacc3x4567, va3c2, vb4567c2);
      vacc4x4567 = vfmaq_f32(vacc4x4567, va4c2, vb4567c2);
      vacc5x4567 = vfmaq_f32(vacc5x4567, va5c2, vb4567c2);
      // k-step 3: broadcast element 3 (high half, lane 1).
      const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;

      const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);
      const float32x4_t va1c3 = vdupq_lane_f32(vget_high_f32(va1), 1);
      const float32x4_t va2c3 = vdupq_lane_f32(vget_high_f32(va2), 1);
      const float32x4_t va3c3 = vdupq_lane_f32(vget_high_f32(va3), 1);
      const float32x4_t va4c3 = vdupq_lane_f32(vget_high_f32(va4), 1);
      const float32x4_t va5c3 = vdupq_lane_f32(vget_high_f32(va5), 1);
      vacc0x0123 = vfmaq_f32(vacc0x0123, va0c3, vb0123c3);
      vacc1x0123 = vfmaq_f32(vacc1x0123, va1c3, vb0123c3);
      vacc2x0123 = vfmaq_f32(vacc2x0123, va2c3, vb0123c3);
      vacc3x0123 = vfmaq_f32(vacc3x0123, va3c3, vb0123c3);
      vacc4x0123 = vfmaq_f32(vacc4x0123, va4c3, vb0123c3);
      vacc5x0123 = vfmaq_f32(vacc5x0123, va5c3, vb0123c3);
      vacc0x4567 = vfmaq_f32(vacc0x4567, va0c3, vb4567c3);
      vacc1x4567 = vfmaq_f32(vacc1x4567, va1c3, vb4567c3);
      vacc2x4567 = vfmaq_f32(vacc2x4567, va2c3, vb4567c3);
      vacc3x4567 = vfmaq_f32(vacc3x4567, va3c3, vb4567c3);
      vacc4x4567 = vfmaq_f32(vacc4x4567, va4c3, vb4567c3);
      vacc5x4567 = vfmaq_f32(vacc5x4567, va5c3, vb4567c3);
    }
    // Remainder: up to 3 leftover k-steps, one float at a time.
    if XNN_UNLIKELY(k != 0) {
      do {
        const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
        const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
        const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
        const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
        const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
        const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;

        const float32x4_t vb0123 = vld1q_f32(w); w += 4;
        const float32x4_t vb4567 = vld1q_f32(w); w += 4;

        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
        vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
        vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
        vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
        vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
        vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123);
        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
        vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
        vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
        vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
        vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
        vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567);

        k -= sizeof(float);
      } while (k != 0);
    }

    // Clamp the accumulators: upper bound first, then lower bound.
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
    vacc4x0123 = vminq_f32(vacc4x0123, vmax);
    vacc5x0123 = vminq_f32(vacc5x0123, vmax);
    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    vacc4x4567 = vminq_f32(vacc4x4567, vmax);
    vacc5x4567 = vminq_f32(vacc5x4567, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
    vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
    vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
    vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column group: store all rows (highest row first), advance C
      // pointers to the next group, and rewind A pointers by kc bytes.
      vst1q_f32(c5, vacc5x0123);
      vst1q_f32(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      vst1q_f32(c4, vacc4x0123);
      vst1q_f32(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      vst1q_f32(c3, vacc3x0123);
      vst1q_f32(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      vst1q_f32(c2, vacc2x0123);
      vst1q_f32(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      vst1q_f32(c1, vacc1x0123);
      vst1q_f32(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      vst1q_f32(c0, vacc0x0123);
      vst1q_f32(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Column tail: store 4, then 2, then 1 column(s), shifting the
      // surviving lanes down after each partial store.
      if (nc & 4) {
        vst1q_f32(c5, vacc5x0123); c5 += 4;
        vst1q_f32(c4, vacc4x0123); c4 += 4;
        vst1q_f32(c3, vacc3x0123); c3 += 4;
        vst1q_f32(c2, vacc2x0123); c2 += 4;
        vst1q_f32(c1, vacc1x0123); c1 += 4;
        vst1q_f32(c0, vacc0x0123); c0 += 4;

        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;
      }
      float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
      float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      if (nc & 2) {
        vst1_f32(c5, vacc5x01); c5 += 2;
        vst1_f32(c4, vacc4x01); c4 += 2;
        vst1_f32(c3, vacc3x01); c3 += 2;
        vst1_f32(c2, vacc2x01); c2 += 2;
        vst1_f32(c1, vacc1x01); c1 += 2;
        vst1_f32(c0, vacc0x01); c0 += 2;

        vacc5x01 = vget_high_f32(vacc5x0123);
        vacc4x01 = vget_high_f32(vacc4x0123);
        vacc3x01 = vget_high_f32(vacc3x0123);
        vacc2x01 = vget_high_f32(vacc2x0123);
        vacc1x01 = vget_high_f32(vacc1x0123);
        vacc0x01 = vget_high_f32(vacc0x0123);
      }
      if (nc & 1) {
        vst1_lane_f32(c5, vacc5x01, 0);
        vst1_lane_f32(c4, vacc4x01, 0);
        vst1_lane_f32(c3, vacc3x01, 0);
        vst1_lane_f32(c2, vacc2x01, 0);
        vst1_lane_f32(c1, vacc1x01, 0);
        vst1_lane_f32(c0, vacc0x01, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
12,968
39.655172
75
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-minmax-neonfma-dup-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/neon-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>

// f32 GEMM microkernel producing a 6x8 output tile with min/max clamping:
// C[0:mr, 0:nc] = clamp(A * B + bias, params->scalar.min, params->scalar.max).
//
//   mr        - rows of the tile actually used (1..6); pointers for unused
//               rows alias lower rows so the full 6-row path runs
//               unconditionally (row c0 is stored last and is always valid).
//   nc        - output columns; processed 8 at a time with a 4/2/1-column
//               remainder path.
//   kc        - reduction length in BYTES (asserted multiple of sizeof(float)).
//   a_stride  - byte stride between rows of A.
//   w         - packed weights: per 8-column group, 8 bias floats followed by
//               interleaved 8-wide B panels, consumed linearly.
//   cm_stride - byte stride between rows of C.
//   cn_stride - byte stride between 8-column groups of C.
//
// "neonfma_dup_ld64" variant: the main loop loads 64 bits (2 floats) of each
// A row, broadcasts each element with vdupq_lane_f32, and accumulates with
// fused multiply-add (vfmaq_f32).
void xnn_f32_gemm_minmax_ukernel_6x8__neonfma_dup_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Per-row A/C pointers; rows beyond mr alias the previous row.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  do {
    // Initialize accumulators from the bias at the head of the weights.
    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
    float32x4_t vacc1x0123 = vacc0x0123;
    float32x4_t vacc1x4567 = vacc0x4567;
    float32x4_t vacc2x0123 = vacc0x0123;
    float32x4_t vacc2x4567 = vacc0x4567;
    float32x4_t vacc3x0123 = vacc0x0123;
    float32x4_t vacc3x4567 = vacc0x4567;
    float32x4_t vacc4x0123 = vacc0x0123;
    float32x4_t vacc4x4567 = vacc0x4567;
    float32x4_t vacc5x0123 = vacc0x0123;
    float32x4_t vacc5x4567 = vacc0x4567;

    // Main reduction loop: two k-steps per iteration (64-bit A loads).
    size_t k = kc;
    for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
      const float32x2_t va0 = vld1_f32(a0); a0 += 2;
      const float32x2_t va1 = vld1_f32(a1); a1 += 2;
      const float32x2_t va2 = vld1_f32(a2); a2 += 2;
      const float32x2_t va3 = vld1_f32(a3); a3 += 2;
      const float32x2_t va4 = vld1_f32(a4); a4 += 2;
      const float32x2_t va5 = vld1_f32(a5); a5 += 2;

      const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
      const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;

      // k-step 0: broadcast lane 0 of each A pair, fused multiply-add.
      const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
      const float32x4_t va1c0 = vdupq_lane_f32(va1, 0);
      const float32x4_t va2c0 = vdupq_lane_f32(va2, 0);
      const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
      const float32x4_t va4c0 = vdupq_lane_f32(va4, 0);
      const float32x4_t va5c0 = vdupq_lane_f32(va5, 0);
      vacc0x0123 = vfmaq_f32(vacc0x0123, va0c0, vb0123c0);
      vacc1x0123 = vfmaq_f32(vacc1x0123, va1c0, vb0123c0);
      vacc2x0123 = vfmaq_f32(vacc2x0123, va2c0, vb0123c0);
      vacc3x0123 = vfmaq_f32(vacc3x0123, va3c0, vb0123c0);
      vacc4x0123 = vfmaq_f32(vacc4x0123, va4c0, vb0123c0);
      vacc5x0123 = vfmaq_f32(vacc5x0123, va5c0, vb0123c0);
      vacc0x4567 = vfmaq_f32(vacc0x4567, va0c0, vb4567c0);
      vacc1x4567 = vfmaq_f32(vacc1x4567, va1c0, vb4567c0);
      vacc2x4567 = vfmaq_f32(vacc2x4567, va2c0, vb4567c0);
      vacc3x4567 = vfmaq_f32(vacc3x4567, va3c0, vb4567c0);
      vacc4x4567 = vfmaq_f32(vacc4x4567, va4c0, vb4567c0);
      vacc5x4567 = vfmaq_f32(vacc5x4567, va5c0, vb4567c0);
      // k-step 1: broadcast lane 1.
      const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
      const float32x4_t va1c1 = vdupq_lane_f32(va1, 1);
      const float32x4_t va2c1 = vdupq_lane_f32(va2, 1);
      const float32x4_t va3c1 = vdupq_lane_f32(va3, 1);
      const float32x4_t va4c1 = vdupq_lane_f32(va4, 1);
      const float32x4_t va5c1 = vdupq_lane_f32(va5, 1);
      vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
      vacc1x0123 = vfmaq_f32(vacc1x0123, va1c1, vb0123c1);
      vacc2x0123 = vfmaq_f32(vacc2x0123, va2c1, vb0123c1);
      vacc3x0123 = vfmaq_f32(vacc3x0123, va3c1, vb0123c1);
      vacc4x0123 = vfmaq_f32(vacc4x0123, va4c1, vb0123c1);
      vacc5x0123 = vfmaq_f32(vacc5x0123, va5c1, vb0123c1);
      vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
      vacc1x4567 = vfmaq_f32(vacc1x4567, va1c1, vb4567c1);
      vacc2x4567 = vfmaq_f32(vacc2x4567, va2c1, vb4567c1);
      vacc3x4567 = vfmaq_f32(vacc3x4567, va3c1, vb4567c1);
      vacc4x4567 = vfmaq_f32(vacc4x4567, va4c1, vb4567c1);
      vacc5x4567 = vfmaq_f32(vacc5x4567, va5c1, vb4567c1);
    }
    // Remainder: kc with an odd float count leaves exactly one k-step here.
    if XNN_UNLIKELY(k != 0) {
      const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
      const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
      const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
      const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
      const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
      const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;

      const float32x4_t vb0123 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567 = vld1q_f32(w); w += 4;

      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
      vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
      vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123);
      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
      vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
      vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567);
    }

    // Clamp the accumulators: upper bound first, then lower bound.
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
    vacc4x0123 = vminq_f32(vacc4x0123, vmax);
    vacc5x0123 = vminq_f32(vacc5x0123, vmax);
    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    vacc4x4567 = vminq_f32(vacc4x4567, vmax);
    vacc5x4567 = vminq_f32(vacc5x4567, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
    vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
    vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
    vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column group: store all rows (highest row first), advance C
      // pointers to the next group, and rewind A pointers by kc bytes.
      vst1q_f32(c5, vacc5x0123);
      vst1q_f32(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      vst1q_f32(c4, vacc4x0123);
      vst1q_f32(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      vst1q_f32(c3, vacc3x0123);
      vst1q_f32(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      vst1q_f32(c2, vacc2x0123);
      vst1q_f32(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      vst1q_f32(c1, vacc1x0123);
      vst1q_f32(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      vst1q_f32(c0, vacc0x0123);
      vst1q_f32(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Column tail: store 4, then 2, then 1 column(s), shifting the
      // surviving lanes down after each partial store.
      if (nc & 4) {
        vst1q_f32(c5, vacc5x0123); c5 += 4;
        vst1q_f32(c4, vacc4x0123); c4 += 4;
        vst1q_f32(c3, vacc3x0123); c3 += 4;
        vst1q_f32(c2, vacc2x0123); c2 += 4;
        vst1q_f32(c1, vacc1x0123); c1 += 4;
        vst1q_f32(c0, vacc0x0123); c0 += 4;

        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;
      }
      float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
      float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      if (nc & 2) {
        vst1_f32(c5, vacc5x01); c5 += 2;
        vst1_f32(c4, vacc4x01); c4 += 2;
        vst1_f32(c3, vacc3x01); c3 += 2;
        vst1_f32(c2, vacc2x01); c2 += 2;
        vst1_f32(c1, vacc1x01); c1 += 2;
        vst1_f32(c0, vacc0x01); c0 += 2;

        vacc5x01 = vget_high_f32(vacc5x0123);
        vacc4x01 = vget_high_f32(vacc4x0123);
        vacc3x01 = vget_high_f32(vacc3x0123);
        vacc2x01 = vget_high_f32(vacc2x0123);
        vacc1x01 = vget_high_f32(vacc1x0123);
        vacc0x01 = vget_high_f32(vacc0x0123);
      }
      if (nc & 1) {
        vst1_lane_f32(c5, vacc5x01, 0);
        vst1_lane_f32(c4, vacc4x01, 0);
        vst1_lane_f32(c3, vacc3x01, 0);
        vst1_lane_f32(c2, vacc2x01, 0);
        vst1_lane_f32(c1, vacc1x01, 0);
        vst1_lane_f32(c0, vacc0x01, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
10,185
37.007463
75
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-minmax-wasmrelaxedsimd-fma-loadsplat.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/wasmsimd-loadsplat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/gemm.h> void xnn_f32_gemm_minmax_ukernel_6x8__wasmrelaxedsimd_fma_loadsplat( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 6); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const float* a0 = a; float* c0 = c; const float* a1 = (const float*) ((uintptr_t) a0 + a_stride); float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const float* a2 = (const float*) ((uintptr_t) a1 + a_stride); float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const float* a3 = (const float*) ((uintptr_t) a2 + a_stride); float* c3 = (float*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr < 4) { a3 = a2; c3 = c2; } const float* a4 = (const float*) ((uintptr_t) a3 + a_stride); float* c4 = (float*) ((uintptr_t) c3 + cm_stride); if XNN_UNPREDICTABLE(mr <= 4) { a4 = a3; c4 = c3; } const float* a5 = (const float*) ((uintptr_t) a4 + a_stride); float* c5 = (float*) ((uintptr_t) c4 + cm_stride); if XNN_UNPREDICTABLE(mr != 6) { a5 = a4; c5 = c4; } const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w + 0); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t 
vacc2x4567 = vacc0x4567; v128_t vacc3x0123 = vacc0x0123; v128_t vacc3x4567 = vacc0x4567; v128_t vacc4x0123 = vacc0x0123; v128_t vacc4x4567 = vacc0x4567; v128_t vacc5x0123 = vacc0x0123; v128_t vacc5x4567 = vacc0x4567; w += 8; size_t k = kc; do { const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; const v128_t va3 = wasm_v128_load32_splat(a3); a3 += 1; const v128_t va4 = wasm_v128_load32_splat(a4); a4 += 1; const v128_t va5 = wasm_v128_load32_splat(a5); a5 += 1; const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567); vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123); vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567); vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123); vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567); vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123, vacc5x0123); vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567, vacc5x4567); k -= sizeof(float); } while (k != 0); vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123); vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123); vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123); vacc5x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x0123); vacc0x4567 = 
__builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567); vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567); vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567); vacc5x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x4567); vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123); vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123); vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123); vacc5x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x0123); vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567); vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567); vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567); vacc5x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c5 + 4, vacc5x4567); c5 = (float*) ((uintptr_t) c5 + cn_stride); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c4 + 4, vacc4x4567); c4 = (float*) ((uintptr_t) c4 + cn_stride); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c3 + 4, vacc3x4567); c3 = (float*) ((uintptr_t) c3 + cn_stride); wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a5 = (const float*) ((uintptr_t) a5 - kc); a4 = (const float*) ((uintptr_t) a4 - kc); a3 = (const float*) ((uintptr_t) a3 - kc); a2 = 
(const float*) ((uintptr_t) a2 - kc); a1 = (const float*) ((uintptr_t) a1 - kc); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc5x0123 = vacc5x4567; vacc4x0123 = vacc4x4567; vacc3x0123 = vacc3x4567; vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c5 += 4; c4 += 4; c3 += 4; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c5, vacc5x0123, 0); wasm_v128_store64_lane(c4, vacc4x0123, 0); wasm_v128_store64_lane(c3, vacc3x0123, 0); wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1); vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1); vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c5 += 2; c4 += 2; c3 += 2; c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c5, vacc5x0123, 0); wasm_v128_store32_lane(c4, vacc4x0123, 0); wasm_v128_store32_lane(c3, vacc3x0123, 0); wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
8,597
35.587234
78
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-minmax-wasmrelaxedsimd-fma-splat.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/wasmsimd-splat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/gemm.h> void xnn_f32_gemm_minmax_ukernel_6x8__wasmrelaxedsimd_fma_splat( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 6); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const float* a0 = a; float* c0 = c; const float* a1 = (const float*) ((uintptr_t) a0 + a_stride); float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const float* a2 = (const float*) ((uintptr_t) a1 + a_stride); float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const float* a3 = (const float*) ((uintptr_t) a2 + a_stride); float* c3 = (float*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr < 4) { a3 = a2; c3 = c2; } const float* a4 = (const float*) ((uintptr_t) a3 + a_stride); float* c4 = (float*) ((uintptr_t) c3 + cm_stride); if XNN_UNPREDICTABLE(mr <= 4) { a4 = a3; c4 = c3; } const float* a5 = (const float*) ((uintptr_t) a4 + a_stride); float* c5 = (float*) ((uintptr_t) c4 + cm_stride); if XNN_UNPREDICTABLE(mr != 6) { a5 = a4; c5 = c4; } const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w + 0); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = 
vacc0x4567; v128_t vacc3x0123 = vacc0x0123; v128_t vacc3x4567 = vacc0x4567; v128_t vacc4x0123 = vacc0x0123; v128_t vacc4x4567 = vacc0x4567; v128_t vacc5x0123 = vacc0x0123; v128_t vacc5x4567 = vacc0x4567; w += 8; size_t k = kc; while (k >= 4 * sizeof(float)) { const v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t va1 = wasm_v128_load(a1); a1 += 4; const v128_t va2 = wasm_v128_load(a2); a2 += 4; const v128_t va3 = wasm_v128_load(a3); a3 += 4; const v128_t va4 = wasm_v128_load(a4); a4 += 4; const v128_t va5 = wasm_v128_load(a5); a5 += 4; const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0); const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0); const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0); const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0); const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0); const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123); vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb0123c0, vacc3x0123); vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb0123c0, vacc4x0123); vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c0, vb0123c0, vacc5x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567); vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb4567c0, vacc3x4567); vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb4567c0, vacc4x4567); vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c0, vb4567c0, vacc5x4567); const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 
1, 1); const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1); const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1); const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1); const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1); const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123); vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb0123c1, vacc3x0123); vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb0123c1, vacc4x0123); vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c1, vb0123c1, vacc5x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567); vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb4567c1, vacc3x4567); vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb4567c1, vacc4x4567); vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c1, vb4567c1, vacc5x4567); const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2); const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2); const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2); const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2); const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2); const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123); vacc2x0123 = 
__builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123); vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb0123c2, vacc3x0123); vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb0123c2, vacc4x0123); vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c2, vb0123c2, vacc5x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567); vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb4567c2, vacc3x4567); vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb4567c2, vacc4x4567); vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c2, vb4567c2, vacc5x4567); const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3); const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3); const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3); const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3); const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3); const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123); vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb0123c3, vacc3x0123); vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb0123c3, vacc4x0123); vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c3, vb0123c3, vacc5x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567); vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb4567c3, 
vacc3x4567); vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb4567c3, vacc4x4567); vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c3, vb4567c3, vacc5x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { do { const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; const v128_t va3 = wasm_v128_load32_splat(a3); a3 += 1; const v128_t va4 = wasm_v128_load32_splat(a4); a4 += 1; const v128_t va5 = wasm_v128_load32_splat(a5); a5 += 1; const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123); vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123); vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123); vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123, vacc5x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567); vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567); vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567); vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567, vacc5x4567); k -= sizeof(float); } while (k != 0); } vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123); vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123); vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123); vacc5x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x0123); vacc0x4567 = 
__builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567); vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567); vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567); vacc5x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x4567); vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123); vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123); vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123); vacc5x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x0123); vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567); vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567); vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567); vacc5x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c5 + 4, vacc5x4567); c5 = (float*) ((uintptr_t) c5 + cn_stride); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c4 + 4, vacc4x4567); c4 = (float*) ((uintptr_t) c4 + cn_stride); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c3 + 4, vacc3x4567); c3 = (float*) ((uintptr_t) c3 + cn_stride); wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a5 = (const float*) ((uintptr_t) a5 - kc); a4 = (const float*) ((uintptr_t) a4 - kc); a3 = (const float*) ((uintptr_t) a3 - kc); a2 = 
(const float*) ((uintptr_t) a2 - kc); a1 = (const float*) ((uintptr_t) a1 - kc); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc5x0123 = vacc5x4567; vacc4x0123 = vacc4x4567; vacc3x0123 = vacc3x4567; vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c5 += 4; c4 += 4; c3 += 4; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c5, vacc5x0123, 0); wasm_v128_store64_lane(c4, vacc4x0123, 0); wasm_v128_store64_lane(c3, vacc3x0123, 0); wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1); vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1); vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c5 += 2; c4 += 2; c3 += 2; c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c5, vacc5x0123, 0); wasm_v128_store32_lane(c4, vacc4x0123, 0); wasm_v128_store32_lane(c3, vacc3x0123, 0); wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
15,211
43.479532
82
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-minmax-wasmrelaxedsimd-loadsplat.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/wasmsimd-loadsplat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/gemm.h> void xnn_f32_gemm_minmax_ukernel_6x8__wasmrelaxedsimd_loadsplat( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 6); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const float* a0 = a; float* c0 = c; const float* a1 = (const float*) ((uintptr_t) a0 + a_stride); float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const float* a2 = (const float*) ((uintptr_t) a1 + a_stride); float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const float* a3 = (const float*) ((uintptr_t) a2 + a_stride); float* c3 = (float*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr < 4) { a3 = a2; c3 = c2; } const float* a4 = (const float*) ((uintptr_t) a3 + a_stride); float* c4 = (float*) ((uintptr_t) c3 + cm_stride); if XNN_UNPREDICTABLE(mr <= 4) { a4 = a3; c4 = c3; } const float* a5 = (const float*) ((uintptr_t) a4 + a_stride); float* c5 = (float*) ((uintptr_t) c4 + cm_stride); if XNN_UNPREDICTABLE(mr != 6) { a5 = a4; c5 = c4; } const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w + 0); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 
= vacc0x4567; v128_t vacc3x0123 = vacc0x0123; v128_t vacc3x4567 = vacc0x4567; v128_t vacc4x0123 = vacc0x0123; v128_t vacc4x4567 = vacc0x4567; v128_t vacc5x0123 = vacc0x0123; v128_t vacc5x4567 = vacc0x4567; w += 8; size_t k = kc; do { const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; const v128_t va3 = wasm_v128_load32_splat(a3); a3 += 1; const v128_t va4 = wasm_v128_load32_splat(a4); a4 += 1; const v128_t va5 = wasm_v128_load32_splat(a5); a5 += 1; const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123)); vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567)); vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123)); vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567)); vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123)); vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567)); vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123)); vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567)); vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123)); vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567)); vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123)); vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567)); k -= sizeof(float); } while (k != 0); vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123); vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123); vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123); vacc5x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x0123); vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567); 
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567); vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567); vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567); vacc5x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x4567); vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123); vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123); vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123); vacc5x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x0123); vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567); vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567); vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567); vacc5x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c5 + 4, vacc5x4567); c5 = (float*) ((uintptr_t) c5 + cn_stride); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c4 + 4, vacc4x4567); c4 = (float*) ((uintptr_t) c4 + cn_stride); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c3 + 4, vacc3x4567); c3 = (float*) ((uintptr_t) c3 + cn_stride); wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a5 = (const float*) ((uintptr_t) a5 - kc); a4 = (const float*) ((uintptr_t) a4 - kc); a3 = (const float*) ((uintptr_t) a3 - kc); a2 = (const float*) ((uintptr_t) a2 - kc); a1 = (const 
float*) ((uintptr_t) a1 - kc); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc5x0123 = vacc5x4567; vacc4x0123 = vacc4x4567; vacc3x0123 = vacc3x4567; vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c5 += 4; c4 += 4; c3 += 4; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c5, vacc5x0123, 0); wasm_v128_store64_lane(c4, vacc4x0123, 0); wasm_v128_store64_lane(c3, vacc3x0123, 0); wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1); vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1); vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c5 += 2; c4 += 2; c3 += 2; c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c5, vacc5x0123, 0); wasm_v128_store32_lane(c4, vacc4x0123, 0); wasm_v128_store32_lane(c3, vacc3x0123, 0); wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
8,557
35.417021
75
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-minmax-wasmrelaxedsimd-splat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


// f32 GEMM microkernel computing a 6-row x 8-column tile of C = A*B with
// min/max output clamping, using WAsm relaxed SIMD and the "splat" scheme:
// the main loop consumes 4 K-elements per row at a time, broadcasting each
// lane of the A vector with a shuffle before multiplying into the packed
// 8-wide B panels.
//
//   mr        - rows of A/C actually valid (1..6)
//   nc        - columns of C remaining; decremented by 8 per iteration
//   kc        - bytes of K per A row (multiple of sizeof(float))
//   a, a_stride - matrix A base pointer and row stride in bytes
//   w         - packed weights: 8 bias floats, then 8-wide B column panels
//   c, cm_stride, cn_stride - matrix C base, row stride, 8-column stride
//   params    - clamping bounds in the wasmsimd layout
void xnn_f32_gemm_minmax_ukernel_6x8__wasmrelaxedsimd_splat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Per-row A/C pointers.  Rows beyond mr alias the previous row, so loads
  // and stores stay in bounds; the duplicated results are simply ignored.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Initialize all 6 rows of accumulators from the packed bias values.
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    v128_t vacc5x0123 = vacc0x0123;
    v128_t vacc5x4567 = vacc0x4567;
    w += 8;

    size_t k = kc;
    // Main loop: 4 K-steps per iteration, one A vector load per row.
    while (k >= 4 * sizeof(float)) {
      const v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      const v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      const v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      const v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      const v128_t va4 = wasm_v128_load(a4);
      a4 += 4;
      const v128_t va5 = wasm_v128_load(a5);
      a5 += 4;

      // K-step 0: broadcast lane 0 of each A vector.
      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
      const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb0123c0), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb4567c0), vacc5x4567);

      // K-step 1: broadcast lane 1.
      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
      const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb0123c1), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb4567c1), vacc5x4567);

      // K-step 2: broadcast lane 2.
      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
      const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb0123c2), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb4567c2), vacc5x4567);

      // K-step 3: broadcast lane 3.
      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
      const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb0123c3), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb4567c3), vacc5x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder loop: one K element at a time, splat-loaded from each row.
    if XNN_UNLIKELY(k != 0) {
      do {
        const v128_t va0 = wasm_v128_load32_splat(a0);
        a0 += 1;
        const v128_t va1 = wasm_v128_load32_splat(a1);
        a1 += 1;
        const v128_t va2 = wasm_v128_load32_splat(a2);
        a2 += 1;
        const v128_t va3 = wasm_v128_load32_splat(a3);
        a3 += 1;
        const v128_t va4 = wasm_v128_load32_splat(a4);
        a4 += 1;
        const v128_t va5 = wasm_v128_load32_splat(a5);
        a5 += 1;

        const v128_t vb0123 = wasm_v128_load(w);
        const v128_t vb4567 = wasm_v128_load(w + 4);
        w += 8;

        vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
        vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
        vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
        vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
        vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
        vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123), vacc5x0123);
        vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
        vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
        vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
        vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
        vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
        vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567), vacc5x4567);

        k -= sizeof(float);
      } while (k != 0);
    }

    // Clamp accumulators to [min, max] with relaxed-SIMD min/max builtins.
    vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
    vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
    vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
    vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
    vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
    vacc5x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x0123);
    vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
    vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
    vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
    vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
    vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
    vacc5x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x4567);

    vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
    vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
    vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
    vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
    vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
    vacc5x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x0123);
    vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
    vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
    vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
    vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
    vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
    vacc5x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store for each row, then rewind A pointers by kc so
      // the next 8-column group re-reads the same A rows.
      wasm_v128_store(c5, vacc5x0123);
      wasm_v128_store(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail: store 4/2/1 columns based on the bits of nc, shifting the
      // remaining lanes down after each partial store.
      if (nc & 4) {
        wasm_v128_store(c5, vacc5x0123);
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c5, vacc5x0123, 0);
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c5, vacc5x0123, 0);
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
15,027
42.94152
79
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-minmax-wasmsimd-arm-loadsplat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


// f32 GEMM microkernel computing a 6-row x 8-column tile of C = A*B with
// min/max output clamping, using baseline WAsm SIMD128 and the "loadsplat"
// scheme: each K step splat-loads a single scalar from every A row.  The
// "arm" variant orders min/max operands for ARM-style NaN propagation.
//
//   mr        - rows of A/C actually valid (1..6)
//   nc        - columns of C remaining; decremented by 8 per iteration
//   kc        - bytes of K per A row (multiple of sizeof(float))
//   a, a_stride - matrix A base pointer and row stride in bytes
//   w         - packed weights: 8 bias floats, then 8-wide B column panels
//   c, cm_stride, cn_stride - matrix C base, row stride, 8-column stride
//   params    - clamping bounds in the wasmsimd layout
void xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Per-row A/C pointers.  Rows beyond mr alias the previous row, so loads
  // and stores stay in bounds; the duplicated results are simply ignored.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Initialize all 6 rows of accumulators from the packed bias values.
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    v128_t vacc5x0123 = vacc0x0123;
    v128_t vacc5x4567 = vacc0x4567;
    w += 8;

    size_t k = kc;
    // One K element per iteration: splat a scalar from each A row and
    // multiply it into the two 4-wide B panels.
    do {
      const v128_t va0 = wasm_v128_load32_splat(a0);
      a0 += 1;
      const v128_t va1 = wasm_v128_load32_splat(a1);
      a1 += 1;
      const v128_t va2 = wasm_v128_load32_splat(a2);
      a2 += 1;
      const v128_t va3 = wasm_v128_load32_splat(a3);
      a3 += 1;
      const v128_t va4 = wasm_v128_load32_splat(a4);
      a4 += 1;
      const v128_t va5 = wasm_v128_load32_splat(a5);
      a5 += 1;

      const v128_t vb0123 = wasm_v128_load(w);
      const v128_t vb4567 = wasm_v128_load(w + 4);
      w += 8;

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));

      k -= sizeof(float);
    } while (k != 0);

    // Clamp accumulators to [min, max].
    vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
    vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
    vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
    vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
    vacc4x0123 = wasm_f32x4_max(vmin, vacc4x0123);
    vacc5x0123 = wasm_f32x4_max(vmin, vacc5x0123);
    vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
    vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
    vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
    vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
    vacc4x4567 = wasm_f32x4_max(vmin, vacc4x4567);
    vacc5x4567 = wasm_f32x4_max(vmin, vacc5x4567);

    vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
    vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
    vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
    vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
    vacc4x0123 = wasm_f32x4_min(vmax, vacc4x0123);
    vacc5x0123 = wasm_f32x4_min(vmax, vacc5x0123);
    vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
    vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
    vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
    vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
    vacc4x4567 = wasm_f32x4_min(vmax, vacc4x4567);
    vacc5x4567 = wasm_f32x4_min(vmax, vacc5x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store for each row, then rewind A pointers by kc so
      // the next 8-column group re-reads the same A rows.
      wasm_v128_store(c5, vacc5x0123);
      wasm_v128_store(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail: store 4/2/1 columns based on the bits of nc, shifting the
      // remaining lanes down after each partial store.
      if (nc & 4) {
        wasm_v128_store(c5, vacc5x0123);
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c5, vacc5x0123, 0);
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c5, vacc5x0123, 0);
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
8,122
33.565957
75
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-minmax-wasmsimd-arm-splat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


// f32 GEMM microkernel: MR=6 rows by NR=8 columns, WAsm SIMD128, "splat"
// variant — loads 4 A elements per row at a time and broadcasts each lane
// with a v32x4 shuffle. Accumulators are seeded from the packed bias at the
// start of w; the result is clamped to [min, max] from params.
// "arm" flavor: uses the NaN-propagating wasm_f32x4_max/min clamps.
//
//   mr        - number of valid A/C rows (1..6)
//   nc        - number of output columns remaining
//   kc        - reduction length in BYTES (multiple of sizeof(float))
//   a/a_stride- input rows, consecutive rows a_stride bytes apart
//   w         - packed weights: 8 bias floats, then 8 B floats per k step
//   c         - output, cm_stride bytes between rows, cn_stride bytes
//               between consecutive 8-column tiles
void xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Per-row pointers. Rows beyond mr alias the previous valid row, so their
  // (redundant) loads/stores stay in-bounds without extra branching.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  // Output clamping bounds, broadcast across all four lanes.
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Seed all 6 rows of accumulators from the packed bias (first 8 floats of w).
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    v128_t vacc5x0123 = vacc0x0123;
    v128_t vacc5x4567 = vacc0x4567;
    w += 8;

    // Main loop: process 4 k steps per iteration. Each iteration loads one
    // 4-float vector per A row and, for each of the 4 lanes (c0..c3),
    // broadcasts that lane and multiplies against the matching B panel.
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      const v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      const v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      const v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      const v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      const v128_t va4 = wasm_v128_load(a4);
      a4 += 4;
      const v128_t va5 = wasm_v128_load(a5);
      a5 += 4;

      // --- k step 0: broadcast lane 0 of each A vector ---
      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
      const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb0123c0), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb4567c0), vacc5x4567);

      // --- k step 1: broadcast lane 1 ---
      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
      const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb0123c1), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb4567c1), vacc5x4567);

      // --- k step 2: broadcast lane 2 ---
      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
      const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb0123c2), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb4567c2), vacc5x4567);

      // --- k step 3: broadcast lane 3 ---
      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
      const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb0123c3), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb4567c3), vacc5x4567);

      w += 32;  // 4 k steps x 8 B floats consumed
      k -= 4 * sizeof(float);
    }
    // Remainder loop: 1 k step per iteration (1..3 leftover elements),
    // broadcasting a single A element per row.
    if XNN_UNLIKELY(k != 0) {
      do {
        const v128_t va0 = wasm_v128_load32_splat(a0);
        a0 += 1;
        const v128_t va1 = wasm_v128_load32_splat(a1);
        a1 += 1;
        const v128_t va2 = wasm_v128_load32_splat(a2);
        a2 += 1;
        const v128_t va3 = wasm_v128_load32_splat(a3);
        a3 += 1;
        const v128_t va4 = wasm_v128_load32_splat(a4);
        a4 += 1;
        const v128_t va5 = wasm_v128_load32_splat(a5);
        a5 += 1;

        const v128_t vb0123 = wasm_v128_load(w);
        const v128_t vb4567 = wasm_v128_load(w + 4);
        w += 8;

        vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
        vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
        vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
        vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
        vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
        vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123), vacc5x0123);
        vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
        vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
        vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
        vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
        vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
        vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567), vacc5x4567);

        k -= sizeof(float);
      } while (k != 0);
    }

    // Clamp: lower bound first, then upper bound (wasm_f32x4_max/min).
    vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
    vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
    vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
    vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
    vacc4x0123 = wasm_f32x4_max(vmin, vacc4x0123);
    vacc5x0123 = wasm_f32x4_max(vmin, vacc5x0123);
    vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
    vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
    vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
    vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
    vacc4x4567 = wasm_f32x4_max(vmin, vacc4x4567);
    vacc5x4567 = wasm_f32x4_max(vmin, vacc5x4567);

    vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
    vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
    vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
    vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
    vacc4x0123 = wasm_f32x4_min(vmax, vacc4x0123);
    vacc5x0123 = wasm_f32x4_min(vmax, vacc5x0123);
    vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
    vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
    vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
    vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
    vacc4x4567 = wasm_f32x4_min(vmax, vacc4x4567);
    vacc5x4567 = wasm_f32x4_min(vmax, vacc5x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column tile: store all rows (highest row first), advance the
      // c pointers to the next tile, and rewind the a pointers by kc bytes
      // so the same rows can be re-read for the next nc iteration.
      wasm_v128_store(c5, vacc5x0123);
      wasm_v128_store(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial tile (nc < 8): store 4, then 2, then 1 columns, shifting the
      // remaining lanes down after each store.
      if (nc & 4) {
        wasm_v128_store(c5, vacc5x0123);
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c5, vacc5x0123, 0);
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        // Move the upper 64 bits into the lower half for the nc & 1 store.
        vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c5, vacc5x0123, 0);
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
14,592
41.669591
79
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-minmax-wasmsimd-x86-loadsplat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


// f32 GEMM microkernel: MR=6 rows by NR=8 columns, WAsm SIMD128,
// "loadsplat" variant — broadcasts one A element per row per k step with
// wasm_v128_load32_splat (no 4-step unrolled main loop). Accumulators are
// seeded from the packed bias at the start of w; the result is clamped to
// [min, max] from params.
// "x86" flavor: uses wasm_f32x4_pmax/pmin (pseudo-min/max with asymmetric
// NaN handling) for the clamp — presumably maps better to SSE; NOTE(review):
// differs from the arm flavor's wasm_f32x4_max/min only in NaN behavior.
//
//   mr        - number of valid A/C rows (1..6)
//   nc        - number of output columns remaining
//   kc        - reduction length in BYTES (multiple of sizeof(float))
//   a/a_stride- input rows, consecutive rows a_stride bytes apart
//   w         - packed weights: 8 bias floats, then 8 B floats per k step
//   c         - output, cm_stride bytes between rows, cn_stride bytes
//               between consecutive 8-column tiles
void xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Per-row pointers. Rows beyond mr alias the previous valid row, so their
  // (redundant) loads/stores stay in-bounds without extra branching.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  // Output clamping bounds, broadcast across all four lanes.
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Seed all 6 rows of accumulators from the packed bias (first 8 floats of w).
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    v128_t vacc5x0123 = vacc0x0123;
    v128_t vacc5x4567 = vacc0x4567;
    w += 8;

    // Reduction loop: one k step per iteration — broadcast one A element per
    // row, multiply against the 8-wide B panel, accumulate.
    size_t k = kc;
    do {
      const v128_t va0 = wasm_v128_load32_splat(a0);
      a0 += 1;
      const v128_t va1 = wasm_v128_load32_splat(a1);
      a1 += 1;
      const v128_t va2 = wasm_v128_load32_splat(a2);
      a2 += 1;
      const v128_t va3 = wasm_v128_load32_splat(a3);
      a3 += 1;
      const v128_t va4 = wasm_v128_load32_splat(a4);
      a4 += 1;
      const v128_t va5 = wasm_v128_load32_splat(a5);
      a5 += 1;

      const v128_t vb0123 = wasm_v128_load(w);
      const v128_t vb4567 = wasm_v128_load(w + 4);
      w += 8;

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));

      k -= sizeof(float);
    } while (k != 0);

    // Clamp: lower bound first, then upper bound (x86 pseudo-min/max).
    vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
    vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
    vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
    vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
    vacc4x0123 = wasm_f32x4_pmax(vmin, vacc4x0123);
    vacc5x0123 = wasm_f32x4_pmax(vmin, vacc5x0123);
    vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
    vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
    vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
    vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
    vacc4x4567 = wasm_f32x4_pmax(vmin, vacc4x4567);
    vacc5x4567 = wasm_f32x4_pmax(vmin, vacc5x4567);

    vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
    vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
    vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
    vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
    vacc4x0123 = wasm_f32x4_pmin(vmax, vacc4x0123);
    vacc5x0123 = wasm_f32x4_pmin(vmax, vacc5x0123);
    vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
    vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
    vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
    vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
    vacc4x4567 = wasm_f32x4_pmin(vmax, vacc4x4567);
    vacc5x4567 = wasm_f32x4_pmin(vmax, vacc5x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column tile: store all rows (highest row first), advance the
      // c pointers to the next tile, and rewind the a pointers by kc bytes
      // so the same rows can be re-read for the next nc iteration.
      wasm_v128_store(c5, vacc5x0123);
      wasm_v128_store(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial tile (nc < 8): store 4, then 2, then 1 columns, shifting the
      // remaining lanes down after each store.
      if (nc & 4) {
        wasm_v128_store(c5, vacc5x0123);
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c5, vacc5x0123, 0);
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        // Move the upper 64 bits into the lower half for the nc & 1 store.
        vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c5, vacc5x0123, 0);
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
8,146
33.668085
75
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-minmax-wasmsimd-x86-splat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

// f32 GEMM micro-kernel producing a 6-row x 8-column tile of C = A * W + bias,
// with every result clamped to [min, max] read from params->wasmsimd.
// WAsm-SIMD "x86 splat" variant: A elements are broadcast with v32x4 shuffles
// of a 4-wide load, and clamping uses pmin/pmax.
//
//   mr        - number of live rows (1..6); missing rows are aliased (below)
//   nc        - number of output columns remaining (> 0)
//   kc        - reduction length in BYTES (multiple of sizeof(float))
//   a, a_stride   - row-major input rows
//   w         - packed weights: 8 bias floats, then 8 weight floats per k step
//   c, cm_stride, cn_stride - output tile addressing
void xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Row pointer setup. When mr < 6, pointers for the absent rows alias the
  // previous row, so the redundant math reads/writes already-valid locations.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Initialize all six accumulator rows (two 4-lane halves each) with the
    // packed bias that precedes the weights.
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    v128_t vacc5x0123 = vacc0x0123;
    v128_t vacc5x4567 = vacc0x4567;
    w += 8;

    size_t k = kc;
    // Main loop: consume 4 k-steps per iteration. Each A row is loaded once
    // (4 floats) and each element is broadcast by shuffle for its k-step.
    while (k >= 4 * sizeof(float)) {
      const v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      const v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      const v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      const v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      const v128_t va4 = wasm_v128_load(a4);
      a4 += 4;
      const v128_t va5 = wasm_v128_load(a5);
      a5 += 4;

      // k-step 0: broadcast lane 0 of each A row, multiply by weights w[0..7].
      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
      const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb0123c0), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb4567c0), vacc5x4567);

      // k-step 1: broadcast lane 1, weights w[8..15].
      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
      const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb0123c1), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb4567c1), vacc5x4567);

      // k-step 2: broadcast lane 2, weights w[16..23].
      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
      const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb0123c2), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb4567c2), vacc5x4567);

      // k-step 3: broadcast lane 3, weights w[24..31].
      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
      const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb0123c3), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb4567c3), vacc5x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder loop: one k-step per iteration, A element broadcast via
    // single-float splat load.
    if XNN_UNLIKELY(k != 0) {
      do {
        const v128_t va0 = wasm_v128_load32_splat(a0);
        a0 += 1;
        const v128_t va1 = wasm_v128_load32_splat(a1);
        a1 += 1;
        const v128_t va2 = wasm_v128_load32_splat(a2);
        a2 += 1;
        const v128_t va3 = wasm_v128_load32_splat(a3);
        a3 += 1;
        const v128_t va4 = wasm_v128_load32_splat(a4);
        a4 += 1;
        const v128_t va5 = wasm_v128_load32_splat(a5);
        a5 += 1;

        const v128_t vb0123 = wasm_v128_load(w);
        const v128_t vb4567 = wasm_v128_load(w + 4);
        w += 8;

        vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
        vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
        vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
        vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
        vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
        vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123), vacc5x0123);
        vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
        vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
        vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
        vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
        vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
        vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567), vacc5x4567);

        k -= sizeof(float);
      } while (k != 0);
    }

    // Clamp: lower bound first (pmax against vmin), then upper (pmin vs vmax).
    vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
    vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
    vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
    vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
    vacc4x0123 = wasm_f32x4_pmax(vmin, vacc4x0123);
    vacc5x0123 = wasm_f32x4_pmax(vmin, vacc5x0123);
    vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
    vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
    vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
    vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
    vacc4x4567 = wasm_f32x4_pmax(vmin, vacc4x4567);
    vacc5x4567 = wasm_f32x4_pmax(vmin, vacc5x4567);

    vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
    vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
    vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
    vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
    vacc4x0123 = wasm_f32x4_pmin(vmax, vacc4x0123);
    vacc5x0123 = wasm_f32x4_pmin(vmax, vacc5x0123);
    vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
    vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
    vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
    vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
    vacc4x4567 = wasm_f32x4_pmin(vmax, vacc4x4567);
    vacc5x4567 = wasm_f32x4_pmin(vmax, vacc5x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store; advance c pointers to the next column block and
      // rewind a pointers to reuse the same A rows for it.
      wasm_v128_store(c5, vacc5x0123);
      wasm_v128_store(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial tail: store 4, then 2, then 1 columns as indicated by the
      // bits of nc, shifting surviving lanes down after each partial store.
      if (nc & 4) {
        wasm_v128_store(c5, vacc5x0123);
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c5, vacc5x0123, 0);
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c5, vacc5x0123, 0);
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
14,616
41.739766
79
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-relu-wasmrelaxedsimd-fma-loadsplat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

// f32 GEMM micro-kernel producing a 6-row x 8-column tile of C = A * W + bias
// followed by ReLU. WAsm Relaxed-SIMD "loadsplat" variant: every A element is
// broadcast with a single-float splat load (no 4-wide unrolling) and the
// multiply-accumulate uses the relaxed fused-madd builtin.
//
//   mr        - number of live rows (1..6); missing rows are aliased (below)
//   nc        - number of output columns remaining (> 0)
//   kc        - reduction length in BYTES (multiple of sizeof(float))
//   a, a_stride   - row-major input rows
//   w         - packed weights: 8 bias floats, then 8 weight floats per k step
//   c, cm_stride, cn_stride - output tile addressing
void xnn_f32_gemm_relu_ukernel_6x8__wasmrelaxedsimd_fma_loadsplat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Row pointer setup. When mr < 6, pointers for the absent rows alias the
  // previous row, so the redundant math reads/writes already-valid locations.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  do {
    // Initialize all six accumulator rows (two 4-lane halves each) with the
    // packed bias that precedes the weights.
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    v128_t vacc5x0123 = vacc0x0123;
    v128_t vacc5x4567 = vacc0x4567;
    w += 8;

    size_t k = kc;
    // One k-step per iteration: splat one float from each A row and fuse-
    // multiply-accumulate against the 8 weights of this step.
    do {
      const v128_t va0 = wasm_v128_load32_splat(a0);
      a0 += 1;
      const v128_t va1 = wasm_v128_load32_splat(a1);
      a1 += 1;
      const v128_t va2 = wasm_v128_load32_splat(a2);
      a2 += 1;
      const v128_t va3 = wasm_v128_load32_splat(a3);
      a3 += 1;
      const v128_t va4 = wasm_v128_load32_splat(a4);
      a4 += 1;
      const v128_t va5 = wasm_v128_load32_splat(a5);
      a5 += 1;

      const v128_t vb0123 = wasm_v128_load(w);
      const v128_t vb4567 = wasm_v128_load(w + 4);
      w += 8;

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123, vacc5x0123);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567, vacc5x4567);

      k -= sizeof(float);
    } while (k != 0);

    // ReLU via signed-integer max against zero: negative floats have the sign
    // bit set, so they compare below 0 as i32 and are replaced by +0.0.
    const v128_t vzero = wasm_i32x4_const_splat(0);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
    vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
    vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero);
    vacc5x0123 = wasm_i32x4_max(vacc5x0123, vzero);
    vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
    vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
    vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
    vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
    vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero);
    vacc5x4567 = wasm_i32x4_max(vacc5x4567, vzero);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store; advance c pointers to the next column block and
      // rewind a pointers to reuse the same A rows for it.
      wasm_v128_store(c5, vacc5x0123);
      wasm_v128_store(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial tail: store 4, then 2, then 1 columns as indicated by the
      // bits of nc, shifting surviving lanes down after each partial store.
      if (nc & 4) {
        wasm_v128_store(c5, vacc5x0123);
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c5, vacc5x0123, 0);
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c5, vacc5x0123, 0);
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
7,476
32.832579
78
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-relu-wasmrelaxedsimd-fma-splat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

// f32 GEMM micro-kernel producing a 6-row x 8-column tile of C = A * W + bias
// followed by ReLU. WAsm Relaxed-SIMD "splat" variant: the k loop is unrolled
// by 4, A elements are broadcast with v32x4 shuffles of a 4-wide load, and
// multiply-accumulate uses the relaxed fused-madd builtin.
//
//   mr        - number of live rows (1..6); missing rows are aliased (below)
//   nc        - number of output columns remaining (> 0)
//   kc        - reduction length in BYTES (multiple of sizeof(float))
//   a, a_stride   - row-major input rows
//   w         - packed weights: 8 bias floats, then 8 weight floats per k step
//   c, cm_stride, cn_stride - output tile addressing
void xnn_f32_gemm_relu_ukernel_6x8__wasmrelaxedsimd_fma_splat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Row pointer setup. When mr < 6, pointers for the absent rows alias the
  // previous row, so the redundant math reads/writes already-valid locations.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  do {
    // Initialize all six accumulator rows (two 4-lane halves each) with the
    // packed bias that precedes the weights.
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    v128_t vacc5x0123 = vacc0x0123;
    v128_t vacc5x4567 = vacc0x4567;
    w += 8;

    size_t k = kc;
    // Main loop: consume 4 k-steps per iteration. Each A row is loaded once
    // (4 floats) and each element is broadcast by shuffle for its k-step.
    while (k >= 4 * sizeof(float)) {
      const v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      const v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      const v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      const v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      const v128_t va4 = wasm_v128_load(a4);
      a4 += 4;
      const v128_t va5 = wasm_v128_load(a5);
      a5 += 4;

      // k-step 0: broadcast lane 0 of each A row, fuse-madd weights w[0..7].
      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
      const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb0123c0, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb0123c0, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c0, vb0123c0, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb4567c0, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb4567c0, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c0, vb4567c0, vacc5x4567);

      // k-step 1: broadcast lane 1, weights w[8..15].
      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
      const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb0123c1, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb0123c1, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c1, vb0123c1, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb4567c1, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb4567c1, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c1, vb4567c1, vacc5x4567);

      // k-step 2: broadcast lane 2, weights w[16..23].
      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
      const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb0123c2, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb0123c2, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c2, vb0123c2, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb4567c2, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb4567c2, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c2, vb4567c2, vacc5x4567);

      // k-step 3: broadcast lane 3, weights w[24..31].
      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
      const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb0123c3, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb0123c3, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c3, vb0123c3, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb4567c3, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb4567c3, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c3, vb4567c3, vacc5x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder loop: one k-step per iteration, A element broadcast via
    // single-float splat load.
    if XNN_UNLIKELY(k != 0) {
      do {
        const v128_t va0 = wasm_v128_load32_splat(a0);
        a0 += 1;
        const v128_t va1 = wasm_v128_load32_splat(a1);
        a1 += 1;
        const v128_t va2 = wasm_v128_load32_splat(a2);
        a2 += 1;
        const v128_t va3 = wasm_v128_load32_splat(a3);
        a3 += 1;
        const v128_t va4 = wasm_v128_load32_splat(a4);
        a4 += 1;
        const v128_t va5 = wasm_v128_load32_splat(a5);
        a5 += 1;

        const v128_t vb0123 = wasm_v128_load(w);
        const v128_t vb4567 = wasm_v128_load(w + 4);
        w += 8;

        vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
        vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
        vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
        vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
        vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
        vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123, vacc5x0123);
        vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
        vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
        vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
        vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
        vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);
        vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567, vacc5x4567);

        k -= sizeof(float);
      } while (k != 0);
    }

    // ReLU via signed-integer max against zero: negative floats have the sign
    // bit set, so they compare below 0 as i32 and are replaced by +0.0.
    const v128_t vzero = wasm_i32x4_const_splat(0);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
    vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
    vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero);
    vacc5x0123 = wasm_i32x4_max(vacc5x0123, vzero);
    vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
    vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
    vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
    vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
    vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero);
    vacc5x4567 = wasm_i32x4_max(vacc5x4567, vzero);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store; advance c pointers to the next column block and
      // rewind a pointers to reuse the same A rows for it.
      wasm_v128_store(c5, vacc5x0123);
      wasm_v128_store(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial tail: store 4, then 2, then 1 columns as indicated by the
      // bits of nc, shifting surviving lanes down after each partial store.
      if (nc & 4) {
        wasm_v128_store(c5, vacc5x0123);
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c5, vacc5x0123, 0);
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c5, vacc5x0123, 0);
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
14,090
41.960366
82
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-relu-wasmsimd-loadsplat.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/wasmsimd-loadsplat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/gemm.h> void xnn_f32_gemm_relu_ukernel_6x8__wasmsimd_loadsplat( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 6); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const float* a0 = a; float* c0 = c; const float* a1 = (const float*) ((uintptr_t) a0 + a_stride); float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const float* a2 = (const float*) ((uintptr_t) a1 + a_stride); float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const float* a3 = (const float*) ((uintptr_t) a2 + a_stride); float* c3 = (float*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr < 4) { a3 = a2; c3 = c2; } const float* a4 = (const float*) ((uintptr_t) a3 + a_stride); float* c4 = (float*) ((uintptr_t) c3 + cm_stride); if XNN_UNPREDICTABLE(mr <= 4) { a4 = a3; c4 = c3; } const float* a5 = (const float*) ((uintptr_t) a4 + a_stride); float* c5 = (float*) ((uintptr_t) c4 + cm_stride); if XNN_UNPREDICTABLE(mr != 6) { a5 = a4; c5 = c4; } do { v128_t vacc0x0123 = wasm_v128_load(w + 0); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; v128_t vacc3x0123 = vacc0x0123; v128_t vacc3x4567 = vacc0x4567; v128_t vacc4x0123 = vacc0x0123; v128_t vacc4x4567 = vacc0x4567; 
v128_t vacc5x0123 = vacc0x0123; v128_t vacc5x4567 = vacc0x4567; w += 8; size_t k = kc; do { const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; const v128_t va3 = wasm_v128_load32_splat(a3); a3 += 1; const v128_t va4 = wasm_v128_load32_splat(a4); a4 += 1; const v128_t va5 = wasm_v128_load32_splat(a5); a5 += 1; const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123)); vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567)); vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123)); vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567)); vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123)); vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567)); vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123)); vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567)); vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123)); vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567)); vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123)); vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567)); k -= sizeof(float); } while (k != 0); const v128_t vzero = wasm_i32x4_const_splat(0); vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero); vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero); vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero); vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero); vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero); vacc5x0123 = wasm_i32x4_max(vacc5x0123, vzero); vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero); vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero); vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero); vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero); vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero); vacc5x4567 = 
wasm_i32x4_max(vacc5x4567, vzero); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c5 + 4, vacc5x4567); c5 = (float*) ((uintptr_t) c5 + cn_stride); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c4 + 4, vacc4x4567); c4 = (float*) ((uintptr_t) c4 + cn_stride); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c3 + 4, vacc3x4567); c3 = (float*) ((uintptr_t) c3 + cn_stride); wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a5 = (const float*) ((uintptr_t) a5 - kc); a4 = (const float*) ((uintptr_t) a4 - kc); a3 = (const float*) ((uintptr_t) a3 - kc); a2 = (const float*) ((uintptr_t) a2 - kc); a1 = (const float*) ((uintptr_t) a1 - kc); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc5x0123 = vacc5x4567; vacc4x0123 = vacc4x4567; vacc3x0123 = vacc3x4567; vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c5 += 4; c4 += 4; c3 += 4; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c5, vacc5x0123, 0); wasm_v128_store64_lane(c4, vacc4x0123, 0); wasm_v128_store64_lane(c3, vacc3x0123, 0); wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1); vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1); vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = 
wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c5 += 2; c4 += 2; c3 += 2; c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c5, vacc5x0123, 0); wasm_v128_store32_lane(c4, vacc4x0123, 0); wasm_v128_store32_lane(c3, vacc3x0123, 0); wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
7,429
32.61991
75
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-relu-wasmsimd-splat.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/wasmsimd-splat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/gemm.h> void xnn_f32_gemm_relu_ukernel_6x8__wasmsimd_splat( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 6); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const float* a0 = a; float* c0 = c; const float* a1 = (const float*) ((uintptr_t) a0 + a_stride); float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const float* a2 = (const float*) ((uintptr_t) a1 + a_stride); float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const float* a3 = (const float*) ((uintptr_t) a2 + a_stride); float* c3 = (float*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr < 4) { a3 = a2; c3 = c2; } const float* a4 = (const float*) ((uintptr_t) a3 + a_stride); float* c4 = (float*) ((uintptr_t) c3 + cm_stride); if XNN_UNPREDICTABLE(mr <= 4) { a4 = a3; c4 = c3; } const float* a5 = (const float*) ((uintptr_t) a4 + a_stride); float* c5 = (float*) ((uintptr_t) c4 + cm_stride); if XNN_UNPREDICTABLE(mr != 6) { a5 = a4; c5 = c4; } do { v128_t vacc0x0123 = wasm_v128_load(w + 0); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; v128_t vacc3x0123 = vacc0x0123; v128_t vacc3x4567 = vacc0x4567; v128_t vacc4x0123 = vacc0x0123; v128_t vacc4x4567 = vacc0x4567; v128_t 
vacc5x0123 = vacc0x0123; v128_t vacc5x4567 = vacc0x4567; w += 8; size_t k = kc; while (k >= 4 * sizeof(float)) { const v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t va1 = wasm_v128_load(a1); a1 += 4; const v128_t va2 = wasm_v128_load(a2); a2 += 4; const v128_t va3 = wasm_v128_load(a3); a3 += 4; const v128_t va4 = wasm_v128_load(a4); a4 += 4; const v128_t va5 = wasm_v128_load(a5); a5 += 4; const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0); const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0); const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0); const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0); const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0); const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb0123c0), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb4567c0), vacc5x4567); const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1); const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1); const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1); const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 
1, 1, 1, 1); const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1); const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb0123c1), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb4567c1), vacc5x4567); const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2); const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2); const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2); const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2); const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2); const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123); vacc5x0123 = 
wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb0123c2), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb4567c2), vacc5x4567); const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3); const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3); const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3); const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3); const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3); const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb0123c3), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb4567c3), vacc5x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { do { const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const 
v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; const v128_t va3 = wasm_v128_load32_splat(a3); a3 += 1; const v128_t va4 = wasm_v128_load32_splat(a4); a4 += 1; const v128_t va5 = wasm_v128_load32_splat(a5); a5 += 1; const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567), vacc5x4567); k -= sizeof(float); } while (k != 0); } const v128_t vzero = wasm_i32x4_const_splat(0); vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero); vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero); vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero); vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero); vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero); vacc5x0123 = wasm_i32x4_max(vacc5x0123, vzero); vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero); vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero); vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero); vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero); vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero); vacc5x4567 = wasm_i32x4_max(vacc5x4567, vzero); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c5 + 4, vacc5x4567); c5 = (float*) ((uintptr_t) c5 + 
cn_stride); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c4 + 4, vacc4x4567); c4 = (float*) ((uintptr_t) c4 + cn_stride); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c3 + 4, vacc3x4567); c3 = (float*) ((uintptr_t) c3 + cn_stride); wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a5 = (const float*) ((uintptr_t) a5 - kc); a4 = (const float*) ((uintptr_t) a4 - kc); a3 = (const float*) ((uintptr_t) a3 - kc); a2 = (const float*) ((uintptr_t) a2 - kc); a1 = (const float*) ((uintptr_t) a1 - kc); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc5x0123 = vacc5x4567; vacc4x0123 = vacc4x4567; vacc3x0123 = vacc3x4567; vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c5 += 4; c4 += 4; c3 += 4; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c5, vacc5x0123, 0); wasm_v128_store64_lane(c4, vacc4x0123, 0); wasm_v128_store64_lane(c3, vacc3x0123, 0); wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1); vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1); vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c5 += 2; c4 += 2; c3 += 2; c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { 
wasm_v128_store32_lane(c5, vacc5x0123, 0); wasm_v128_store32_lane(c4, vacc4x0123, 0); wasm_v128_store32_lane(c3, vacc3x0123, 0); wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
13,899
41.378049
79
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-wasmrelaxedsimd-fma-loadsplat.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/wasmsimd-loadsplat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/gemm.h> void xnn_f32_gemm_ukernel_6x8__wasmrelaxedsimd_fma_loadsplat( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 6); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const float* a0 = a; float* c0 = c; const float* a1 = (const float*) ((uintptr_t) a0 + a_stride); float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const float* a2 = (const float*) ((uintptr_t) a1 + a_stride); float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const float* a3 = (const float*) ((uintptr_t) a2 + a_stride); float* c3 = (float*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr < 4) { a3 = a2; c3 = c2; } const float* a4 = (const float*) ((uintptr_t) a3 + a_stride); float* c4 = (float*) ((uintptr_t) c3 + cm_stride); if XNN_UNPREDICTABLE(mr <= 4) { a4 = a3; c4 = c3; } const float* a5 = (const float*) ((uintptr_t) a4 + a_stride); float* c5 = (float*) ((uintptr_t) c4 + cm_stride); if XNN_UNPREDICTABLE(mr != 6) { a5 = a4; c5 = c4; } do { v128_t vacc0x0123 = wasm_v128_load(w + 0); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; v128_t vacc3x0123 = vacc0x0123; v128_t vacc3x4567 = vacc0x4567; v128_t vacc4x0123 = vacc0x0123; v128_t vacc4x4567 = 
vacc0x4567; v128_t vacc5x0123 = vacc0x0123; v128_t vacc5x4567 = vacc0x4567; w += 8; size_t k = kc; do { const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; const v128_t va3 = wasm_v128_load32_splat(a3); a3 += 1; const v128_t va4 = wasm_v128_load32_splat(a4); a4 += 1; const v128_t va5 = wasm_v128_load32_splat(a5); a5 += 1; const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567); vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123); vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567); vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123); vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567); vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123, vacc5x0123); vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567, vacc5x4567); k -= sizeof(float); } while (k != 0); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c5 + 4, vacc5x4567); c5 = (float*) ((uintptr_t) c5 + cn_stride); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c4 + 4, vacc4x4567); c4 = (float*) ((uintptr_t) c4 + cn_stride); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c3 + 4, vacc3x4567); c3 = (float*) ((uintptr_t) c3 + cn_stride); wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); 
c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a5 = (const float*) ((uintptr_t) a5 - kc); a4 = (const float*) ((uintptr_t) a4 - kc); a3 = (const float*) ((uintptr_t) a3 - kc); a2 = (const float*) ((uintptr_t) a2 - kc); a1 = (const float*) ((uintptr_t) a1 - kc); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc5x0123 = vacc5x4567; vacc4x0123 = vacc4x4567; vacc3x0123 = vacc3x4567; vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c5 += 4; c4 += 4; c3 += 4; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c5, vacc5x0123, 0); wasm_v128_store64_lane(c4, vacc4x0123, 0); wasm_v128_store64_lane(c3, vacc3x0123, 0); wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1); vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1); vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c5 += 2; c4 += 2; c3 += 2; c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c5, vacc5x0123, 0); wasm_v128_store32_lane(c4, vacc4x0123, 0); wasm_v128_store32_lane(c3, vacc3x0123, 0); wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
6,798
31.6875
78
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-wasmsimd-loadsplat.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/wasmsimd-loadsplat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/gemm.h> void xnn_f32_gemm_ukernel_6x8__wasmsimd_loadsplat( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 6); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const float* a0 = a; float* c0 = c; const float* a1 = (const float*) ((uintptr_t) a0 + a_stride); float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const float* a2 = (const float*) ((uintptr_t) a1 + a_stride); float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const float* a3 = (const float*) ((uintptr_t) a2 + a_stride); float* c3 = (float*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr < 4) { a3 = a2; c3 = c2; } const float* a4 = (const float*) ((uintptr_t) a3 + a_stride); float* c4 = (float*) ((uintptr_t) c3 + cm_stride); if XNN_UNPREDICTABLE(mr <= 4) { a4 = a3; c4 = c3; } const float* a5 = (const float*) ((uintptr_t) a4 + a_stride); float* c5 = (float*) ((uintptr_t) c4 + cm_stride); if XNN_UNPREDICTABLE(mr != 6) { a5 = a4; c5 = c4; } do { v128_t vacc0x0123 = wasm_v128_load(w + 0); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; v128_t vacc3x0123 = vacc0x0123; v128_t vacc3x4567 = vacc0x4567; v128_t vacc4x0123 = vacc0x0123; v128_t vacc4x4567 = vacc0x4567; 
v128_t vacc5x0123 = vacc0x0123; v128_t vacc5x4567 = vacc0x4567; w += 8; size_t k = kc; do { const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; const v128_t va3 = wasm_v128_load32_splat(a3); a3 += 1; const v128_t va4 = wasm_v128_load32_splat(a4); a4 += 1; const v128_t va5 = wasm_v128_load32_splat(a5); a5 += 1; const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123)); vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567)); vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123)); vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567)); vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123)); vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567)); vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123)); vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567)); vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123)); vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567)); vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123)); vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567)); k -= sizeof(float); } while (k != 0); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c5 + 4, vacc5x4567); c5 = (float*) ((uintptr_t) c5 + cn_stride); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c4 + 4, vacc4x4567); c4 = (float*) ((uintptr_t) c4 + cn_stride); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c3 + 4, vacc3x4567); c3 = (float*) ((uintptr_t) c3 + cn_stride); wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); 
wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a5 = (const float*) ((uintptr_t) a5 - kc); a4 = (const float*) ((uintptr_t) a4 - kc); a3 = (const float*) ((uintptr_t) a3 - kc); a2 = (const float*) ((uintptr_t) a2 - kc); a1 = (const float*) ((uintptr_t) a1 - kc); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc5x0123 = vacc5x4567; vacc4x0123 = vacc4x4567; vacc3x0123 = vacc3x4567; vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c5 += 4; c4 += 4; c3 += 4; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c5, vacc5x0123, 0); wasm_v128_store64_lane(c4, vacc4x0123, 0); wasm_v128_store64_lane(c3, vacc3x0123, 0); wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1); vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1); vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c5 += 2; c4 += 2; c3 += 2; c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c5, vacc5x0123, 0); wasm_v128_store32_lane(c4, vacc4x0123, 0); wasm_v128_store32_lane(c3, vacc3x0123, 0); wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
6,751
31.461538
76
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-wasmrelaxedsimd-fma-splat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

// f32 GEMM micro-kernel computing a 6x8 tile of C = A * B using WebAssembly
// Relaxed SIMD fused multiply-add (__builtin_wasm_relaxed_madd_f32x4) in the
// "splat" scheme: the main loop loads 4 A elements per row and broadcasts
// each lane with a shuffle.  No activation is fused (params are unused).
//
//   mr        - rows of A/C to process (1..6)
//   nc        - remaining columns of C
//   kc        - length of the K dimension, in BYTES (multiple of sizeof(float))
//   a,a_stride- matrix A base pointer and row stride in bytes
//   w         - packed weights: 8 bias floats, then 8-wide B panels
//   c,cm_stride,cn_stride - matrix C base pointer, row stride and
//               column-tile stride in bytes
void xnn_f32_gemm_ukernel_6x8__wasmrelaxedsimd_fma_splat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Per-row input/output pointers.  Rows past mr alias the previous row, so
  // they compute (and store) redundant-but-safe duplicates of valid data.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  do {
    // Initialize all 6 accumulator rows with the bias (first 8 packed floats).
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    v128_t vacc5x0123 = vacc0x0123;
    v128_t vacc5x4567 = vacc0x4567;
    w += 8;

    size_t k = kc;
    // Main loop: consume K in chunks of 4 floats per row; each lane of the
    // loaded A vector is splatted and multiplied against one 8-wide B panel.
    while (k >= 4 * sizeof(float)) {
      const v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      const v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      const v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      const v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      const v128_t va4 = wasm_v128_load(a4);
      a4 += 4;
      const v128_t va5 = wasm_v128_load(a5);
      a5 += 4;

      // K-step 0: broadcast lane 0 of each row.
      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
      const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb0123c0, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb0123c0, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c0, vb0123c0, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb4567c0, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb4567c0, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c0, vb4567c0, vacc5x4567);

      // K-step 1: broadcast lane 1.
      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
      const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb0123c1, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb0123c1, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c1, vb0123c1, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb4567c1, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb4567c1, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c1, vb4567c1, vacc5x4567);

      // K-step 2: broadcast lane 2.
      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
      const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb0123c2, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb0123c2, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c2, vb0123c2, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb4567c2, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb4567c2, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c2, vb4567c2, vacc5x4567);

      // K-step 3: broadcast lane 3.
      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
      const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb0123c3, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb0123c3, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c3, vb0123c3, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb4567c3, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb4567c3, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c3, vb4567c3, vacc5x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder loop: handle the last 1..3 K elements one float at a time,
    // broadcasting each scalar with a 32-bit splat load.
    if XNN_UNLIKELY(k != 0) {
      do {
        const v128_t va0 = wasm_v128_load32_splat(a0);
        a0 += 1;
        const v128_t va1 = wasm_v128_load32_splat(a1);
        a1 += 1;
        const v128_t va2 = wasm_v128_load32_splat(a2);
        a2 += 1;
        const v128_t va3 = wasm_v128_load32_splat(a3);
        a3 += 1;
        const v128_t va4 = wasm_v128_load32_splat(a4);
        a4 += 1;
        const v128_t va5 = wasm_v128_load32_splat(a5);
        a5 += 1;

        const v128_t vb0123 = wasm_v128_load(w);
        const v128_t vb4567 = wasm_v128_load(w + 4);
        w += 8;

        vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
        vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
        vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
        vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
        vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
        vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123, vacc5x0123);
        vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
        vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
        vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
        vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
        vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);
        vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567, vacc5x4567);

        k -= sizeof(float);
      } while (k != 0);
    }

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column tile: store all rows and rewind A for the next tile.
      wasm_v128_store(c5, vacc5x0123);
      wasm_v128_store(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial tile: store 4, then 2, then 1 columns, shifting surviving
      // lanes down between steps.
      if (nc & 4) {
        wasm_v128_store(c5, vacc5x0123);
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c5, vacc5x0123, 0);
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c5, vacc5x0123, 0);
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
13,412
41.580952
82
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8-wasmsimd-splat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

// f32 GEMM micro-kernel computing a 6x8 tile of C = A * B using baseline
// WebAssembly SIMD128 (separate multiply + add, no FMA) in the "splat"
// scheme: the main loop loads 4 A elements per row and broadcasts each lane
// with a shuffle.  No activation is fused (params are unused).
//
//   mr        - rows of A/C to process (1..6)
//   nc        - remaining columns of C
//   kc        - length of the K dimension, in BYTES (multiple of sizeof(float))
//   a,a_stride- matrix A base pointer and row stride in bytes
//   w         - packed weights: 8 bias floats, then 8-wide B panels
//   c,cm_stride,cn_stride - matrix C base pointer, row stride and
//               column-tile stride in bytes
void xnn_f32_gemm_ukernel_6x8__wasmsimd_splat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Per-row input/output pointers.  Rows past mr alias the previous row, so
  // they compute (and store) redundant-but-safe duplicates of valid data.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  do {
    // Initialize all 6 accumulator rows with the bias (first 8 packed floats).
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    v128_t vacc5x0123 = vacc0x0123;
    v128_t vacc5x4567 = vacc0x4567;
    w += 8;

    size_t k = kc;
    // Main loop: consume K in chunks of 4 floats per row; each lane of the
    // loaded A vector is splatted and multiplied against one 8-wide B panel.
    while (k >= 4 * sizeof(float)) {
      const v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      const v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      const v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      const v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      const v128_t va4 = wasm_v128_load(a4);
      a4 += 4;
      const v128_t va5 = wasm_v128_load(a5);
      a5 += 4;

      // K-step 0: broadcast lane 0 of each row.
      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
      const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb0123c0), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb4567c0), vacc5x4567);

      // K-step 1: broadcast lane 1.
      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
      const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb0123c1), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb4567c1), vacc5x4567);

      // K-step 2: broadcast lane 2.
      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
      const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb0123c2), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb4567c2), vacc5x4567);

      // K-step 3: broadcast lane 3.
      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
      const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb0123c3), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb4567c3), vacc5x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder loop: handle the last 1..3 K elements one float at a time,
    // broadcasting each scalar with a 32-bit splat load.
    if XNN_UNLIKELY(k != 0) {
      do {
        const v128_t va0 = wasm_v128_load32_splat(a0);
        a0 += 1;
        const v128_t va1 = wasm_v128_load32_splat(a1);
        a1 += 1;
        const v128_t va2 = wasm_v128_load32_splat(a2);
        a2 += 1;
        const v128_t va3 = wasm_v128_load32_splat(a3);
        a3 += 1;
        const v128_t va4 = wasm_v128_load32_splat(a4);
        a4 += 1;
        const v128_t va5 = wasm_v128_load32_splat(a5);
        a5 += 1;

        const v128_t vb0123 = wasm_v128_load(w);
        const v128_t vb4567 = wasm_v128_load(w + 4);
        w += 8;

        vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
        vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
        vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
        vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
        vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
        vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123), vacc5x0123);
        vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
        vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
        vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
        vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
        vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
        vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567), vacc5x4567);

        k -= sizeof(float);
      } while (k != 0);
    }

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column tile: store all rows and rewind A for the next tile.
      wasm_v128_store(c5, vacc5x0123);
      wasm_v128_store(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial tile: store 4, then 2, then 1 columns, shifting surviving
      // lanes down between steps.
      if (nc & 4) {
        wasm_v128_store(c5, vacc5x0123);
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c5, vacc5x0123, 0);
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c5, vacc5x0123, 0);
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
13,221
40.974603
79
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8s4-minmax-wasmrelaxedsimd-fma.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-s4.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


// F32 GEMM micro-kernel, 6 rows (MR=6) by 8 columns (NR=8), with min/max
// output clamping, using WebAssembly Relaxed SIMD fused multiply-add.
// "s4" variant: instead of broadcasting one A element per step, each A vector
// is rotated lane-wise (shuffle 1,2,3,0) four times so that every lane of the
// loaded A vector is multiplied against a correspondingly pre-shuffled panel
// of B weights (w is packed to match this rotation order).
//
// Arguments follow the standard XNNPACK GEMM contract:
//   mr/nc/kc  - number of A rows, C columns, and K bytes to process
//   a/a_stride - input matrix A and its row stride (bytes)
//   w          - packed weights: 8 biases followed by shuffled B panels
//   c/cm_stride/cn_stride - output matrix C, row stride, and column-group
//                           stride (bytes)
//   params     - min/max clamping bounds (wasmsimd layout)
void xnn_f32_gemm_minmax_ukernel_6x8s4__wasmrelaxedsimd_fma(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Set up per-row A/C pointers. When mr is smaller than 6, surplus row
  // pointers alias the previous row so the kernel still computes safely
  // (redundant rows are simply overwritten with identical data).
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  // Clamping bounds, splatted across all four lanes.
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Initialize all 6x8 accumulators from the packed bias (first 8 floats
    // of w); every row starts from the same bias values.
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    v128_t vacc5x0123 = vacc0x0123;
    v128_t vacc5x4567 = vacc0x4567;
    w += 8;

    size_t k = kc;
    // Main loop: process 4 K elements per iteration. Each of the four
    // sub-steps (c0..c3) consumes one B panel of 8 floats and then rotates
    // every A vector one lane left, so lane 0 always holds the "current"
    // A element for the pre-shuffled weights.
    while (k >= 4 * sizeof(float)) {
      v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      v128_t va4 = wasm_v128_load(a4);
      a4 += 4;
      v128_t va5 = wasm_v128_load(a5);
      a5 += 4;

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c0, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c0, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c0, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c0, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c0, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c0, vacc5x4567);

      // Rotate A vectors one lane: [a1 a2 a3 a0].
      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c1, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c1, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c1, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c1, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c1, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c1, vacc5x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c2, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c2, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c2, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c2, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c2, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c2, vacc5x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c3, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c3, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c3, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c3, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c3, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c3, vacc5x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder: 1-3 K elements left. A full (possibly overreading) vector is
    // loaded, and contributions from out-of-range lanes are suppressed by
    // zeroing the A lane wherever the packed B weight is zero (the packer
    // zero-pads B past kc): andnot(va, b == 0) keeps va only where b != 0.
    if XNN_UNLIKELY(k != 0) {
      v128_t va0 = wasm_v128_load(a0);
      a0 = (const float*) ((uintptr_t) a0 + k);
      v128_t va1 = wasm_v128_load(a1);
      a1 = (const float*) ((uintptr_t) a1 + k);
      v128_t va2 = wasm_v128_load(a2);
      a2 = (const float*) ((uintptr_t) a2 + k);
      v128_t va3 = wasm_v128_load(a3);
      a3 = (const float*) ((uintptr_t) a3 + k);
      v128_t va4 = wasm_v128_load(a4);
      a4 = (const float*) ((uintptr_t) a4 + k);
      v128_t va5 = wasm_v128_load(a5);
      a5 = (const float*) ((uintptr_t) a5 + k);

      const v128_t vzero = wasm_f32x4_const_splat(0.0f);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc5x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc5x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc5x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc5x4567);

      w += 32;
    }

    // Clamp accumulators to [min, max] using relaxed min/max (NaN/signed-zero
    // lane selection is implementation-defined for relaxed ops).
    vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
    vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
    vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
    vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
    vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
    vacc5x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x0123);
    vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
    vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
    vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
    vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
    vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
    vacc5x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x4567);

    vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
    vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
    vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
    vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
    vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
    vacc5x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x0123);
    vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
    vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
    vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
    vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
    vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
    vacc5x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x4567);

    // Fast path: a full 8-column tile is available — store all rows, advance
    // the C pointers by cn_stride, and rewind the A pointers by kc for the
    // next column group.
    if XNN_LIKELY(nc >= 8) {
      wasm_v128_store(c5, vacc5x0123);
      wasm_v128_store(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail: store the remaining 1-7 columns by binary decomposition
      // (4, then 2, then 1), shifting surviving lanes down after each store.
      if (nc & 4) {
        wasm_v128_store(c5, vacc5x0123);
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c5, vacc5x0123, 0);
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c5, vacc5x0123, 0);
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
21,100
50.718137
130
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8s4-minmax-wasmrelaxedsimd.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/gemm.h> void xnn_f32_gemm_minmax_ukernel_6x8s4__wasmrelaxedsimd( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 6); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const float* a0 = a; float* c0 = c; const float* a1 = (const float*) ((uintptr_t) a0 + a_stride); float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const float* a2 = (const float*) ((uintptr_t) a1 + a_stride); float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const float* a3 = (const float*) ((uintptr_t) a2 + a_stride); float* c3 = (float*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr < 4) { a3 = a2; c3 = c2; } const float* a4 = (const float*) ((uintptr_t) a3 + a_stride); float* c4 = (float*) ((uintptr_t) c3 + cm_stride); if XNN_UNPREDICTABLE(mr <= 4) { a4 = a3; c4 = c3; } const float* a5 = (const float*) ((uintptr_t) a4 + a_stride); float* c5 = (float*) ((uintptr_t) c4 + cm_stride); if XNN_UNPREDICTABLE(mr != 6) { a5 = a4; c5 = c4; } const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w + 0); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; 
v128_t vacc3x0123 = vacc0x0123; v128_t vacc3x4567 = vacc0x4567; v128_t vacc4x0123 = vacc0x0123; v128_t vacc4x4567 = vacc0x4567; v128_t vacc5x0123 = vacc0x0123; v128_t vacc5x4567 = vacc0x4567; w += 8; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; v128_t va1 = wasm_v128_load(a1); a1 += 4; v128_t va2 = wasm_v128_load(a2); a2 += 4; v128_t va3 = wasm_v128_load(a3); a3 += 4; v128_t va4 = wasm_v128_load(a4); a4 += 4; v128_t va5 = wasm_v128_load(a5); a5 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c0), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c0), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123); vacc2x0123 = 
wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c1), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c1), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c2), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c2), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 
0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c3), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c3), vacc5x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); v128_t va1 = wasm_v128_load(a1); a1 = (const float*) ((uintptr_t) a1 + k); v128_t va2 = wasm_v128_load(a2); a2 = (const float*) ((uintptr_t) a2 + k); v128_t va3 = wasm_v128_load(a3); a3 = (const float*) ((uintptr_t) a3 + k); v128_t va4 = wasm_v128_load(a4); a4 = (const float*) ((uintptr_t) a4 + k); v128_t va5 = wasm_v128_load(a5); a5 = (const float*) ((uintptr_t) a5 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123); vacc1x0123 = 
wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123); vacc2x0123 = 
wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123); vacc3x0123 = 
wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123); vacc4x0123 = 
wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc5x4567); w += 32; } vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123); vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123); vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123); vacc5x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x0123); vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567); vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567); vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567); vacc5x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x4567); vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123); vacc3x0123 = 
__builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123); vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123); vacc5x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x0123); vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567); vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567); vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567); vacc5x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c5 + 4, vacc5x4567); c5 = (float*) ((uintptr_t) c5 + cn_stride); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c4 + 4, vacc4x4567); c4 = (float*) ((uintptr_t) c4 + cn_stride); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c3 + 4, vacc3x4567); c3 = (float*) ((uintptr_t) c3 + cn_stride); wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a5 = (const float*) ((uintptr_t) a5 - kc); a4 = (const float*) ((uintptr_t) a4 - kc); a3 = (const float*) ((uintptr_t) a3 - kc); a2 = (const float*) ((uintptr_t) a2 - kc); a1 = (const float*) ((uintptr_t) a1 - kc); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc5x0123 = vacc5x4567; vacc4x0123 = vacc4x4567; vacc3x0123 = vacc3x4567; vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c5 += 4; c4 += 4; c3 += 4; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { 
wasm_v128_store64_lane(c5, vacc5x0123, 0); wasm_v128_store64_lane(c4, vacc4x0123, 0); wasm_v128_store64_lane(c3, vacc3x0123, 0); wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1); vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1); vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c5 += 2; c4 += 2; c3 += 2; c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c5, vacc5x0123, 0); wasm_v128_store32_lane(c4, vacc4x0123, 0); wasm_v128_store32_lane(c3, vacc3x0123, 0); wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
20,808
50.002451
127
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8s4-minmax-wasmsimd-arm.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/gemm.h> void xnn_f32_gemm_minmax_ukernel_6x8s4__wasmsimd_arm( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 6); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const float* a0 = a; float* c0 = c; const float* a1 = (const float*) ((uintptr_t) a0 + a_stride); float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const float* a2 = (const float*) ((uintptr_t) a1 + a_stride); float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const float* a3 = (const float*) ((uintptr_t) a2 + a_stride); float* c3 = (float*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr < 4) { a3 = a2; c3 = c2; } const float* a4 = (const float*) ((uintptr_t) a3 + a_stride); float* c4 = (float*) ((uintptr_t) c3 + cm_stride); if XNN_UNPREDICTABLE(mr <= 4) { a4 = a3; c4 = c3; } const float* a5 = (const float*) ((uintptr_t) a4 + a_stride); float* c5 = (float*) ((uintptr_t) c4 + cm_stride); if XNN_UNPREDICTABLE(mr != 6) { a5 = a4; c5 = c4; } const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w + 0); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; 
v128_t vacc3x0123 = vacc0x0123; v128_t vacc3x4567 = vacc0x4567; v128_t vacc4x0123 = vacc0x0123; v128_t vacc4x4567 = vacc0x4567; v128_t vacc5x0123 = vacc0x0123; v128_t vacc5x4567 = vacc0x4567; w += 8; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; v128_t va1 = wasm_v128_load(a1); a1 += 4; v128_t va2 = wasm_v128_load(a2); a2 += 4; v128_t va3 = wasm_v128_load(a3); a3 += 4; v128_t va4 = wasm_v128_load(a4); a4 += 4; v128_t va5 = wasm_v128_load(a5); a5 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c0), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c0), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123); vacc2x0123 = 
wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c1), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c1), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c2), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c2), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 
0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c3), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c3), vacc5x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); v128_t va1 = wasm_v128_load(a1); a1 = (const float*) ((uintptr_t) a1 + k); v128_t va2 = wasm_v128_load(a2); a2 = (const float*) ((uintptr_t) a2 + k); v128_t va3 = wasm_v128_load(a3); a3 = (const float*) ((uintptr_t) a3 + k); v128_t va4 = wasm_v128_load(a4); a4 = (const float*) ((uintptr_t) a4 + k); v128_t va5 = wasm_v128_load(a5); a5 = (const float*) ((uintptr_t) a5 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123); vacc1x0123 = 
wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123); vacc2x0123 = 
wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123); vacc3x0123 = 
wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123); vacc4x0123 = 
wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc5x4567); w += 32; } vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123); vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123); vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123); vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123); vacc4x0123 = wasm_f32x4_max(vmin, vacc4x0123); vacc5x0123 = wasm_f32x4_max(vmin, vacc5x0123); vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567); vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567); vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567); vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567); vacc4x4567 = wasm_f32x4_max(vmin, vacc4x4567); vacc5x4567 = wasm_f32x4_max(vmin, vacc5x4567); vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123); vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123); vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123); vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123); vacc4x0123 = wasm_f32x4_min(vmax, vacc4x0123); vacc5x0123 = wasm_f32x4_min(vmax, vacc5x0123); vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567); vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567); vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567); vacc3x4567 = wasm_f32x4_min(vmax, 
vacc3x4567); vacc4x4567 = wasm_f32x4_min(vmax, vacc4x4567); vacc5x4567 = wasm_f32x4_min(vmax, vacc5x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c5 + 4, vacc5x4567); c5 = (float*) ((uintptr_t) c5 + cn_stride); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c4 + 4, vacc4x4567); c4 = (float*) ((uintptr_t) c4 + cn_stride); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c3 + 4, vacc3x4567); c3 = (float*) ((uintptr_t) c3 + cn_stride); wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a5 = (const float*) ((uintptr_t) a5 - kc); a4 = (const float*) ((uintptr_t) a4 - kc); a3 = (const float*) ((uintptr_t) a3 - kc); a2 = (const float*) ((uintptr_t) a2 - kc); a1 = (const float*) ((uintptr_t) a1 - kc); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc5x0123 = vacc5x4567; vacc4x0123 = vacc4x4567; vacc3x0123 = vacc3x4567; vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c5 += 4; c4 += 4; c3 += 4; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c5, vacc5x0123, 0); wasm_v128_store64_lane(c4, vacc4x0123, 0); wasm_v128_store64_lane(c3, vacc3x0123, 0); wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1); vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1); vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1); vacc2x0123 = 
wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c5 += 2; c4 += 2; c3 += 2; c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c5, vacc5x0123, 0); wasm_v128_store32_lane(c4, vacc4x0123, 0); wasm_v128_store32_lane(c3, vacc3x0123, 0); wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
20,373
48.936275
127
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8s4-minmax-wasmsimd-x86.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-s4.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


// 6x8 single-precision GEMM microkernel (WAsm SIMD, "s4" shifted-A variant,
// x86-tuned flavor: clamping uses wasm_f32x4_pmin/pmax) with min/max fusion.
//
// Instead of splatting one A scalar per multiply, each iteration loads 4 floats
// per A row and rotates them lane-wise (shuffle 1,2,3,0) between four B panels
// that were packed pre-shifted to match ("s4" layout).
//
//   mr        - number of A/C rows actually used (1..6)
//   nc        - number of C columns remaining
//   kc        - length of the K dimension, in BYTES (multiple of sizeof(float))
//   a/a_stride  - matrix A base pointer and row stride in bytes
//   w         - packed weights: 8 floats of initial accumulator values (bias),
//               then interleaved 8-wide B panels, 32 floats per 4 K steps
//   c/cm_stride/cn_stride - matrix C base pointer, row stride, and stride to
//               the next 8-column group, all in bytes
//   params    - output clamping bounds, pre-broadcast for the wasmsimd layout
void xnn_f32_gemm_minmax_ukernel_6x8s4__wasmsimd_x86(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);  // kc is expressed in bytes
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Set up one A/C pointer pair per row. When mr < 6 the unused trailing rows
  // alias the last valid row, so the kernel computes redundant (but in-bounds)
  // work instead of branching; the store phase never writes the aliased rows
  // because cN == cN-1 then and the last store to that address wins.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  // Clamp bounds, broadcast across all four lanes.
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Initialize the 6x8 accumulator tile from the first 8 packed floats
    // (per-output-channel initial values); all rows start identical.
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    v128_t vacc5x0123 = vacc0x0123;
    v128_t vacc5x4567 = vacc0x4567;
    w += 8;

    // Main loop: consume K in groups of 4 floats. Each group multiplies the
    // current A vectors against 4 pre-shifted B panels, rotating the A lanes
    // (1,2,3,0) between panels so every a[k] meets its matching b[k] row.
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      v128_t va4 = wasm_v128_load(a4);
      a4 += 4;
      v128_t va5 = wasm_v128_load(a5);
      a5 += 4;

      // Panel c0 (weights w+0 .. w+7).
      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c0), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c0), vacc5x4567);

      // Rotate A lanes left by one for the next shifted panel.
      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      // Panel c1 (weights w+8 .. w+15).
      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c1), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c1), vacc5x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      // Panel c2 (weights w+16 .. w+23).
      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c2), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c2), vacc5x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      // Panel c3 (weights w+24 .. w+31); no trailing rotate needed.
      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c3), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c3), vacc5x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder: 1..3 K elements left. A full 16-byte vector is still loaded
    // from each A row (pointers advance by only k bytes), so the trailing
    // lanes hold out-of-range data. Each A lane is therefore zeroed wherever
    // the corresponding B lane is zero (andnot of the b==0 mask), which keeps
    // garbage/NaN lanes from poisoning the accumulators — this presumably
    // relies on the packer zero-filling the unused panel entries (TODO confirm
    // against the packing code).
    if XNN_UNLIKELY(k != 0) {
      v128_t va0 = wasm_v128_load(a0);
      a0 = (const float*) ((uintptr_t) a0 + k);
      v128_t va1 = wasm_v128_load(a1);
      a1 = (const float*) ((uintptr_t) a1 + k);
      v128_t va2 = wasm_v128_load(a2);
      a2 = (const float*) ((uintptr_t) a2 + k);
      v128_t va3 = wasm_v128_load(a3);
      a3 = (const float*) ((uintptr_t) a3 + k);
      v128_t va4 = wasm_v128_load(a4);
      a4 = (const float*) ((uintptr_t) a4 + k);
      v128_t va5 = wasm_v128_load(a5);
      a5 = (const float*) ((uintptr_t) a5 + k);

      const v128_t vzero = wasm_f32x4_const_splat(0.0f);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc5x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc5x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc5x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
      vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
      vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123);
      vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc5x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
      vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
      vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567);
      vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc5x4567);

      w += 32;
    }

    // Clamp the whole tile to [min, max]. pmin/pmax are used (rather than
    // min/max) in this variant; note pmax(vmin, x) returns x unless x < vmin.
    vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
    vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
    vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
    vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
    vacc4x0123 = wasm_f32x4_pmax(vmin, vacc4x0123);
    vacc5x0123 = wasm_f32x4_pmax(vmin, vacc5x0123);
    vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
    vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
    vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
    vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
    vacc4x4567 = wasm_f32x4_pmax(vmin, vacc4x4567);
    vacc5x4567 = wasm_f32x4_pmax(vmin, vacc5x4567);

    vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
    vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
    vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
    vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
    vacc4x0123 = wasm_f32x4_pmin(vmax, vacc4x0123);
    vacc5x0123 = wasm_f32x4_pmin(vmax, vacc5x0123);
    vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
    vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
    vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
    vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
    vacc4x4567 = wasm_f32x4_pmin(vmax, vacc4x4567);
    vacc5x4567 = wasm_f32x4_pmin(vmax, vacc5x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store for each row (highest row first), then advance C
      // to the next column group and rewind A by kc bytes to replay the same
      // rows against the next packed weight panel.
      wasm_v128_store(c5, vacc5x0123);
      wasm_v128_store(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial store of the final 1..7 columns: peel 4, then 2, then 1,
      // shifting the surviving lanes down after each step.
      if (nc & 4) {
        wasm_v128_store(c5, vacc5x0123);
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c5, vacc5x0123, 0);
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c5, vacc5x0123, 0);
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
20,397
48.995098
127
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8s4-relu-wasmrelaxedsimd-fma.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-s4.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

// f32 GEMM microkernel with fused ReLU for a 6-row x 8-column output tile,
// using WebAssembly Relaxed SIMD fused multiply-add
// (__builtin_wasm_relaxed_madd_f32x4).
//
// "s4" variant: instead of broadcasting one A element per multiply, each
// iteration loads 4 consecutive A elements per row and rotates the lane
// order between the four accumulation passes (the wasm_v32x4_shuffle
// 1,2,3,0 calls below), so every A lane is paired with its matching packed
// B panel.
//
// Standard XNNPACK GEMM contract:
//   mr        - number of A/C rows actually processed (1..6)
//   nc        - number of output columns remaining
//   kc        - K-dimension size in bytes (multiple of sizeof(float))
//   a, a_stride   - input matrix A and its row stride in bytes
//   w         - packed weights: per 8-column group, 8 bias floats followed
//               by the interleaved B panels consumed below
//   c, cm_stride, cn_stride - output matrix, row stride, column-group stride
//   params    - unused: ReLU needs no runtime parameters
void xnn_f32_gemm_relu_ukernel_6x8s4__wasmrelaxedsimd_fma(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // One A-row/C-row pointer pair per tile row. When mr < 6 the unused rows
  // alias the previous row, so they are computed redundantly (and written
  // to the same location) instead of branching inside the hot loop.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  do {
    // Initialize all 12 accumulators (6 rows x 2 groups of 4 columns) with
    // the bias for this 8-column group, stored at the front of w.
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    v128_t vacc5x0123 = vacc0x0123;
    v128_t vacc5x4567 = vacc0x4567;
    w += 8;

    size_t k = kc;
    // Main loop: consume 4 K-elements per iteration. Each of the 4 passes
    // multiplies the current A lane arrangement by one packed B panel, then
    // rotates the A lanes left by one (1,2,3,0) for the next pass.
    while (k >= 4 * sizeof(float)) {
      v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      v128_t va4 = wasm_v128_load(a4);
      a4 += 4;
      v128_t va5 = wasm_v128_load(a5);
      a5 += 4;

      // Pass 0: B panel c0 (w[0..7]).
      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c0, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c0, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c0, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c0, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c0, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c0, vacc5x4567);

      // Rotate A lanes left by one for the next pass.
      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      // Pass 1: B panel c1 (w[8..15]).
      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c1, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c1, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c1, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c1, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c1, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c1, vacc5x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      // Pass 2: B panel c2 (w[16..23]).
      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c2, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c2, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c2, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c2, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c2, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c2, vacc5x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      // Pass 3: B panel c3 (w[24..31]). No rotation needed afterwards.
      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c3, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c3, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c3, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c3, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c3, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c3, vacc5x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder: 1..3 K-elements left. A full 16-byte vector is still loaded
    // from each A row (presumably safe under XNNPACK's buffer padding
    // conventions -- the rows must be readable past their logical end), and
    // the A pointers advance by only k bytes.
    if XNN_UNLIKELY(k != 0) {
      v128_t va0 = wasm_v128_load(a0);
      a0 = (const float*) ((uintptr_t) a0 + k);
      v128_t va1 = wasm_v128_load(a1);
      a1 = (const float*) ((uintptr_t) a1 + k);
      v128_t va2 = wasm_v128_load(a2);
      a2 = (const float*) ((uintptr_t) a2 + k);
      v128_t va3 = wasm_v128_load(a3);
      a3 = (const float*) ((uintptr_t) a3 + k);
      v128_t va4 = wasm_v128_load(a4);
      a4 = (const float*) ((uintptr_t) a4 + k);
      v128_t va5 = wasm_v128_load(a5);
      a5 = (const float*) ((uintptr_t) a5 + k);

      const v128_t vzero = wasm_f32x4_const_splat(0.0f);
      // Each madd masks the A operand with andnot(va, vb == 0): lanes whose
      // packed B value is exactly 0.0f (the padding beyond the remainder)
      // are forced to zero so the out-of-range A lanes cannot contribute to
      // the accumulators.
      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc5x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc5x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc5x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
      vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc3x0123);
      vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc4x0123);
      vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc5x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
      vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc3x4567);
      vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc4x4567);
      vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc5x4567);

      w += 32;
    }

    // Fused ReLU via integer max against zero: a negative float has its
    // sign bit set, so its bit pattern is a negative signed i32 and
    // i32x4_max(x, 0) replaces it with +0.0f, while non-negative floats
    // (non-negative i32 bit patterns) pass through unchanged.
    const v128_t vzero = wasm_i32x4_const_splat(0);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
    vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
    vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero);
    vacc5x0123 = wasm_i32x4_max(vacc5x0123, vzero);
    vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
    vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
    vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
    vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
    vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero);
    vacc5x4567 = wasm_i32x4_max(vacc5x4567, vzero);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store (rows written high-to-low), then rewind the A
      // pointers by kc so the same rows are re-read for the next 8-column
      // group of C.
      wasm_v128_store(c5, vacc5x0123);
      wasm_v128_store(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail: store 4, 2, then 1 remaining columns, shifting the surviving
      // lanes down after each partial store.
      if (nc & 4) {
        wasm_v128_store(c5, vacc5x0123);
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c5, vacc5x0123, 0);
        wasm_v128_store64_lane(c4, vacc4x0123, 0);
        wasm_v128_store64_lane(c3, vacc3x0123, 0);
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        // Move the upper 64 bits down for the possible final 1-column store.
        vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
        vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
        vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c5, vacc5x0123, 0);
        wasm_v128_store32_lane(c4, vacc4x0123, 0);
        wasm_v128_store32_lane(c3, vacc3x0123, 0);
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
19,979
49.71066
130
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8s4-relu-wasmsimd.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/gemm.h> void xnn_f32_gemm_relu_ukernel_6x8s4__wasmsimd( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 6); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const float* a0 = a; float* c0 = c; const float* a1 = (const float*) ((uintptr_t) a0 + a_stride); float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const float* a2 = (const float*) ((uintptr_t) a1 + a_stride); float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const float* a3 = (const float*) ((uintptr_t) a2 + a_stride); float* c3 = (float*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr < 4) { a3 = a2; c3 = c2; } const float* a4 = (const float*) ((uintptr_t) a3 + a_stride); float* c4 = (float*) ((uintptr_t) c3 + cm_stride); if XNN_UNPREDICTABLE(mr <= 4) { a4 = a3; c4 = c3; } const float* a5 = (const float*) ((uintptr_t) a4 + a_stride); float* c5 = (float*) ((uintptr_t) c4 + cm_stride); if XNN_UNPREDICTABLE(mr != 6) { a5 = a4; c5 = c4; } do { v128_t vacc0x0123 = wasm_v128_load(w + 0); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; v128_t vacc3x0123 = vacc0x0123; v128_t vacc3x4567 = vacc0x4567; v128_t vacc4x0123 = vacc0x0123; v128_t vacc4x4567 = vacc0x4567; v128_t 
vacc5x0123 = vacc0x0123; v128_t vacc5x4567 = vacc0x4567; w += 8; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; v128_t va1 = wasm_v128_load(a1); a1 += 4; v128_t va2 = wasm_v128_load(a2); a2 += 4; v128_t va3 = wasm_v128_load(a3); a3 += 4; v128_t va4 = wasm_v128_load(a4); a4 += 4; v128_t va5 = wasm_v128_load(a5); a5 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c0), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c0), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123); 
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c1), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c1), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c2), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c2), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, 
va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c3), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c3), vacc5x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); v128_t va1 = wasm_v128_load(a1); a1 = (const float*) ((uintptr_t) a1 + k); v128_t va2 = wasm_v128_load(a2); a2 = (const float*) ((uintptr_t) a2 + k); v128_t va3 = wasm_v128_load(a3); a3 = (const float*) ((uintptr_t) a3 + k); v128_t va4 = wasm_v128_load(a4); a4 = (const float*) ((uintptr_t) a4 + k); v128_t va5 = wasm_v128_load(a5); a5 = (const float*) ((uintptr_t) a5 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123); vacc2x0123 = 
wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123); vacc3x0123 = 
wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123); vacc4x0123 = 
wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123); vacc5x0123 = 
wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc5x4567); w += 32; } const v128_t vzero = wasm_i32x4_const_splat(0); vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero); vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero); vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero); vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero); vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero); vacc5x0123 = wasm_i32x4_max(vacc5x0123, vzero); vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero); vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero); vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero); vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero); vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero); vacc5x4567 = wasm_i32x4_max(vacc5x4567, vzero); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c5 + 4, vacc5x4567); c5 = (float*) ((uintptr_t) c5 + cn_stride); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c4 + 4, vacc4x4567); c4 = (float*) ((uintptr_t) c4 + cn_stride); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c3 + 4, vacc3x4567); c3 = (float*) ((uintptr_t) c3 + cn_stride); wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); 
wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a5 = (const float*) ((uintptr_t) a5 - kc); a4 = (const float*) ((uintptr_t) a4 - kc); a3 = (const float*) ((uintptr_t) a3 - kc); a2 = (const float*) ((uintptr_t) a2 - kc); a1 = (const float*) ((uintptr_t) a1 - kc); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc5x0123 = vacc5x4567; vacc4x0123 = vacc4x4567; vacc3x0123 = vacc3x4567; vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c5 += 4; c4 += 4; c3 += 4; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c5, vacc5x0123, 0); wasm_v128_store64_lane(c4, vacc4x0123, 0); wasm_v128_store64_lane(c3, vacc3x0123, 0); wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1); vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1); vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c5 += 2; c4 += 2; c3 += 2; c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c5, vacc5x0123, 0); wasm_v128_store32_lane(c4, vacc4x0123, 0); wasm_v128_store32_lane(c3, vacc3x0123, 0); wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
19,680
48.951777
127
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8s4-wasmrelaxedsimd-fma.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/gemm.h> void xnn_f32_gemm_ukernel_6x8s4__wasmrelaxedsimd_fma( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 6); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const float* a0 = a; float* c0 = c; const float* a1 = (const float*) ((uintptr_t) a0 + a_stride); float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const float* a2 = (const float*) ((uintptr_t) a1 + a_stride); float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const float* a3 = (const float*) ((uintptr_t) a2 + a_stride); float* c3 = (float*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr < 4) { a3 = a2; c3 = c2; } const float* a4 = (const float*) ((uintptr_t) a3 + a_stride); float* c4 = (float*) ((uintptr_t) c3 + cm_stride); if XNN_UNPREDICTABLE(mr <= 4) { a4 = a3; c4 = c3; } const float* a5 = (const float*) ((uintptr_t) a4 + a_stride); float* c5 = (float*) ((uintptr_t) c4 + cm_stride); if XNN_UNPREDICTABLE(mr != 6) { a5 = a4; c5 = c4; } do { v128_t vacc0x0123 = wasm_v128_load(w + 0); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; v128_t vacc3x0123 = vacc0x0123; v128_t vacc3x4567 = vacc0x4567; v128_t vacc4x0123 = vacc0x0123; v128_t vacc4x4567 = vacc0x4567; v128_t 
vacc5x0123 = vacc0x0123; v128_t vacc5x4567 = vacc0x4567; w += 8; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; v128_t va1 = wasm_v128_load(a1); a1 += 4; v128_t va2 = wasm_v128_load(a2); a2 += 4; v128_t va3 = wasm_v128_load(a3); a3 += 4; v128_t va4 = wasm_v128_load(a4); a4 += 4; v128_t va5 = wasm_v128_load(a5); a5 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123); vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c0, vacc3x0123); vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c0, vacc4x0123); vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c0, vacc5x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567); vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c0, vacc3x4567); vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c0, vacc4x4567); vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c0, vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123); vacc3x0123 = 
__builtin_wasm_relaxed_madd_f32x4(va3, vb0123c1, vacc3x0123); vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c1, vacc4x0123); vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c1, vacc5x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567); vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c1, vacc3x4567); vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c1, vacc4x4567); vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c1, vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123); vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c2, vacc3x0123); vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c2, vacc4x0123); vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c2, vacc5x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567); vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c2, vacc3x4567); vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c2, vacc4x4567); vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c2, vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = 
wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123); vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c3, vacc3x0123); vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c3, vacc4x0123); vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c3, vacc5x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567); vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c3, vacc3x4567); vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c3, vacc4x4567); vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c3, vacc5x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); v128_t va1 = wasm_v128_load(a1); a1 = (const float*) ((uintptr_t) a1 + k); v128_t va2 = wasm_v128_load(a2); a2 = (const float*) ((uintptr_t) a2 + k); v128_t va3 = wasm_v128_load(a3); a3 = (const float*) ((uintptr_t) a3 + k); v128_t va4 = wasm_v128_load(a4); a4 = (const float*) ((uintptr_t) a4 + k); v128_t va5 = wasm_v128_load(a5); a5 = (const float*) ((uintptr_t) a5 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, 
vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123); vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc3x0123); vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc4x0123); vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc5x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567); vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc3x4567); vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc4x4567); vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), 
vb0123c1, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123); vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc3x0123); vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc4x0123); vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc5x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567); vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc3x4567); vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc4x4567); vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, 
vzero)), vb0123c2, vacc2x0123); vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc3x0123); vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc4x0123); vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc5x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567); vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc3x4567); vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc4x4567); vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123); vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, 
wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc3x0123); vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc4x0123); vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc5x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567); vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc3x4567); vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc4x4567); vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc5x4567); w += 32; } if XNN_LIKELY(nc >= 8) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c5 + 4, vacc5x4567); c5 = (float*) ((uintptr_t) c5 + cn_stride); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c4 + 4, vacc4x4567); c4 = (float*) ((uintptr_t) c4 + cn_stride); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c3 + 4, vacc3x4567); c3 = (float*) ((uintptr_t) c3 + cn_stride); wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a5 = (const float*) ((uintptr_t) a5 - kc); a4 = (const float*) ((uintptr_t) a4 - kc); a3 = (const float*) ((uintptr_t) a3 - kc); a2 = (const float*) ((uintptr_t) a2 - kc); a1 = (const float*) ((uintptr_t) a1 - kc); a0 = 
(const float*) ((uintptr_t) a0 - kc); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc5x0123 = vacc5x4567; vacc4x0123 = vacc4x4567; vacc3x0123 = vacc3x4567; vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c5 += 4; c4 += 4; c3 += 4; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c5, vacc5x0123, 0); wasm_v128_store64_lane(c4, vacc4x0123, 0); wasm_v128_store64_lane(c3, vacc3x0123, 0); wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1); vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1); vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c5 += 2; c4 += 2; c3 += 2; c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c5, vacc5x0123, 0); wasm_v128_store32_lane(c4, vacc4x0123, 0); wasm_v128_store32_lane(c3, vacc3x0123, 0); wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
19,301
49.661417
130
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-6x8s4-wasmsimd.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/gemm.h> void xnn_f32_gemm_ukernel_6x8s4__wasmsimd( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 6); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const float* a0 = a; float* c0 = c; const float* a1 = (const float*) ((uintptr_t) a0 + a_stride); float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const float* a2 = (const float*) ((uintptr_t) a1 + a_stride); float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const float* a3 = (const float*) ((uintptr_t) a2 + a_stride); float* c3 = (float*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr < 4) { a3 = a2; c3 = c2; } const float* a4 = (const float*) ((uintptr_t) a3 + a_stride); float* c4 = (float*) ((uintptr_t) c3 + cm_stride); if XNN_UNPREDICTABLE(mr <= 4) { a4 = a3; c4 = c3; } const float* a5 = (const float*) ((uintptr_t) a4 + a_stride); float* c5 = (float*) ((uintptr_t) c4 + cm_stride); if XNN_UNPREDICTABLE(mr != 6) { a5 = a4; c5 = c4; } do { v128_t vacc0x0123 = wasm_v128_load(w + 0); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; v128_t vacc3x0123 = vacc0x0123; v128_t vacc3x4567 = vacc0x4567; v128_t vacc4x0123 = vacc0x0123; v128_t vacc4x4567 = vacc0x4567; v128_t vacc5x0123 
= vacc0x0123; v128_t vacc5x4567 = vacc0x4567; w += 8; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; v128_t va1 = wasm_v128_load(a1); a1 += 4; v128_t va2 = wasm_v128_load(a2); a2 += 4; v128_t va3 = wasm_v128_load(a3); a3 += 4; v128_t va4 = wasm_v128_load(a4); a4 += 4; v128_t va5 = wasm_v128_load(a5); a5 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c0), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c0), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123); vacc4x0123 = 
wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c1), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c1), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c2), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c2), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 
0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c3), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c3), vacc5x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); v128_t va1 = wasm_v128_load(a1); a1 = (const float*) ((uintptr_t) a1 + k); v128_t va2 = wasm_v128_load(a2); a2 = (const float*) ((uintptr_t) a2 + k); v128_t va3 = wasm_v128_load(a3); a3 = (const float*) ((uintptr_t) a3 + k); v128_t va4 = wasm_v128_load(a4); a4 = (const float*) ((uintptr_t) a4 + k); v128_t va5 = wasm_v128_load(a5); a5 = (const float*) ((uintptr_t) a5 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123); vacc2x0123 = 
wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123); vacc3x0123 = 
wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123); vacc4x0123 = 
wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123); vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc5x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0); va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0); va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123); vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123); vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123); vacc5x0123 = 
wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc5x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567); vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567); vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567); vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc5x4567); w += 32; } if XNN_LIKELY(nc >= 8) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c5 + 4, vacc5x4567); c5 = (float*) ((uintptr_t) c5 + cn_stride); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c4 + 4, vacc4x4567); c4 = (float*) ((uintptr_t) c4 + cn_stride); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c3 + 4, vacc3x4567); c3 = (float*) ((uintptr_t) c3 + cn_stride); wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a5 = (const float*) ((uintptr_t) a5 - kc); a4 = (const float*) ((uintptr_t) a4 - kc); a3 = (const float*) ((uintptr_t) a3 - kc); a2 = (const float*) ((uintptr_t) a2 - kc); a1 = (const float*) ((uintptr_t) a1 - kc); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c5, vacc5x0123); wasm_v128_store(c4, vacc4x0123); wasm_v128_store(c3, vacc3x0123); wasm_v128_store(c2, vacc2x0123); 
wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc5x0123 = vacc5x4567; vacc4x0123 = vacc4x4567; vacc3x0123 = vacc3x4567; vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c5 += 4; c4 += 4; c3 += 4; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c5, vacc5x0123, 0); wasm_v128_store64_lane(c4, vacc4x0123, 0); wasm_v128_store64_lane(c3, vacc3x0123, 0); wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1); vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1); vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c5 += 2; c4 += 2; c3 += 2; c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c5, vacc5x0123, 0); wasm_v128_store32_lane(c4, vacc4x0123, 0); wasm_v128_store32_lane(c3, vacc3x0123, 0); wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
19,002
48.87664
127
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-7x16-minmax-avx512f-broadcast.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/avx512-broadcast.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/intrinsics-polyfill.h> void xnn_f32_gemm_minmax_ukernel_7x16__avx512f_broadcast( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 7); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const float* a0 = a; float* c0 = c; const float* a1 = (const float*) ((uintptr_t) a0 + a_stride); float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const float* a2 = (const float*) ((uintptr_t) a1 + a_stride); float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const float* a3 = (const float*) ((uintptr_t) a2 + a_stride); float* c3 = (float*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr < 4) { a3 = a2; c3 = c2; } const float* a4 = (const float*) ((uintptr_t) a3 + a_stride); float* c4 = (float*) ((uintptr_t) c3 + cm_stride); if XNN_UNPREDICTABLE(mr <= 4) { a4 = a3; c4 = c3; } const float* a5 = (const float*) ((uintptr_t) a4 + a_stride); float* c5 = (float*) ((uintptr_t) c4 + cm_stride); if XNN_UNPREDICTABLE(mr < 6) { a5 = a4; c5 = c4; } const float* a6 = (const float*) ((uintptr_t) a5 + a_stride); float* c6 = (float*) ((uintptr_t) c5 + cm_stride); if XNN_UNPREDICTABLE(mr <= 6) { a6 = a5; c6 = c5; } do { __m512 vacc0x0123456789ABCDEF = _mm512_load_ps(w); __m512 vacc1x0123456789ABCDEF = vacc0x0123456789ABCDEF; __m512 vacc2x0123456789ABCDEF = 
vacc0x0123456789ABCDEF; __m512 vacc3x0123456789ABCDEF = vacc0x0123456789ABCDEF; __m512 vacc4x0123456789ABCDEF = vacc0x0123456789ABCDEF; __m512 vacc5x0123456789ABCDEF = vacc0x0123456789ABCDEF; __m512 vacc6x0123456789ABCDEF = vacc0x0123456789ABCDEF; w += 16; size_t k = kc; do { const __m512 vb0123456789ABCDEF = _mm512_load_ps(w); w += 16; const __m512 va0 = _mm512_set1_ps(*a0); vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF); const __m512 va1 = _mm512_set1_ps(*a1); vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF); const __m512 va2 = _mm512_set1_ps(*a2); vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF); const __m512 va3 = _mm512_set1_ps(*a3); vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF); const __m512 va4 = _mm512_set1_ps(*a4); vacc4x0123456789ABCDEF = _mm512_fmadd_ps(va4, vb0123456789ABCDEF, vacc4x0123456789ABCDEF); const __m512 va5 = _mm512_set1_ps(*a5); vacc5x0123456789ABCDEF = _mm512_fmadd_ps(va5, vb0123456789ABCDEF, vacc5x0123456789ABCDEF); const __m512 va6 = _mm512_set1_ps(*a6); vacc6x0123456789ABCDEF = _mm512_fmadd_ps(va6, vb0123456789ABCDEF, vacc6x0123456789ABCDEF); a0 += 1; a1 += 1; a2 += 1; a3 += 1; a4 += 1; a5 += 1; a6 += 1; k -= sizeof(float); } while (k != 0); const __m512 vmin = _mm512_set1_ps(params->scalar.min); vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF); vacc1x0123456789ABCDEF = _mm512_max_ps(vmin, vacc1x0123456789ABCDEF); vacc2x0123456789ABCDEF = _mm512_max_ps(vmin, vacc2x0123456789ABCDEF); vacc3x0123456789ABCDEF = _mm512_max_ps(vmin, vacc3x0123456789ABCDEF); vacc4x0123456789ABCDEF = _mm512_max_ps(vmin, vacc4x0123456789ABCDEF); vacc5x0123456789ABCDEF = _mm512_max_ps(vmin, vacc5x0123456789ABCDEF); vacc6x0123456789ABCDEF = _mm512_max_ps(vmin, vacc6x0123456789ABCDEF); const __m512 vmax = _mm512_set1_ps(params->scalar.max); vacc0x0123456789ABCDEF = 
_mm512_min_ps(vmax, vacc0x0123456789ABCDEF); vacc1x0123456789ABCDEF = _mm512_min_ps(vmax, vacc1x0123456789ABCDEF); vacc2x0123456789ABCDEF = _mm512_min_ps(vmax, vacc2x0123456789ABCDEF); vacc3x0123456789ABCDEF = _mm512_min_ps(vmax, vacc3x0123456789ABCDEF); vacc4x0123456789ABCDEF = _mm512_min_ps(vmax, vacc4x0123456789ABCDEF); vacc5x0123456789ABCDEF = _mm512_min_ps(vmax, vacc5x0123456789ABCDEF); vacc6x0123456789ABCDEF = _mm512_min_ps(vmax, vacc6x0123456789ABCDEF); if XNN_LIKELY(nc >= 16) { _mm512_storeu_ps(c6, vacc6x0123456789ABCDEF); c6 = (float*) ((uintptr_t) c6 + cn_stride); _mm512_storeu_ps(c5, vacc5x0123456789ABCDEF); c5 = (float*) ((uintptr_t) c5 + cn_stride); _mm512_storeu_ps(c4, vacc4x0123456789ABCDEF); c4 = (float*) ((uintptr_t) c4 + cn_stride); _mm512_storeu_ps(c3, vacc3x0123456789ABCDEF); c3 = (float*) ((uintptr_t) c3 + cn_stride); _mm512_storeu_ps(c2, vacc2x0123456789ABCDEF); c2 = (float*) ((uintptr_t) c2 + cn_stride); _mm512_storeu_ps(c1, vacc1x0123456789ABCDEF); c1 = (float*) ((uintptr_t) c1 + cn_stride); _mm512_storeu_ps(c0, vacc0x0123456789ABCDEF); c0 = (float*) ((uintptr_t) c0 + cn_stride); a6 = (const float*) ((uintptr_t) a6 - kc); a5 = (const float*) ((uintptr_t) a5 - kc); a4 = (const float*) ((uintptr_t) a4 - kc); a3 = (const float*) ((uintptr_t) a3 - kc); a2 = (const float*) ((uintptr_t) a2 - kc); a1 = (const float*) ((uintptr_t) a1 - kc); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 16; } else { if (nc & 15) { // Prepare mask for valid 32-bit elements (depends on nc). 
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1))); _mm512_mask_storeu_ps(c6, vmask, vacc6x0123456789ABCDEF); _mm512_mask_storeu_ps(c5, vmask, vacc5x0123456789ABCDEF); _mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF); _mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF); _mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF); _mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF); _mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF); } nc = 0; } } while (nc != 0); }
6,705
36.255556
106
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-7x8-minmax-avx-broadcast.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/avx-broadcast.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/gemm.h> void xnn_f32_gemm_minmax_ukernel_7x8__avx_broadcast( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 7); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const float* a0 = a; float* c0 = c; const float* a1 = (const float*) ((uintptr_t) a0 + a_stride); float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const float* a2 = (const float*) ((uintptr_t) a1 + a_stride); float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const float* a3 = (const float*) ((uintptr_t) a2 + a_stride); float* c3 = (float*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr < 4) { a3 = a2; c3 = c2; } const float* a4 = (const float*) ((uintptr_t) a3 + a_stride); float* c4 = (float*) ((uintptr_t) c3 + cm_stride); if XNN_UNPREDICTABLE(mr <= 4) { a4 = a3; c4 = c3; } const float* a5 = (const float*) ((uintptr_t) a4 + a_stride); float* c5 = (float*) ((uintptr_t) c4 + cm_stride); if XNN_UNPREDICTABLE(mr < 6) { a5 = a4; c5 = c4; } const float* a6 = (const float*) ((uintptr_t) a5 + a_stride); float* c6 = (float*) ((uintptr_t) c5 + cm_stride); if XNN_UNPREDICTABLE(mr <= 6) { a6 = a5; c6 = c5; } do { __m256 vacc0x01234567 = _mm256_load_ps(w + 0); __m256 vacc1x01234567 = vacc0x01234567; __m256 vacc2x01234567 = vacc0x01234567; __m256 vacc3x01234567 = vacc0x01234567; __m256 
vacc4x01234567 = vacc0x01234567; __m256 vacc5x01234567 = vacc0x01234567; __m256 vacc6x01234567 = vacc0x01234567; w += 8; size_t k = kc; do { const __m256 va0 = _mm256_broadcast_ss(a0); a0 += 1; const __m256 va1 = _mm256_broadcast_ss(a1); a1 += 1; const __m256 va2 = _mm256_broadcast_ss(a2); a2 += 1; const __m256 va3 = _mm256_broadcast_ss(a3); a3 += 1; const __m256 va4 = _mm256_broadcast_ss(a4); a4 += 1; const __m256 va5 = _mm256_broadcast_ss(a5); a5 += 1; const __m256 va6 = _mm256_broadcast_ss(a6); a6 += 1; const __m256 vb01234567 = _mm256_load_ps(w); w += 8; vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567)); vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567)); vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567)); vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567)); vacc4x01234567 = _mm256_add_ps(vacc4x01234567, _mm256_mul_ps(va4, vb01234567)); vacc5x01234567 = _mm256_add_ps(vacc5x01234567, _mm256_mul_ps(va5, vb01234567)); vacc6x01234567 = _mm256_add_ps(vacc6x01234567, _mm256_mul_ps(va6, vb01234567)); k -= sizeof(float); } while (k != 0); const __m256 vmin = _mm256_load_ps(params->avx.min); vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567); vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567); vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567); vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567); vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567); vacc5x01234567 = _mm256_max_ps(vmin, vacc5x01234567); vacc6x01234567 = _mm256_max_ps(vmin, vacc6x01234567); const __m256 vmax = _mm256_load_ps(params->avx.max); vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567); vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567); vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567); vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567); vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567); vacc5x01234567 = _mm256_min_ps(vmax, vacc5x01234567); vacc6x01234567 = 
_mm256_min_ps(vmax, vacc6x01234567); if XNN_LIKELY(nc >= 8) { _mm256_storeu_ps(c6, vacc6x01234567); c6 = (float*) ((uintptr_t) c6 + cn_stride); _mm256_storeu_ps(c5, vacc5x01234567); c5 = (float*) ((uintptr_t) c5 + cn_stride); _mm256_storeu_ps(c4, vacc4x01234567); c4 = (float*) ((uintptr_t) c4 + cn_stride); _mm256_storeu_ps(c3, vacc3x01234567); c3 = (float*) ((uintptr_t) c3 + cn_stride); _mm256_storeu_ps(c2, vacc2x01234567); c2 = (float*) ((uintptr_t) c2 + cn_stride); _mm256_storeu_ps(c1, vacc1x01234567); c1 = (float*) ((uintptr_t) c1 + cn_stride); _mm256_storeu_ps(c0, vacc0x01234567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a6 = (const float*) ((uintptr_t) a6 - kc); a5 = (const float*) ((uintptr_t) a5 - kc); a4 = (const float*) ((uintptr_t) a4 - kc); a3 = (const float*) ((uintptr_t) a3 - kc); a2 = (const float*) ((uintptr_t) a2 - kc); a1 = (const float*) ((uintptr_t) a1 - kc); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 8; } else { __m128 vacc6x0123 = _mm256_castps256_ps128(vacc6x01234567); __m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567); __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567); __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567); __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567); __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567); __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567); if (nc & 4) { _mm_storeu_ps(c6, vacc6x0123); _mm_storeu_ps(c5, vacc5x0123); _mm_storeu_ps(c4, vacc4x0123); _mm_storeu_ps(c3, vacc3x0123); _mm_storeu_ps(c2, vacc2x0123); _mm_storeu_ps(c1, vacc1x0123); _mm_storeu_ps(c0, vacc0x0123); vacc6x0123 = _mm256_extractf128_ps(vacc6x01234567, 1); vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1); vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1); vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1); vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1); vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1); vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1); c6 += 4; 
c5 += 4; c4 += 4; c3 += 4; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { _mm_storel_pi((__m64*) c6, vacc6x0123); _mm_storel_pi((__m64*) c5, vacc5x0123); _mm_storel_pi((__m64*) c4, vacc4x0123); _mm_storel_pi((__m64*) c3, vacc3x0123); _mm_storel_pi((__m64*) c2, vacc2x0123); _mm_storel_pi((__m64*) c1, vacc1x0123); _mm_storel_pi((__m64*) c0, vacc0x0123); vacc6x0123 = _mm_movehl_ps(vacc6x0123, vacc6x0123); vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123); vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123); vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123); vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123); vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123); vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123); c6 += 2; c5 += 2; c4 += 2; c3 += 2; c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { _mm_store_ss(c6, vacc6x0123); _mm_store_ss(c5, vacc5x0123); _mm_store_ss(c4, vacc4x0123); _mm_store_ss(c3, vacc3x0123); _mm_store_ss(c2, vacc2x0123); _mm_store_ss(c1, vacc1x0123); _mm_store_ss(c0, vacc0x0123); } nc = 0; } } while (nc != 0); }
8,067
33.626609
85
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-7x8-minmax-fma3-broadcast.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>


// f32 GEMM microkernel (FMA3, broadcast variant): computes a 7-row x 8-column
// tile of C from a packed weight stream `w` (8 bias floats followed by groups
// of 8 weights per reduction step) and up to 7 row pointers into `a`, then
// clamps every element to the range carried in `params->avx`.
// `kc` is the reduction length in BYTES (must be a multiple of sizeof(float));
// `cn_stride` is the byte step between consecutive 8-column tiles of a row.
void xnn_f32_gemm_minmax_ukernel_7x8__fma3_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 7);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // One input/output pointer pair per row. When mr < 7 the surplus row
  // pointers alias the previous row, so the extra rows redundantly recompute
  // that row instead of reading or writing out of bounds.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    a5 = a4;
    c5 = c4;
  }
  const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
  float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    a6 = a5;
    c6 = c5;
  }

  do {
    // Initialize all 7 row accumulators from the shared 8-float bias vector
    // at the head of the packed weight stream.
    __m256 vacc0x01234567 = _mm256_load_ps(w + 0);
    __m256 vacc1x01234567 = vacc0x01234567;
    __m256 vacc2x01234567 = vacc0x01234567;
    __m256 vacc3x01234567 = vacc0x01234567;
    __m256 vacc4x01234567 = vacc0x01234567;
    __m256 vacc5x01234567 = vacc0x01234567;
    __m256 vacc6x01234567 = vacc0x01234567;
    w += 8;

    // Main reduction: each iteration broadcasts one scalar from every A row
    // and fuse-multiply-adds it against the same 8 packed weights.
    size_t k = kc;
    do {
      const __m256 va0 = _mm256_broadcast_ss(a0);
      a0 += 1;
      const __m256 va1 = _mm256_broadcast_ss(a1);
      a1 += 1;
      const __m256 va2 = _mm256_broadcast_ss(a2);
      a2 += 1;
      const __m256 va3 = _mm256_broadcast_ss(a3);
      a3 += 1;
      const __m256 va4 = _mm256_broadcast_ss(a4);
      a4 += 1;
      const __m256 va5 = _mm256_broadcast_ss(a5);
      a5 += 1;
      const __m256 va6 = _mm256_broadcast_ss(a6);
      a6 += 1;

      const __m256 vb01234567 = _mm256_load_ps(w);
      w += 8;

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
      vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
      vacc5x01234567 = _mm256_fmadd_ps(va5, vb01234567, vacc5x01234567);
      vacc6x01234567 = _mm256_fmadd_ps(va6, vb01234567, vacc6x01234567);

      k -= sizeof(float);
    } while (k != 0);

    // Clamp to the output range. params->avx.min/max are loaded as full
    // 8-lane vectors — presumably pre-broadcast by the params initializer;
    // confirm against the xnn_f32_minmax_params definition.
    const __m256 vmin = _mm256_load_ps(params->avx.min);
    vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
    vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
    vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
    vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
    vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
    vacc5x01234567 = _mm256_max_ps(vmin, vacc5x01234567);
    vacc6x01234567 = _mm256_max_ps(vmin, vacc6x01234567);

    const __m256 vmax = _mm256_load_ps(params->avx.max);
    vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
    vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
    vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
    vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
    vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
    vacc5x01234567 = _mm256_min_ps(vmax, vacc5x01234567);
    vacc6x01234567 = _mm256_min_ps(vmax, vacc6x01234567);

    if XNN_LIKELY(nc >= 8) {
      // Full-width tile: store all 8 columns for every row, advance the
      // output pointers to the next column tile, and rewind the input rows
      // so they can be reused for that tile.
      _mm256_storeu_ps(c6, vacc6x01234567);
      c6 = (float*) ((uintptr_t) c6 + cn_stride);
      _mm256_storeu_ps(c5, vacc5x01234567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      _mm256_storeu_ps(c4, vacc4x01234567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm256_storeu_ps(c3, vacc3x01234567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm256_storeu_ps(c2, vacc2x01234567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a6 = (const float*) ((uintptr_t) a6 - kc);
      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial tile of 1..7 columns: peel off 4, 2, then 1 columns,
      // shifting the remaining lanes down after each store.
      __m128 vacc6x0123 = _mm256_castps256_ps128(vacc6x01234567);
      __m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
      __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
      __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c6, vacc6x0123);
        _mm_storeu_ps(c5, vacc5x0123);
        _mm_storeu_ps(c4, vacc4x0123);
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc6x0123 = _mm256_extractf128_ps(vacc6x01234567, 1);
        vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
        vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
        vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c6 += 4;
        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c6, vacc6x0123);
        _mm_storel_pi((__m64*) c5, vacc5x0123);
        _mm_storel_pi((__m64*) c4, vacc4x0123);
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc6x0123 = _mm_movehl_ps(vacc6x0123, vacc6x0123);
        vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c6 += 2;
        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c6, vacc6x0123);
        _mm_store_ss(c5, vacc5x0123);
        _mm_store_ss(c4, vacc4x0123);
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}
7,977
33.240343
75
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-8x16-minmax-avx512f-broadcast.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx512-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>


// f32 GEMM microkernel (AVX-512F, broadcast variant): computes an 8-row x
// 16-column tile of C = A*B + bias from the packed weight stream `w`, then
// clamps each element to [params->scalar.min, params->scalar.max].
// `kc` is the reduction length in BYTES; the <= 15-column remainder is
// handled with an AVX-512 write mask rather than scalar peeling.
void xnn_f32_gemm_minmax_ukernel_8x16__avx512f_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 8);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Row pointer setup; rows beyond mr alias the previous row so no
  // out-of-bounds accesses occur when mr < 8.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    a5 = a4;
    c5 = c4;
  }
  const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
  float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    a6 = a5;
    c6 = c5;
  }
  const float* a7 = (const float*) ((uintptr_t) a6 + a_stride);
  float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 8) {
    a7 = a6;
    c7 = c6;
  }

  do {
    // All 8 row accumulators start from the shared 16-float bias vector.
    __m512 vacc0x0123456789ABCDEF = _mm512_load_ps(w);
    __m512 vacc1x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc2x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc3x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc4x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc5x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc6x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc7x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    w += 16;

    // Main reduction: per iteration, one 16-wide weight vector is multiplied
    // against a broadcast scalar from each of the 8 A rows.
    size_t k = kc;
    do {
      const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
      w += 16;

      const __m512 va0 = _mm512_set1_ps(*a0);
      vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
      const __m512 va1 = _mm512_set1_ps(*a1);
      vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
      const __m512 va2 = _mm512_set1_ps(*a2);
      vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
      const __m512 va3 = _mm512_set1_ps(*a3);
      vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
      const __m512 va4 = _mm512_set1_ps(*a4);
      vacc4x0123456789ABCDEF = _mm512_fmadd_ps(va4, vb0123456789ABCDEF, vacc4x0123456789ABCDEF);
      const __m512 va5 = _mm512_set1_ps(*a5);
      vacc5x0123456789ABCDEF = _mm512_fmadd_ps(va5, vb0123456789ABCDEF, vacc5x0123456789ABCDEF);
      const __m512 va6 = _mm512_set1_ps(*a6);
      vacc6x0123456789ABCDEF = _mm512_fmadd_ps(va6, vb0123456789ABCDEF, vacc6x0123456789ABCDEF);
      const __m512 va7 = _mm512_set1_ps(*a7);
      vacc7x0123456789ABCDEF = _mm512_fmadd_ps(va7, vb0123456789ABCDEF, vacc7x0123456789ABCDEF);

      a0 += 1;
      a1 += 1;
      a2 += 1;
      a3 += 1;
      a4 += 1;
      a5 += 1;
      a6 += 1;
      a7 += 1;

      k -= sizeof(float);
    } while (k != 0);

    // Clamp every accumulator to [min, max]; scalar params are broadcast here.
    const __m512 vmin = _mm512_set1_ps(params->scalar.min);
    vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF);
    vacc1x0123456789ABCDEF = _mm512_max_ps(vmin, vacc1x0123456789ABCDEF);
    vacc2x0123456789ABCDEF = _mm512_max_ps(vmin, vacc2x0123456789ABCDEF);
    vacc3x0123456789ABCDEF = _mm512_max_ps(vmin, vacc3x0123456789ABCDEF);
    vacc4x0123456789ABCDEF = _mm512_max_ps(vmin, vacc4x0123456789ABCDEF);
    vacc5x0123456789ABCDEF = _mm512_max_ps(vmin, vacc5x0123456789ABCDEF);
    vacc6x0123456789ABCDEF = _mm512_max_ps(vmin, vacc6x0123456789ABCDEF);
    vacc7x0123456789ABCDEF = _mm512_max_ps(vmin, vacc7x0123456789ABCDEF);

    const __m512 vmax = _mm512_set1_ps(params->scalar.max);
    vacc0x0123456789ABCDEF = _mm512_min_ps(vmax, vacc0x0123456789ABCDEF);
    vacc1x0123456789ABCDEF = _mm512_min_ps(vmax, vacc1x0123456789ABCDEF);
    vacc2x0123456789ABCDEF = _mm512_min_ps(vmax, vacc2x0123456789ABCDEF);
    vacc3x0123456789ABCDEF = _mm512_min_ps(vmax, vacc3x0123456789ABCDEF);
    vacc4x0123456789ABCDEF = _mm512_min_ps(vmax, vacc4x0123456789ABCDEF);
    vacc5x0123456789ABCDEF = _mm512_min_ps(vmax, vacc5x0123456789ABCDEF);
    vacc6x0123456789ABCDEF = _mm512_min_ps(vmax, vacc6x0123456789ABCDEF);
    vacc7x0123456789ABCDEF = _mm512_min_ps(vmax, vacc7x0123456789ABCDEF);

    if XNN_LIKELY(nc >= 16) {
      // Full tile: store 16 columns per row, advance output pointers to the
      // next column tile, and rewind the input rows for reuse.
      _mm512_storeu_ps(c7, vacc7x0123456789ABCDEF);
      c7 = (float*) ((uintptr_t) c7 + cn_stride);
      _mm512_storeu_ps(c6, vacc6x0123456789ABCDEF);
      c6 = (float*) ((uintptr_t) c6 + cn_stride);
      _mm512_storeu_ps(c5, vacc5x0123456789ABCDEF);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      _mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a7 = (const float*) ((uintptr_t) a7 - kc);
      a6 = (const float*) ((uintptr_t) a6 - kc);
      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 16;
    } else {
      if (nc & 15) {
        // Prepare mask for valid 32-bit elements (depends on nc).
        const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));

        _mm512_mask_storeu_ps(c7, vmask, vacc7x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c6, vmask, vacc6x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c5, vmask, vacc5x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
      }

      nc = 0;
    }
  } while (nc != 0);
}
7,469
37.112245
106
c
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-8x8-minmax-fma3-broadcast.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>


// f32 GEMM microkernel (FMA3, broadcast variant): computes an 8-row x
// 8-column tile of C = A*B + bias and clamps the result to the range in
// `params->avx`. Structure mirrors the 7x8 variant with one extra row.
void xnn_f32_gemm_minmax_ukernel_8x8__fma3_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 8);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Row pointer setup; rows beyond mr alias the previous row to avoid
  // out-of-bounds reads/writes when mr < 8.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    a5 = a4;
    c5 = c4;
  }
  const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
  float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    a6 = a5;
    c6 = c5;
  }
  const float* a7 = (const float*) ((uintptr_t) a6 + a_stride);
  float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 8) {
    a7 = a6;
    c7 = c6;
  }

  do {
    // All 8 row accumulators start from the shared 8-float bias vector.
    __m256 vacc0x01234567 = _mm256_load_ps(w + 0);
    __m256 vacc1x01234567 = vacc0x01234567;
    __m256 vacc2x01234567 = vacc0x01234567;
    __m256 vacc3x01234567 = vacc0x01234567;
    __m256 vacc4x01234567 = vacc0x01234567;
    __m256 vacc5x01234567 = vacc0x01234567;
    __m256 vacc6x01234567 = vacc0x01234567;
    __m256 vacc7x01234567 = vacc0x01234567;
    w += 8;

    // Main reduction loop: broadcast one scalar per A row, FMA against the
    // same 8 packed weights.
    size_t k = kc;
    do {
      const __m256 va0 = _mm256_broadcast_ss(a0);
      a0 += 1;
      const __m256 va1 = _mm256_broadcast_ss(a1);
      a1 += 1;
      const __m256 va2 = _mm256_broadcast_ss(a2);
      a2 += 1;
      const __m256 va3 = _mm256_broadcast_ss(a3);
      a3 += 1;
      const __m256 va4 = _mm256_broadcast_ss(a4);
      a4 += 1;
      const __m256 va5 = _mm256_broadcast_ss(a5);
      a5 += 1;
      const __m256 va6 = _mm256_broadcast_ss(a6);
      a6 += 1;
      const __m256 va7 = _mm256_broadcast_ss(a7);
      a7 += 1;

      const __m256 vb01234567 = _mm256_load_ps(w);
      w += 8;

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
      vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
      vacc5x01234567 = _mm256_fmadd_ps(va5, vb01234567, vacc5x01234567);
      vacc6x01234567 = _mm256_fmadd_ps(va6, vb01234567, vacc6x01234567);
      vacc7x01234567 = _mm256_fmadd_ps(va7, vb01234567, vacc7x01234567);

      k -= sizeof(float);
    } while (k != 0);

    // Clamp to [min, max] using the pre-broadcast vectors in params->avx.
    const __m256 vmin = _mm256_load_ps(params->avx.min);
    vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
    vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
    vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
    vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
    vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
    vacc5x01234567 = _mm256_max_ps(vmin, vacc5x01234567);
    vacc6x01234567 = _mm256_max_ps(vmin, vacc6x01234567);
    vacc7x01234567 = _mm256_max_ps(vmin, vacc7x01234567);

    const __m256 vmax = _mm256_load_ps(params->avx.max);
    vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
    vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
    vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
    vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
    vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
    vacc5x01234567 = _mm256_min_ps(vmax, vacc5x01234567);
    vacc6x01234567 = _mm256_min_ps(vmax, vacc6x01234567);
    vacc7x01234567 = _mm256_min_ps(vmax, vacc7x01234567);

    if XNN_LIKELY(nc >= 8) {
      // Full tile: store, advance outputs to the next tile, rewind inputs.
      _mm256_storeu_ps(c7, vacc7x01234567);
      c7 = (float*) ((uintptr_t) c7 + cn_stride);
      _mm256_storeu_ps(c6, vacc6x01234567);
      c6 = (float*) ((uintptr_t) c6 + cn_stride);
      _mm256_storeu_ps(c5, vacc5x01234567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      _mm256_storeu_ps(c4, vacc4x01234567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm256_storeu_ps(c3, vacc3x01234567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm256_storeu_ps(c2, vacc2x01234567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a7 = (const float*) ((uintptr_t) a7 - kc);
      a6 = (const float*) ((uintptr_t) a6 - kc);
      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial tile of 1..7 columns: peel 4, 2, then 1 columns, shifting
      // the surviving lanes down after each store.
      __m128 vacc7x0123 = _mm256_castps256_ps128(vacc7x01234567);
      __m128 vacc6x0123 = _mm256_castps256_ps128(vacc6x01234567);
      __m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
      __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
      __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c7, vacc7x0123);
        _mm_storeu_ps(c6, vacc6x0123);
        _mm_storeu_ps(c5, vacc5x0123);
        _mm_storeu_ps(c4, vacc4x0123);
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc7x0123 = _mm256_extractf128_ps(vacc7x01234567, 1);
        vacc6x0123 = _mm256_extractf128_ps(vacc6x01234567, 1);
        vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
        vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
        vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c7 += 4;
        c6 += 4;
        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c7, vacc7x0123);
        _mm_storel_pi((__m64*) c6, vacc6x0123);
        _mm_storel_pi((__m64*) c5, vacc5x0123);
        _mm_storel_pi((__m64*) c4, vacc4x0123);
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc7x0123 = _mm_movehl_ps(vacc7x0123, vacc7x0123);
        vacc6x0123 = _mm_movehl_ps(vacc6x0123, vacc6x0123);
        vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c7 += 2;
        c6 += 2;
        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c7, vacc7x0123);
        _mm_store_ss(c6, vacc6x0123);
        _mm_store_ss(c5, vacc5x0123);
        _mm_store_ss(c4, vacc4x0123);
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}
8,947
33.953125
75
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x16-minmax-avx-broadcast.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>


// f32 GEMM-with-accumulation microkernel (AVX, no FMA): continues a 1-row x
// 16-column tile of C from the partial sums in `acc` (instead of a bias from
// `w`), adds A*B over `kc` bytes of reduction, then clamps to the range in
// `params->avx`. Uses separate mul+add since plain AVX has no fused FMA.
void xnn_f32_gemminc_minmax_ukernel_1x16__avx_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  const float* a0 = a;
  float* c0 = c;

  do {
    // Resume the two 8-wide accumulators from the previous pass's partials.
    __m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
    __m256 vacc0x89ABCDEF = _mm256_load_ps(acc + 8);
    acc += 16;

    size_t k = kc;
    do {
      const __m256 va0 = _mm256_broadcast_ss(a0);
      a0 += 1;

      const __m256 vb01234567 = _mm256_load_ps(w);
      const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
      w += 16;

      vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
      vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF));

      k -= sizeof(float);
    } while (k != 0);

    // Clamp both halves to [min, max].
    const __m256 vmin = _mm256_load_ps(params->avx.min);
    vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
    vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF);

    const __m256 vmax = _mm256_load_ps(params->avx.max);
    vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
    vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF);

    if XNN_LIKELY(nc >= 16) {
      _mm256_storeu_ps(c0, vacc0x01234567);
      _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 16;
    } else {
      // Partial tile of 1..15 columns: peel 8, 4, 2, then 1, shifting the
      // surviving lanes down after each store.
      if (nc & 8) {
        _mm256_storeu_ps(c0, vacc0x01234567);

        vacc0x01234567 = vacc0x89ABCDEF;

        c0 += 8;
      }
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c0, vacc0x0123);

        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}
2,730
23.827273
85
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x16-minmax-avx512f-broadcast.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/avx512-broadcast.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/intrinsics-polyfill.h> void xnn_f32_gemminc_minmax_ukernel_1x16__avx512f_broadcast( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const float* restrict acc, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); assert(acc != NULL); const float* a0 = a; float* c0 = c; do { __m512 vacc0x0123456789ABCDEF = _mm512_load_ps(acc + 0); acc += 16; size_t k = kc; do { const __m512 vb0123456789ABCDEF = _mm512_load_ps(w); w += 16; const __m512 va0 = _mm512_set1_ps(*a0); vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF); a0 += 1; k -= sizeof(float); } while (k != 0); const __m512 vmin = _mm512_set1_ps(params->scalar.min); vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF); const __m512 vmax = _mm512_set1_ps(params->scalar.max); vacc0x0123456789ABCDEF = _mm512_min_ps(vmax, vacc0x0123456789ABCDEF); if XNN_LIKELY(nc >= 16) { _mm512_storeu_ps(c0, vacc0x0123456789ABCDEF); c0 = (float*) ((uintptr_t) c0 + cn_stride); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 16; } else { if (nc & 15) { // Prepare mask for valid 32-bit elements (depends on nc). const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1))); _mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF); } nc = 0; } } while (nc != 0); }
2,189
24.465116
106
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x16-minmax-fma3-broadcast.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>


// f32 GEMM-with-accumulation microkernel (FMA3): continues a 1-row x
// 16-column tile of C from the partial sums in `acc`, adds A*B over `kc`
// bytes of reduction via fused multiply-add, then clamps to the range in
// `params->avx`. Identical in structure to the AVX variant, but uses
// _mm256_fmadd_ps in place of separate mul+add.
void xnn_f32_gemminc_minmax_ukernel_1x16__fma3_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  const float* a0 = a;
  float* c0 = c;

  do {
    // Resume the two 8-wide accumulators from the previous pass's partials.
    __m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
    __m256 vacc0x89ABCDEF = _mm256_load_ps(acc + 8);
    acc += 16;

    size_t k = kc;
    do {
      const __m256 va0 = _mm256_broadcast_ss(a0);
      a0 += 1;

      const __m256 vb01234567 = _mm256_load_ps(w);
      const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
      w += 16;

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);

      k -= sizeof(float);
    } while (k != 0);

    // Clamp both halves to [min, max].
    const __m256 vmin = _mm256_load_ps(params->avx.min);
    vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
    vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF);

    const __m256 vmax = _mm256_load_ps(params->avx.max);
    vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
    vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF);

    if XNN_LIKELY(nc >= 16) {
      _mm256_storeu_ps(c0, vacc0x01234567);
      _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 16;
    } else {
      // Partial tile of 1..15 columns: peel 8, 4, 2, then 1, shifting the
      // surviving lanes down after each store.
      if (nc & 8) {
        _mm256_storeu_ps(c0, vacc0x01234567);

        vacc0x01234567 = vacc0x89ABCDEF;

        c0 += 8;
      }
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c0, vacc0x0123);

        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}
2,705
23.6
75
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x4-minmax-scalar.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_f32_gemminc_minmax_ukernel_1x4__scalar( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const float* restrict acc, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); assert(acc != NULL); const float* a0 = a; float* c0 = c; const float vmin = params->scalar.min; const float vmax = params->scalar.max; do { float vacc00 = acc[0]; float vacc01 = acc[1]; float vacc02 = acc[2]; float vacc03 = acc[3]; acc += 4; size_t k = kc; do { const float va0 = *a0++; const float vb0 = w[0]; const float vb1 = w[1]; const float vb2 = w[2]; const float vb3 = w[3]; w += 4; vacc00 = math_muladd_f32(va0, vb0, vacc00); vacc01 = math_muladd_f32(va0, vb1, vacc01); vacc02 = math_muladd_f32(va0, vb2, vacc02); vacc03 = math_muladd_f32(va0, vb3, vacc03); k -= sizeof(float); } while (k != 0); vacc00 = math_max_f32(vacc00, vmin); vacc01 = math_max_f32(vacc01, vmin); vacc02 = math_max_f32(vacc02, vmin); vacc03 = math_max_f32(vacc03, vmin); vacc00 = math_min_f32(vacc00, vmax); vacc01 = math_min_f32(vacc01, vmax); vacc02 = math_min_f32(vacc02, vmax); vacc03 = math_min_f32(vacc03, vmax); if XNN_LIKELY(nc >= 4) { c0[0] = vacc00; c0[1] = vacc01; c0[2] = vacc02; c0[3] = vacc03; c0 = (float*) ((uintptr_t) c0 + cn_stride); a0 = (const void*) ((uintptr_t) a0 - kc); nc -= 4; } else { if (nc & 2) { c0[0] = vacc00; c0[1] = vacc01; vacc00 = vacc02; c0 += 2; } if (nc & 1) { c0[0] = 
vacc00; } nc = 0; } } while (nc != 0); }
2,382
21.913462
75
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x4-minmax-wasm.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>

// GEMM-with-accumulator micro-kernel, 1x4 tile, WebAssembly scalar variant.
// Identical structure to the plain scalar kernel; the only difference is that
// clamping uses __builtin_wasm_{min,max}_f32, which lower to single Wasm
// f32.min/f32.max instructions. Accumulators are seeded from `acc`.
void xnn_f32_gemminc_minmax_ukernel_1x4__wasm(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  const float* a0 = a;
  float* c0 = c;

  const float vmin = params->scalar.min;
  const float vmax = params->scalar.max;
  do {
    // Seed the 1x4 accumulator tile from the supplied partial sums.
    float vacc00 = acc[0];
    float vacc01 = acc[1];
    float vacc02 = acc[2];
    float vacc03 = acc[3];
    acc += 4;

    // Main K loop: one fused multiply-add per output element per K step.
    size_t k = kc;
    do {
      const float va0 = *a0++;

      const float vb0 = w[0];
      const float vb1 = w[1];
      const float vb2 = w[2];
      const float vb3 = w[3];
      w += 4;

      vacc00 = math_muladd_f32(va0, vb0, vacc00);
      vacc01 = math_muladd_f32(va0, vb1, vacc01);
      vacc02 = math_muladd_f32(va0, vb2, vacc02);
      vacc03 = math_muladd_f32(va0, vb3, vacc03);

      k -= sizeof(float);
    } while (k != 0);

    // Clamp via native Wasm f32.max / f32.min.
    vacc00 = __builtin_wasm_max_f32(vacc00, vmin);
    vacc01 = __builtin_wasm_max_f32(vacc01, vmin);
    vacc02 = __builtin_wasm_max_f32(vacc02, vmin);
    vacc03 = __builtin_wasm_max_f32(vacc03, vmin);

    vacc00 = __builtin_wasm_min_f32(vacc00, vmax);
    vacc01 = __builtin_wasm_min_f32(vacc01, vmax);
    vacc02 = __builtin_wasm_min_f32(vacc02, vmax);
    vacc03 = __builtin_wasm_min_f32(vacc03, vmax);

    if XNN_LIKELY(nc >= 4) {
      // Full tile: store all 4 results, rewind A for the next column tile.
      c0[0] = vacc00;
      c0[1] = vacc01;
      c0[2] = vacc02;
      c0[3] = vacc03;
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a0 = (const void*) ((uintptr_t) a0 - kc);

      nc -= 4;
    } else {
      // Tail: store 2 then 1 results, shifting remaining lanes down.
      if (nc & 2) {
        c0[0] = vacc00;
        c0[1] = vacc01;
        vacc00 = vacc02;
        c0 += 2;
      }
      if (nc & 1) {
        c0[0] = vacc00;
      }

      nc = 0;
    }
  } while (nc != 0);
}
2,460
22.663462
75
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x8-minmax-aarch64-neonfma-lane-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/neon-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>

// GEMM-with-accumulator micro-kernel, 1x8 tile, AArch64 NEON with FMA.
// Loads 2 A elements (64 bits) per main-loop iteration and uses
// vfmaq_lane_f32 to multiply by each lane without an explicit dup.
// Accumulators are seeded from `acc` (8 floats per column tile).
void xnn_f32_gemminc_minmax_ukernel_1x8__aarch64_neonfma_lane_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  const float* a0 = a;
  float* c0 = c;

  do {
    // Seed the two 4-lane accumulators from the supplied partial sums.
    float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
    float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;

    // Main loop: process K in steps of 2, one lane-FMA pair per step.
    size_t k = kc;
    for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
      const float32x2_t va0 = vld1_f32(a0); a0 += 2;

      const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
      const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;

      vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
      vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
      vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
      vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
    }
    // Remainder: at most one K step left (kc is a multiple of sizeof(float)).
    if XNN_UNLIKELY(k != 0) {
      const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;

      const float32x4_t vb0123 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567 = vld1q_f32(w); w += 4;

      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
    }
    // Clamp to [min, max].
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc0x4567 = vminq_f32(vacc0x4567, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);

    if XNN_LIKELY(nc >= 8) {
      // Full tile: store 8 results, rewind A for the next column tile.
      vst1q_f32(c0, vacc0x0123);
      vst1q_f32(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail: store 4/2/1 results, shifting remaining lanes down each time.
      if (nc & 4) {
        vst1q_f32(c0, vacc0x0123); c0 += 4;

        vacc0x0123 = vacc0x4567;
      }
      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      if (nc & 2) {
        vst1_f32(c0, vacc0x01); c0 += 2;

        vacc0x01 = vget_high_f32(vacc0x0123);
      }
      if (nc & 1) {
        vst1_lane_f32(c0, vacc0x01, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
2,953
26.351852
75
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x8-minmax-avx-broadcast.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/avx-broadcast.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/gemm.h> void xnn_f32_gemminc_minmax_ukernel_1x8__avx_broadcast( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const float* restrict acc, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); assert(acc != NULL); const float* a0 = a; float* c0 = c; do { __m256 vacc0x01234567 = _mm256_load_ps(acc + 0); acc += 8; size_t k = kc; do { const __m256 va0 = _mm256_broadcast_ss(a0); a0 += 1; const __m256 vb01234567 = _mm256_load_ps(w); w += 8; vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567)); k -= sizeof(float); } while (k != 0); const __m256 vmin = _mm256_load_ps(params->avx.min); vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567); const __m256 vmax = _mm256_load_ps(params->avx.max); vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567); if XNN_LIKELY(nc >= 8) { _mm256_storeu_ps(c0, vacc0x01234567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 8; } else { __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567); if (nc & 4) { _mm_storeu_ps(c0, vacc0x0123); vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1); c0 += 4; } if (nc & 2) { _mm_storel_pi((__m64*) c0, vacc0x0123); vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123); c0 += 2; } if (nc & 1) { _mm_store_ss(c0, vacc0x0123); } nc = 0; } } while (nc != 0); }
2,233
22.030928
85
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x8-minmax-fma3-broadcast.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/avx-broadcast.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/gemm.h> void xnn_f32_gemminc_minmax_ukernel_1x8__fma3_broadcast( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const float* restrict acc, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); assert(acc != NULL); const float* a0 = a; float* c0 = c; do { __m256 vacc0x01234567 = _mm256_load_ps(acc + 0); acc += 8; size_t k = kc; do { const __m256 va0 = _mm256_broadcast_ss(a0); a0 += 1; const __m256 vb01234567 = _mm256_load_ps(w); w += 8; vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567); k -= sizeof(float); } while (k != 0); const __m256 vmin = _mm256_load_ps(params->avx.min); vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567); const __m256 vmax = _mm256_load_ps(params->avx.max); vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567); if XNN_LIKELY(nc >= 8) { _mm256_storeu_ps(c0, vacc0x01234567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 8; } else { __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567); if (nc & 4) { _mm_storeu_ps(c0, vacc0x0123); vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1); c0 += 4; } if (nc & 2) { _mm_storel_pi((__m64*) c0, vacc0x0123); vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123); c0 += 2; } if (nc & 1) { _mm_store_ss(c0, vacc0x0123); } nc = 0; } } while (nc != 0); }
2,221
21.907216
75
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x8-minmax-neon-dup-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/neon-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>

// GEMM-with-accumulator micro-kernel, 1x8 tile, NEON (no FMA), "dup" variant:
// each A lane is explicitly broadcast with vdupq_lane_f32 before the vmlaq_f32
// multiply-accumulate. Accumulators seeded from `acc`.
void xnn_f32_gemminc_minmax_ukernel_1x8__neon_dup_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  const float* a0 = a;
  float* c0 = c;

  do {
    // Seed the two 4-lane accumulators from the supplied partial sums.
    float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
    float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;

    // Main loop: process K in steps of 2; dup each lane, then mla.
    size_t k = kc;
    for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
      const float32x2_t va0 = vld1_f32(a0); a0 += 2;

      const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
      const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;

      const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
      vacc0x0123 = vmlaq_f32(vacc0x0123, va0c0, vb0123c0);
      vacc0x4567 = vmlaq_f32(vacc0x4567, va0c0, vb4567c0);
      const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
      vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
      vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
    }
    // Remainder: at most one K step left.
    if XNN_UNLIKELY(k != 0) {
      const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;

      const float32x4_t vb0123 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567 = vld1q_f32(w); w += 4;

      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
    }
    // Clamp to [min, max].
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc0x4567 = vminq_f32(vacc0x4567, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);

    if XNN_LIKELY(nc >= 8) {
      // Full tile: store 8 results, rewind A for the next column tile.
      vst1q_f32(c0, vacc0x0123);
      vst1q_f32(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail: store 4/2/1 results, shifting remaining lanes down each time.
      if (nc & 4) {
        vst1q_f32(c0, vacc0x0123); c0 += 4;

        vacc0x0123 = vacc0x4567;
      }
      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      if (nc & 2) {
        vst1_f32(c0, vacc0x01); c0 += 2;

        vacc0x01 = vget_high_f32(vacc0x0123);
      }
      if (nc & 1) {
        vst1_lane_f32(c0, vacc0x01, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
3,029
26.545455
75
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x8-minmax-neon-lane-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/neon-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>

// GEMM-with-accumulator micro-kernel, 1x8 tile, NEON (no FMA), "lane" variant:
// vmlaq_lane_f32 multiplies by a selected A lane directly, avoiding an
// explicit dup. Accumulators seeded from `acc`.
void xnn_f32_gemminc_minmax_ukernel_1x8__neon_lane_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  const float* a0 = a;
  float* c0 = c;

  do {
    // Seed the two 4-lane accumulators from the supplied partial sums.
    float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
    float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;

    // Main loop: process K in steps of 2, one lane-MLA pair per step.
    size_t k = kc;
    for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
      const float32x2_t va0 = vld1_f32(a0); a0 += 2;

      const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
      const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;

      vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
      vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
      vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
      vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
    }
    // Remainder: at most one K step left.
    if XNN_UNLIKELY(k != 0) {
      const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;

      const float32x4_t vb0123 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567 = vld1q_f32(w); w += 4;

      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
    }
    // Clamp to [min, max].
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc0x4567 = vminq_f32(vacc0x4567, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);

    if XNN_LIKELY(nc >= 8) {
      // Full tile: store 8 results, rewind A for the next column tile.
      vst1q_f32(c0, vacc0x0123);
      vst1q_f32(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail: store 4/2/1 results, shifting remaining lanes down each time.
      if (nc & 4) {
        vst1q_f32(c0, vacc0x0123); c0 += 4;

        vacc0x0123 = vacc0x4567;
      }
      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      if (nc & 2) {
        vst1_f32(c0, vacc0x01); c0 += 2;

        vacc0x01 = vget_high_f32(vacc0x0123);
      }
      if (nc & 1) {
        vst1_lane_f32(c0, vacc0x01, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
2,942
26.25
75
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x8-minmax-neonfma-dup-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/neon-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>

// GEMM-with-accumulator micro-kernel, 1x8 tile, NEON with FMA, "dup" variant:
// each A lane is broadcast with vdupq_lane_f32 and accumulated via vfmaq_f32
// (fused multiply-add). Accumulators seeded from `acc`.
void xnn_f32_gemminc_minmax_ukernel_1x8__neonfma_dup_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  const float* a0 = a;
  float* c0 = c;

  do {
    // Seed the two 4-lane accumulators from the supplied partial sums.
    float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
    float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;

    // Main loop: process K in steps of 2; dup each lane, then FMA.
    size_t k = kc;
    for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
      const float32x2_t va0 = vld1_f32(a0); a0 += 2;

      const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
      const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;

      const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
      vacc0x0123 = vfmaq_f32(vacc0x0123, va0c0, vb0123c0);
      vacc0x4567 = vfmaq_f32(vacc0x4567, va0c0, vb4567c0);
      const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
      vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
      vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
    }
    // Remainder: at most one K step left.
    if XNN_UNLIKELY(k != 0) {
      const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;

      const float32x4_t vb0123 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567 = vld1q_f32(w); w += 4;

      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
    }
    // Clamp to [min, max].
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc0x4567 = vminq_f32(vacc0x4567, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);

    if XNN_LIKELY(nc >= 8) {
      // Full tile: store 8 results, rewind A for the next column tile.
      vst1q_f32(c0, vacc0x0123);
      vst1q_f32(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail: store 4/2/1 results, shifting remaining lanes down each time.
      if (nc & 4) {
        vst1q_f32(c0, vacc0x0123); c0 += 4;

        vacc0x0123 = vacc0x4567;
      }
      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      if (nc & 2) {
        vst1_f32(c0, vacc0x01); c0 += 2;

        vacc0x01 = vget_high_f32(vacc0x0123);
      }
      if (nc & 1) {
        vst1_lane_f32(c0, vacc0x01, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
3,032
26.572727
75
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x8-minmax-sse-dup.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/sse-dup.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/gemm.h>

// GEMM-with-accumulator micro-kernel, 1x8 tile, SSE "dup" variant: the main
// loop is unrolled 4x over K — it loads 4 A elements at once and broadcasts
// each with _mm_shuffle_ps before the mul+add. A scalar do-while handles the
// K remainder. Accumulators seeded from `acc`; bounds from params->sse.
void xnn_f32_gemminc_minmax_ukernel_1x8__sse_dup(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  const float* a0 = a;
  float* c0 = c;

  do {
    // Seed the two 4-lane accumulators from the supplied partial sums.
    __m128 vacc0x0123 = _mm_load_ps(acc + 0);
    __m128 vacc0x4567 = _mm_load_ps(acc + 4);
    acc += 8;

    // Main loop, unrolled 4x over K.
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      const __m128 va0 = _mm_loadu_ps(a0);
      a0 += 4;

      const __m128 va0c0000 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 0, 0, 0));

      const __m128 vb0123c0 = _mm_load_ps(w + 0);
      const __m128 vb4567c0 = _mm_load_ps(w + 4);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));

      const __m128 va0c1111 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(1, 1, 1, 1));

      const __m128 vb0123c1 = _mm_load_ps(w + 8);
      const __m128 vb4567c1 = _mm_load_ps(w + 12);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));

      const __m128 va0c2222 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(2, 2, 2, 2));

      const __m128 vb0123c2 = _mm_load_ps(w + 16);
      const __m128 vb4567c2 = _mm_load_ps(w + 20);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));

      const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));

      const __m128 vb0123c3 = _mm_load_ps(w + 24);
      const __m128 vb4567c3 = _mm_load_ps(w + 28);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));

      w += 32;
      k -= 4 * sizeof(float);
    }
    // K remainder (1-3 steps): one broadcast mul+add per step.
    if XNN_UNLIKELY(k != 0) {
      do {
        const __m128 va0 = _mm_load1_ps(a0);
        a0 += 1;

        const __m128 vb0123 = _mm_load_ps(w);
        const __m128 vb4567 = _mm_load_ps(w + 4);
        w += 8;

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));

        k -= sizeof(float);
      } while (k != 0);
    }

    // Clamp to [min, max].
    const __m128 vmax = _mm_load_ps(params->sse.max);
    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);

    if XNN_LIKELY(nc >= 8) {
      // Full tile: store 8 results, rewind A for the next column tile.
      _mm_storeu_ps(c0, vacc0x0123);
      _mm_storeu_ps(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail: store 4/2/1 results, shifting remaining lanes down each time.
      if (nc & 4) {
        _mm_storeu_ps(c0, vacc0x0123);

        vacc0x0123 = vacc0x4567;

        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}
3,893
26.041667
80
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x8-minmax-sse-load1.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/sse-load1.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/gemm.h>

// GEMM-with-accumulator micro-kernel, 1x8 tile, SSE "load1" variant: no K
// unrolling; each K step broadcasts one A scalar with _mm_load1_ps and does
// two mul+add pairs. Accumulators seeded from `acc`; bounds from params->sse.
void xnn_f32_gemminc_minmax_ukernel_1x8__sse_load1(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  const float* a0 = a;
  float* c0 = c;

  do {
    // Seed the two 4-lane accumulators from the supplied partial sums.
    __m128 vacc0x0123 = _mm_load_ps(acc + 0);
    __m128 vacc0x4567 = _mm_load_ps(acc + 4);
    acc += 8;

    // Main K loop: broadcast one A scalar, multiply by 8 weights, accumulate.
    size_t k = kc;
    do {
      const __m128 va0 = _mm_load1_ps(a0);
      a0 += 1;

      const __m128 vb0123 = _mm_load_ps(w);
      const __m128 vb4567 = _mm_load_ps(w + 4);
      w += 8;

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));

      k -= sizeof(float);
    } while (k != 0);

    // Clamp to [min, max].
    const __m128 vmax = _mm_load_ps(params->sse.max);
    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);

    if XNN_LIKELY(nc >= 8) {
      // Full tile: store 8 results, rewind A for the next column tile.
      _mm_storeu_ps(c0, vacc0x0123);
      _mm_storeu_ps(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail: store 4/2/1 results, shifting remaining lanes down each time.
      if (nc & 4) {
        _mm_storeu_ps(c0, vacc0x0123);

        vacc0x0123 = vacc0x4567;

        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}
2,352
22.068627
75
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x8-minmax-sse2-dup.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/sse-dup.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/gemm.h>

// GEMM-with-accumulator micro-kernel, 1x8 tile, SSE2 "dup" variant: same 4x
// K-unrolled structure as the SSE dup kernel, but lanes 0-2 are broadcast via
// the integer _mm_shuffle_epi32 (through bit-casts); lane 3 keeps the float
// _mm_shuffle_ps. Accumulators seeded from `acc`; bounds from params->sse.
void xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  const float* a0 = a;
  float* c0 = c;

  do {
    // Seed the two 4-lane accumulators from the supplied partial sums.
    __m128 vacc0x0123 = _mm_load_ps(acc + 0);
    __m128 vacc0x4567 = _mm_load_ps(acc + 4);
    acc += 8;

    // Main loop, unrolled 4x over K.
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      const __m128 va0 = _mm_loadu_ps(a0);
      a0 += 4;

      const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));

      const __m128 vb0123c0 = _mm_load_ps(w + 0);
      const __m128 vb4567c0 = _mm_load_ps(w + 4);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));

      const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));

      const __m128 vb0123c1 = _mm_load_ps(w + 8);
      const __m128 vb4567c1 = _mm_load_ps(w + 12);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));

      const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));

      const __m128 vb0123c2 = _mm_load_ps(w + 16);
      const __m128 vb4567c2 = _mm_load_ps(w + 20);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));

      const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));

      const __m128 vb0123c3 = _mm_load_ps(w + 24);
      const __m128 vb4567c3 = _mm_load_ps(w + 28);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));

      w += 32;
      k -= 4 * sizeof(float);
    }
    // K remainder (1-3 steps): one broadcast mul+add per step.
    if XNN_UNLIKELY(k != 0) {
      do {
        const __m128 va0 = _mm_load1_ps(a0);
        a0 += 1;

        const __m128 vb0123 = _mm_load_ps(w);
        const __m128 vb4567 = _mm_load_ps(w + 4);
        w += 8;

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));

        k -= sizeof(float);
      } while (k != 0);
    }

    // Clamp to [min, max].
    const __m128 vmax = _mm_load_ps(params->sse.max);
    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);

    if XNN_LIKELY(nc >= 8) {
      // Full tile: store 8 results, rewind A for the next column tile.
      _mm_storeu_ps(c0, vacc0x0123);
      _mm_storeu_ps(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail: store 4/2/1 results, shifting remaining lanes down each time.
      if (nc & 4) {
        _mm_storeu_ps(c0, vacc0x0123);

        vacc0x0123 = vacc0x4567;

        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}
3,996
26.756944
114
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x8-minmax-wasmrelaxedsimd-fma-loadsplat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

// GEMM-with-accumulator micro-kernel, 1x8 tile, WebAssembly Relaxed SIMD
// "loadsplat" variant: each K step load-splats one A scalar and accumulates
// via relaxed fused multiply-add. Clamping also uses the relaxed min/max
// builtins (NaN/signed-zero behavior is implementation-defined, as allowed
// for min/max params). Accumulators seeded from `acc`.
void xnn_f32_gemminc_minmax_ukernel_1x8__wasmrelaxedsimd_fma_loadsplat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  const float* a0 = a;
  float* c0 = c;

  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Seed the two 4-lane accumulators from the supplied partial sums.
    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
    acc += 8;

    // Main K loop: splat one A scalar, relaxed-FMA into both accumulators.
    size_t k = kc;
    do {
      const v128_t va0 = wasm_v128_load32_splat(a0);
      a0 += 1;

      const v128_t vb0123 = wasm_v128_load(w);
      const v128_t vb4567 = wasm_v128_load(w + 4);
      w += 8;

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);

      k -= sizeof(float);
    } while (k != 0);

    // Clamp to [min, max] with relaxed min/max.
    vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
    vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);

    vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
    vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full tile: store 8 results, rewind A for the next column tile.
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail: store 4/2/1 results, shifting remaining lanes down each time.
      if (nc & 4) {
        wasm_v128_store(c0, vacc0x0123);

        vacc0x0123 = vacc0x4567;

        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
2,577
24.27451
78
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x8-minmax-wasmrelaxedsimd-fma-splat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

// F32 GEMMINC micro-kernel (1x8 tile), WebAssembly Relaxed SIMD with fused
// multiply-add, "splat" flavor: the main loop loads 4 A elements at once and
// broadcasts each of them in turn with a shuffle, consuming 32 packed weights
// per iteration. Accumulators are seeded from `acc` and clamped to [min, max].
// kc is the reduction length in BYTES (see the `kc % sizeof(float)` assert).
void xnn_f32_gemminc_minmax_ukernel_1x8__wasmrelaxedsimd_fma_splat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  const float* a0 = a;
  float* c0 = c;

  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Seed the accumulator tile from the incoming partial sums.
    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
    acc += 8;

    size_t k = kc;
    // Main loop: 4 reduction steps per iteration (unrolled c0..c3).
    while (k >= 4 * sizeof(float)) {
      const v128_t va0 = wasm_v128_load(a0);
      a0 += 4;

      // Step c0: broadcast lane 0 of A.
      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
      // Step c1: broadcast lane 1 of A.
      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
      // Step c2: broadcast lane 2 of A.
      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
      // Step c3: broadcast lane 3 of A.
      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder loop: one element of A per iteration (splat-load).
    if XNN_UNLIKELY(k != 0) {
      do {
        const v128_t va0 = wasm_v128_load32_splat(a0);
        a0 += 1;

        const v128_t vb0123 = wasm_v128_load(w);
        const v128_t vb4567 = wasm_v128_load(w + 4);
        w += 8;

        vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
        vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);

        k -= sizeof(float);
      } while (k != 0);
    }

    // Clamp the tile to [min, max].
    vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
    vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
    vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
    vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full-width store; rewind A by kc bytes for the next column group.
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);
      a0 = (const float*) ((uintptr_t) a0 - kc);
      nc -= 8;
    } else {
      // Tail store of 4/2/1 columns, funnelling lanes down as we go.
      if (nc & 4) {
        wasm_v128_store(c0, vacc0x0123);
        vacc0x0123 = vacc0x4567;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c0, vacc0x0123, 0);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
4,151
28.870504
82
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x8-minmax-wasmrelaxedsimd-loadsplat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

// F32 GEMMINC micro-kernel (1x8 tile), WebAssembly Relaxed SIMD "loadsplat"
// flavor WITHOUT fused multiply-add: products use separate mul + add, while
// the final clamp still uses the relaxed min/max instructions. Accumulators
// are seeded from `acc`; kc is the reduction length in BYTES.
void xnn_f32_gemminc_minmax_ukernel_1x8__wasmrelaxedsimd_loadsplat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  const float* a0 = a;
  float* c0 = c;

  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Seed the accumulator tile from the incoming partial sums.
    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
    acc += 8;

    size_t k = kc;
    do {
      // Broadcast one A element; consume 8 packed weights per step.
      const v128_t va0 = wasm_v128_load32_splat(a0);
      a0 += 1;

      const v128_t vb0123 = wasm_v128_load(w);
      const v128_t vb4567 = wasm_v128_load(w + 4);
      w += 8;

      // Unfused multiply-accumulate: acc = acc + a * b.
      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));

      k -= sizeof(float);
    } while (k != 0);

    // Clamp the tile to [min, max] using relaxed min/max.
    vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
    vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
    vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
    vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full-width store; rewind A by kc bytes for the next column group.
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);
      a0 = (const float*) ((uintptr_t) a0 - kc);
      nc -= 8;
    } else {
      // Tail store of 4/2/1 columns.
      if (nc & 4) {
        wasm_v128_store(c0, vacc0x0123);
        vacc0x0123 = vacc0x4567;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c0, vacc0x0123, 0);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
2,567
24.176471
75
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x8-minmax-wasmrelaxedsimd-splat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

// F32 GEMMINC micro-kernel (1x8 tile), WebAssembly Relaxed SIMD "splat"
// flavor without fused multiply-add: 4 A elements are loaded per main-loop
// iteration and broadcast one at a time (shuffles), with unfused mul + add;
// the final clamp uses the relaxed min/max instructions. Accumulators are
// seeded from `acc`; kc is the reduction length in BYTES.
void xnn_f32_gemminc_minmax_ukernel_1x8__wasmrelaxedsimd_splat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  const float* a0 = a;
  float* c0 = c;

  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Seed the accumulator tile from the incoming partial sums.
    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
    acc += 8;

    size_t k = kc;
    // Main loop: 4 reduction steps per iteration, 32 weights consumed.
    while (k >= 4 * sizeof(float)) {
      const v128_t va0 = wasm_v128_load(a0);
      a0 += 4;

      // Step c0: broadcast lane 0 of A.
      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
      // Step c1: broadcast lane 1 of A.
      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
      // Step c2: broadcast lane 2 of A.
      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
      // Step c3: broadcast lane 3 of A.
      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder loop: one A element per iteration.
    if XNN_UNLIKELY(k != 0) {
      do {
        const v128_t va0 = wasm_v128_load32_splat(a0);
        a0 += 1;

        const v128_t vb0123 = wasm_v128_load(w);
        const v128_t vb4567 = wasm_v128_load(w + 4);
        w += 8;

        vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
        vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);

        k -= sizeof(float);
      } while (k != 0);
    }

    // Clamp the tile to [min, max] using relaxed min/max.
    vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
    vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
    vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
    vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full-width store; rewind A by kc bytes for the next column group.
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);
      a0 = (const float*) ((uintptr_t) a0 - kc);
      nc -= 8;
    } else {
      // Tail store of 4/2/1 columns.
      if (nc & 4) {
        wasm_v128_store(c0, vacc0x0123);
        vacc0x0123 = vacc0x4567;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c0, vacc0x0123, 0);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
4,117
28.625899
79
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x8-minmax-wasmsimd-arm-loadsplat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

// F32 GEMMINC micro-kernel (1x8 tile), baseline WebAssembly SIMD "loadsplat",
// ARM-tuned variant: clamping uses the fully-defined wasm_f32x4_min/max (the
// NaN-propagating forms that map well to ARM hardware). Accumulators are
// seeded from `acc`; kc is the reduction length in BYTES.
void xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  const float* a0 = a;
  float* c0 = c;

  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Seed the accumulator tile from the incoming partial sums.
    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
    acc += 8;

    size_t k = kc;
    do {
      // Broadcast one A element; consume 8 packed weights per step.
      const v128_t va0 = wasm_v128_load32_splat(a0);
      a0 += 1;

      const v128_t vb0123 = wasm_v128_load(w);
      const v128_t vb4567 = wasm_v128_load(w + 4);
      w += 8;

      // Unfused multiply-accumulate: acc = acc + a * b.
      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));

      k -= sizeof(float);
    } while (k != 0);

    // Clamp the tile to [min, max].
    vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
    vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
    vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
    vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full-width store; rewind A by kc bytes for the next column group.
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);
      a0 = (const float*) ((uintptr_t) a0 - kc);
      nc -= 8;
    } else {
      // Tail store of 4/2/1 columns.
      if (nc & 4) {
        wasm_v128_store(c0, vacc0x0123);
        vacc0x0123 = vacc0x4567;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c0, vacc0x0123, 0);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
2,492
23.441176
75
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x8-minmax-wasmsimd-arm-splat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

// F32 GEMMINC micro-kernel (1x8 tile), baseline WebAssembly SIMD "splat",
// ARM-tuned variant: 4 A elements loaded per main-loop iteration and
// broadcast in turn; clamping uses wasm_f32x4_min/max. Accumulators are
// seeded from `acc`; kc is the reduction length in BYTES.
void xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  const float* a0 = a;
  float* c0 = c;

  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Seed the accumulator tile from the incoming partial sums.
    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
    acc += 8;

    size_t k = kc;
    // Main loop: 4 reduction steps per iteration, 32 weights consumed.
    while (k >= 4 * sizeof(float)) {
      const v128_t va0 = wasm_v128_load(a0);
      a0 += 4;

      // Step c0: broadcast lane 0 of A.
      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
      // Step c1: broadcast lane 1 of A.
      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
      // Step c2: broadcast lane 2 of A.
      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
      // Step c3: broadcast lane 3 of A.
      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder loop: one A element per iteration.
    if XNN_UNLIKELY(k != 0) {
      do {
        const v128_t va0 = wasm_v128_load32_splat(a0);
        a0 += 1;

        const v128_t vb0123 = wasm_v128_load(w);
        const v128_t vb4567 = wasm_v128_load(w + 4);
        w += 8;

        vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
        vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);

        k -= sizeof(float);
      } while (k != 0);
    }

    // Clamp the tile to [min, max].
    vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
    vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
    vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
    vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full-width store; rewind A by kc bytes for the next column group.
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);
      a0 = (const float*) ((uintptr_t) a0 - kc);
      nc -= 8;
    } else {
      // Tail store of 4/2/1 columns.
      if (nc & 4) {
        wasm_v128_store(c0, vacc0x0123);
        vacc0x0123 = vacc0x4567;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c0, vacc0x0123, 0);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
4,042
28.086331
79
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x8-minmax-wasmsimd-x86-loadsplat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

// F32 GEMMINC micro-kernel (1x8 tile), baseline WebAssembly SIMD "loadsplat",
// x86-tuned variant: clamping uses the pseudo-min/max (pmin/pmax) forms that
// lower efficiently to SSE min/max on x86. Accumulators are seeded from
// `acc`; kc is the reduction length in BYTES.
void xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  const float* a0 = a;
  float* c0 = c;

  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Seed the accumulator tile from the incoming partial sums.
    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
    acc += 8;

    size_t k = kc;
    do {
      // Broadcast one A element; consume 8 packed weights per step.
      const v128_t va0 = wasm_v128_load32_splat(a0);
      a0 += 1;

      const v128_t vb0123 = wasm_v128_load(w);
      const v128_t vb4567 = wasm_v128_load(w + 4);
      w += 8;

      // Unfused multiply-accumulate: acc = acc + a * b.
      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));

      k -= sizeof(float);
    } while (k != 0);

    // Clamp the tile to [min, max] with pseudo-min/max.
    vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
    vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
    vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
    vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full-width store; rewind A by kc bytes for the next column group.
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);
      a0 = (const float*) ((uintptr_t) a0 - kc);
      nc -= 8;
    } else {
      // Tail store of 4/2/1 columns.
      if (nc & 4) {
        wasm_v128_store(c0, vacc0x0123);
        vacc0x0123 = vacc0x4567;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c0, vacc0x0123, 0);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
2,496
23.480392
75
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x8-minmax-wasmsimd-x86-splat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

// F32 GEMMINC micro-kernel (1x8 tile), baseline WebAssembly SIMD "splat",
// x86-tuned variant: 4 A elements loaded per main-loop iteration and
// broadcast in turn; clamping uses the pseudo-min/max (pmin/pmax) forms.
// Accumulators are seeded from `acc`; kc is the reduction length in BYTES.
void xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  const float* a0 = a;
  float* c0 = c;

  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Seed the accumulator tile from the incoming partial sums.
    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
    acc += 8;

    size_t k = kc;
    // Main loop: 4 reduction steps per iteration, 32 weights consumed.
    while (k >= 4 * sizeof(float)) {
      const v128_t va0 = wasm_v128_load(a0);
      a0 += 4;

      // Step c0: broadcast lane 0 of A.
      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
      // Step c1: broadcast lane 1 of A.
      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
      // Step c2: broadcast lane 2 of A.
      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
      // Step c3: broadcast lane 3 of A.
      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder loop: one A element per iteration.
    if XNN_UNLIKELY(k != 0) {
      do {
        const v128_t va0 = wasm_v128_load32_splat(a0);
        a0 += 1;

        const v128_t vb0123 = wasm_v128_load(w);
        const v128_t vb4567 = wasm_v128_load(w + 4);
        w += 8;

        vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
        vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);

        k -= sizeof(float);
      } while (k != 0);
    }

    // Clamp the tile to [min, max] with pseudo-min/max.
    vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
    vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
    vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
    vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full-width store; rewind A by kc bytes for the next column group.
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);
      a0 = (const float*) ((uintptr_t) a0 - kc);
      nc -= 8;
    } else {
      // Tail store of 4/2/1 columns.
      if (nc & 4) {
        wasm_v128_store(c0, vacc0x0123);
        vacc0x0123 = vacc0x4567;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c0, vacc0x0123, 0);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
4,046
28.115108
79
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x8s4-minmax-sse.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/sse-shuffle.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/gemm.h>

// F32 GEMMINC micro-kernel (1x8 tile), SSE "s4" (shift-by-4) flavor: instead
// of broadcasting A, the 4-element A vector is rotated one lane between the
// four unrolled sub-steps, so each of the 4 A values meets its matching
// packed-weight column. Accumulators are seeded from `acc` and clamped to
// [min, max]. XNN_OOB_READS: the remainder path may read up to a full vector
// of A past the valid k elements; the zero-weight masks below keep those
// lanes from contributing.
void xnn_f32_gemminc_minmax_ukernel_1x8s4__sse(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  const float* a0 = a;
  float* c0 = c;

  do {
    // Seed the accumulator tile from the incoming partial sums.
    __m128 vacc0x0123 = _mm_load_ps(acc + 0);
    __m128 vacc0x4567 = _mm_load_ps(acc + 4);
    acc += 8;

    size_t k = kc;
    // Main loop: 4 reduction steps per iteration; A is rotated one lane
    // (SHUFFLE(0,3,2,1)) between sub-steps instead of being broadcast.
    while (k >= 4 * sizeof(float)) {
      __m128 va0 = _mm_loadu_ps(a0);
      a0 += 4;

      const __m128 vb0123c0 = _mm_load_ps(w + 0);
      const __m128 vb4567c0 = _mm_load_ps(w + 4);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c1 = _mm_load_ps(w + 8);
      const __m128 vb4567c1 = _mm_load_ps(w + 12);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c2 = _mm_load_ps(w + 16);
      const __m128 vb4567c2 = _mm_load_ps(w + 20);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c3 = _mm_load_ps(w + 24);
      const __m128 vb4567c3 = _mm_load_ps(w + 28);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder (k < 4 elements): load a full A vector (may over-read; see
    // XNN_OOB_READS) and zero the A lanes wherever the packed weight is 0,
    // so out-of-range A values cannot contribute to the accumulators.
    if XNN_UNLIKELY(k != 0) {
      __m128 va0 = _mm_loadu_ps(a0);
      a0 = (const float*) ((uintptr_t) a0 + k);

      const __m128 vb0123c0 = _mm_load_ps(w + 0);
      const __m128 vb4567c0 = _mm_load_ps(w + 4);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c1 = _mm_load_ps(w + 8);
      const __m128 vb4567c1 = _mm_load_ps(w + 12);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c2 = _mm_load_ps(w + 16);
      const __m128 vb4567c2 = _mm_load_ps(w + 20);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), vb4567c2));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c3 = _mm_load_ps(w + 24);
      const __m128 vb4567c3 = _mm_load_ps(w + 28);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3));

      w += 32;
    }

    // Clamp the tile to [min, max] (max first, then min, per SSE convention
    // here: _mm_min_ps with vmax, then _mm_max_ps with vmin).
    const __m128 vmax = _mm_load_ps(params->sse.max);
    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);

    if XNN_LIKELY(nc >= 8) {
      // Full-width store; rewind A by kc bytes for the next column group.
      _mm_storeu_ps(c0, vacc0x0123);
      _mm_storeu_ps(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);
      a0 = (const float*) ((uintptr_t) a0 - kc);
      nc -= 8;
    } else {
      // Tail store of 4/2/1 columns, funnelling lanes down as we go.
      if (nc & 4) {
        _mm_storeu_ps(c0, vacc0x0123);
        vacc0x0123 = vacc0x4567;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c0, vacc0x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c0, vacc0x0123);
      }
      nc = 0;
    }
  } while (nc != 0);
}
5,064
31.056962
126
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x8s4-minmax-wasmrelaxedsimd-fma.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-s4.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

// F32 GEMMINC micro-kernel (1x8 tile), WebAssembly Relaxed SIMD FMA "s4"
// (shift-by-4) flavor: the 4-element A vector is rotated one lane
// (shuffle 1,2,3,0) between the four unrolled sub-steps rather than being
// broadcast. Accumulators are seeded from `acc` and clamped to [min, max].
// In the remainder path, A lanes whose packed weight is zero are masked out
// so out-of-range A values cannot contribute to the result.
void xnn_f32_gemminc_minmax_ukernel_1x8s4__wasmrelaxedsimd_fma(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  const float* a0 = a;
  float* c0 = c;

  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Seed the accumulator tile from the incoming partial sums.
    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
    acc += 8;

    size_t k = kc;
    // Main loop: 4 reduction steps per iteration; A rotated between steps.
    while (k >= 4 * sizeof(float)) {
      v128_t va0 = wasm_v128_load(a0);
      a0 += 4;

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder (k < 4 elements): mask A lanes whose packed weight is zero
    // (andnot with the vb == 0 comparison) before the fused multiply-add.
    if XNN_UNLIKELY(k != 0) {
      v128_t va0 = wasm_v128_load(a0);
      a0 = (const float*) ((uintptr_t) a0 + k);

      const v128_t vzero = wasm_f32x4_const_splat(0.0f);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);

      w += 32;
    }

    // Clamp the tile to [min, max] using relaxed min/max.
    vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
    vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
    vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
    vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full-width store; rewind A by kc bytes for the next column group.
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);
      a0 = (const float*) ((uintptr_t) a0 - kc);
      nc -= 8;
    } else {
      // Tail store of 4/2/1 columns.
      if (nc & 4) {
        wasm_v128_store(c0, vacc0x0123);
        vacc0x0123 = vacc0x4567;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c0, vacc0x0123, 0);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
5,405
31.763636
130
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x8s4-minmax-wasmrelaxedsimd.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/gemm.h> void xnn_f32_gemminc_minmax_ukernel_1x8s4__wasmrelaxedsimd( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const float* restrict acc, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); assert(acc != NULL); const float* a0 = a; float* c0 = c; const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(acc + 0); v128_t vacc0x4567 = wasm_v128_load(acc + 4); acc += 8; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, 
vb4567c2), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567); 
w += 32; } vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567); vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
5,353
31.448485
127
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x8s4-minmax-wasmsimd-arm.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/gemm.h> void xnn_f32_gemminc_minmax_ukernel_1x8s4__wasmsimd_arm( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const float* restrict acc, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); assert(acc != NULL); const float* a0 = a; float* c0 = c; const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(acc + 0); v128_t vacc0x4567 = wasm_v128_load(acc + 4); acc += 8; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), 
vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567); w += 32; } 
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123); vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567); vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123); vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
5,278
30.993939
127
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-1x8s4-minmax-wasmsimd-x86.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/gemm.h> void xnn_f32_gemminc_minmax_ukernel_1x8s4__wasmsimd_x86( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const float* restrict acc, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); assert(acc != NULL); const float* a0 = a; float* c0 = c; const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(acc + 0); v128_t vacc0x4567 = wasm_v128_load(acc + 4); acc += 8; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), 
vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567); w += 32; } 
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123); vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567); vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123); vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
5,282
31.018182
127
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-2x4-minmax-scalar.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_f32_gemminc_minmax_ukernel_2x4__scalar( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const float* restrict acc, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 2); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); assert(acc != NULL); const float* a0 = a; float* c0 = c; const float* a1 = (const float*) ((uintptr_t) a0 + a_stride); float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr != 2) { a1 = a0; c1 = c0; } const float vmin = params->scalar.min; const float vmax = params->scalar.max; do { float vacc00 = acc[0]; float vacc01 = acc[1]; float vacc02 = acc[2]; float vacc03 = acc[3]; float vacc10 = acc[4]; float vacc11 = acc[5]; float vacc12 = acc[6]; float vacc13 = acc[7]; acc += 8; size_t k = kc; do { const float va0 = *a0++; const float va1 = *a1++; const float vb0 = w[0]; const float vb1 = w[1]; const float vb2 = w[2]; const float vb3 = w[3]; w += 4; vacc00 = math_muladd_f32(va0, vb0, vacc00); vacc01 = math_muladd_f32(va0, vb1, vacc01); vacc02 = math_muladd_f32(va0, vb2, vacc02); vacc03 = math_muladd_f32(va0, vb3, vacc03); vacc10 = math_muladd_f32(va1, vb0, vacc10); vacc11 = math_muladd_f32(va1, vb1, vacc11); vacc12 = math_muladd_f32(va1, vb2, vacc12); vacc13 = math_muladd_f32(va1, vb3, vacc13); k -= sizeof(float); } while (k != 0); vacc00 = math_max_f32(vacc00, vmin); vacc01 = math_max_f32(vacc01, vmin); vacc02 = math_max_f32(vacc02, vmin); vacc03 = 
math_max_f32(vacc03, vmin); vacc10 = math_max_f32(vacc10, vmin); vacc11 = math_max_f32(vacc11, vmin); vacc12 = math_max_f32(vacc12, vmin); vacc13 = math_max_f32(vacc13, vmin); vacc00 = math_min_f32(vacc00, vmax); vacc01 = math_min_f32(vacc01, vmax); vacc02 = math_min_f32(vacc02, vmax); vacc03 = math_min_f32(vacc03, vmax); vacc10 = math_min_f32(vacc10, vmax); vacc11 = math_min_f32(vacc11, vmax); vacc12 = math_min_f32(vacc12, vmax); vacc13 = math_min_f32(vacc13, vmax); if XNN_LIKELY(nc >= 4) { c1[0] = vacc10; c1[1] = vacc11; c1[2] = vacc12; c1[3] = vacc13; c1 = (float*) ((uintptr_t) c1 + cn_stride); c0[0] = vacc00; c0[1] = vacc01; c0[2] = vacc02; c0[3] = vacc03; c0 = (float*) ((uintptr_t) c0 + cn_stride); a1 = (const void*) ((uintptr_t) a1 - kc); a0 = (const void*) ((uintptr_t) a0 - kc); nc -= 4; } else { if (nc & 2) { c1[0] = vacc10; c1[1] = vacc11; vacc10 = vacc12; c1 += 2; c0[0] = vacc00; c0[1] = vacc01; vacc00 = vacc02; c0 += 2; } if (nc & 1) { c1[0] = vacc10; c0[0] = vacc00; } nc = 0; } } while (nc != 0); }
3,530
24.586957
75
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-2x4-minmax-wasm.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_f32_gemminc_minmax_ukernel_2x4__wasm( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const float* restrict acc, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 2); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); assert(acc != NULL); const float* a0 = a; float* c0 = c; const float* a1 = (const float*) ((uintptr_t) a0 + a_stride); float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr != 2) { a1 = a0; c1 = c0; } const float vmin = params->scalar.min; const float vmax = params->scalar.max; do { float vacc00 = acc[0]; float vacc01 = acc[1]; float vacc02 = acc[2]; float vacc03 = acc[3]; float vacc10 = acc[4]; float vacc11 = acc[5]; float vacc12 = acc[6]; float vacc13 = acc[7]; acc += 8; size_t k = kc; do { const float va0 = *a0++; const float va1 = *a1++; const float vb0 = w[0]; const float vb1 = w[1]; const float vb2 = w[2]; const float vb3 = w[3]; w += 4; vacc00 = math_muladd_f32(va0, vb0, vacc00); vacc01 = math_muladd_f32(va0, vb1, vacc01); vacc02 = math_muladd_f32(va0, vb2, vacc02); vacc03 = math_muladd_f32(va0, vb3, vacc03); vacc10 = math_muladd_f32(va1, vb0, vacc10); vacc11 = math_muladd_f32(va1, vb1, vacc11); vacc12 = math_muladd_f32(va1, vb2, vacc12); vacc13 = math_muladd_f32(va1, vb3, vacc13); k -= sizeof(float); } while (k != 0); vacc00 = __builtin_wasm_max_f32(vacc00, vmin); vacc01 = __builtin_wasm_max_f32(vacc01, vmin); vacc02 = __builtin_wasm_max_f32(vacc02, 
vmin); vacc03 = __builtin_wasm_max_f32(vacc03, vmin); vacc10 = __builtin_wasm_max_f32(vacc10, vmin); vacc11 = __builtin_wasm_max_f32(vacc11, vmin); vacc12 = __builtin_wasm_max_f32(vacc12, vmin); vacc13 = __builtin_wasm_max_f32(vacc13, vmin); vacc00 = __builtin_wasm_min_f32(vacc00, vmax); vacc01 = __builtin_wasm_min_f32(vacc01, vmax); vacc02 = __builtin_wasm_min_f32(vacc02, vmax); vacc03 = __builtin_wasm_min_f32(vacc03, vmax); vacc10 = __builtin_wasm_min_f32(vacc10, vmax); vacc11 = __builtin_wasm_min_f32(vacc11, vmax); vacc12 = __builtin_wasm_min_f32(vacc12, vmax); vacc13 = __builtin_wasm_min_f32(vacc13, vmax); if XNN_LIKELY(nc >= 4) { c1[0] = vacc10; c1[1] = vacc11; c1[2] = vacc12; c1[3] = vacc13; c1 = (float*) ((uintptr_t) c1 + cn_stride); c0[0] = vacc00; c0[1] = vacc01; c0[2] = vacc02; c0[3] = vacc03; c0 = (float*) ((uintptr_t) c0 + cn_stride); a1 = (const void*) ((uintptr_t) a1 - kc); a0 = (const void*) ((uintptr_t) a0 - kc); nc -= 4; } else { if (nc & 2) { c1[0] = vacc10; c1[1] = vacc11; vacc10 = vacc12; c1 += 2; c0[0] = vacc00; c0[1] = vacc01; vacc00 = vacc02; c0 += 2; } if (nc & 1) { c1[0] = vacc10; c0[0] = vacc00; } nc = 0; } } while (nc != 0); }
3,688
25.731884
75
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-3x16-minmax-avx-broadcast.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/avx-broadcast.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/gemm.h> void xnn_f32_gemminc_minmax_ukernel_3x16__avx_broadcast( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const float* restrict acc, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); assert(acc != NULL); const float* a0 = a; float* c0 = c; const float* a1 = (const float*) ((uintptr_t) a0 + a_stride); float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const float* a2 = (const float*) ((uintptr_t) a1 + a_stride); float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { __m256 vacc0x01234567 = _mm256_load_ps(acc + 0); __m256 vacc0x89ABCDEF = _mm256_load_ps(acc + 8); __m256 vacc1x01234567 = _mm256_load_ps(acc + 16); __m256 vacc1x89ABCDEF = _mm256_load_ps(acc + 24); __m256 vacc2x01234567 = _mm256_load_ps(acc + 32); __m256 vacc2x89ABCDEF = _mm256_load_ps(acc + 40); acc += 48; size_t k = kc; do { const __m256 va0 = _mm256_broadcast_ss(a0); a0 += 1; const __m256 va1 = _mm256_broadcast_ss(a1); a1 += 1; const __m256 va2 = _mm256_broadcast_ss(a2); a2 += 1; const __m256 vb01234567 = _mm256_load_ps(w); const __m256 vb89ABCDEF = _mm256_load_ps(w + 8); w += 16; vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567)); vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567)); vacc2x01234567 = 
_mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567)); vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF)); vacc1x89ABCDEF = _mm256_add_ps(vacc1x89ABCDEF, _mm256_mul_ps(va1, vb89ABCDEF)); vacc2x89ABCDEF = _mm256_add_ps(vacc2x89ABCDEF, _mm256_mul_ps(va2, vb89ABCDEF)); k -= sizeof(float); } while (k != 0); const __m256 vmin = _mm256_load_ps(params->avx.min); vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567); vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567); vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567); vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF); vacc1x89ABCDEF = _mm256_max_ps(vmin, vacc1x89ABCDEF); vacc2x89ABCDEF = _mm256_max_ps(vmin, vacc2x89ABCDEF); const __m256 vmax = _mm256_load_ps(params->avx.max); vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567); vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567); vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567); vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF); vacc1x89ABCDEF = _mm256_min_ps(vmax, vacc1x89ABCDEF); vacc2x89ABCDEF = _mm256_min_ps(vmax, vacc2x89ABCDEF); if XNN_LIKELY(nc >= 16) { _mm256_storeu_ps(c2, vacc2x01234567); _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF); c2 = (float*) ((uintptr_t) c2 + cn_stride); _mm256_storeu_ps(c1, vacc1x01234567); _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF); c1 = (float*) ((uintptr_t) c1 + cn_stride); _mm256_storeu_ps(c0, vacc0x01234567); _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF); c0 = (float*) ((uintptr_t) c0 + cn_stride); a2 = (const float*) ((uintptr_t) a2 - kc); a1 = (const float*) ((uintptr_t) a1 - kc); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 16; } else { if (nc & 8) { _mm256_storeu_ps(c2, vacc2x01234567); _mm256_storeu_ps(c1, vacc1x01234567); _mm256_storeu_ps(c0, vacc0x01234567); vacc2x01234567 = vacc2x89ABCDEF; vacc1x01234567 = vacc1x89ABCDEF; vacc0x01234567 = vacc0x89ABCDEF; c2 += 8; c1 += 8; c0 += 8; } __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567); __m128 vacc1x0123 = 
_mm256_castps256_ps128(vacc1x01234567); __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567); if (nc & 4) { _mm_storeu_ps(c2, vacc2x0123); _mm_storeu_ps(c1, vacc1x0123); _mm_storeu_ps(c0, vacc0x0123); vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1); vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1); vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1); c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { _mm_storel_pi((__m64*) c2, vacc2x0123); _mm_storel_pi((__m64*) c1, vacc1x0123); _mm_storel_pi((__m64*) c0, vacc0x0123); vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123); vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123); vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { _mm_store_ss(c2, vacc2x0123); _mm_store_ss(c1, vacc1x0123); _mm_store_ss(c0, vacc0x0123); } nc = 0; } } while (nc != 0); }
5,531
31.162791
85
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-3x16-minmax-fma3-broadcast.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/avx-broadcast.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/gemm.h> void xnn_f32_gemminc_minmax_ukernel_3x16__fma3_broadcast( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const float* restrict acc, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); assert(acc != NULL); const float* a0 = a; float* c0 = c; const float* a1 = (const float*) ((uintptr_t) a0 + a_stride); float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const float* a2 = (const float*) ((uintptr_t) a1 + a_stride); float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { __m256 vacc0x01234567 = _mm256_load_ps(acc + 0); __m256 vacc0x89ABCDEF = _mm256_load_ps(acc + 8); __m256 vacc1x01234567 = _mm256_load_ps(acc + 16); __m256 vacc1x89ABCDEF = _mm256_load_ps(acc + 24); __m256 vacc2x01234567 = _mm256_load_ps(acc + 32); __m256 vacc2x89ABCDEF = _mm256_load_ps(acc + 40); acc += 48; size_t k = kc; do { const __m256 va0 = _mm256_broadcast_ss(a0); a0 += 1; const __m256 va1 = _mm256_broadcast_ss(a1); a1 += 1; const __m256 va2 = _mm256_broadcast_ss(a2); a2 += 1; const __m256 vb01234567 = _mm256_load_ps(w); const __m256 vb89ABCDEF = _mm256_load_ps(w + 8); w += 16; vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567); vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567); vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567); 
vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF); vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF); vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF); k -= sizeof(float); } while (k != 0); const __m256 vmin = _mm256_load_ps(params->avx.min); vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567); vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567); vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567); vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF); vacc1x89ABCDEF = _mm256_max_ps(vmin, vacc1x89ABCDEF); vacc2x89ABCDEF = _mm256_max_ps(vmin, vacc2x89ABCDEF); const __m256 vmax = _mm256_load_ps(params->avx.max); vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567); vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567); vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567); vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF); vacc1x89ABCDEF = _mm256_min_ps(vmax, vacc1x89ABCDEF); vacc2x89ABCDEF = _mm256_min_ps(vmax, vacc2x89ABCDEF); if XNN_LIKELY(nc >= 16) { _mm256_storeu_ps(c2, vacc2x01234567); _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF); c2 = (float*) ((uintptr_t) c2 + cn_stride); _mm256_storeu_ps(c1, vacc1x01234567); _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF); c1 = (float*) ((uintptr_t) c1 + cn_stride); _mm256_storeu_ps(c0, vacc0x01234567); _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF); c0 = (float*) ((uintptr_t) c0 + cn_stride); a2 = (const float*) ((uintptr_t) a2 - kc); a1 = (const float*) ((uintptr_t) a1 - kc); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 16; } else { if (nc & 8) { _mm256_storeu_ps(c2, vacc2x01234567); _mm256_storeu_ps(c1, vacc1x01234567); _mm256_storeu_ps(c0, vacc0x01234567); vacc2x01234567 = vacc2x89ABCDEF; vacc1x01234567 = vacc1x89ABCDEF; vacc0x01234567 = vacc0x89ABCDEF; c2 += 8; c1 += 8; c0 += 8; } __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567); __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567); __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567); if (nc & 4) { 
_mm_storeu_ps(c2, vacc2x0123); _mm_storeu_ps(c1, vacc1x0123); _mm_storeu_ps(c0, vacc0x0123); vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1); vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1); vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1); c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { _mm_storel_pi((__m64*) c2, vacc2x0123); _mm_storel_pi((__m64*) c1, vacc1x0123); _mm_storel_pi((__m64*) c0, vacc0x0123); vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123); vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123); vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { _mm_store_ss(c2, vacc2x0123); _mm_store_ss(c1, vacc1x0123); _mm_store_ss(c0, vacc0x0123); } nc = 0; } } while (nc != 0); }
5,454
30.715116
75
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-3x8-minmax-sse-dup.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/sse-dup.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/gemm.h>


// f32 GEMMINC microkernel: 3x8 output tile, SSE, "dup" variant (the main
// loop processes 4 K elements at a time, broadcasting each A lane with
// _mm_shuffle_ps). Starts from partial accumulators in `acc` (GEMMINC =
// GEMM with initial accumulators instead of bias), then clamps the result
// to [params->sse.min, params->sse.max].
//
//   mr        - rows of A/C actually computed (1 <= mr <= 3)
//   nc        - columns of C remaining (any positive count)
//   kc        - reduction length in BYTES (multiple of sizeof(float))
//   a/a_stride- input matrix A and its row stride in bytes
//   w         - packed weights, 8 floats per K step (16-byte aligned)
//   c/cm_stride/cn_stride - output matrix C, row stride and column-group
//               stride in bytes
//   acc       - partial accumulators, 24 floats per 3x8 tile (16-byte aligned)
//   params    - min/max clamping constants
void xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  // Per-row input/output pointers. When mr is smaller than the tile height,
  // the surplus rows alias the previous row so loads/stores stay valid
  // (the duplicated rows produce results that are simply ignored).
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  do {
    // Load the 3x8 tile of partial accumulators (two 4-float registers/row).
    __m128 vacc0x0123 = _mm_load_ps(acc + 0);
    __m128 vacc0x4567 = _mm_load_ps(acc + 4);
    __m128 vacc1x0123 = _mm_load_ps(acc + 8);
    __m128 vacc1x4567 = _mm_load_ps(acc + 12);
    __m128 vacc2x0123 = _mm_load_ps(acc + 16);
    __m128 vacc2x4567 = _mm_load_ps(acc + 20);
    acc += 24;

    size_t k = kc;
    // Main loop: 4 K elements per iteration. Each lane of the A vectors is
    // broadcast with _mm_shuffle_ps and multiplied against the matching
    // 8-float weight group.
    while (k >= 4 * sizeof(float)) {
      const __m128 va0 = _mm_loadu_ps(a0);
      a0 += 4;
      const __m128 va1 = _mm_loadu_ps(a1);
      a1 += 4;
      const __m128 va2 = _mm_loadu_ps(a2);
      a2 += 4;

      // K step 0: broadcast lane 0 of each row.
      const __m128 va0c0000 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 0, 0, 0));
      const __m128 va1c0000 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 0, 0, 0));
      const __m128 va2c0000 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 0, 0, 0));

      const __m128 vb0123c0 = _mm_load_ps(w + 0);
      const __m128 vb4567c0 = _mm_load_ps(w + 4);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));

      // K step 1: broadcast lane 1.
      const __m128 va0c1111 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(1, 1, 1, 1));
      const __m128 va1c1111 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(1, 1, 1, 1));
      const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));

      const __m128 vb0123c1 = _mm_load_ps(w + 8);
      const __m128 vb4567c1 = _mm_load_ps(w + 12);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));

      // K step 2: broadcast lane 2.
      const __m128 va0c2222 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(2, 2, 2, 2));
      const __m128 va1c2222 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(2, 2, 2, 2));
      const __m128 va2c2222 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(2, 2, 2, 2));

      const __m128 vb0123c2 = _mm_load_ps(w + 16);
      const __m128 vb4567c2 = _mm_load_ps(w + 20);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));

      // K step 3: broadcast lane 3.
      const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
      const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
      const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));

      const __m128 vb0123c3 = _mm_load_ps(w + 24);
      const __m128 vb4567c3 = _mm_load_ps(w + 28);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder loop: one K element at a time, broadcast-loaded.
    if XNN_UNLIKELY(k != 0) {
      do {
        const __m128 va0 = _mm_load1_ps(a0);
        a0 += 1;
        const __m128 va1 = _mm_load1_ps(a1);
        a1 += 1;
        const __m128 va2 = _mm_load1_ps(a2);
        a2 += 1;

        const __m128 vb0123 = _mm_load_ps(w);
        const __m128 vb4567 = _mm_load_ps(w + 4);
        w += 8;

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));

        k -= sizeof(float);
      } while (k != 0);
    }

    // Clamp the accumulators to [min, max].
    const __m128 vmax = _mm_load_ps(params->sse.max);
    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store; rewind the A pointers for the next column group.
      _mm_storeu_ps(c2, vacc2x0123);
      _mm_storeu_ps(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm_storeu_ps(c1, vacc1x0123);
      _mm_storeu_ps(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm_storeu_ps(c0, vacc0x0123);
      _mm_storeu_ps(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail store: write nc (< 8) columns by successively halving.
      if (nc & 4) {
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}
8,044
34.597345
80
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-3x8-minmax-sse-load1.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/sse-load1.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/gemm.h>


// f32 GEMMINC microkernel: 3x8 output tile, SSE, "load1" variant (processes
// one K element per iteration with a broadcast load). Starts from partial
// accumulators in `acc` and clamps the result to
// [params->sse.min, params->sse.max].
//
//   mr        - rows of A/C actually computed (1 <= mr <= 3)
//   nc        - columns of C remaining
//   kc        - reduction length in BYTES (multiple of sizeof(float))
//   a/a_stride- input matrix A and its row stride in bytes
//   w         - packed weights, 8 floats per K step (16-byte aligned)
//   c/cm_stride/cn_stride - output matrix C, row/column-group strides in bytes
//   acc       - partial accumulators, 24 floats per tile (16-byte aligned)
//   params    - min/max clamping constants
void xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  // Per-row pointers; surplus rows alias the previous row when mr < 3.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  do {
    // Load the 3x8 tile of partial accumulators.
    __m128 vacc0x0123 = _mm_load_ps(acc + 0);
    __m128 vacc0x4567 = _mm_load_ps(acc + 4);
    __m128 vacc1x0123 = _mm_load_ps(acc + 8);
    __m128 vacc1x4567 = _mm_load_ps(acc + 12);
    __m128 vacc2x0123 = _mm_load_ps(acc + 16);
    __m128 vacc2x4567 = _mm_load_ps(acc + 20);
    acc += 24;

    // One K element per iteration: broadcast A lanes, multiply-accumulate
    // against the 8-float weight group.
    size_t k = kc;
    do {
      const __m128 va0 = _mm_load1_ps(a0);
      a0 += 1;
      const __m128 va1 = _mm_load1_ps(a1);
      a1 += 1;
      const __m128 va2 = _mm_load1_ps(a2);
      a2 += 1;

      const __m128 vb0123 = _mm_load_ps(w);
      const __m128 vb4567 = _mm_load_ps(w + 4);
      w += 8;

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));

      k -= sizeof(float);
    } while (k != 0);

    // Clamp the accumulators to [min, max].
    const __m128 vmax = _mm_load_ps(params->sse.max);
    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store; rewind A pointers for the next column group.
      _mm_storeu_ps(c2, vacc2x0123);
      _mm_storeu_ps(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm_storeu_ps(c1, vacc1x0123);
      _mm_storeu_ps(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm_storeu_ps(c0, vacc0x0123);
      _mm_storeu_ps(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail store: write nc (< 8) columns by successively halving.
      if (nc & 4) {
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}
4,523
28
75
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-3x8-minmax-sse2-dup.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/sse-dup.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/gemm.h>


// f32 GEMMINC microkernel: 3x8 output tile, SSE2, "dup" variant. Same
// structure as the SSE dup kernel, but lanes 0..2 are broadcast through the
// SSE2 integer shuffle (_mm_shuffle_epi32 via casts); lane 3 uses the float
// shuffle. Starts from partial accumulators in `acc`, clamps the result to
// [params->sse.min, params->sse.max].
//
//   mr        - rows of A/C actually computed (1 <= mr <= 3)
//   nc        - columns of C remaining
//   kc        - reduction length in BYTES (multiple of sizeof(float))
//   a/a_stride- input matrix A and its row stride in bytes
//   w         - packed weights, 8 floats per K step (16-byte aligned)
//   c/cm_stride/cn_stride - output matrix C, row/column-group strides in bytes
//   acc       - partial accumulators, 24 floats per tile (16-byte aligned)
//   params    - min/max clamping constants
void xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  // Per-row pointers; surplus rows alias the previous row when mr < 3.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  do {
    // Load the 3x8 tile of partial accumulators.
    __m128 vacc0x0123 = _mm_load_ps(acc + 0);
    __m128 vacc0x4567 = _mm_load_ps(acc + 4);
    __m128 vacc1x0123 = _mm_load_ps(acc + 8);
    __m128 vacc1x4567 = _mm_load_ps(acc + 12);
    __m128 vacc2x0123 = _mm_load_ps(acc + 16);
    __m128 vacc2x4567 = _mm_load_ps(acc + 20);
    acc += 24;

    size_t k = kc;
    // Main loop: 4 K elements per iteration.
    while (k >= 4 * sizeof(float)) {
      const __m128 va0 = _mm_loadu_ps(a0);
      a0 += 4;
      const __m128 va1 = _mm_loadu_ps(a1);
      a1 += 4;
      const __m128 va2 = _mm_loadu_ps(a2);
      a2 += 4;

      // K step 0: broadcast lane 0 via integer shuffle.
      const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
      const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
      const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));

      const __m128 vb0123c0 = _mm_load_ps(w + 0);
      const __m128 vb4567c0 = _mm_load_ps(w + 4);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));

      // K step 1: broadcast lane 1 via integer shuffle.
      const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
      const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
      const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));

      const __m128 vb0123c1 = _mm_load_ps(w + 8);
      const __m128 vb4567c1 = _mm_load_ps(w + 12);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));

      // K step 2: broadcast lane 2 via integer shuffle.
      const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
      const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
      const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));

      const __m128 vb0123c2 = _mm_load_ps(w + 16);
      const __m128 vb4567c2 = _mm_load_ps(w + 20);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));

      // K step 3: broadcast lane 3 via the float shuffle.
      const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
      const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
      const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));

      const __m128 vb0123c3 = _mm_load_ps(w + 24);
      const __m128 vb4567c3 = _mm_load_ps(w + 28);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder loop: one K element at a time.
    if XNN_UNLIKELY(k != 0) {
      do {
        const __m128 va0 = _mm_load1_ps(a0);
        a0 += 1;
        const __m128 va1 = _mm_load1_ps(a1);
        a1 += 1;
        const __m128 va2 = _mm_load1_ps(a2);
        a2 += 1;

        const __m128 vb0123 = _mm_load_ps(w);
        const __m128 vb4567 = _mm_load_ps(w + 4);
        w += 8;

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));

        k -= sizeof(float);
      } while (k != 0);
    }

    // Clamp the accumulators to [min, max].
    const __m128 vmax = _mm_load_ps(params->sse.max);
    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store; rewind A pointers for the next column group.
      _mm_storeu_ps(c2, vacc2x0123);
      _mm_storeu_ps(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm_storeu_ps(c1, vacc1x0123);
      _mm_storeu_ps(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm_storeu_ps(c0, vacc0x0123);
      _mm_storeu_ps(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail store: write nc (< 8) columns by successively halving.
      if (nc & 4) {
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}
8,351
35.955752
114
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-3x8-minmax-wasmrelaxedsimd-fma-loadsplat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


// f32 GEMMINC microkernel: 3x8 output tile, WebAssembly Relaxed SIMD with
// fused multiply-add, "loadsplat" variant (one K element per iteration via
// wasm_v128_load32_splat). Starts from partial accumulators in `acc` and
// clamps the result to [params->wasmsimd.min, params->wasmsimd.max] using
// the relaxed min/max instructions.
//
//   mr        - rows of A/C actually computed (1 <= mr <= 3)
//   nc        - columns of C remaining
//   kc        - reduction length in BYTES (multiple of sizeof(float))
//   a/a_stride- input matrix A and its row stride in bytes
//   w         - packed weights, 8 floats per K step
//   c/cm_stride/cn_stride - output matrix C, row/column-group strides in bytes
//   acc       - partial accumulators, 24 floats per tile
//   params    - min/max clamping constants
void xnn_f32_gemminc_minmax_ukernel_3x8__wasmrelaxedsimd_fma_loadsplat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  // Per-row pointers; surplus rows alias the previous row when mr < 3.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  // Clamp bounds are loop-invariant; hoist them out of the nc loop.
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Load the 3x8 tile of partial accumulators.
    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
    acc += 24;

    // One K element per iteration: splat-load each row's scalar, then fuse
    // multiply-add against the 8-float weight group.
    size_t k = kc;
    do {
      const v128_t va0 = wasm_v128_load32_splat(a0);
      a0 += 1;
      const v128_t va1 = wasm_v128_load32_splat(a1);
      a1 += 1;
      const v128_t va2 = wasm_v128_load32_splat(a2);
      a2 += 1;

      const v128_t vb0123 = wasm_v128_load(w);
      const v128_t vb4567 = wasm_v128_load(w + 4);
      w += 8;

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);

      k -= sizeof(float);
    } while (k != 0);

    // Clamp the accumulators to [min, max].
    vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
    vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
    vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
    vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
    vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
    vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);

    vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
    vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
    vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
    vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
    vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
    vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store; rewind A pointers for the next column group.
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail store: write nc (< 8) columns by successively halving.
      if (nc & 4) {
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
5,066
31.480769
78
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-3x8-minmax-wasmrelaxedsimd-fma-splat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


// f32 GEMMINC microkernel: 3x8 output tile, WebAssembly Relaxed SIMD with
// fused multiply-add, "splat" variant (the main loop consumes 4 K elements
// per iteration, broadcasting each A lane with wasm_v32x4_shuffle). Starts
// from partial accumulators in `acc` and clamps the result to
// [params->wasmsimd.min, params->wasmsimd.max].
//
//   mr        - rows of A/C actually computed (1 <= mr <= 3)
//   nc        - columns of C remaining
//   kc        - reduction length in BYTES (multiple of sizeof(float))
//   a/a_stride- input matrix A and its row stride in bytes
//   w         - packed weights, 8 floats per K step
//   c/cm_stride/cn_stride - output matrix C, row/column-group strides in bytes
//   acc       - partial accumulators, 24 floats per tile
//   params    - min/max clamping constants
void xnn_f32_gemminc_minmax_ukernel_3x8__wasmrelaxedsimd_fma_splat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  // Per-row pointers; surplus rows alias the previous row when mr < 3.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  // Clamp bounds are loop-invariant; hoist them out of the nc loop.
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Load the 3x8 tile of partial accumulators.
    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
    acc += 24;

    size_t k = kc;
    // Main loop: 4 K elements per iteration; each lane of the A vectors is
    // splatted and fused-multiply-added against its 8-float weight group.
    while (k >= 4 * sizeof(float)) {
      const v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      const v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      const v128_t va2 = wasm_v128_load(a2);
      a2 += 4;

      // K step 0: broadcast lane 0.
      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);

      // K step 1: broadcast lane 1.
      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);

      // K step 2: broadcast lane 2.
      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);

      // K step 3: broadcast lane 3.
      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
      vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
      vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
      vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
      vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
      vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder loop: one K element at a time via splat loads.
    if XNN_UNLIKELY(k != 0) {
      do {
        const v128_t va0 = wasm_v128_load32_splat(a0);
        a0 += 1;
        const v128_t va1 = wasm_v128_load32_splat(a1);
        a1 += 1;
        const v128_t va2 = wasm_v128_load32_splat(a2);
        a2 += 1;

        const v128_t vb0123 = wasm_v128_load(w);
        const v128_t vb4567 = wasm_v128_load(w + 4);
        w += 8;

        vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
        vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
        vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
        vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
        vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
        vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);

        k -= sizeof(float);
      } while (k != 0);
    }

    // Clamp the accumulators to [min, max].
    vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
    vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
    vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
    vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
    vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
    vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);

    vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
    vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
    vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
    vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
    vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
    vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store; rewind A pointers for the next column group.
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail store: write nc (< 8) columns by successively halving.
      if (nc & 4) {
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
8,656
38.171946
82
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-3x8-minmax-wasmrelaxedsimd-loadsplat.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/wasmsimd-loadsplat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/gemm.h> void xnn_f32_gemminc_minmax_ukernel_3x8__wasmrelaxedsimd_loadsplat( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const float* restrict acc, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); assert(acc != NULL); const float* a0 = a; float* c0 = c; const float* a1 = (const float*) ((uintptr_t) a0 + a_stride); float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const float* a2 = (const float*) ((uintptr_t) a1 + a_stride); float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(acc + 0); v128_t vacc0x4567 = wasm_v128_load(acc + 4); v128_t vacc1x0123 = wasm_v128_load(acc + 8); v128_t vacc1x4567 = wasm_v128_load(acc + 12); v128_t vacc2x0123 = wasm_v128_load(acc + 16); v128_t vacc2x4567 = wasm_v128_load(acc + 20); acc += 24; size_t k = kc; do { const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123)); 
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567)); vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123)); vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567)); vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123)); vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567)); k -= sizeof(float); } while (k != 0); vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567); vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a2 = (const float*) ((uintptr_t) a2 - kc); a1 = (const float*) ((uintptr_t) a1 - kc); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); 
wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
5,044
31.339744
75
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-3x8-minmax-wasmrelaxedsimd-splat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

// 3x8 f32 GEMM micro-kernel with caller-supplied initial accumulators
// (gemminc): computes a 3-row-by-8-column output tile as
// clamp(acc + A*B, min, max) using WebAssembly Relaxed SIMD.
// The "splat" flavor loads 4 A elements per row at once and broadcasts
// each lane by shuffle, processing 4 reduction steps per iteration.
void xnn_f32_gemminc_minmax_ukernel_3x8__wasmrelaxedsimd_splat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  // Per-row input/output pointers; rows beyond mr alias the previous row so
  // the kernel always computes 3 rows branch-free (extra rows never stored).
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  // Output clamping bounds, broadcast to all lanes.
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Seed the 3x8 tile from the caller-provided accumulator buffer.
    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
    acc += 24;

    size_t k = kc;
    // Main loop: 4 reduction steps per iteration. Load 4 A values per row,
    // then for each of the 4 lanes broadcast it and multiply against the
    // matching 8 packed B weights.
    while (k >= 4 * sizeof(float)) {
      const v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      const v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      const v128_t va2 = wasm_v128_load(a2);
      a2 += 4;

      // Step 0: broadcast lane 0 of each A vector.
      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);

      // Step 1: lane 1.
      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);

      // Step 2: lane 2.
      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);

      // Step 3: lane 3.
      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder loop: one reduction step at a time (kc not a multiple of 4).
    if XNN_UNLIKELY(k != 0) {
      do {
        const v128_t va0 = wasm_v128_load32_splat(a0);
        a0 += 1;
        const v128_t va1 = wasm_v128_load32_splat(a1);
        a1 += 1;
        const v128_t va2 = wasm_v128_load32_splat(a2);
        a2 += 1;

        const v128_t vb0123 = wasm_v128_load(w);
        const v128_t vb4567 = wasm_v128_load(w + 4);
        w += 8;

        vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
        vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
        vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
        vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
        vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
        vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);

        k -= sizeof(float);
      } while (k != 0);
    }

    // Clamp with relaxed min/max (NaN/signed-zero semantics unspecified).
    vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
    vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
    vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
    vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
    vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
    vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);

    vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
    vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
    vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
    vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
    vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
    vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column tile: store all rows, rewind A for the next column block.
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial tile: peel off 4, then 2, then 1 remaining columns,
      // shifting surviving lanes down after each store.
      if (nc & 4) {
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
8,562
37.746606
79
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-3x8-minmax-wasmsimd-arm-loadsplat.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/wasmsimd-loadsplat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/gemm.h> void xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const float* restrict acc, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); assert(acc != NULL); const float* a0 = a; float* c0 = c; const float* a1 = (const float*) ((uintptr_t) a0 + a_stride); float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const float* a2 = (const float*) ((uintptr_t) a1 + a_stride); float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(acc + 0); v128_t vacc0x4567 = wasm_v128_load(acc + 4); v128_t vacc1x0123 = wasm_v128_load(acc + 8); v128_t vacc1x4567 = wasm_v128_load(acc + 12); v128_t vacc2x0123 = wasm_v128_load(acc + 16); v128_t vacc2x4567 = wasm_v128_load(acc + 20); acc += 24; size_t k = kc; do { const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123)); 
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567)); vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123)); vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567)); vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123)); vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567)); k -= sizeof(float); } while (k != 0); vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123); vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123); vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123); vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567); vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567); vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567); vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123); vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123); vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123); vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567); vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567); vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a2 = (const float*) ((uintptr_t) a2 - kc); a1 = (const float*) ((uintptr_t) a1 - kc); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, 
vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
4,825
29.935897
75
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-3x8-minmax-wasmsimd-arm-splat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

// 3x8 f32 GEMM micro-kernel with caller-supplied initial accumulators
// (gemminc): computes a 3-row-by-8-column output tile as
// clamp(acc + A*B, min, max) using WebAssembly SIMD (ARM-tuned variant,
// IEEE-style wasm_f32x4_max/min for clamping). The "splat" flavor loads
// 4 A elements per row at once and broadcasts each lane by shuffle,
// processing 4 reduction steps per iteration.
void xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  // Per-row input/output pointers; rows beyond mr alias the previous row so
  // the kernel always computes 3 rows branch-free (extra rows never stored).
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  // Output clamping bounds, broadcast to all lanes.
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // Seed the 3x8 tile from the caller-provided accumulator buffer.
    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
    acc += 24;

    size_t k = kc;
    // Main loop: 4 reduction steps per iteration. Load 4 A values per row,
    // then for each lane broadcast it and multiply against the matching
    // 8 packed B weights.
    while (k >= 4 * sizeof(float)) {
      const v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      const v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      const v128_t va2 = wasm_v128_load(a2);
      a2 += 4;

      // Step 0: broadcast lane 0 of each A vector.
      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);

      // Step 1: lane 1.
      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);

      // Step 2: lane 2.
      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);

      // Step 3: lane 3.
      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder loop: one reduction step at a time (kc not a multiple of 4).
    if XNN_UNLIKELY(k != 0) {
      do {
        const v128_t va0 = wasm_v128_load32_splat(a0);
        a0 += 1;
        const v128_t va1 = wasm_v128_load32_splat(a1);
        a1 += 1;
        const v128_t va2 = wasm_v128_load32_splat(a2);
        a2 += 1;

        const v128_t vb0123 = wasm_v128_load(w);
        const v128_t vb4567 = wasm_v128_load(w + 4);
        w += 8;

        vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
        vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
        vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
        vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
        vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
        vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);

        k -= sizeof(float);
      } while (k != 0);
    }

    // Clamp into [min, max].
    vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
    vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
    vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
    vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
    vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
    vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);

    vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
    vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
    vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
    vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
    vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
    vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column tile: store all rows, rewind A for the next column block.
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Partial tile: peel off 4, then 2, then 1 remaining columns,
      // shifting surviving lanes down after each store.
      if (nc & 4) {
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
8,343
36.755656
79
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-3x8-minmax-wasmsimd-x86-loadsplat.c
// Auto-generated file. Do not edit! // Template: src/f32-gemm/wasmsimd-loadsplat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/gemm.h> void xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat( size_t mr, size_t nc, size_t kc, const float* restrict a, size_t a_stride, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, const float* restrict acc, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); assert(acc != NULL); const float* a0 = a; float* c0 = c; const float* a1 = (const float*) ((uintptr_t) a0 + a_stride); float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const float* a2 = (const float*) ((uintptr_t) a1 + a_stride); float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(acc + 0); v128_t vacc0x4567 = wasm_v128_load(acc + 4); v128_t vacc1x0123 = wasm_v128_load(acc + 8); v128_t vacc1x4567 = wasm_v128_load(acc + 12); v128_t vacc2x0123 = wasm_v128_load(acc + 16); v128_t vacc2x4567 = wasm_v128_load(acc + 20); acc += 24; size_t k = kc; do { const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123)); 
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567)); vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123)); vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567)); vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123)); vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567)); k -= sizeof(float); } while (k != 0); vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123); vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123); vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123); vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567); vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567); vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567); vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123); vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123); vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123); vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567); vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567); vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a2 = (const float*) ((uintptr_t) a2 - kc); a1 = (const float*) ((uintptr_t) a1 - kc); a0 = (const float*) ((uintptr_t) a0 - kc); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = 
wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
4,837
30.012821
75
c
XNNPACK
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-3x8-minmax-wasmsimd-x86-splat.c
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


// 3x8 single-precision GEMMINC micro-kernel (GEMM with externally supplied
// initial accumulators) with min/max output clamping, implemented with
// WebAssembly SIMD128 intrinsics. The "_x86" in the name reflects the use of
// wasm_f32x4_pmax/pmin for the clamp — presumably the form that lowers best on
// x86 hosts (NOTE(review): inferred from the variant name; confirm against the
// template).
//
// Each outer iteration produces an mr x 8 tile of C (mr <= 3):
//   C[tile] = clamp(acc[tile] + A[mr x kc] * W[kc x 8], min, max)
//
//   mr        - number of rows of A/C actually used (1..3)
//   nc        - number of output columns remaining across calls of the outer loop
//   kc        - reduction (K) dimension, in BYTES; must be a multiple of
//               sizeof(float)
//   a         - input matrix A; rows are a_stride bytes apart
//   w         - packed weights; 8 floats consumed per K step
//   c         - output matrix C; rows are cm_stride bytes apart, consecutive
//               8-column groups are cn_stride bytes apart
//   acc       - initial accumulator values; 24 floats (3 rows x 8 columns)
//               consumed per 8-column group
//   params    - supplies the scalar min/max clamping bounds (broadcast below)
void xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  // Set up per-row input/output pointers. When mr is smaller than 3, the
  // unused row pointers alias the previous row, so their loads/stores are
  // redundant but harmless (row 0/1 results are simply recomputed/rewritten).
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  // Broadcast the clamping bounds once, outside the column loop.
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    // GEMMINC: start from caller-provided accumulators instead of bias
    // (3 rows x 8 columns = 24 floats per column group).
    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
    acc += 24;

    // Main K loop, unrolled by 4: load 4 floats from each A row, then for each
    // of the 4 K positions splat the A element across a vector (via shuffle)
    // and multiply-accumulate against two 4-wide weight vectors.
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      const v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      const v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      const v128_t va2 = wasm_v128_load(a2);
      a2 += 4;

      // K position 0
      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);

      // K position 1
      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);

      // K position 2
      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);

      // K position 3
      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
      vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
      vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
      vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
      vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
      vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);

      w += 32;
      k -= 4 * sizeof(float);
    }
    // Remainder K loop: one float per A row per iteration, broadcast with a
    // single splat-load.
    if XNN_UNLIKELY(k != 0) {
      do {
        const v128_t va0 = wasm_v128_load32_splat(a0);
        a0 += 1;
        const v128_t va1 = wasm_v128_load32_splat(a1);
        a1 += 1;
        const v128_t va2 = wasm_v128_load32_splat(a2);
        a2 += 1;

        const v128_t vb0123 = wasm_v128_load(w);
        const v128_t vb4567 = wasm_v128_load(w + 4);
        w += 8;

        vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
        vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
        vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
        vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
        vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
        vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);

        k -= sizeof(float);
      } while (k != 0);
    }

    // Clamp every accumulator to [min, max] using pmax/pmin.
    vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
    vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
    vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
    vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
    vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
    vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);

    vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
    vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
    vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
    vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
    vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
    vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);

    if XNN_LIKELY(nc >= 8) {
      // Full 8-column store; advance C to the next column group and rewind the
      // A pointers by kc bytes so the next group re-reads the same rows of A.
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      // Tail: store the remaining 1..7 columns by decomposing nc into 4/2/1,
      // shifting surviving lanes down after each partial store.
      if (nc & 4) {
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        // Move the upper 64 bits (lanes 2-3) down into the lower half.
        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
8,355
36.809955
79
c