Dataset schema (column name, type, observed value range):

repo             string, 1 to 152 characters
file             string, 14 to 221 characters
code             string, 501 to 25k characters
file_length      int64, 501 to 25k
avg_line_length  float64, 20 to 99.5
max_line_length  int64, 21 to 134
extension_type   string, 2 classes
XNNPACK
XNNPACK-master/src/f32-ibilinear/gen/f32-ibilinear-neon-c4.c
// Auto-generated file. Do not edit! // Template: src/f32-ibilinear/neon.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/ibilinear.h> void xnn_f32_ibilinear_ukernel__neon_c4( size_t output_pixels, size_t channels, const float** restrict input, size_t input_offset, const float* restrict weights, float* restrict output, size_t output_increment) XNN_OOB_READS { assert(output_pixels != 0); assert(channels != 0); assert(channels % sizeof(float) == 0); do { const float* i0 = (const float*) ((uintptr_t) input[0] + input_offset); const float* i1 = (const float*) ((uintptr_t) input[1] + input_offset); const float* i2 = (const float*) ((uintptr_t) input[2] + input_offset); const float* i3 = (const float*) ((uintptr_t) input[3] + input_offset); input += 4; const float32x2_t valphahv = vld1_f32(weights); weights += 2; size_t c = channels; for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) { const float32x4_t vtl0123 = vld1q_f32(i0); i0 += 4; const float32x4_t vtr0123 = vld1q_f32(i1); i1 += 4; const float32x4_t vbl0123 = vld1q_f32(i2); i2 += 4; const float32x4_t vbr0123 = vld1q_f32(i3); i3 += 4; const float32x4_t vtd0123 = vsubq_f32(vtr0123, vtl0123); const float32x4_t vbd0123 = vsubq_f32(vbr0123, vbl0123); const float32x4_t vt0123 = vmlaq_lane_f32(vtl0123, vtd0123, valphahv, 0); const float32x4_t vb0123 = vmlaq_lane_f32(vbl0123, vbd0123, valphahv, 0); const float32x4_t vd0123 = vsubq_f32(vb0123, vt0123); const float32x4_t vo0123 = vmlaq_lane_f32(vt0123, vd0123, valphahv, 1); vst1q_f32(output, vo0123); output += 4; } if XNN_UNLIKELY(c != 0) { const float32x4_t vtl0123 = vld1q_f32(i0); const float32x4_t vtr0123 = vld1q_f32(i1); const float32x4_t vbl0123 = vld1q_f32(i2); const float32x4_t vbr0123 = vld1q_f32(i3); const float32x4_t vtd0123 = vsubq_f32(vtr0123, vtl0123); const float32x4_t vbd0123 = vsubq_f32(vbr0123, vbl0123); const float32x4_t vt0123 = vmlaq_lane_f32(vtl0123, vtd0123, valphahv, 0); const float32x4_t vb0123 = vmlaq_lane_f32(vbl0123, vbd0123, valphahv, 0); const float32x4_t vd0123 = vsubq_f32(vb0123, vt0123); const float32x4_t vo0123 = vmlaq_lane_f32(vt0123, vd0123, valphahv, 1); float32x2_t vo01 = vget_low_f32(vo0123); if (c & (2 * sizeof(float))) { vst1_f32(output, vo01); output += 2; vo01 = vget_high_f32(vo0123); } if (c & (1 * sizeof(float))) { vst1_lane_f32(output, vo01, 0); output += 1; } } output = (float*) ((uintptr_t) output + output_increment); } while (--output_pixels != 0); }
2,965
32.704545
81
c
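All of the f32-ibilinear kernels in this family (the NEON kernel above and the scalar, SSE, and WASM SIMD variants that follow) evaluate the same two-stage blend per channel: interpolate horizontally between the top-left/top-right and bottom-left/bottom-right samples with alphah, then vertically between the two intermediate results with alphav. The standalone sketch below restates that arithmetic for a single value so the vectorized code is easier to follow; reference_bilinear and the sample numbers are illustrative only and not part of XNNPACK.

#include <stdio.h>

// Reference for one output value: tl/tr/bl/br are the four corner samples,
// alphah/alphav are the two weights the kernels load from `weights` per pixel.
static float reference_bilinear(float tl, float tr, float bl, float br,
                                float alphah, float alphav) {
  const float t = tl + (tr - tl) * alphah;  // top edge, matches vt in the kernels
  const float b = bl + (br - bl) * alphah;  // bottom edge, matches vb
  return t + (b - t) * alphav;              // vertical blend, matches vo
}

int main(void) {
  // A quarter of the way right, halfway down: top = 1.0, bottom = 9.0, output = 5.0.
  printf("%f\n", reference_bilinear(0.0f, 4.0f, 8.0f, 12.0f, 0.25f, 0.5f));
  return 0;
}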
XNNPACK
XNNPACK-master/src/f32-ibilinear/gen/f32-ibilinear-neon-c8.c
// Auto-generated file. Do not edit! // Template: src/f32-ibilinear/neon.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/ibilinear.h> void xnn_f32_ibilinear_ukernel__neon_c8( size_t output_pixels, size_t channels, const float** restrict input, size_t input_offset, const float* restrict weights, float* restrict output, size_t output_increment) XNN_OOB_READS { assert(output_pixels != 0); assert(channels != 0); assert(channels % sizeof(float) == 0); do { const float* i0 = (const float*) ((uintptr_t) input[0] + input_offset); const float* i1 = (const float*) ((uintptr_t) input[1] + input_offset); const float* i2 = (const float*) ((uintptr_t) input[2] + input_offset); const float* i3 = (const float*) ((uintptr_t) input[3] + input_offset); input += 4; const float32x2_t valphahv = vld1_f32(weights); weights += 2; size_t c = channels; for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) { const float32x4_t vtl0123 = vld1q_f32(i0); i0 += 4; const float32x4_t vtr0123 = vld1q_f32(i1); i1 += 4; const float32x4_t vbl0123 = vld1q_f32(i2); i2 += 4; const float32x4_t vbr0123 = vld1q_f32(i3); i3 += 4; const float32x4_t vtl4567 = vld1q_f32(i0); i0 += 4; const float32x4_t vtr4567 = vld1q_f32(i1); i1 += 4; const float32x4_t vbl4567 = vld1q_f32(i2); i2 += 4; const float32x4_t vbr4567 = vld1q_f32(i3); i3 += 4; const float32x4_t vtd0123 = vsubq_f32(vtr0123, vtl0123); const float32x4_t vbd0123 = vsubq_f32(vbr0123, vbl0123); const float32x4_t vtd4567 = vsubq_f32(vtr4567, vtl4567); const float32x4_t vbd4567 = vsubq_f32(vbr4567, vbl4567); const float32x4_t vt0123 = vmlaq_lane_f32(vtl0123, vtd0123, valphahv, 0); const float32x4_t vb0123 = vmlaq_lane_f32(vbl0123, vbd0123, valphahv, 0); const float32x4_t vt4567 = vmlaq_lane_f32(vtl4567, vtd4567, valphahv, 0); const float32x4_t vb4567 = vmlaq_lane_f32(vbl4567, vbd4567, valphahv, 0); const float32x4_t vd0123 = vsubq_f32(vb0123, vt0123); const float32x4_t vd4567 = vsubq_f32(vb4567, vt4567); const float32x4_t vo0123 = vmlaq_lane_f32(vt0123, vd0123, valphahv, 1); const float32x4_t vo4567 = vmlaq_lane_f32(vt4567, vd4567, valphahv, 1); vst1q_f32(output, vo0123); output += 4; vst1q_f32(output, vo4567); output += 4; } for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) { const float32x4_t vtl0123 = vld1q_f32(i0); i0 += 4; const float32x4_t vtr0123 = vld1q_f32(i1); i1 += 4; const float32x4_t vbl0123 = vld1q_f32(i2); i2 += 4; const float32x4_t vbr0123 = vld1q_f32(i3); i3 += 4; const float32x4_t vtd0123 = vsubq_f32(vtr0123, vtl0123); const float32x4_t vbd0123 = vsubq_f32(vbr0123, vbl0123); const float32x4_t vt0123 = vmlaq_lane_f32(vtl0123, vtd0123, valphahv, 0); const float32x4_t vb0123 = vmlaq_lane_f32(vbl0123, vbd0123, valphahv, 0); const float32x4_t vd0123 = vsubq_f32(vb0123, vt0123); const float32x4_t vo0123 = vmlaq_lane_f32(vt0123, vd0123, valphahv, 1); vst1q_f32(output, vo0123); output += 4; } if XNN_UNLIKELY(c != 0) { const float32x4_t vtl0123 = vld1q_f32(i0); const float32x4_t vtr0123 = vld1q_f32(i1); const float32x4_t vbl0123 = vld1q_f32(i2); const float32x4_t vbr0123 = vld1q_f32(i3); const float32x4_t vtd0123 = vsubq_f32(vtr0123, vtl0123); const float32x4_t vbd0123 = vsubq_f32(vbr0123, vbl0123); const float32x4_t vt0123 = vmlaq_lane_f32(vtl0123, vtd0123, valphahv, 0); const float32x4_t vb0123 = vmlaq_lane_f32(vbl0123, vbd0123, 
valphahv, 0); const float32x4_t vd0123 = vsubq_f32(vb0123, vt0123); const float32x4_t vo0123 = vmlaq_lane_f32(vt0123, vd0123, valphahv, 1); float32x2_t vo01 = vget_low_f32(vo0123); if (c & (2 * sizeof(float))) { vst1_f32(output, vo01); output += 2; vo01 = vget_high_f32(vo0123); } if (c & (1 * sizeof(float))) { vst1_lane_f32(output, vo01, 0); output += 1; } } output = (float*) ((uintptr_t) output + output_increment); } while (--output_pixels != 0); }
4,447
36.694915
81
c
XNNPACK
XNNPACK-master/src/f32-ibilinear/gen/f32-ibilinear-neonfma-c4.c
// Auto-generated file. Do not edit! // Template: src/f32-ibilinear/neon.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/ibilinear.h> void xnn_f32_ibilinear_ukernel__neonfma_c4( size_t output_pixels, size_t channels, const float** restrict input, size_t input_offset, const float* restrict weights, float* restrict output, size_t output_increment) XNN_OOB_READS { assert(output_pixels != 0); assert(channels != 0); assert(channels % sizeof(float) == 0); do { const float* i0 = (const float*) ((uintptr_t) input[0] + input_offset); const float* i1 = (const float*) ((uintptr_t) input[1] + input_offset); const float* i2 = (const float*) ((uintptr_t) input[2] + input_offset); const float* i3 = (const float*) ((uintptr_t) input[3] + input_offset); input += 4; const float32x2_t valphahv = vld1_f32(weights); weights += 2; #if XNN_ARCH_ARM const float32x4_t valphah = vdupq_lane_f32(valphahv, 0); const float32x4_t valphav = vdupq_lane_f32(valphahv, 1); #endif size_t c = channels; for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) { const float32x4_t vtl0123 = vld1q_f32(i0); i0 += 4; const float32x4_t vtr0123 = vld1q_f32(i1); i1 += 4; const float32x4_t vbl0123 = vld1q_f32(i2); i2 += 4; const float32x4_t vbr0123 = vld1q_f32(i3); i3 += 4; const float32x4_t vtd0123 = vsubq_f32(vtr0123, vtl0123); const float32x4_t vbd0123 = vsubq_f32(vbr0123, vbl0123); #if XNN_ARCH_ARM const float32x4_t vt0123 = vfmaq_f32(vtl0123, vtd0123, valphah); const float32x4_t vb0123 = vfmaq_f32(vbl0123, vbd0123, valphah); #else const float32x4_t vt0123 = vfmaq_lane_f32(vtl0123, vtd0123, valphahv, 0); const float32x4_t vb0123 = vfmaq_lane_f32(vbl0123, vbd0123, valphahv, 0); #endif const float32x4_t vd0123 = vsubq_f32(vb0123, vt0123); #if XNN_ARCH_ARM const float32x4_t vo0123 = vfmaq_f32(vt0123, vd0123, valphav); #else const float32x4_t vo0123 = vfmaq_lane_f32(vt0123, vd0123, valphahv, 1); #endif vst1q_f32(output, vo0123); output += 4; } if XNN_UNLIKELY(c != 0) { const float32x4_t vtl0123 = vld1q_f32(i0); const float32x4_t vtr0123 = vld1q_f32(i1); const float32x4_t vbl0123 = vld1q_f32(i2); const float32x4_t vbr0123 = vld1q_f32(i3); const float32x4_t vtd0123 = vsubq_f32(vtr0123, vtl0123); const float32x4_t vbd0123 = vsubq_f32(vbr0123, vbl0123); #if XNN_ARCH_ARM const float32x4_t vt0123 = vfmaq_f32(vtl0123, vtd0123, valphah); const float32x4_t vb0123 = vfmaq_f32(vbl0123, vbd0123, valphah); #else const float32x4_t vt0123 = vfmaq_lane_f32(vtl0123, vtd0123, valphahv, 0); const float32x4_t vb0123 = vfmaq_lane_f32(vbl0123, vbd0123, valphahv, 0); #endif const float32x4_t vd0123 = vsubq_f32(vb0123, vt0123); #if XNN_ARCH_ARM float32x4_t vo0123 = vfmaq_f32(vt0123, vd0123, valphav); #else float32x4_t vo0123 = vfmaq_lane_f32(vt0123, vd0123, valphahv, 1); #endif float32x2_t vo01 = vget_low_f32(vo0123); if (c & (2 * sizeof(float))) { vst1_f32(output, vo01); output += 2; vo01 = vget_high_f32(vo0123); } if (c & (1 * sizeof(float))) { vst1_lane_f32(output, vo01, 0); output += 1; } } output = (float*) ((uintptr_t) output + output_increment); } while (--output_pixels != 0); }
3,738
32.990909
81
c
XNNPACK
XNNPACK-master/src/f32-ibilinear/gen/f32-ibilinear-neonfma-c8.c
// Auto-generated file. Do not edit! // Template: src/f32-ibilinear/neon.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/ibilinear.h> void xnn_f32_ibilinear_ukernel__neonfma_c8( size_t output_pixels, size_t channels, const float** restrict input, size_t input_offset, const float* restrict weights, float* restrict output, size_t output_increment) XNN_OOB_READS { assert(output_pixels != 0); assert(channels != 0); assert(channels % sizeof(float) == 0); do { const float* i0 = (const float*) ((uintptr_t) input[0] + input_offset); const float* i1 = (const float*) ((uintptr_t) input[1] + input_offset); const float* i2 = (const float*) ((uintptr_t) input[2] + input_offset); const float* i3 = (const float*) ((uintptr_t) input[3] + input_offset); input += 4; const float32x2_t valphahv = vld1_f32(weights); weights += 2; #if XNN_ARCH_ARM const float32x4_t valphah = vdupq_lane_f32(valphahv, 0); const float32x4_t valphav = vdupq_lane_f32(valphahv, 1); #endif size_t c = channels; for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) { const float32x4_t vtl0123 = vld1q_f32(i0); i0 += 4; const float32x4_t vtr0123 = vld1q_f32(i1); i1 += 4; const float32x4_t vbl0123 = vld1q_f32(i2); i2 += 4; const float32x4_t vbr0123 = vld1q_f32(i3); i3 += 4; const float32x4_t vtl4567 = vld1q_f32(i0); i0 += 4; const float32x4_t vtr4567 = vld1q_f32(i1); i1 += 4; const float32x4_t vbl4567 = vld1q_f32(i2); i2 += 4; const float32x4_t vbr4567 = vld1q_f32(i3); i3 += 4; const float32x4_t vtd0123 = vsubq_f32(vtr0123, vtl0123); const float32x4_t vbd0123 = vsubq_f32(vbr0123, vbl0123); const float32x4_t vtd4567 = vsubq_f32(vtr4567, vtl4567); const float32x4_t vbd4567 = vsubq_f32(vbr4567, vbl4567); #if XNN_ARCH_ARM const float32x4_t vt0123 = vfmaq_f32(vtl0123, vtd0123, valphah); const float32x4_t vb0123 = vfmaq_f32(vbl0123, vbd0123, valphah); const float32x4_t vt4567 = vfmaq_f32(vtl4567, vtd4567, valphah); const float32x4_t vb4567 = vfmaq_f32(vbl4567, vbd4567, valphah); #else const float32x4_t vt0123 = vfmaq_lane_f32(vtl0123, vtd0123, valphahv, 0); const float32x4_t vb0123 = vfmaq_lane_f32(vbl0123, vbd0123, valphahv, 0); const float32x4_t vt4567 = vfmaq_lane_f32(vtl4567, vtd4567, valphahv, 0); const float32x4_t vb4567 = vfmaq_lane_f32(vbl4567, vbd4567, valphahv, 0); #endif const float32x4_t vd0123 = vsubq_f32(vb0123, vt0123); const float32x4_t vd4567 = vsubq_f32(vb4567, vt4567); #if XNN_ARCH_ARM const float32x4_t vo0123 = vfmaq_f32(vt0123, vd0123, valphav); const float32x4_t vo4567 = vfmaq_f32(vt4567, vd4567, valphav); #else const float32x4_t vo0123 = vfmaq_lane_f32(vt0123, vd0123, valphahv, 1); const float32x4_t vo4567 = vfmaq_lane_f32(vt4567, vd4567, valphahv, 1); #endif vst1q_f32(output, vo0123); output += 4; vst1q_f32(output, vo4567); output += 4; } for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) { const float32x4_t vtl0123 = vld1q_f32(i0); i0 += 4; const float32x4_t vtr0123 = vld1q_f32(i1); i1 += 4; const float32x4_t vbl0123 = vld1q_f32(i2); i2 += 4; const float32x4_t vbr0123 = vld1q_f32(i3); i3 += 4; const float32x4_t vtd0123 = vsubq_f32(vtr0123, vtl0123); const float32x4_t vbd0123 = vsubq_f32(vbr0123, vbl0123); #if XNN_ARCH_ARM const float32x4_t vt0123 = vfmaq_f32(vtl0123, vtd0123, valphah); const float32x4_t vb0123 = vfmaq_f32(vbl0123, vbd0123, valphah); #else const float32x4_t vt0123 = 
vfmaq_lane_f32(vtl0123, vtd0123, valphahv, 0); const float32x4_t vb0123 = vfmaq_lane_f32(vbl0123, vbd0123, valphahv, 0); #endif const float32x4_t vd0123 = vsubq_f32(vb0123, vt0123); #if XNN_ARCH_ARM const float32x4_t vo0123 = vfmaq_f32(vt0123, vd0123, valphav); #else const float32x4_t vo0123 = vfmaq_lane_f32(vt0123, vd0123, valphahv, 1); #endif vst1q_f32(output, vo0123); output += 4; } if XNN_UNLIKELY(c != 0) { const float32x4_t vtl0123 = vld1q_f32(i0); const float32x4_t vtr0123 = vld1q_f32(i1); const float32x4_t vbl0123 = vld1q_f32(i2); const float32x4_t vbr0123 = vld1q_f32(i3); const float32x4_t vtd0123 = vsubq_f32(vtr0123, vtl0123); const float32x4_t vbd0123 = vsubq_f32(vbr0123, vbl0123); #if XNN_ARCH_ARM const float32x4_t vt0123 = vfmaq_f32(vtl0123, vtd0123, valphah); const float32x4_t vb0123 = vfmaq_f32(vbl0123, vbd0123, valphah); #else const float32x4_t vt0123 = vfmaq_lane_f32(vtl0123, vtd0123, valphahv, 0); const float32x4_t vb0123 = vfmaq_lane_f32(vbl0123, vbd0123, valphahv, 0); #endif const float32x4_t vd0123 = vsubq_f32(vb0123, vt0123); #if XNN_ARCH_ARM float32x4_t vo0123 = vfmaq_f32(vt0123, vd0123, valphav); #else float32x4_t vo0123 = vfmaq_lane_f32(vt0123, vd0123, valphahv, 1); #endif float32x2_t vo01 = vget_low_f32(vo0123); if (c & (2 * sizeof(float))) { vst1_f32(output, vo01); output += 2; vo01 = vget_high_f32(vo0123); } if (c & (1 * sizeof(float))) { vst1_lane_f32(output, vo01, 0); output += 1; } } output = (float*) ((uintptr_t) output + output_increment); } while (--output_pixels != 0); }
5,738
36.756579
81
c
XNNPACK
XNNPACK-master/src/f32-ibilinear/gen/f32-ibilinear-scalar-c1.c
// Auto-generated file. Do not edit! // Template: src/f32-ibilinear/scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/ibilinear.h> void xnn_f32_ibilinear_ukernel__scalar_c1( size_t output_pixels, size_t channels, const float** restrict input, size_t input_offset, const float* restrict weights, float* restrict output, size_t output_increment) { assert(output_pixels != 0); assert(channels != 0); assert(channels % sizeof(float) == 0); do { const float* i0 = (const float*) ((uintptr_t) input[0] + input_offset); const float* i1 = (const float*) ((uintptr_t) input[1] + input_offset); const float* i2 = (const float*) ((uintptr_t) input[2] + input_offset); const float* i3 = (const float*) ((uintptr_t) input[3] + input_offset); input += 4; const float valphah = weights[0]; const float valphav = weights[1]; weights += 2; size_t c = channels; do { const float vtl = *i0++; const float vtr = *i1++; const float vbl = *i2++; const float vbr = *i3++; const float vtd = vtr - vtl; const float vbd = vbr - vbl; const float vt = vtl + vtd * valphah; const float vb = vbl + vbd * valphah; const float vd = vb - vt; const float vo = vt + vd * valphav; *output++ = vo; c -= sizeof(float); } while (c != 0); output = (float*) ((uintptr_t) output + output_increment); } while (--output_pixels != 0); }
1,661
24.96875
75
c
XNNPACK
XNNPACK-master/src/f32-ibilinear/gen/f32-ibilinear-scalar-c2.c
// Auto-generated file. Do not edit! // Template: src/f32-ibilinear/scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/ibilinear.h> void xnn_f32_ibilinear_ukernel__scalar_c2( size_t output_pixels, size_t channels, const float** restrict input, size_t input_offset, const float* restrict weights, float* restrict output, size_t output_increment) { assert(output_pixels != 0); assert(channels != 0); assert(channels % sizeof(float) == 0); do { const float* i0 = (const float*) ((uintptr_t) input[0] + input_offset); const float* i1 = (const float*) ((uintptr_t) input[1] + input_offset); const float* i2 = (const float*) ((uintptr_t) input[2] + input_offset); const float* i3 = (const float*) ((uintptr_t) input[3] + input_offset); input += 4; const float valphah = weights[0]; const float valphav = weights[1]; weights += 2; size_t c = channels; for (; c >= 2 * sizeof(float); c -= 2 * sizeof(float)) { const float vtl0 = i0[0]; const float vtr0 = i1[0]; const float vbl0 = i2[0]; const float vbr0 = i3[0]; const float vtl1 = i0[1]; const float vtr1 = i1[1]; const float vbl1 = i2[1]; const float vbr1 = i3[1]; i0 += 2; i1 += 2; i2 += 2; i3 += 2; const float vtd0 = vtr0 - vtl0; const float vbd0 = vbr0 - vbl0; const float vtd1 = vtr1 - vtl1; const float vbd1 = vbr1 - vbl1; const float vt0 = vtl0 + vtd0 * valphah; const float vb0 = vbl0 + vbd0 * valphah; const float vt1 = vtl1 + vtd1 * valphah; const float vb1 = vbl1 + vbd1 * valphah; const float vd0 = vb0 - vt0; const float vd1 = vb1 - vt1; const float vo0 = vt0 + vd0 * valphav; const float vo1 = vt1 + vd1 * valphav; output[0] = vo0; output[1] = vo1; output += 2; } for (; c >= sizeof(float); c -= sizeof(float)) { const float vtl = *i0++; const float vtr = *i1++; const float vbl = *i2++; const float vbr = *i3++; const float vtd = vtr - vtl; const float vbd = vbr - vbl; const float vt = vtl + vtd * valphah; const float vb = vbl + vbd * valphah; const float vd = vb - vt; const float vo = vt + vd * valphav; *output++ = vo; } output = (float*) ((uintptr_t) output + output_increment); } while (--output_pixels != 0); }
2,615
26.25
75
c
XNNPACK
XNNPACK-master/src/f32-ibilinear/gen/f32-ibilinear-scalar-c4.c
// Auto-generated file. Do not edit! // Template: src/f32-ibilinear/scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/ibilinear.h> void xnn_f32_ibilinear_ukernel__scalar_c4( size_t output_pixels, size_t channels, const float** restrict input, size_t input_offset, const float* restrict weights, float* restrict output, size_t output_increment) { assert(output_pixels != 0); assert(channels != 0); assert(channels % sizeof(float) == 0); do { const float* i0 = (const float*) ((uintptr_t) input[0] + input_offset); const float* i1 = (const float*) ((uintptr_t) input[1] + input_offset); const float* i2 = (const float*) ((uintptr_t) input[2] + input_offset); const float* i3 = (const float*) ((uintptr_t) input[3] + input_offset); input += 4; const float valphah = weights[0]; const float valphav = weights[1]; weights += 2; size_t c = channels; for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) { const float vtl0 = i0[0]; const float vtr0 = i1[0]; const float vbl0 = i2[0]; const float vbr0 = i3[0]; const float vtl1 = i0[1]; const float vtr1 = i1[1]; const float vbl1 = i2[1]; const float vbr1 = i3[1]; const float vtl2 = i0[2]; const float vtr2 = i1[2]; const float vbl2 = i2[2]; const float vbr2 = i3[2]; const float vtl3 = i0[3]; const float vtr3 = i1[3]; const float vbl3 = i2[3]; const float vbr3 = i3[3]; i0 += 4; i1 += 4; i2 += 4; i3 += 4; const float vtd0 = vtr0 - vtl0; const float vbd0 = vbr0 - vbl0; const float vtd1 = vtr1 - vtl1; const float vbd1 = vbr1 - vbl1; const float vtd2 = vtr2 - vtl2; const float vbd2 = vbr2 - vbl2; const float vtd3 = vtr3 - vtl3; const float vbd3 = vbr3 - vbl3; const float vt0 = vtl0 + vtd0 * valphah; const float vb0 = vbl0 + vbd0 * valphah; const float vt1 = vtl1 + vtd1 * valphah; const float vb1 = vbl1 + vbd1 * valphah; const float vt2 = vtl2 + vtd2 * valphah; const float vb2 = vbl2 + vbd2 * valphah; const float vt3 = vtl3 + vtd3 * valphah; const float vb3 = vbl3 + vbd3 * valphah; const float vd0 = vb0 - vt0; const float vd1 = vb1 - vt1; const float vd2 = vb2 - vt2; const float vd3 = vb3 - vt3; const float vo0 = vt0 + vd0 * valphav; const float vo1 = vt1 + vd1 * valphav; const float vo2 = vt2 + vd2 * valphav; const float vo3 = vt3 + vd3 * valphav; output[0] = vo0; output[1] = vo1; output[2] = vo2; output[3] = vo3; output += 4; } for (; c >= sizeof(float); c -= sizeof(float)) { const float vtl = *i0++; const float vtr = *i1++; const float vbl = *i2++; const float vbr = *i3++; const float vtd = vtr - vtl; const float vbd = vbr - vbl; const float vt = vtl + vtd * valphah; const float vb = vbl + vbd * valphah; const float vd = vb - vt; const float vo = vt + vd * valphav; *output++ = vo; } output = (float*) ((uintptr_t) output + output_increment); } while (--output_pixels != 0); }
3,417
27.966102
75
c
XNNPACK
XNNPACK-master/src/f32-ibilinear/gen/f32-ibilinear-sse-c4.c
// Auto-generated file. Do not edit! // Template: src/f32-ibilinear/sse.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/ibilinear.h> void xnn_f32_ibilinear_ukernel__sse_c4( size_t output_pixels, size_t channels, const float** restrict input, size_t input_offset, const float* restrict weights, float* restrict output, size_t output_increment) XNN_OOB_READS { assert(output_pixels != 0); assert(channels != 0); assert(channels % sizeof(float) == 0); do { const float* i0 = (const float*) ((uintptr_t) input[0] + input_offset); const float* i1 = (const float*) ((uintptr_t) input[1] + input_offset); const float* i2 = (const float*) ((uintptr_t) input[2] + input_offset); const float* i3 = (const float*) ((uintptr_t) input[3] + input_offset); input += 4; __m128 valphahv = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) weights); valphahv = _mm_unpacklo_ps(valphahv, valphahv); const __m128 valphah = _mm_movelh_ps(valphahv, valphahv); const __m128 valphav = _mm_movehl_ps(valphahv, valphahv); weights += 2; size_t c = channels; for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) { const __m128 vtl0123 = _mm_loadu_ps(i0); const __m128 vtr0123 = _mm_loadu_ps(i1); const __m128 vbl0123 = _mm_loadu_ps(i2); const __m128 vbr0123 = _mm_loadu_ps(i3); i0 += 4; i1 += 4; i2 += 4; i3 += 4; const __m128 vtd0123 = _mm_sub_ps(vtr0123, vtl0123); const __m128 vbd0123 = _mm_sub_ps(vbr0123, vbl0123); const __m128 vt0123 = _mm_add_ps(vtl0123, _mm_mul_ps(vtd0123, valphah)); const __m128 vb0123 = _mm_add_ps(vbl0123, _mm_mul_ps(vbd0123, valphah)); const __m128 vd0123 = _mm_sub_ps(vb0123, vt0123); const __m128 vo0123 = _mm_add_ps(vt0123, _mm_mul_ps(vd0123, valphav)); _mm_storeu_ps(output, vo0123); output += 4; } if XNN_UNLIKELY(c != 0) { const __m128 vtl0123 = _mm_loadu_ps(i0); const __m128 vtr0123 = _mm_loadu_ps(i1); const __m128 vbl0123 = _mm_loadu_ps(i2); const __m128 vbr0123 = _mm_loadu_ps(i3); const __m128 vtd0123 = _mm_sub_ps(vtr0123, vtl0123); const __m128 vbd0123 = _mm_sub_ps(vbr0123, vbl0123); const __m128 vt0123 = _mm_add_ps(vtl0123, _mm_mul_ps(vtd0123, valphah)); const __m128 vb0123 = _mm_add_ps(vbl0123, _mm_mul_ps(vbd0123, valphah)); const __m128 vd0123 = _mm_sub_ps(vb0123, vt0123); __m128 vo0123 = _mm_add_ps(vt0123, _mm_mul_ps(vd0123, valphav)); if (c & (2 * sizeof(float))) { _mm_storel_pi((__m64*) output, vo0123); vo0123 = _mm_movehl_ps(vo0123, vo0123); output += 2; } if (c & (1 * sizeof(float))) { _mm_store_ss(output, vo0123); output += 1; } } output = (float*) ((uintptr_t) output + output_increment); } while (--output_pixels != 0); }
3,115
31.123711
79
c
XNNPACK
XNNPACK-master/src/f32-ibilinear/gen/f32-ibilinear-sse-c8.c
// Auto-generated file. Do not edit! // Template: src/f32-ibilinear/sse.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/ibilinear.h> void xnn_f32_ibilinear_ukernel__sse_c8( size_t output_pixels, size_t channels, const float** restrict input, size_t input_offset, const float* restrict weights, float* restrict output, size_t output_increment) XNN_OOB_READS { assert(output_pixels != 0); assert(channels != 0); assert(channels % sizeof(float) == 0); do { const float* i0 = (const float*) ((uintptr_t) input[0] + input_offset); const float* i1 = (const float*) ((uintptr_t) input[1] + input_offset); const float* i2 = (const float*) ((uintptr_t) input[2] + input_offset); const float* i3 = (const float*) ((uintptr_t) input[3] + input_offset); input += 4; __m128 valphahv = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) weights); valphahv = _mm_unpacklo_ps(valphahv, valphahv); const __m128 valphah = _mm_movelh_ps(valphahv, valphahv); const __m128 valphav = _mm_movehl_ps(valphahv, valphahv); weights += 2; size_t c = channels; for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) { const __m128 vtl0123 = _mm_loadu_ps(i0); const __m128 vtr0123 = _mm_loadu_ps(i1); const __m128 vbl0123 = _mm_loadu_ps(i2); const __m128 vbr0123 = _mm_loadu_ps(i3); const __m128 vtl4567 = _mm_loadu_ps(i0 + 4); const __m128 vtr4567 = _mm_loadu_ps(i1 + 4); const __m128 vbl4567 = _mm_loadu_ps(i2 + 4); const __m128 vbr4567 = _mm_loadu_ps(i3 + 4); i0 += 8; i1 += 8; i2 += 8; i3 += 8; const __m128 vtd0123 = _mm_sub_ps(vtr0123, vtl0123); const __m128 vbd0123 = _mm_sub_ps(vbr0123, vbl0123); const __m128 vtd4567 = _mm_sub_ps(vtr4567, vtl4567); const __m128 vbd4567 = _mm_sub_ps(vbr4567, vbl4567); const __m128 vt0123 = _mm_add_ps(vtl0123, _mm_mul_ps(vtd0123, valphah)); const __m128 vb0123 = _mm_add_ps(vbl0123, _mm_mul_ps(vbd0123, valphah)); const __m128 vt4567 = _mm_add_ps(vtl4567, _mm_mul_ps(vtd4567, valphah)); const __m128 vb4567 = _mm_add_ps(vbl4567, _mm_mul_ps(vbd4567, valphah)); const __m128 vd0123 = _mm_sub_ps(vb0123, vt0123); const __m128 vd4567 = _mm_sub_ps(vb4567, vt4567); const __m128 vo0123 = _mm_add_ps(vt0123, _mm_mul_ps(vd0123, valphav)); const __m128 vo4567 = _mm_add_ps(vt4567, _mm_mul_ps(vd4567, valphav)); _mm_storeu_ps(output, vo0123); _mm_storeu_ps(output + 4, vo4567); output += 8; } for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) { const __m128 vtl0123 = _mm_loadu_ps(i0); const __m128 vtr0123 = _mm_loadu_ps(i1); const __m128 vbl0123 = _mm_loadu_ps(i2); const __m128 vbr0123 = _mm_loadu_ps(i3); i0 += 4; i1 += 4; i2 += 4; i3 += 4; const __m128 vtd0123 = _mm_sub_ps(vtr0123, vtl0123); const __m128 vbd0123 = _mm_sub_ps(vbr0123, vbl0123); const __m128 vt0123 = _mm_add_ps(vtl0123, _mm_mul_ps(vtd0123, valphah)); const __m128 vb0123 = _mm_add_ps(vbl0123, _mm_mul_ps(vbd0123, valphah)); const __m128 vd0123 = _mm_sub_ps(vb0123, vt0123); const __m128 vo0123 = _mm_add_ps(vt0123, _mm_mul_ps(vd0123, valphav)); _mm_storeu_ps(output, vo0123); output += 4; } if XNN_UNLIKELY(c != 0) { const __m128 vtl0123 = _mm_loadu_ps(i0); const __m128 vtr0123 = _mm_loadu_ps(i1); const __m128 vbl0123 = _mm_loadu_ps(i2); const __m128 vbr0123 = _mm_loadu_ps(i3); const __m128 vtd0123 = _mm_sub_ps(vtr0123, vtl0123); const __m128 vbd0123 = _mm_sub_ps(vbr0123, vbl0123); const __m128 vt0123 = _mm_add_ps(vtl0123, _mm_mul_ps(vtd0123, valphah)); const 
__m128 vb0123 = _mm_add_ps(vbl0123, _mm_mul_ps(vbd0123, valphah)); const __m128 vd0123 = _mm_sub_ps(vb0123, vt0123); __m128 vo0123 = _mm_add_ps(vt0123, _mm_mul_ps(vd0123, valphav)); if (c & (2 * sizeof(float))) { _mm_storel_pi((__m64*) output, vo0123); vo0123 = _mm_movehl_ps(vo0123, vo0123); output += 2; } if (c & (1 * sizeof(float))) { _mm_store_ss(output, vo0123); output += 1; } } output = (float*) ((uintptr_t) output + output_increment); } while (--output_pixels != 0); }
4,554
33.770992
79
c
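Every SIMD variant above declares XNN_OOB_READS and handles a final group of 1-3 channels the same way: it loads a full 4-wide vector anyway, computes the full result, then stores the low two lanes only if the bit 2*sizeof(float) is set in the remaining byte count, and one more lane only if the bit 1*sizeof(float) is set. The scalar sketch below walks through that bookkeeping with an ordinary float[4]; it mirrors the kernels' tail logic but is not XNNPACK code.

#include <stddef.h>
#include <stdio.h>

// Store the first (c / sizeof(float)) lanes of a 4-lane result, the way the
// SIMD tails above do it: an optional 2-lane store, then an optional 1-lane store.
static void store_tail(float* output, const float vo[4], size_t c) {
  size_t lane = 0;
  if (c & (2 * sizeof(float))) {   // 2 or 3 channels left
    output[0] = vo[lane + 0];
    output[1] = vo[lane + 1];
    output += 2;
    lane += 2;                     // corresponds to vget_high_f32 / _mm_movehl_ps
  }
  if (c & (1 * sizeof(float))) {   // 1 or 3 channels left
    output[0] = vo[lane];
  }
}

int main(void) {
  const float vo[4] = {1.0f, 2.0f, 3.0f, 4.0f};
  float out[3] = {0.0f, 0.0f, 0.0f};
  store_tail(out, vo, 3 * sizeof(float));   // writes 1.0, 2.0, 3.0
  printf("%f %f %f\n", out[0], out[1], out[2]);
  return 0;
}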
XNNPACK
XNNPACK-master/src/f32-ibilinear/gen/f32-ibilinear-wasmrelaxedsimd-c4.c
// Auto-generated file. Do not edit! // Template: src/f32-ibilinear/wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/ibilinear.h> void xnn_f32_ibilinear_ukernel__wasmrelaxedsimd_c4( size_t output_pixels, size_t channels, const float** restrict input, size_t input_offset, const float* restrict weights, float* restrict output, size_t output_increment) XNN_OOB_READS { assert(output_pixels != 0); assert(channels != 0); assert(channels % sizeof(float) == 0); do { const float* i0 = (const float*) ((uintptr_t) input[0] + input_offset); const float* i1 = (const float*) ((uintptr_t) input[1] + input_offset); const float* i2 = (const float*) ((uintptr_t) input[2] + input_offset); const float* i3 = (const float*) ((uintptr_t) input[3] + input_offset); input += 4; const v128_t valphah = wasm_v128_load32_splat(weights); const v128_t valphav = wasm_v128_load32_splat(weights + 1); weights += 2; size_t c = channels; for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) { const v128_t vtl = wasm_v128_load(i0); const v128_t vtr = wasm_v128_load(i1); const v128_t vbl = wasm_v128_load(i2); const v128_t vbr = wasm_v128_load(i3); i0 += 4; i1 += 4; i2 += 4; i3 += 4; const v128_t vtd = wasm_f32x4_sub(vtr, vtl); const v128_t vbd = wasm_f32x4_sub(vbr, vbl); const v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vtd, valphah, vtl); const v128_t vb = __builtin_wasm_relaxed_madd_f32x4(vbd, valphah, vbl); const v128_t vd = wasm_f32x4_sub(vb, vt); const v128_t vo = __builtin_wasm_relaxed_madd_f32x4(vd, valphav, vt); wasm_v128_store(output, vo); output += 4; } if XNN_UNLIKELY(c != 0) { const v128_t vtl = wasm_v128_load(i0); const v128_t vtr = wasm_v128_load(i1); const v128_t vbl = wasm_v128_load(i2); const v128_t vbr = wasm_v128_load(i3); const v128_t vtd = wasm_f32x4_sub(vtr, vtl); const v128_t vbd = wasm_f32x4_sub(vbr, vbl); const v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vtd, valphah, vtl); const v128_t vb = __builtin_wasm_relaxed_madd_f32x4(vbd, valphah, vbl); const v128_t vd = wasm_f32x4_sub(vb, vt); v128_t vo = __builtin_wasm_relaxed_madd_f32x4(vd, valphav, vt); if (c & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vo, 0); vo = wasm_v64x2_shuffle(vo, vo, 1, 1); output += 2; } if (c & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vo, 0); output += 1; } } output = (float*) ((uintptr_t) output + output_increment); } while (--output_pixels != 0); }
2,932
31.955056
77
c
XNNPACK
XNNPACK-master/src/f32-ibilinear/gen/f32-ibilinear-wasmrelaxedsimd-c8.c
// Auto-generated file. Do not edit! // Template: src/f32-ibilinear/wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/ibilinear.h> void xnn_f32_ibilinear_ukernel__wasmrelaxedsimd_c8( size_t output_pixels, size_t channels, const float** restrict input, size_t input_offset, const float* restrict weights, float* restrict output, size_t output_increment) XNN_OOB_READS { assert(output_pixels != 0); assert(channels != 0); assert(channels % sizeof(float) == 0); do { const float* i0 = (const float*) ((uintptr_t) input[0] + input_offset); const float* i1 = (const float*) ((uintptr_t) input[1] + input_offset); const float* i2 = (const float*) ((uintptr_t) input[2] + input_offset); const float* i3 = (const float*) ((uintptr_t) input[3] + input_offset); input += 4; const v128_t valphah = wasm_v128_load32_splat(weights); const v128_t valphav = wasm_v128_load32_splat(weights + 1); weights += 2; size_t c = channels; for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) { const v128_t vtl0123 = wasm_v128_load(i0); const v128_t vtr0123 = wasm_v128_load(i1); const v128_t vbl0123 = wasm_v128_load(i2); const v128_t vbr0123 = wasm_v128_load(i3); const v128_t vtl4567 = wasm_v128_load(i0 + 4); const v128_t vtr4567 = wasm_v128_load(i1 + 4); const v128_t vbl4567 = wasm_v128_load(i2 + 4); const v128_t vbr4567 = wasm_v128_load(i3 + 4); i0 += 8; i1 += 8; i2 += 8; i3 += 8; const v128_t vtd0123 = wasm_f32x4_sub(vtr0123, vtl0123); const v128_t vbd0123 = wasm_f32x4_sub(vbr0123, vbl0123); const v128_t vtd4567 = wasm_f32x4_sub(vtr4567, vtl4567); const v128_t vbd4567 = wasm_f32x4_sub(vbr4567, vbl4567); const v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vtd0123, valphah, vtl0123); const v128_t vb0123 = __builtin_wasm_relaxed_madd_f32x4(vbd0123, valphah, vbl0123); const v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vtd4567, valphah, vtl4567); const v128_t vb4567 = __builtin_wasm_relaxed_madd_f32x4(vbd4567, valphah, vbl4567); const v128_t vd0123 = wasm_f32x4_sub(vb0123, vt0123); const v128_t vd4567 = wasm_f32x4_sub(vb4567, vt4567); const v128_t vo0123 = __builtin_wasm_relaxed_madd_f32x4(vd0123, valphav, vt0123); const v128_t vo4567 = __builtin_wasm_relaxed_madd_f32x4(vd4567, valphav, vt4567); wasm_v128_store(output, vo0123); wasm_v128_store(output + 4, vo4567); output += 8; } for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) { const v128_t vtl = wasm_v128_load(i0); const v128_t vtr = wasm_v128_load(i1); const v128_t vbl = wasm_v128_load(i2); const v128_t vbr = wasm_v128_load(i3); i0 += 4; i1 += 4; i2 += 4; i3 += 4; const v128_t vtd = wasm_f32x4_sub(vtr, vtl); const v128_t vbd = wasm_f32x4_sub(vbr, vbl); const v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vtd, valphah, vtl); const v128_t vb = __builtin_wasm_relaxed_madd_f32x4(vbd, valphah, vbl); const v128_t vd = wasm_f32x4_sub(vb, vt); const v128_t vo = __builtin_wasm_relaxed_madd_f32x4(vd, valphav, vt); wasm_v128_store(output, vo); output += 4; } if XNN_UNLIKELY(c != 0) { const v128_t vtl = wasm_v128_load(i0); const v128_t vtr = wasm_v128_load(i1); const v128_t vbl = wasm_v128_load(i2); const v128_t vbr = wasm_v128_load(i3); const v128_t vtd = wasm_f32x4_sub(vtr, vtl); const v128_t vbd = wasm_f32x4_sub(vbr, vbl); const v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vtd, valphah, vtl); const v128_t vb = __builtin_wasm_relaxed_madd_f32x4(vbd, 
valphah, vbl); const v128_t vd = wasm_f32x4_sub(vb, vt); v128_t vo = __builtin_wasm_relaxed_madd_f32x4(vd, valphav, vt); if (c & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vo, 0); vo = wasm_v64x2_shuffle(vo, vo, 1, 1); output += 2; } if (c & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vo, 0); output += 1; } } output = (float*) ((uintptr_t) output + output_increment); } while (--output_pixels != 0); }
4,481
35.439024
89
c
XNNPACK
XNNPACK-master/src/f32-ibilinear/gen/f32-ibilinear-wasmsimd-c4.c
// Auto-generated file. Do not edit! // Template: src/f32-ibilinear/wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/ibilinear.h> void xnn_f32_ibilinear_ukernel__wasmsimd_c4( size_t output_pixels, size_t channels, const float** restrict input, size_t input_offset, const float* restrict weights, float* restrict output, size_t output_increment) XNN_OOB_READS { assert(output_pixels != 0); assert(channels != 0); assert(channels % sizeof(float) == 0); do { const float* i0 = (const float*) ((uintptr_t) input[0] + input_offset); const float* i1 = (const float*) ((uintptr_t) input[1] + input_offset); const float* i2 = (const float*) ((uintptr_t) input[2] + input_offset); const float* i3 = (const float*) ((uintptr_t) input[3] + input_offset); input += 4; const v128_t valphah = wasm_v128_load32_splat(weights); const v128_t valphav = wasm_v128_load32_splat(weights + 1); weights += 2; size_t c = channels; for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) { const v128_t vtl = wasm_v128_load(i0); const v128_t vtr = wasm_v128_load(i1); const v128_t vbl = wasm_v128_load(i2); const v128_t vbr = wasm_v128_load(i3); i0 += 4; i1 += 4; i2 += 4; i3 += 4; const v128_t vtd = wasm_f32x4_sub(vtr, vtl); const v128_t vbd = wasm_f32x4_sub(vbr, vbl); const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vtd, valphah), vtl); const v128_t vb = wasm_f32x4_add(wasm_f32x4_mul(vbd, valphah), vbl); const v128_t vd = wasm_f32x4_sub(vb, vt); const v128_t vo = wasm_f32x4_add(wasm_f32x4_mul(vd, valphav), vt); wasm_v128_store(output, vo); output += 4; } if XNN_UNLIKELY(c != 0) { const v128_t vtl = wasm_v128_load(i0); const v128_t vtr = wasm_v128_load(i1); const v128_t vbl = wasm_v128_load(i2); const v128_t vbr = wasm_v128_load(i3); const v128_t vtd = wasm_f32x4_sub(vtr, vtl); const v128_t vbd = wasm_f32x4_sub(vbr, vbl); const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vtd, valphah), vtl); const v128_t vb = wasm_f32x4_add(wasm_f32x4_mul(vbd, valphah), vbl); const v128_t vd = wasm_f32x4_sub(vb, vt); v128_t vo = wasm_f32x4_add(wasm_f32x4_mul(vd, valphav), vt); if (c & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vo, 0); vo = wasm_v64x2_shuffle(vo, vo, 1, 1); output += 2; } if (c & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vo, 0); output += 1; } } output = (float*) ((uintptr_t) output + output_increment); } while (--output_pixels != 0); }
2,907
31.674157
75
c
XNNPACK
XNNPACK-master/src/f32-ibilinear/gen/f32-ibilinear-wasmsimd-c8.c
// Auto-generated file. Do not edit! // Template: src/f32-ibilinear/wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/ibilinear.h> void xnn_f32_ibilinear_ukernel__wasmsimd_c8( size_t output_pixels, size_t channels, const float** restrict input, size_t input_offset, const float* restrict weights, float* restrict output, size_t output_increment) XNN_OOB_READS { assert(output_pixels != 0); assert(channels != 0); assert(channels % sizeof(float) == 0); do { const float* i0 = (const float*) ((uintptr_t) input[0] + input_offset); const float* i1 = (const float*) ((uintptr_t) input[1] + input_offset); const float* i2 = (const float*) ((uintptr_t) input[2] + input_offset); const float* i3 = (const float*) ((uintptr_t) input[3] + input_offset); input += 4; const v128_t valphah = wasm_v128_load32_splat(weights); const v128_t valphav = wasm_v128_load32_splat(weights + 1); weights += 2; size_t c = channels; for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) { const v128_t vtl0123 = wasm_v128_load(i0); const v128_t vtr0123 = wasm_v128_load(i1); const v128_t vbl0123 = wasm_v128_load(i2); const v128_t vbr0123 = wasm_v128_load(i3); const v128_t vtl4567 = wasm_v128_load(i0 + 4); const v128_t vtr4567 = wasm_v128_load(i1 + 4); const v128_t vbl4567 = wasm_v128_load(i2 + 4); const v128_t vbr4567 = wasm_v128_load(i3 + 4); i0 += 8; i1 += 8; i2 += 8; i3 += 8; const v128_t vtd0123 = wasm_f32x4_sub(vtr0123, vtl0123); const v128_t vbd0123 = wasm_f32x4_sub(vbr0123, vbl0123); const v128_t vtd4567 = wasm_f32x4_sub(vtr4567, vtl4567); const v128_t vbd4567 = wasm_f32x4_sub(vbr4567, vbl4567); const v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vtd0123, valphah), vtl0123); const v128_t vb0123 = wasm_f32x4_add(wasm_f32x4_mul(vbd0123, valphah), vbl0123); const v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vtd4567, valphah), vtl4567); const v128_t vb4567 = wasm_f32x4_add(wasm_f32x4_mul(vbd4567, valphah), vbl4567); const v128_t vd0123 = wasm_f32x4_sub(vb0123, vt0123); const v128_t vd4567 = wasm_f32x4_sub(vb4567, vt4567); const v128_t vo0123 = wasm_f32x4_add(wasm_f32x4_mul(vd0123, valphav), vt0123); const v128_t vo4567 = wasm_f32x4_add(wasm_f32x4_mul(vd4567, valphav), vt4567); wasm_v128_store(output, vo0123); wasm_v128_store(output + 4, vo4567); output += 8; } for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) { const v128_t vtl = wasm_v128_load(i0); const v128_t vtr = wasm_v128_load(i1); const v128_t vbl = wasm_v128_load(i2); const v128_t vbr = wasm_v128_load(i3); i0 += 4; i1 += 4; i2 += 4; i3 += 4; const v128_t vtd = wasm_f32x4_sub(vtr, vtl); const v128_t vbd = wasm_f32x4_sub(vbr, vbl); const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vtd, valphah), vtl); const v128_t vb = wasm_f32x4_add(wasm_f32x4_mul(vbd, valphah), vbl); const v128_t vd = wasm_f32x4_sub(vb, vt); const v128_t vo = wasm_f32x4_add(wasm_f32x4_mul(vd, valphav), vt); wasm_v128_store(output, vo); output += 4; } if XNN_UNLIKELY(c != 0) { const v128_t vtl = wasm_v128_load(i0); const v128_t vtr = wasm_v128_load(i1); const v128_t vbl = wasm_v128_load(i2); const v128_t vbr = wasm_v128_load(i3); const v128_t vtd = wasm_f32x4_sub(vtr, vtl); const v128_t vbd = wasm_f32x4_sub(vbr, vbl); const v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vtd, valphah), vtl); const v128_t vb = wasm_f32x4_add(wasm_f32x4_mul(vbd, valphah), vbl); const v128_t vd = 
wasm_f32x4_sub(vb, vt); v128_t vo = wasm_f32x4_add(wasm_f32x4_mul(vd, valphav), vt); if (c & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vo, 0); vo = wasm_v64x2_shuffle(vo, vo, 1, 1); output += 2; } if (c & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vo, 0); output += 1; } } output = (float*) ((uintptr_t) output + output_increment); } while (--output_pixels != 0); }
4,438
35.089431
86
c
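The only difference between the wasmrelaxedsimd and wasmsimd kernels above is the multiply-add: the relaxed variants call __builtin_wasm_relaxed_madd_f32x4, which the runtime may lower to either a fused or an unfused multiply-add, while the plain wasmsimd variants spell out wasm_f32x4_add(wasm_f32x4_mul(...)), which always rounds twice. A minimal side-by-side, assuming a clang WASM toolchain with -msimd128 (plus -mrelaxed-simd for the second helper):

#include <wasm_simd128.h>

// acc + a*b with two roundings, as in the wasmsimd kernels.
static v128_t madd_strict(v128_t a, v128_t b, v128_t acc) {
  return wasm_f32x4_add(wasm_f32x4_mul(a, b), acc);
}

// acc + a*b where the engine may fuse, as in the wasmrelaxedsimd kernels.
static v128_t madd_relaxed(v128_t a, v128_t b, v128_t acc) {
  return __builtin_wasm_relaxed_madd_f32x4(a, b, acc);
}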
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x16-minmax-avx-broadcast.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/avx-broadcast.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x16__avx_broadcast( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { __m256 vacc0x01234567 = _mm256_load_ps(w); __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8); w += 16; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const __m256 vb01234567 = _mm256_load_ps(w); const __m256 vb89ABCDEF = _mm256_load_ps(w + 8); w += 16; const __m256 va0 = _mm256_broadcast_ss(a0); a0 += 1; vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567)); vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF)); k -= sizeof(float); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); const __m256 vmin = _mm256_load_ps(params->avx.min); vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567); vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF); const __m256 vmax = _mm256_load_ps(params->avx.max); vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567); vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF); if XNN_LIKELY(nc >= 16) { _mm256_storeu_ps(c0, vacc0x01234567); _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 16; } else { if (nc & 8) { _mm256_storeu_ps(c0, vacc0x01234567); vacc0x01234567 = vacc0x89ABCDEF; c0 += 8; } __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567); if (nc & 4) { _mm_storeu_ps(c0, vacc0x0123); vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1); c0 += 4; } if (nc & 2) { _mm_storel_pi((__m64*) c0, vacc0x0123); vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123); c0 += 2; } if (nc & 1) { _mm_store_ss(c0, vacc0x0123); } nc = 0; } } while (nc != 0); }
3,076
24.429752
87
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x16-minmax-avx512f-broadcast.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/avx512-broadcast.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/igemm.h> #include <xnnpack/intrinsics-polyfill.h> void xnn_f32_igemm_minmax_ukernel_1x16__avx512f_broadcast( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { __m512 vacc0x0123456789ABCDEF = _mm512_load_ps(w); w += 16; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const __m512 vb0123456789ABCDEF = _mm512_load_ps(w); w += 16; const __m512 va0 = _mm512_set1_ps(*a0); vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF); a0 += 1; k -= sizeof(float); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); const __m512 vmin = _mm512_set1_ps(params->scalar.min); vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF); const __m512 vmax = _mm512_set1_ps(params->scalar.max); vacc0x0123456789ABCDEF = _mm512_min_ps(vmax, vacc0x0123456789ABCDEF); if XNN_LIKELY(nc >= 16) { _mm512_storeu_ps(c0, vacc0x0123456789ABCDEF); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 16; } else { if (nc & 15) { // Prepare mask for valid 32-bit elements (depends on nc). const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1))); _mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF); } nc = 0; } } while (nc != 0); }
2,534
24.867347
106
c
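Unlike the AVX and FMA3 variants, which peel leftover columns with a cascade of 8/4/2/1-element stores, the AVX-512 kernel above writes its 1-15 remaining columns with one masked store: it builds a 16-bit mask with the low nc bits set and hands it to _mm512_mask_storeu_ps. A minimal illustration of that mask arithmetic, assuming an AVX-512-capable compiler and CPU (e.g. built with -mavx512f); the buffer and values are made up:

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  float out[16] = {0.0f};
  const __m512 vacc = _mm512_set1_ps(7.0f);
  const uint32_t nc = 5;  // pretend 5 columns remain
  // Low nc bits set: (1 << 5) - 1 = 0x1F, the same expression as in the kernel.
  const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((UINT32_C(1) << nc) - UINT32_C(1)));
  _mm512_mask_storeu_ps(out, vmask, vacc);  // writes out[0..4], leaves out[5..15] at 0
  printf("%f %f\n", out[4], out[5]);        // 7.000000 0.000000
  return 0;
}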
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x16-minmax-fma3-broadcast.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/avx-broadcast.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x16__fma3_broadcast( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { __m256 vacc0x01234567 = _mm256_load_ps(w); __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8); w += 16; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const __m256 vb01234567 = _mm256_load_ps(w); const __m256 vb89ABCDEF = _mm256_load_ps(w + 8); w += 16; const __m256 va0 = _mm256_broadcast_ss(a0); a0 += 1; vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567); vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF); k -= sizeof(float); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); const __m256 vmin = _mm256_load_ps(params->avx.min); vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567); vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF); const __m256 vmax = _mm256_load_ps(params->avx.max); vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567); vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF); if XNN_LIKELY(nc >= 16) { _mm256_storeu_ps(c0, vacc0x01234567); _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 16; } else { if (nc & 8) { _mm256_storeu_ps(c0, vacc0x01234567); vacc0x01234567 = vacc0x89ABCDEF; c0 += 8; } __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567); if (nc & 4) { _mm_storeu_ps(c0, vacc0x0123); vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1); c0 += 4; } if (nc & 2) { _mm_storel_pi((__m64*) c0, vacc0x0123); vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123); c0 += 2; } if (nc & 1) { _mm_store_ss(c0, vacc0x0123); } nc = 0; } } while (nc != 0); }
3,051
24.22314
75
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x4-minmax-scalar.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/igemm.h> #include <xnnpack/math.h> void xnn_f32_igemm_minmax_ukernel_1x4__scalar( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; const float vmin = params->scalar.min; const float vmax = params->scalar.max; do { float vacc00 = w[0]; float vacc01 = w[1]; float vacc02 = w[2]; float vacc03 = w[3]; w += 4; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const float va0 = *a0++; const float vb0 = w[0]; const float vb1 = w[1]; const float vb2 = w[2]; const float vb3 = w[3]; w += 4; vacc00 = math_muladd_f32(va0, vb0, vacc00); vacc01 = math_muladd_f32(va0, vb1, vacc01); vacc02 = math_muladd_f32(va0, vb2, vacc02); vacc03 = math_muladd_f32(va0, vb3, vacc03); k -= sizeof(float); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); vacc00 = math_max_f32(vacc00, vmin); vacc01 = math_max_f32(vacc01, vmin); vacc02 = math_max_f32(vacc02, vmin); vacc03 = math_max_f32(vacc03, vmin); vacc00 = math_min_f32(vacc00, vmax); vacc01 = math_min_f32(vacc01, vmax); vacc02 = math_min_f32(vacc02, vmax); vacc03 = math_min_f32(vacc03, vmax); if XNN_LIKELY(nc >= 4) { c0[0] = vacc00; c0[1] = vacc01; c0[2] = vacc02; c0[3] = vacc03; c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 4; } else { if (nc & 2) { c0[0] = vacc00; c0[1] = vacc01; vacc00 = vacc02; c0 += 2; } if (nc & 1) { c0[0] = vacc00; } nc = 0; } } while (nc != 0); }
2,736
22.594828
75
c
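The igemm kernels above all share one control flow: the accumulators start from bias values packed at the head of w, each of the ks row pointers in a contributes kc multiply-adds against the packed columns of w, and the result is clamped to [min, max] before the (possibly partial) store. The plain-C sketch below restates that flow for a 1x4 tile, leaving out the a_offset/zero-pointer handling and the tail store; kc and ks are element and pointer counts here, whereas the kernels count kc in bytes. It is a reading aid, not an XNNPACK entry point.

#include <stddef.h>

// Reference for one 1x4 output tile of the indirect GEMM above.
//   a: ks pointers, each to kc floats of the input row
//   w: 4 bias floats followed by ks*kc groups of 4 packed weights
//   c: 4 output floats
static void igemm_1x4_reference(size_t kc, size_t ks,
                                const float* const* a, const float* w,
                                float c[4], float min, float max) {
  float acc[4] = {w[0], w[1], w[2], w[3]};  // bias load, like vacc00..vacc03
  w += 4;
  for (size_t p = 0; p < ks; p++) {
    const float* a0 = a[p];
    for (size_t k = 0; k < kc; k++) {
      const float va = a0[k];
      for (size_t n = 0; n < 4; n++) {
        acc[n] += va * w[n];                // math_muladd_f32 in the kernel
      }
      w += 4;
    }
  }
  for (size_t n = 0; n < 4; n++) {
    const float lo = acc[n] < min ? min : acc[n];  // math_max_f32(acc, vmin)
    c[n] = lo > max ? max : lo;                    // math_min_f32(..., vmax)
  }
}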
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x4-minmax-wasm.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/igemm.h> #include <xnnpack/math.h> void xnn_f32_igemm_minmax_ukernel_1x4__wasm( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; const float vmin = params->scalar.min; const float vmax = params->scalar.max; do { float vacc00 = w[0]; float vacc01 = w[1]; float vacc02 = w[2]; float vacc03 = w[3]; w += 4; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const float va0 = *a0++; const float vb0 = w[0]; const float vb1 = w[1]; const float vb2 = w[2]; const float vb3 = w[3]; w += 4; vacc00 = math_muladd_f32(va0, vb0, vacc00); vacc01 = math_muladd_f32(va0, vb1, vacc01); vacc02 = math_muladd_f32(va0, vb2, vacc02); vacc03 = math_muladd_f32(va0, vb3, vacc03); k -= sizeof(float); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); vacc00 = __builtin_wasm_max_f32(vacc00, vmin); vacc01 = __builtin_wasm_max_f32(vacc01, vmin); vacc02 = __builtin_wasm_max_f32(vacc02, vmin); vacc03 = __builtin_wasm_max_f32(vacc03, vmin); vacc00 = __builtin_wasm_min_f32(vacc00, vmax); vacc01 = __builtin_wasm_min_f32(vacc01, vmax); vacc02 = __builtin_wasm_min_f32(vacc02, vmax); vacc03 = __builtin_wasm_min_f32(vacc03, vmax); if XNN_LIKELY(nc >= 4) { c0[0] = vacc00; c0[1] = vacc01; c0[2] = vacc02; c0[3] = vacc03; c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 4; } else { if (nc & 2) { c0[0] = vacc00; c0[1] = vacc01; vacc00 = vacc02; c0 += 2; } if (nc & 1) { c0[0] = vacc00; } nc = 0; } } while (nc != 0); }
2,814
23.267241
75
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x4-relu-scalar.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/igemm.h> #include <xnnpack/math.h> void xnn_f32_igemm_relu_ukernel_1x4__scalar( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { float vacc00 = w[0]; float vacc01 = w[1]; float vacc02 = w[2]; float vacc03 = w[3]; w += 4; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const float va0 = *a0++; const float vb0 = w[0]; const float vb1 = w[1]; const float vb2 = w[2]; const float vb3 = w[3]; w += 4; vacc00 = math_muladd_f32(va0, vb0, vacc00); vacc01 = math_muladd_f32(va0, vb1, vacc01); vacc02 = math_muladd_f32(va0, vb2, vacc02); vacc03 = math_muladd_f32(va0, vb3, vacc03); k -= sizeof(float); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); vacc00 = math_max_f32(vacc00, 0.0f); vacc01 = math_max_f32(vacc01, 0.0f); vacc02 = math_max_f32(vacc02, 0.0f); vacc03 = math_max_f32(vacc03, 0.0f); if XNN_LIKELY(nc >= 4) { c0[0] = vacc00; c0[1] = vacc01; c0[2] = vacc02; c0[3] = vacc03; c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 4; } else { if (nc & 2) { c0[0] = vacc00; c0[1] = vacc01; vacc00 = vacc02; c0 += 2; } if (nc & 1) { c0[0] = vacc00; } nc = 0; } } while (nc != 0); }
2,485
21.807339
73
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x4-relu-wasm.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/igemm.h> #include <xnnpack/math.h> void xnn_f32_igemm_relu_ukernel_1x4__wasm( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { float vacc00 = w[0]; float vacc01 = w[1]; float vacc02 = w[2]; float vacc03 = w[3]; w += 4; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const float va0 = *a0++; const float vb0 = w[0]; const float vb1 = w[1]; const float vb2 = w[2]; const float vb3 = w[3]; w += 4; vacc00 = math_muladd_f32(va0, vb0, vacc00); vacc01 = math_muladd_f32(va0, vb1, vacc01); vacc02 = math_muladd_f32(va0, vb2, vacc02); vacc03 = math_muladd_f32(va0, vb3, vacc03); k -= sizeof(float); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); vacc00 = __builtin_wasm_max_f32(vacc00, 0.0f); vacc01 = __builtin_wasm_max_f32(vacc01, 0.0f); vacc02 = __builtin_wasm_max_f32(vacc02, 0.0f); vacc03 = __builtin_wasm_max_f32(vacc03, 0.0f); if XNN_LIKELY(nc >= 4) { c0[0] = vacc00; c0[1] = vacc01; c0[2] = vacc02; c0[3] = vacc03; c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 4; } else { if (nc & 2) { c0[0] = vacc00; c0[1] = vacc01; vacc00 = vacc02; c0 += 2; } if (nc & 1) { c0[0] = vacc00; } nc = 0; } } while (nc != 0); }
2,523
22.155963
73
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x4-scalar.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/igemm.h> #include <xnnpack/math.h> void xnn_f32_igemm_ukernel_1x4__scalar( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { float vacc00 = w[0]; float vacc01 = w[1]; float vacc02 = w[2]; float vacc03 = w[3]; w += 4; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const float va0 = *a0++; const float vb0 = w[0]; const float vb1 = w[1]; const float vb2 = w[2]; const float vb3 = w[3]; w += 4; vacc00 = math_muladd_f32(va0, vb0, vacc00); vacc01 = math_muladd_f32(va0, vb1, vacc01); vacc02 = math_muladd_f32(va0, vb2, vacc02); vacc03 = math_muladd_f32(va0, vb3, vacc03); k -= sizeof(float); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); if XNN_LIKELY(nc >= 4) { c0[0] = vacc00; c0[1] = vacc01; c0[2] = vacc02; c0[3] = vacc03; c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 4; } else { if (nc & 2) { c0[0] = vacc00; c0[1] = vacc01; vacc00 = vacc02; c0 += 2; } if (nc & 1) { c0[0] = vacc00; } nc = 0; } } while (nc != 0); }
2,319
21.095238
76
c
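Every kernel in these records guards its indirection pointers with the same check: a pointer equal to `zero` is left alone, while any other pointer gets `a_offset` added. The sketch below is a hypothetical helper, not repository code, showing how such an indirection array is typically filled so that padded taps reuse one shared zero buffer and stay valid regardless of how the real input base address is later offset.

#include <stddef.h>
#include <stdint.h>

/* Sketch (assumption-labeled, not from the repository): build the `a`
 * indirection array consumed by the igemm kernels above.  Padded taps point
 * at a shared zero buffer; because the kernels skip the a_offset adjustment
 * for pointers equal to `zero`, those entries never need rewriting. */
void build_indirection(
    const float* input,      /* base of the input rows */
    size_t input_stride,     /* floats between consecutive rows */
    const float* zero,       /* at least kc zero-initialized floats */
    ptrdiff_t first_row,     /* may be negative for top padding */
    size_t taps,             /* number of taps = ks / sizeof(void*) */
    size_t input_height,
    const float** a)         /* out: `taps` pointers */
{
  for (size_t s = 0; s < taps; s++) {
    const ptrdiff_t row = first_row + (ptrdiff_t) s;
    if (row < 0 || row >= (ptrdiff_t) input_height) {
      a[s] = zero;           /* padded tap: shared zero buffer */
    } else {
      a[s] = input + (size_t) row * input_stride;
    }
  }
}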
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-minmax-aarch64-neonfma-lane-ld64.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/neon-ld64.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x8__aarch64_neonfma_lane_ld64( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { float32x4_t vacc0x0123 = vld1q_f32(w); w += 4; float32x4_t vacc0x4567 = vld1q_f32(w); w += 4; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) { const float32x2_t va0 = vld1_f32(a0); a0 += 2; const float32x4_t vb0123c0 = vld1q_f32(w); w += 4; const float32x4_t vb4567c0 = vld1q_f32(w); w += 4; vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, va0, 0); vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, va0, 0); const float32x4_t vb0123c1 = vld1q_f32(w); w += 4; const float32x4_t vb4567c1 = vld1q_f32(w); w += 4; vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, va0, 1); vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, va0, 1); } if XNN_UNLIKELY(k != 0) { const float32x4_t va0 = vld1q_dup_f32(a0); const float32x4_t vb0123 = vld1q_f32(w); w += 4; const float32x4_t vb4567 = vld1q_f32(w); w += 4; vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123); vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567); } p -= 1 * sizeof(void*); } while (p != 0); const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max); vacc0x0123 = vminq_f32(vacc0x0123, vmax); vacc0x4567 = vminq_f32(vacc0x4567, vmax); const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min); vacc0x0123 = vmaxq_f32(vacc0x0123, vmin); vacc0x4567 = vmaxq_f32(vacc0x4567, vmin); if XNN_LIKELY(nc >= 8) { vst1q_f32(c0, vacc0x0123); vst1q_f32(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { vst1q_f32(c0, vacc0x0123); c0 += 4; vacc0x0123 = vacc0x4567; } float32x2_t vacc0x01 = vget_low_f32(vacc0x0123); if (nc & 2) { vst1_f32(c0, vacc0x01); c0 += 2; vacc0x01 = vget_high_f32(vacc0x0123); } if (nc & 1) { vst1_lane_f32(c0, vacc0x01, 0); } nc = 0; } } while (nc != 0); }
3,311
26.147541
75
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-minmax-avx-broadcast.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/avx-broadcast.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x8__avx_broadcast( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { __m256 vacc0x01234567 = _mm256_load_ps(w); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const __m256 vb01234567 = _mm256_load_ps(w); w += 8; const __m256 va0 = _mm256_broadcast_ss(a0); a0 += 1; vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567)); k -= sizeof(float); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); const __m256 vmin = _mm256_load_ps(params->avx.min); vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567); const __m256 vmax = _mm256_load_ps(params->avx.max); vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567); if XNN_LIKELY(nc >= 8) { _mm256_storeu_ps(c0, vacc0x01234567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567); if (nc & 4) { _mm_storeu_ps(c0, vacc0x0123); vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1); c0 += 4; } if (nc & 2) { _mm_storel_pi((__m64*) c0, vacc0x0123); vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123); c0 += 2; } if (nc & 1) { _mm_store_ss(c0, vacc0x0123); } nc = 0; } } while (nc != 0); }
2,577
22.87037
87
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-minmax-fma3-broadcast.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/avx-broadcast.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x8__fma3_broadcast( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { __m256 vacc0x01234567 = _mm256_load_ps(w); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const __m256 vb01234567 = _mm256_load_ps(w); w += 8; const __m256 va0 = _mm256_broadcast_ss(a0); a0 += 1; vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567); k -= sizeof(float); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); const __m256 vmin = _mm256_load_ps(params->avx.min); vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567); const __m256 vmax = _mm256_load_ps(params->avx.max); vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567); if XNN_LIKELY(nc >= 8) { _mm256_storeu_ps(c0, vacc0x01234567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567); if (nc & 4) { _mm_storeu_ps(c0, vacc0x0123); vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1); c0 += 4; } if (nc & 2) { _mm_storel_pi((__m64*) c0, vacc0x0123); vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123); c0 += 2; } if (nc & 1) { _mm_store_ss(c0, vacc0x0123); } nc = 0; } } while (nc != 0); }
2,565
22.759259
75
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-minmax-neon-dup-ld64.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/neon-ld64.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x8__neon_dup_ld64( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { float32x4_t vacc0x0123 = vld1q_f32(w); w += 4; float32x4_t vacc0x4567 = vld1q_f32(w); w += 4; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) { const float32x2_t va0 = vld1_f32(a0); a0 += 2; const float32x4_t vb0123c0 = vld1q_f32(w); w += 4; const float32x4_t vb4567c0 = vld1q_f32(w); w += 4; const float32x4_t va0c0 = vdupq_lane_f32(va0, 0); vacc0x0123 = vmlaq_f32(vacc0x0123, va0c0, vb0123c0); vacc0x4567 = vmlaq_f32(vacc0x4567, va0c0, vb4567c0); const float32x4_t vb0123c1 = vld1q_f32(w); w += 4; const float32x4_t vb4567c1 = vld1q_f32(w); w += 4; const float32x4_t va0c1 = vdupq_lane_f32(va0, 1); vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1); vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1); } if XNN_UNLIKELY(k != 0) { const float32x4_t va0 = vld1q_dup_f32(a0); const float32x4_t vb0123 = vld1q_f32(w); w += 4; const float32x4_t vb4567 = vld1q_f32(w); w += 4; vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123); vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567); } p -= 1 * sizeof(void*); } while (p != 0); const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max); vacc0x0123 = vminq_f32(vacc0x0123, vmax); vacc0x4567 = vminq_f32(vacc0x4567, vmax); const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min); vacc0x0123 = vmaxq_f32(vacc0x0123, vmin); vacc0x4567 = vmaxq_f32(vacc0x4567, vmin); if XNN_LIKELY(nc >= 8) { vst1q_f32(c0, vacc0x0123); vst1q_f32(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { vst1q_f32(c0, vacc0x0123); c0 += 4; vacc0x0123 = vacc0x4567; } float32x2_t vacc0x01 = vget_low_f32(vacc0x0123); if (nc & 2) { vst1_f32(c0, vacc0x01); c0 += 2; vacc0x01 = vget_high_f32(vacc0x0123); } if (nc & 1) { vst1_lane_f32(c0, vacc0x01, 0); } nc = 0; } } while (nc != 0); }
3,399
26.419355
75
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-minmax-neon-lane-ld64.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/neon-ld64.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x8__neon_lane_ld64( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { float32x4_t vacc0x0123 = vld1q_f32(w); w += 4; float32x4_t vacc0x4567 = vld1q_f32(w); w += 4; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) { const float32x2_t va0 = vld1_f32(a0); a0 += 2; const float32x4_t vb0123c0 = vld1q_f32(w); w += 4; const float32x4_t vb4567c0 = vld1q_f32(w); w += 4; vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, va0, 0); vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, va0, 0); const float32x4_t vb0123c1 = vld1q_f32(w); w += 4; const float32x4_t vb4567c1 = vld1q_f32(w); w += 4; vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, va0, 1); vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, va0, 1); } if XNN_UNLIKELY(k != 0) { const float32x4_t va0 = vld1q_dup_f32(a0); const float32x4_t vb0123 = vld1q_f32(w); w += 4; const float32x4_t vb4567 = vld1q_f32(w); w += 4; vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123); vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567); } p -= 1 * sizeof(void*); } while (p != 0); const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max); vacc0x0123 = vminq_f32(vacc0x0123, vmax); vacc0x4567 = vminq_f32(vacc0x4567, vmax); const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min); vacc0x0123 = vmaxq_f32(vacc0x0123, vmin); vacc0x4567 = vmaxq_f32(vacc0x4567, vmin); if XNN_LIKELY(nc >= 8) { vst1q_f32(c0, vacc0x0123); vst1q_f32(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { vst1q_f32(c0, vacc0x0123); c0 += 4; vacc0x0123 = vacc0x4567; } float32x2_t vacc0x01 = vget_low_f32(vacc0x0123); if (nc & 2) { vst1_f32(c0, vacc0x01); c0 += 2; vacc0x01 = vget_high_f32(vacc0x0123); } if (nc & 1) { vst1_lane_f32(c0, vacc0x01, 0); } nc = 0; } } while (nc != 0); }
3,300
26.057377
75
c
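All of the 1x8 variants above, whether they read w four floats at a time (NEON ld64), eight at a time (AVX/FMA3 broadcast), or in groups of 32 (SSE dup, wasm splat), consume the packed weights in the same linear order: 8 bias values, then for each indirection step and each k one group of 8 weights. The sketch below only reproduces that order as a reading aid for the kernels' w pointer; the hypothetical input indexing is an assumption, and the real library ships its own packing helpers.

#include <stddef.h>

/* Sketch (not from the repository): lay out bias and weights in the order
 * the 1x8 igemm kernels above consume them -- 8 bias floats, then for each
 * indirection step s and each k, one group of 8 weights.  `weights` is
 * assumed to be indexed as weights[(s*kc + k)*8 + n]. */
void pack_w_1x8(
    size_t kc,               /* elements per indirection step */
    size_t ks,               /* indirection steps (kernel's ks / sizeof(void*)) */
    const float* bias,       /* 8 bias values */
    const float* weights,    /* ks*kc*8 weight values */
    float* packed)           /* out: 8 + ks*kc*8 floats */
{
  for (size_t n = 0; n < 8; n++) {
    *packed++ = bias[n];
  }
  for (size_t s = 0; s < ks; s++) {
    for (size_t k = 0; k < kc; k++) {
      for (size_t n = 0; n < 8; n++) {
        *packed++ = weights[(s * kc + k) * 8 + n];
      }
    }
  }
}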
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-minmax-neonfma-dup-ld64.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/neon-ld64.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x8__neonfma_dup_ld64( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { float32x4_t vacc0x0123 = vld1q_f32(w); w += 4; float32x4_t vacc0x4567 = vld1q_f32(w); w += 4; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) { const float32x2_t va0 = vld1_f32(a0); a0 += 2; const float32x4_t vb0123c0 = vld1q_f32(w); w += 4; const float32x4_t vb4567c0 = vld1q_f32(w); w += 4; const float32x4_t va0c0 = vdupq_lane_f32(va0, 0); vacc0x0123 = vfmaq_f32(vacc0x0123, va0c0, vb0123c0); vacc0x4567 = vfmaq_f32(vacc0x4567, va0c0, vb4567c0); const float32x4_t vb0123c1 = vld1q_f32(w); w += 4; const float32x4_t vb4567c1 = vld1q_f32(w); w += 4; const float32x4_t va0c1 = vdupq_lane_f32(va0, 1); vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1); vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1); } if XNN_UNLIKELY(k != 0) { const float32x4_t va0 = vld1q_dup_f32(a0); const float32x4_t vb0123 = vld1q_f32(w); w += 4; const float32x4_t vb4567 = vld1q_f32(w); w += 4; vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123); vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567); } p -= 1 * sizeof(void*); } while (p != 0); const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max); vacc0x0123 = vminq_f32(vacc0x0123, vmax); vacc0x4567 = vminq_f32(vacc0x4567, vmax); const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min); vacc0x0123 = vmaxq_f32(vacc0x0123, vmin); vacc0x4567 = vmaxq_f32(vacc0x4567, vmin); if XNN_LIKELY(nc >= 8) { vst1q_f32(c0, vacc0x0123); vst1q_f32(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { vst1q_f32(c0, vacc0x0123); c0 += 4; vacc0x0123 = vacc0x4567; } float32x2_t vacc0x01 = vget_low_f32(vacc0x0123); if (nc & 2) { vst1_f32(c0, vacc0x01); c0 += 2; vacc0x01 = vget_high_f32(vacc0x0123); } if (nc & 1) { vst1_lane_f32(c0, vacc0x01, 0); } nc = 0; } } while (nc != 0); }
3,402
26.443548
75
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-minmax-sse-dup.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/sse-dup.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xmmintrin.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x8__sse_dup( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { __m128 vacc0x0123 = _mm_load_ps(w); __m128 vacc0x4567 = _mm_load_ps(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; while (k >= 4 * sizeof(float)) { const __m128 va0 = _mm_loadu_ps(a0); a0 += 4; const __m128 va0c0000 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 0, 0, 0)); const __m128 vb0123c0 = _mm_load_ps(w + 0); const __m128 vb4567c0 = _mm_load_ps(w + 4); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0)); const __m128 va0c1111 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(1, 1, 1, 1)); const __m128 vb0123c1 = _mm_load_ps(w + 8); const __m128 vb4567c1 = _mm_load_ps(w + 12); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1)); const __m128 va0c2222 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(2, 2, 2, 2)); const __m128 vb0123c2 = _mm_load_ps(w + 16); const __m128 vb4567c2 = _mm_load_ps(w + 20); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2)); const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3)); const __m128 vb0123c3 = _mm_load_ps(w + 24); const __m128 vb4567c3 = _mm_load_ps(w + 28); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3)); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { do { const __m128 vb0123 = _mm_load_ps(w); const __m128 vb4567 = _mm_load_ps(w + 4); w += 8; const __m128 va0 = _mm_load1_ps(a0); a0 += 1; vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567)); k -= sizeof(float); } while (k != 0); } p -= 1 * sizeof(void*); } while (p != 0); const __m128 vmax = _mm_load_ps(params->sse.max); vacc0x0123 = _mm_min_ps(vacc0x0123, vmax); vacc0x4567 = _mm_min_ps(vacc0x4567, vmax); const __m128 vmin = _mm_load_ps(params->sse.min); vacc0x0123 = _mm_max_ps(vacc0x0123, vmin); vacc0x4567 = _mm_max_ps(vacc0x4567, vmin); if XNN_LIKELY(nc >= 8) { _mm_storeu_ps(c0, vacc0x0123); _mm_storeu_ps(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { _mm_storeu_ps(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { _mm_storel_pi((__m64*) c0, vacc0x0123); vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123); c0 += 2; } 
if (nc & 1) { _mm_store_ss(c0, vacc0x0123); } nc = 0; } } while (nc != 0); }
4,295
26.716129
82
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-minmax-sse-load1.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/sse-load1.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xmmintrin.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x8__sse_load1( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { __m128 vacc0x0123 = _mm_load_ps(w); __m128 vacc0x4567 = _mm_load_ps(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const __m128 vb0123 = _mm_load_ps(w); const __m128 vb4567 = _mm_load_ps(w + 4); w += 8; const __m128 va0 = _mm_load1_ps(a0); a0 += 1; vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567)); k -= sizeof(float); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); const __m128 vmax = _mm_load_ps(params->sse.max); vacc0x0123 = _mm_min_ps(vacc0x0123, vmax); vacc0x4567 = _mm_min_ps(vacc0x4567, vmax); const __m128 vmin = _mm_load_ps(params->sse.min); vacc0x0123 = _mm_max_ps(vacc0x0123, vmin); vacc0x4567 = _mm_max_ps(vacc0x4567, vmin); if XNN_LIKELY(nc >= 8) { _mm_storeu_ps(c0, vacc0x0123); _mm_storeu_ps(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { _mm_storeu_ps(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { _mm_storel_pi((__m64*) c0, vacc0x0123); vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123); c0 += 2; } if (nc & 1) { _mm_store_ss(c0, vacc0x0123); } nc = 0; } } while (nc != 0); }
2,698
22.884956
75
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-minmax-sse2-dup.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/sse-dup.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <emmintrin.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { __m128 vacc0x0123 = _mm_load_ps(w); __m128 vacc0x4567 = _mm_load_ps(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; while (k >= 4 * sizeof(float)) { const __m128 va0 = _mm_loadu_ps(a0); a0 += 4; const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0))); const __m128 vb0123c0 = _mm_load_ps(w + 0); const __m128 vb4567c0 = _mm_load_ps(w + 4); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0)); const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1))); const __m128 vb0123c1 = _mm_load_ps(w + 8); const __m128 vb4567c1 = _mm_load_ps(w + 12); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1)); const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2))); const __m128 vb0123c2 = _mm_load_ps(w + 16); const __m128 vb4567c2 = _mm_load_ps(w + 20); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2)); const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3)); const __m128 vb0123c3 = _mm_load_ps(w + 24); const __m128 vb4567c3 = _mm_load_ps(w + 28); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3)); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { do { const __m128 vb0123 = _mm_load_ps(w); const __m128 vb4567 = _mm_load_ps(w + 4); w += 8; const __m128 va0 = _mm_load1_ps(a0); a0 += 1; vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567)); k -= sizeof(float); } while (k != 0); } p -= 1 * sizeof(void*); } while (p != 0); const __m128 vmax = _mm_load_ps(params->sse.max); vacc0x0123 = _mm_min_ps(vacc0x0123, vmax); vacc0x4567 = _mm_min_ps(vacc0x4567, vmax); const __m128 vmin = _mm_load_ps(params->sse.min); vacc0x0123 = _mm_max_ps(vacc0x0123, vmin); vacc0x4567 = _mm_max_ps(vacc0x4567, vmin); if XNN_LIKELY(nc >= 8) { _mm_storeu_ps(c0, vacc0x0123); _mm_storeu_ps(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { _mm_storeu_ps(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { 
_mm_storel_pi((__m64*) c0, vacc0x0123); vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123); c0 += 2; } if (nc & 1) { _mm_store_ss(c0, vacc0x0123); } nc = 0; } } while (nc != 0); }
4,398
27.380645
116
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-minmax-wasmrelaxedsimd-fma-loadsplat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-loadsplat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x8__wasmrelaxedsimd_fma_loadsplat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567); k -= sizeof(float); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567); vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
2,923
24.876106
80
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-minmax-wasmrelaxedsimd-fma-splat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-splat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x8__wasmrelaxedsimd_fma_splat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; while (k >= 4 * sizeof(float)) { const v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567); const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567); const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567); const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567); k -= sizeof(float); } while (k != 0); } p -= 1 * sizeof(void*); } while (p != 0); vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567); vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) 
((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
4,554
29.165563
84
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-minmax-wasmrelaxedsimd-loadsplat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-loadsplat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x8__wasmrelaxedsimd_loadsplat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567); k -= sizeof(float); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567); vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
2,913
24.787611
77
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-minmax-wasmrelaxedsimd-splat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-splat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x8__wasmrelaxedsimd_splat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; while (k >= 4 * sizeof(float)) { const v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567); const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567); const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567); const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567); k -= sizeof(float); } while (k != 0); } p -= 1 * sizeof(void*); } while (p != 0); vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567); vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } 
else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
4,520
28.940397
81
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-minmax-wasmsimd-arm-loadsplat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-loadsplat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567); k -= sizeof(float); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123); vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567); vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123); vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
2,838
24.123894
77
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-minmax-wasmsimd-arm-splat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-splat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; while (k >= 4 * sizeof(float)) { const v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567); const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567); const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567); const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567); k -= sizeof(float); } while (k != 0); } p -= 1 * sizeof(void*); } while (p != 0); vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123); vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567); vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123); vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = 
vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
4,445
28.443709
81
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-minmax-wasmsimd-x86-loadsplat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-loadsplat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567); k -= sizeof(float); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123); vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567); vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123); vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
2,842
24.159292
77
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-minmax-wasmsimd-x86-splat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-splat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; while (k >= 4 * sizeof(float)) { const v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567); const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567); const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567); const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567); k -= sizeof(float); } while (k != 0); } p -= 1 * sizeof(void*); } while (p != 0); vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123); vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567); vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123); vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = 
vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
4,449
28.470199
81
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-relu-wasmrelaxedsimd-fma-loadsplat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-loadsplat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_relu_ukernel_1x8__wasmrelaxedsimd_fma_loadsplat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567); k -= sizeof(float); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); const v128_t vzero = wasm_i32x4_const_splat(0); vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero); vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
2,662
23.431193
80
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-relu-wasmrelaxedsimd-fma-splat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-splat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_relu_ukernel_1x8__wasmrelaxedsimd_fma_splat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; while (k >= 4 * sizeof(float)) { const v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567); const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567); const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567); const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567); k -= sizeof(float); } while (k != 0); } p -= 1 * sizeof(void*); } while (p != 0); const v128_t vzero = wasm_i32x4_const_splat(0); vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero); vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; 
} if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
4,293
28.210884
84
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-relu-wasmsimd-loadsplat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-loadsplat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_loadsplat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567); k -= sizeof(float); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); const v128_t vzero = wasm_i32x4_const_splat(0); vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero); vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
2,645
23.275229
77
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-relu-wasmsimd-splat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-splat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; while (k >= 4 * sizeof(float)) { const v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567); const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567); const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567); const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567); k -= sizeof(float); } while (k != 0); } p -= 1 * sizeof(void*); } while (p != 0); const v128_t vzero = wasm_i32x4_const_splat(0); vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero); vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { 
wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
4,252
27.931973
81
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-wasmrelaxedsimd-fma-loadsplat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-loadsplat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_ukernel_1x8__wasmrelaxedsimd_fma_loadsplat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567); k -= sizeof(float); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
2,504
22.632075
80
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-wasmrelaxedsimd-fma-splat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-splat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_ukernel_1x8__wasmrelaxedsimd_fma_splat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; while (k >= 4 * sizeof(float)) { const v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567); const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567); const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567); const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567); k -= sizeof(float); } while (k != 0); } p -= 1 * sizeof(void*); } while (p != 0); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
4,135
27.722222
84
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-wasmsimd-loadsplat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-loadsplat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_ukernel_1x8__wasmsimd_loadsplat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567); k -= sizeof(float); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
2,487
22.471698
77
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8-wasmsimd-splat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-splat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_ukernel_1x8__wasmsimd_splat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; while (k >= 4 * sizeof(float)) { const v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567); const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567); const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567); const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567); k -= sizeof(float); } while (k != 0); } p -= 1 * sizeof(void*); } while (p != 0); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
4,094
27.4375
81
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8s4-minmax-sse.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/sse-shuffle.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xmmintrin.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x8s4__sse( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { __m128 vacc0x0123 = _mm_load_ps(w); __m128 vacc0x4567 = _mm_load_ps(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; while (k >= 4 * sizeof(float)) { __m128 va0 = _mm_loadu_ps(a0); a0 += 4; const __m128 vb0123c0 = _mm_load_ps(w + 0); const __m128 vb4567c0 = _mm_load_ps(w + 4); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0)); va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1)); const __m128 vb0123c1 = _mm_load_ps(w + 8); const __m128 vb4567c1 = _mm_load_ps(w + 12); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1)); va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1)); const __m128 vb0123c2 = _mm_load_ps(w + 16); const __m128 vb4567c2 = _mm_load_ps(w + 20); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2)); va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1)); const __m128 vb0123c3 = _mm_load_ps(w + 24); const __m128 vb4567c3 = _mm_load_ps(w + 28); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3)); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { __m128 va0 = _mm_loadu_ps(a0); a0 = (const float*) ((uintptr_t) a0 + k); const __m128 vb0123c0 = _mm_load_ps(w + 0); const __m128 vb4567c0 = _mm_load_ps(w + 4); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0)); va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1)); const __m128 vb0123c1 = _mm_load_ps(w + 8); const __m128 vb4567c1 = _mm_load_ps(w + 12); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1)); va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1)); const __m128 vb0123c2 = _mm_load_ps(w + 16); const __m128 vb4567c2 = _mm_load_ps(w + 20); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), 
vb4567c2)); va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1)); const __m128 vb0123c3 = _mm_load_ps(w + 24); const __m128 vb4567c3 = _mm_load_ps(w + 28); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3)); w += 32; } p -= 1 * sizeof(void*); } while (p != 0); const __m128 vmax = _mm_load_ps(params->sse.max); vacc0x0123 = _mm_min_ps(vacc0x0123, vmax); vacc0x4567 = _mm_min_ps(vacc0x4567, vmax); const __m128 vmin = _mm_load_ps(params->sse.min); vacc0x0123 = _mm_max_ps(vacc0x0123, vmin); vacc0x4567 = _mm_max_ps(vacc0x4567, vmin); if XNN_LIKELY(nc >= 8) { _mm_storeu_ps(c0, vacc0x0123); _mm_storeu_ps(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { _mm_storeu_ps(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { _mm_storel_pi((__m64*) c0, vacc0x0123); vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123); c0 += 2; } if (nc & 1) { _mm_store_ss(c0, vacc0x0123); } nc = 0; } } while (nc != 0); }
5,497
29.88764
128
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8s4-minmax-wasmrelaxedsimd-fma.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x8s4__wasmrelaxedsimd_fma( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c2 
= wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567); w += 32; } p -= 1 * sizeof(void*); } while (p != 0); vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567); vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
5,848
31.675978
132
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8s4-minmax-wasmrelaxedsimd.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x8s4__wasmrelaxedsimd( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t 
vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567); w += 32; } p -= 1 * sizeof(void*); } while (p != 0); vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567); vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
5,796
31.385475
129
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8s4-minmax-wasmsimd-arm.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x8s4__wasmsimd_arm( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t 
vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567); w += 32; } p -= 1 * sizeof(void*); } while (p != 0); vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123); vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567); vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123); vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
5,721
30.96648
129
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8s4-minmax-wasmsimd-x86.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_1x8s4__wasmsimd_x86( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t 
vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567); w += 32; } p -= 1 * sizeof(void*); } while (p != 0); vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123); vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567); vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123); vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
5,725
30.988827
129
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8s4-relu-wasmrelaxedsimd-fma.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_relu_ukernel_1x8s4__wasmrelaxedsimd_fma( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = 
__builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567); w += 32; } p -= 1 * sizeof(void*); } while (p != 0); const v128_t vzero = wasm_i32x4_const_splat(0); vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero); vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
5,587
30.931429
132
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8s4-relu-wasmsimd.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_relu_ukernel_1x8s4__wasmsimd( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), 
vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567); w += 32; } p -= 1 * sizeof(void*); } while (p != 0); const v128_t vzero = wasm_i32x4_const_splat(0); vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero); vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
5,528
30.594286
129
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8s4-wasmrelaxedsimd-fma.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_ukernel_1x8s4__wasmrelaxedsimd_fma( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = 
__builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567); w += 32; } p -= 1 * sizeof(void*); } while (p != 0); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
5,429
30.569767
132
c
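The k-remainder loop of the f32-igemm-1x8s4 wasmrelaxedsimd-fma kernel above loads a full 4-lane activation vector even when fewer than four values remain (the kernel is tagged XNN_OOB_READS) and clears the surplus lanes with wasm_v128_andnot(va0, wasm_f32x4_eq(vb, vzero)), relying on the packed weights past kc being zero. A minimal scalar sketch of the same idea; the names below are purely illustrative and not part of XNNPACK:

#include <math.h>
#include <stdio.h>

/* A lane whose packed weight is zero is padding; the matching activation
   lane may hold garbage read past the end of the row, so it is cleared
   before the multiply instead of trusting 0.0f * garbage, which would be
   NaN if the garbage happened to be Inf or NaN. */
static float masked_madd(float a_maybe_garbage, float b_packed, float acc) {
  const float a = (b_packed == 0.0f) ? 0.0f : a_maybe_garbage;
  return a * b_packed + acc;
}

int main(void) {
  const float garbage = INFINITY;  /* pretend an Inf was read past kc */
  printf("%f\n", masked_madd(garbage, 0.0f, 3.0f));  /* prints 3.000000 */
  return 0;
}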
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-1x8s4-wasmsimd.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_ukernel_1x8s4__wasmsimd( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), 
vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567); w += 32; } p -= 1 * sizeof(void*); } while (p != 0); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c0, vacc0x0123); vacc0x0123 = vacc0x4567; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
5,370
30.226744
129
c
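Both 1x8s4 kernels above process four k steps from a single 4-lane activation load: after each pair of packed-weight panels the activation vector is rotated one lane with wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0), and the weights are packed to match that rotation, so no per-lane broadcast is needed. A rough scalar equivalent of the rotation, with illustrative names only:

/* Rotate a 4-element activation group one position to the left, mirroring
   wasm_v32x4_shuffle(va, va, 1, 2, 3, 0) in the s4 kernels above. */
static void rotate_left1(float v[4]) {
  const float t = v[0];
  v[0] = v[1];
  v[1] = v[2];
  v[2] = v[3];
  v[3] = t;
}

Over the four panels of one group every element passes through every lane once, which is why the s4 weight packing differs from the broadcast/dup layouts used by the kernels later in this set.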
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-2x4-minmax-scalar.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/igemm.h> #include <xnnpack/math.h> void xnn_f32_igemm_minmax_ukernel_2x4__scalar( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 2); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (2 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr != 2) { c1 = c0; } const float vmin = params->scalar.min; const float vmax = params->scalar.max; do { float vacc00 = w[0]; float vacc01 = w[1]; float vacc02 = w[2]; float vacc03 = w[3]; float vacc10 = vacc00; float vacc11 = vacc01; float vacc12 = vacc02; float vacc13 = vacc03; w += 4; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } a += 2; size_t k = kc; do { const float va0 = *a0++; const float va1 = *a1++; const float vb0 = w[0]; const float vb1 = w[1]; const float vb2 = w[2]; const float vb3 = w[3]; w += 4; vacc00 = math_muladd_f32(va0, vb0, vacc00); vacc01 = math_muladd_f32(va0, vb1, vacc01); vacc02 = math_muladd_f32(va0, vb2, vacc02); vacc03 = math_muladd_f32(va0, vb3, vacc03); vacc10 = math_muladd_f32(va1, vb0, vacc10); vacc11 = math_muladd_f32(va1, vb1, vacc11); vacc12 = math_muladd_f32(va1, vb2, vacc12); vacc13 = math_muladd_f32(va1, vb3, vacc13); k -= sizeof(float); } while (k != 0); p -= 2 * sizeof(void*); } while (p != 0); vacc00 = math_max_f32(vacc00, vmin); vacc01 = math_max_f32(vacc01, vmin); vacc02 = math_max_f32(vacc02, vmin); vacc03 = math_max_f32(vacc03, vmin); vacc10 = math_max_f32(vacc10, vmin); vacc11 = math_max_f32(vacc11, vmin); vacc12 = math_max_f32(vacc12, vmin); vacc13 = math_max_f32(vacc13, vmin); vacc00 = math_min_f32(vacc00, vmax); vacc01 = math_min_f32(vacc01, vmax); vacc02 = math_min_f32(vacc02, vmax); vacc03 = math_min_f32(vacc03, vmax); vacc10 = math_min_f32(vacc10, vmax); vacc11 = math_min_f32(vacc11, vmax); vacc12 = math_min_f32(vacc12, vmax); vacc13 = math_min_f32(vacc13, vmax); if XNN_LIKELY(nc >= 4) { c1[0] = vacc10; c1[1] = vacc11; c1[2] = vacc12; c1[3] = vacc13; c1 = (float*) ((uintptr_t) c1 + cn_stride); c0[0] = vacc00; c0[1] = vacc01; c0[2] = vacc02; c0[3] = vacc03; c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 4; } else { if (nc & 2) { c1[0] = vacc10; c1[1] = vacc11; vacc10 = vacc12; c1 += 2; c0[0] = vacc00; c0[1] = vacc01; vacc00 = vacc02; c0 += 2; } if (nc & 1) { c1[0] = vacc10; c0[0] = vacc00; } nc = 0; } } while (nc != 0); }
3,940
24.927632
75
c
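As a reading aid for the indirection convention shared by the igemm kernels in this set (and followed verbatim by the 2x4 scalar kernel above): `a` is a buffer of input-row pointers, pointers equal to `zero` are left untouched while all others are shifted by `a_offset`, accumulators start from the bias packed at the head of `w`, and the result is clamped to [min, max]. A simplified single-row reference sketch, assuming element (not byte) counts and helper names that are not part of the XNNPACK API:

#include <stddef.h>
#include <stdint.h>

/* One output row of an indirect GEMM tile: c[n] = clamp(bias[n] + sum over
   all (pointer, k) pairs of a_p[k] * packed weight, min, max).  w is
   consumed sequentially: nr bias values, then nr weights per k step, and
   is never rewound across pointer groups. */
static void igemm_reference_row(
    size_t nr,            /* output columns in this tile               */
    size_t kc,            /* reduction length per input pointer        */
    size_t ks,            /* number of input pointers for this row     */
    const float** a,      /* indirection buffer                        */
    size_t a_offset,      /* byte offset applied to non-padding rows   */
    const float* zero,    /* sentinel pointer marking padding rows     */
    const float* w,       /* packed [bias | weights]                   */
    float* c, float min, float max) {
  for (size_t n = 0; n < nr; n++) {
    c[n] = w[n];  /* bias */
  }
  const float* wk = w + nr;
  for (size_t p = 0; p < ks; p++) {
    const float* a0 = a[p];
    if (a0 != zero) {
      a0 = (const float*) ((uintptr_t) a0 + a_offset);
    }
    for (size_t k = 0; k < kc; k++) {
      for (size_t n = 0; n < nr; n++) {
        c[n] += a0[k] * wk[n];
      }
      wk += nr;
    }
  }
  for (size_t n = 0; n < nr; n++) {
    c[n] = c[n] < min ? min : (c[n] > max ? max : c[n]);
  }
}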
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-2x4-minmax-wasm.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/igemm.h> #include <xnnpack/math.h> void xnn_f32_igemm_minmax_ukernel_2x4__wasm( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 2); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (2 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr != 2) { c1 = c0; } const float vmin = params->scalar.min; const float vmax = params->scalar.max; do { float vacc00 = w[0]; float vacc01 = w[1]; float vacc02 = w[2]; float vacc03 = w[3]; float vacc10 = vacc00; float vacc11 = vacc01; float vacc12 = vacc02; float vacc13 = vacc03; w += 4; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } a += 2; size_t k = kc; do { const float va0 = *a0++; const float va1 = *a1++; const float vb0 = w[0]; const float vb1 = w[1]; const float vb2 = w[2]; const float vb3 = w[3]; w += 4; vacc00 = math_muladd_f32(va0, vb0, vacc00); vacc01 = math_muladd_f32(va0, vb1, vacc01); vacc02 = math_muladd_f32(va0, vb2, vacc02); vacc03 = math_muladd_f32(va0, vb3, vacc03); vacc10 = math_muladd_f32(va1, vb0, vacc10); vacc11 = math_muladd_f32(va1, vb1, vacc11); vacc12 = math_muladd_f32(va1, vb2, vacc12); vacc13 = math_muladd_f32(va1, vb3, vacc13); k -= sizeof(float); } while (k != 0); p -= 2 * sizeof(void*); } while (p != 0); vacc00 = __builtin_wasm_max_f32(vacc00, vmin); vacc01 = __builtin_wasm_max_f32(vacc01, vmin); vacc02 = __builtin_wasm_max_f32(vacc02, vmin); vacc03 = __builtin_wasm_max_f32(vacc03, vmin); vacc10 = __builtin_wasm_max_f32(vacc10, vmin); vacc11 = __builtin_wasm_max_f32(vacc11, vmin); vacc12 = __builtin_wasm_max_f32(vacc12, vmin); vacc13 = __builtin_wasm_max_f32(vacc13, vmin); vacc00 = __builtin_wasm_min_f32(vacc00, vmax); vacc01 = __builtin_wasm_min_f32(vacc01, vmax); vacc02 = __builtin_wasm_min_f32(vacc02, vmax); vacc03 = __builtin_wasm_min_f32(vacc03, vmax); vacc10 = __builtin_wasm_min_f32(vacc10, vmax); vacc11 = __builtin_wasm_min_f32(vacc11, vmax); vacc12 = __builtin_wasm_min_f32(vacc12, vmax); vacc13 = __builtin_wasm_min_f32(vacc13, vmax); if XNN_LIKELY(nc >= 4) { c1[0] = vacc10; c1[1] = vacc11; c1[2] = vacc12; c1[3] = vacc13; c1 = (float*) ((uintptr_t) c1 + cn_stride); c0[0] = vacc00; c0[1] = vacc01; c0[2] = vacc02; c0[3] = vacc03; c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 4; } else { if (nc & 2) { c1[0] = vacc10; c1[1] = vacc11; vacc10 = vacc12; c1 += 2; c0[0] = vacc00; c0[1] = vacc01; vacc00 = vacc02; c0 += 2; } if (nc & 1) { c1[0] = vacc10; c0[0] = vacc00; } nc = 0; } } while (nc != 0); }
4,098
25.967105
75
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-2x4-relu-scalar.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/igemm.h> #include <xnnpack/math.h> void xnn_f32_igemm_relu_ukernel_2x4__scalar( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 2); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (2 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr != 2) { c1 = c0; } do { float vacc00 = w[0]; float vacc01 = w[1]; float vacc02 = w[2]; float vacc03 = w[3]; float vacc10 = vacc00; float vacc11 = vacc01; float vacc12 = vacc02; float vacc13 = vacc03; w += 4; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } a += 2; size_t k = kc; do { const float va0 = *a0++; const float va1 = *a1++; const float vb0 = w[0]; const float vb1 = w[1]; const float vb2 = w[2]; const float vb3 = w[3]; w += 4; vacc00 = math_muladd_f32(va0, vb0, vacc00); vacc01 = math_muladd_f32(va0, vb1, vacc01); vacc02 = math_muladd_f32(va0, vb2, vacc02); vacc03 = math_muladd_f32(va0, vb3, vacc03); vacc10 = math_muladd_f32(va1, vb0, vacc10); vacc11 = math_muladd_f32(va1, vb1, vacc11); vacc12 = math_muladd_f32(va1, vb2, vacc12); vacc13 = math_muladd_f32(va1, vb3, vacc13); k -= sizeof(float); } while (k != 0); p -= 2 * sizeof(void*); } while (p != 0); vacc00 = math_max_f32(vacc00, 0.0f); vacc01 = math_max_f32(vacc01, 0.0f); vacc02 = math_max_f32(vacc02, 0.0f); vacc03 = math_max_f32(vacc03, 0.0f); vacc10 = math_max_f32(vacc10, 0.0f); vacc11 = math_max_f32(vacc11, 0.0f); vacc12 = math_max_f32(vacc12, 0.0f); vacc13 = math_max_f32(vacc13, 0.0f); if XNN_LIKELY(nc >= 4) { c1[0] = vacc10; c1[1] = vacc11; c1[2] = vacc12; c1[3] = vacc13; c1 = (float*) ((uintptr_t) c1 + cn_stride); c0[0] = vacc00; c0[1] = vacc01; c0[2] = vacc02; c0[3] = vacc03; c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 4; } else { if (nc & 2) { c1[0] = vacc10; c1[1] = vacc11; vacc10 = vacc12; c1 += 2; c0[0] = vacc00; c0[1] = vacc01; vacc00 = vacc02; c0 += 2; } if (nc & 1) { c1[0] = vacc10; c0[0] = vacc00; } nc = 0; } } while (nc != 0); }
3,525
24.007092
73
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-2x4-relu-wasm.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/igemm.h> #include <xnnpack/math.h> void xnn_f32_igemm_relu_ukernel_2x4__wasm( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 2); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (2 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr != 2) { c1 = c0; } do { float vacc00 = w[0]; float vacc01 = w[1]; float vacc02 = w[2]; float vacc03 = w[3]; float vacc10 = vacc00; float vacc11 = vacc01; float vacc12 = vacc02; float vacc13 = vacc03; w += 4; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } a += 2; size_t k = kc; do { const float va0 = *a0++; const float va1 = *a1++; const float vb0 = w[0]; const float vb1 = w[1]; const float vb2 = w[2]; const float vb3 = w[3]; w += 4; vacc00 = math_muladd_f32(va0, vb0, vacc00); vacc01 = math_muladd_f32(va0, vb1, vacc01); vacc02 = math_muladd_f32(va0, vb2, vacc02); vacc03 = math_muladd_f32(va0, vb3, vacc03); vacc10 = math_muladd_f32(va1, vb0, vacc10); vacc11 = math_muladd_f32(va1, vb1, vacc11); vacc12 = math_muladd_f32(va1, vb2, vacc12); vacc13 = math_muladd_f32(va1, vb3, vacc13); k -= sizeof(float); } while (k != 0); p -= 2 * sizeof(void*); } while (p != 0); vacc00 = __builtin_wasm_max_f32(vacc00, 0.0f); vacc01 = __builtin_wasm_max_f32(vacc01, 0.0f); vacc02 = __builtin_wasm_max_f32(vacc02, 0.0f); vacc03 = __builtin_wasm_max_f32(vacc03, 0.0f); vacc10 = __builtin_wasm_max_f32(vacc10, 0.0f); vacc11 = __builtin_wasm_max_f32(vacc11, 0.0f); vacc12 = __builtin_wasm_max_f32(vacc12, 0.0f); vacc13 = __builtin_wasm_max_f32(vacc13, 0.0f); if XNN_LIKELY(nc >= 4) { c1[0] = vacc10; c1[1] = vacc11; c1[2] = vacc12; c1[3] = vacc13; c1 = (float*) ((uintptr_t) c1 + cn_stride); c0[0] = vacc00; c0[1] = vacc01; c0[2] = vacc02; c0[3] = vacc03; c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 4; } else { if (nc & 2) { c1[0] = vacc10; c1[1] = vacc11; vacc10 = vacc12; c1 += 2; c0[0] = vacc00; c0[1] = vacc01; vacc00 = vacc02; c0 += 2; } if (nc & 1) { c1[0] = vacc10; c0[0] = vacc00; } nc = 0; } } while (nc != 0); }
3,603
24.560284
73
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-2x4-scalar.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/igemm.h> #include <xnnpack/math.h> void xnn_f32_igemm_ukernel_2x4__scalar( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 2); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (2 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr != 2) { c1 = c0; } do { float vacc00 = w[0]; float vacc01 = w[1]; float vacc02 = w[2]; float vacc03 = w[3]; float vacc10 = vacc00; float vacc11 = vacc01; float vacc12 = vacc02; float vacc13 = vacc03; w += 4; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } a += 2; size_t k = kc; do { const float va0 = *a0++; const float va1 = *a1++; const float vb0 = w[0]; const float vb1 = w[1]; const float vb2 = w[2]; const float vb3 = w[3]; w += 4; vacc00 = math_muladd_f32(va0, vb0, vacc00); vacc01 = math_muladd_f32(va0, vb1, vacc01); vacc02 = math_muladd_f32(va0, vb2, vacc02); vacc03 = math_muladd_f32(va0, vb3, vacc03); vacc10 = math_muladd_f32(va1, vb0, vacc10); vacc11 = math_muladd_f32(va1, vb1, vacc11); vacc12 = math_muladd_f32(va1, vb2, vacc12); vacc13 = math_muladd_f32(va1, vb3, vacc13); k -= sizeof(float); } while (k != 0); p -= 2 * sizeof(void*); } while (p != 0); if XNN_LIKELY(nc >= 4) { c1[0] = vacc10; c1[1] = vacc11; c1[2] = vacc12; c1[3] = vacc13; c1 = (float*) ((uintptr_t) c1 + cn_stride); c0[0] = vacc00; c0[1] = vacc01; c0[2] = vacc02; c0[3] = vacc03; c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 4; } else { if (nc & 2) { c1[0] = vacc10; c1[1] = vacc11; vacc10 = vacc12; c1 += 2; c0[0] = vacc00; c0[1] = vacc01; vacc00 = vacc02; c0 += 2; } if (nc & 1) { c1[0] = vacc10; c0[0] = vacc00; } nc = 0; } } while (nc != 0); }
3,195
23.030075
76
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x16-minmax-avx-broadcast.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/avx-broadcast.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_3x16__avx_broadcast( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { __m256 vacc0x01234567 = _mm256_load_ps(w); __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8); __m256 vacc1x01234567 = vacc0x01234567; __m256 vacc1x89ABCDEF = vacc0x89ABCDEF; __m256 vacc2x01234567 = vacc0x01234567; __m256 vacc2x89ABCDEF = vacc0x89ABCDEF; w += 16; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; do { const __m256 vb01234567 = _mm256_load_ps(w); const __m256 vb89ABCDEF = _mm256_load_ps(w + 8); w += 16; const __m256 va0 = _mm256_broadcast_ss(a0); a0 += 1; const __m256 va1 = _mm256_broadcast_ss(a1); a1 += 1; const __m256 va2 = _mm256_broadcast_ss(a2); a2 += 1; vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567)); vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF)); vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567)); vacc1x89ABCDEF = _mm256_add_ps(vacc1x89ABCDEF, _mm256_mul_ps(va1, vb89ABCDEF)); vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567)); vacc2x89ABCDEF = _mm256_add_ps(vacc2x89ABCDEF, _mm256_mul_ps(va2, vb89ABCDEF)); k -= sizeof(float); } while (k != 0); p -= 3 * sizeof(void*); } while (p != 0); const __m256 vmin = _mm256_load_ps(params->avx.min); vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567); vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567); vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567); vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF); vacc1x89ABCDEF = _mm256_max_ps(vmin, vacc1x89ABCDEF); vacc2x89ABCDEF = _mm256_max_ps(vmin, vacc2x89ABCDEF); const __m256 vmax = _mm256_load_ps(params->avx.max); vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567); vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567); vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567); vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF); vacc1x89ABCDEF = _mm256_min_ps(vmax, vacc1x89ABCDEF); vacc2x89ABCDEF = _mm256_min_ps(vmax, vacc2x89ABCDEF); if XNN_LIKELY(nc >= 16) { _mm256_storeu_ps(c2, vacc2x01234567); _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF); c2 = (float*) ((uintptr_t) 
c2 + cn_stride); _mm256_storeu_ps(c1, vacc1x01234567); _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF); c1 = (float*) ((uintptr_t) c1 + cn_stride); _mm256_storeu_ps(c0, vacc0x01234567); _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 16; } else { if (nc & 8) { _mm256_storeu_ps(c2, vacc2x01234567); _mm256_storeu_ps(c1, vacc1x01234567); _mm256_storeu_ps(c0, vacc0x01234567); vacc2x01234567 = vacc2x89ABCDEF; vacc1x01234567 = vacc1x89ABCDEF; vacc0x01234567 = vacc0x89ABCDEF; c2 += 8; c1 += 8; c0 += 8; } __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567); __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567); __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567); if (nc & 4) { _mm_storeu_ps(c2, vacc2x0123); _mm_storeu_ps(c1, vacc1x0123); _mm_storeu_ps(c0, vacc0x0123); vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1); vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1); vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1); c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { _mm_storel_pi((__m64*) c2, vacc2x0123); _mm_storel_pi((__m64*) c1, vacc1x0123); _mm_storel_pi((__m64*) c0, vacc0x0123); vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123); vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123); vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { _mm_store_ss(c2, vacc2x0123); _mm_store_ss(c1, vacc1x0123); _mm_store_ss(c0, vacc0x0123); } nc = 0; } } while (nc != 0); }
5,943
30.786096
87
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x16-minmax-fma3-broadcast.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/avx-broadcast.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_3x16__fma3_broadcast( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { __m256 vacc0x01234567 = _mm256_load_ps(w); __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8); __m256 vacc1x01234567 = vacc0x01234567; __m256 vacc1x89ABCDEF = vacc0x89ABCDEF; __m256 vacc2x01234567 = vacc0x01234567; __m256 vacc2x89ABCDEF = vacc0x89ABCDEF; w += 16; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; do { const __m256 vb01234567 = _mm256_load_ps(w); const __m256 vb89ABCDEF = _mm256_load_ps(w + 8); w += 16; const __m256 va0 = _mm256_broadcast_ss(a0); a0 += 1; const __m256 va1 = _mm256_broadcast_ss(a1); a1 += 1; const __m256 va2 = _mm256_broadcast_ss(a2); a2 += 1; vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567); vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF); vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567); vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF); vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567); vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF); k -= sizeof(float); } while (k != 0); p -= 3 * sizeof(void*); } while (p != 0); const __m256 vmin = _mm256_load_ps(params->avx.min); vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567); vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567); vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567); vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF); vacc1x89ABCDEF = _mm256_max_ps(vmin, vacc1x89ABCDEF); vacc2x89ABCDEF = _mm256_max_ps(vmin, vacc2x89ABCDEF); const __m256 vmax = _mm256_load_ps(params->avx.max); vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567); vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567); vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567); vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF); vacc1x89ABCDEF = _mm256_min_ps(vmax, vacc1x89ABCDEF); vacc2x89ABCDEF = _mm256_min_ps(vmax, vacc2x89ABCDEF); if XNN_LIKELY(nc >= 16) { _mm256_storeu_ps(c2, vacc2x01234567); _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF); c2 = (float*) ((uintptr_t) c2 + cn_stride); _mm256_storeu_ps(c1, vacc1x01234567); _mm256_storeu_ps(c1 + 
8, vacc1x89ABCDEF); c1 = (float*) ((uintptr_t) c1 + cn_stride); _mm256_storeu_ps(c0, vacc0x01234567); _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 16; } else { if (nc & 8) { _mm256_storeu_ps(c2, vacc2x01234567); _mm256_storeu_ps(c1, vacc1x01234567); _mm256_storeu_ps(c0, vacc0x01234567); vacc2x01234567 = vacc2x89ABCDEF; vacc1x01234567 = vacc1x89ABCDEF; vacc0x01234567 = vacc0x89ABCDEF; c2 += 8; c1 += 8; c0 += 8; } __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567); __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567); __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567); if (nc & 4) { _mm_storeu_ps(c2, vacc2x0123); _mm_storeu_ps(c1, vacc1x0123); _mm_storeu_ps(c0, vacc0x0123); vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1); vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1); vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1); c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { _mm_storel_pi((__m64*) c2, vacc2x0123); _mm_storel_pi((__m64*) c1, vacc1x0123); _mm_storel_pi((__m64*) c0, vacc0x0123); vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123); vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123); vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { _mm_store_ss(c2, vacc2x0123); _mm_store_ss(c1, vacc1x0123); _mm_store_ss(c0, vacc0x0123); } nc = 0; } } while (nc != 0); }
5,866
30.374332
75
c
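The 3x16 avx kernel above accumulates with a separate multiply and add, while the otherwise identical fma3 kernel fuses the two into _mm256_fmadd_ps, performing one rounding per step instead of two. A scalar sketch of the distinction using fmaf from <math.h>; the helper names are illustrative only, and note that a compiler may contract the unfused form into an fma unless FP_CONTRACT is disabled:

#include <math.h>

/* Two roundings per step: the product is rounded, then the sum is rounded. */
static float step_mul_add(float a, float b, float acc) {
  return acc + a * b;
}

/* One rounding per step: product and sum are formed exactly, then rounded
   once, as in the fma3 kernel's _mm256_fmadd_ps. */
static float step_fused(float a, float b, float acc) {
  return fmaf(a, b, acc);
}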
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8-minmax-sse-dup.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/sse-dup.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xmmintrin.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_3x8__sse_dup( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { __m128 vacc0x0123 = _mm_load_ps(w); __m128 vacc0x4567 = _mm_load_ps(w + 4); __m128 vacc1x0123 = vacc0x0123; __m128 vacc1x4567 = vacc0x4567; __m128 vacc2x0123 = vacc0x0123; __m128 vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 4 * sizeof(float)) { const __m128 va0 = _mm_loadu_ps(a0); a0 += 4; const __m128 va1 = _mm_loadu_ps(a1); a1 += 4; const __m128 va2 = _mm_loadu_ps(a2); a2 += 4; const __m128 va0c0000 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 0, 0, 0)); const __m128 va1c0000 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 0, 0, 0)); const __m128 va2c0000 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 0, 0, 0)); const __m128 vb0123c0 = _mm_load_ps(w + 0); const __m128 vb4567c0 = _mm_load_ps(w + 4); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0)); vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0)); vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0)); vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0)); vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0)); const __m128 va0c1111 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(1, 1, 1, 1)); const __m128 va1c1111 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(1, 1, 1, 1)); const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1)); const __m128 vb0123c1 = _mm_load_ps(w + 8); const __m128 vb4567c1 = _mm_load_ps(w + 12); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1)); vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1)); vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1)); vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1)); vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1)); const __m128 va0c2222 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(2, 2, 2, 2)); const __m128 va1c2222 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(2, 2, 2, 
2)); const __m128 va2c2222 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(2, 2, 2, 2)); const __m128 vb0123c2 = _mm_load_ps(w + 16); const __m128 vb4567c2 = _mm_load_ps(w + 20); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2)); vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2)); vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2)); vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2)); vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2)); const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3)); const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3)); const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3)); const __m128 vb0123c3 = _mm_load_ps(w + 24); const __m128 vb4567c3 = _mm_load_ps(w + 28); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3)); vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3)); vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3)); vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3)); vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3)); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { do { const __m128 vb0123 = _mm_load_ps(w); const __m128 vb4567 = _mm_load_ps(w + 4); w += 8; const __m128 va0 = _mm_load1_ps(a0); a0 += 1; const __m128 va1 = _mm_load1_ps(a1); a1 += 1; const __m128 va2 = _mm_load1_ps(a2); a2 += 1; vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567)); vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123)); vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567)); vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123)); vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567)); k -= sizeof(float); } while (k != 0); } p -= 3 * sizeof(void*); } while (p != 0); const __m128 vmax = _mm_load_ps(params->sse.max); vacc0x0123 = _mm_min_ps(vacc0x0123, vmax); vacc1x0123 = _mm_min_ps(vacc1x0123, vmax); vacc2x0123 = _mm_min_ps(vacc2x0123, vmax); vacc0x4567 = _mm_min_ps(vacc0x4567, vmax); vacc1x4567 = _mm_min_ps(vacc1x4567, vmax); vacc2x4567 = _mm_min_ps(vacc2x4567, vmax); const __m128 vmin = _mm_load_ps(params->sse.min); vacc0x0123 = _mm_max_ps(vacc0x0123, vmin); vacc1x0123 = _mm_max_ps(vacc1x0123, vmin); vacc2x0123 = _mm_max_ps(vacc2x0123, vmin); vacc0x4567 = _mm_max_ps(vacc0x4567, vmin); vacc1x4567 = _mm_max_ps(vacc1x4567, vmin); vacc2x4567 = _mm_max_ps(vacc2x4567, vmin); if XNN_LIKELY(nc >= 8) { _mm_storeu_ps(c2, vacc2x0123); _mm_storeu_ps(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); _mm_storeu_ps(c1, vacc1x0123); _mm_storeu_ps(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); _mm_storeu_ps(c0, vacc0x0123); _mm_storeu_ps(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { _mm_storeu_ps(c2, vacc2x0123); _mm_storeu_ps(c1, vacc1x0123); _mm_storeu_ps(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { _mm_storel_pi((__m64*) c2, vacc2x0123); _mm_storel_pi((__m64*) c1, vacc1x0123); _mm_storel_pi((__m64*) c0, vacc0x0123); vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123); vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123); 
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { _mm_store_ss(c2, vacc2x0123); _mm_store_ss(c1, vacc1x0123); _mm_store_ss(c0, vacc0x0123); } nc = 0; } } while (nc != 0); }
8,564
34.539419
82
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8-minmax-sse-load1.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/sse-load1.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xmmintrin.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_3x8__sse_load1( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { __m128 vacc0x0123 = _mm_load_ps(w); __m128 vacc0x4567 = _mm_load_ps(w + 4); __m128 vacc1x0123 = vacc0x0123; __m128 vacc1x4567 = vacc0x4567; __m128 vacc2x0123 = vacc0x0123; __m128 vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; do { const __m128 vb0123 = _mm_load_ps(w); const __m128 vb4567 = _mm_load_ps(w + 4); w += 8; const __m128 va0 = _mm_load1_ps(a0); a0 += 1; const __m128 va1 = _mm_load1_ps(a1); a1 += 1; const __m128 va2 = _mm_load1_ps(a2); a2 += 1; vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567)); vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123)); vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567)); vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123)); vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567)); k -= sizeof(float); } while (k != 0); p -= 3 * sizeof(void*); } while (p != 0); const __m128 vmax = _mm_load_ps(params->sse.max); vacc0x0123 = _mm_min_ps(vacc0x0123, vmax); vacc1x0123 = _mm_min_ps(vacc1x0123, vmax); vacc2x0123 = _mm_min_ps(vacc2x0123, vmax); vacc0x4567 = _mm_min_ps(vacc0x4567, vmax); vacc1x4567 = _mm_min_ps(vacc1x4567, vmax); vacc2x4567 = _mm_min_ps(vacc2x4567, vmax); const __m128 vmin = _mm_load_ps(params->sse.min); vacc0x0123 = _mm_max_ps(vacc0x0123, vmin); vacc1x0123 = _mm_max_ps(vacc1x0123, vmin); vacc2x0123 = _mm_max_ps(vacc2x0123, vmin); vacc0x4567 = _mm_max_ps(vacc0x4567, vmin); vacc1x4567 = _mm_max_ps(vacc1x4567, vmin); vacc2x4567 = _mm_max_ps(vacc2x4567, vmin); if XNN_LIKELY(nc >= 8) { _mm_storeu_ps(c2, vacc2x0123); _mm_storeu_ps(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); _mm_storeu_ps(c1, vacc1x0123); _mm_storeu_ps(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); _mm_storeu_ps(c0, vacc0x0123); _mm_storeu_ps(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { _mm_storeu_ps(c2, vacc2x0123); 
_mm_storeu_ps(c1, vacc1x0123); _mm_storeu_ps(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { _mm_storel_pi((__m64*) c2, vacc2x0123); _mm_storel_pi((__m64*) c1, vacc1x0123); _mm_storel_pi((__m64*) c0, vacc0x0123); vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123); vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123); vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { _mm_store_ss(c2, vacc2x0123); _mm_store_ss(c1, vacc1x0123); _mm_store_ss(c0, vacc0x0123); } nc = 0; } } while (nc != 0); }
4,931
27.842105
75
c
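The sse-dup kernel above differs from the sse-load1 kernel only in how activations reach the multiplier: load1 broadcasts one scalar per k step, while dup loads four activations at once and reuses each lane against four consecutive panels of packed weights, trading shuffles for fewer loads. A plain-C sketch of the dup-style inner block, with illustrative names only:

#include <stddef.h>

/* Process four k steps from one 4-element activation group; w holds four
   consecutive nr-wide weight panels, one per k step, matching the packing
   consumed by the dup kernels above. */
static void dup_style_block(
    const float a_group[4], const float* w, float* acc, size_t nr) {
  for (size_t c = 0; c < 4; c++) {
    const float ac = a_group[c];   /* scalar stand-in for the lane broadcast */
    for (size_t n = 0; n < nr; n++) {
      acc[n] += ac * w[c * nr + n];
    }
  }
}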
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8-minmax-sse2-dup.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/sse-dup.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <emmintrin.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { __m128 vacc0x0123 = _mm_load_ps(w); __m128 vacc0x4567 = _mm_load_ps(w + 4); __m128 vacc1x0123 = vacc0x0123; __m128 vacc1x4567 = vacc0x4567; __m128 vacc2x0123 = vacc0x0123; __m128 vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 4 * sizeof(float)) { const __m128 va0 = _mm_loadu_ps(a0); a0 += 4; const __m128 va1 = _mm_loadu_ps(a1); a1 += 4; const __m128 va2 = _mm_loadu_ps(a2); a2 += 4; const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0))); const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0))); const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0))); const __m128 vb0123c0 = _mm_load_ps(w + 0); const __m128 vb4567c0 = _mm_load_ps(w + 4); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0)); vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0)); vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0)); vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0)); vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0)); const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1))); const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1))); const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1))); const __m128 vb0123c1 = _mm_load_ps(w + 8); const __m128 vb4567c1 = _mm_load_ps(w + 12); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1)); vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1)); vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1)); vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1)); vacc2x4567 = 
_mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1)); const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2))); const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2))); const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2))); const __m128 vb0123c2 = _mm_load_ps(w + 16); const __m128 vb4567c2 = _mm_load_ps(w + 20); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2)); vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2)); vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2)); vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2)); vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2)); const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3)); const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3)); const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3)); const __m128 vb0123c3 = _mm_load_ps(w + 24); const __m128 vb4567c3 = _mm_load_ps(w + 28); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3)); vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3)); vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3)); vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3)); vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3)); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { do { const __m128 vb0123 = _mm_load_ps(w); const __m128 vb4567 = _mm_load_ps(w + 4); w += 8; const __m128 va0 = _mm_load1_ps(a0); a0 += 1; const __m128 va1 = _mm_load1_ps(a1); a1 += 1; const __m128 va2 = _mm_load1_ps(a2); a2 += 1; vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567)); vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123)); vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567)); vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123)); vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567)); k -= sizeof(float); } while (k != 0); } p -= 3 * sizeof(void*); } while (p != 0); const __m128 vmax = _mm_load_ps(params->sse.max); vacc0x0123 = _mm_min_ps(vacc0x0123, vmax); vacc1x0123 = _mm_min_ps(vacc1x0123, vmax); vacc2x0123 = _mm_min_ps(vacc2x0123, vmax); vacc0x4567 = _mm_min_ps(vacc0x4567, vmax); vacc1x4567 = _mm_min_ps(vacc1x4567, vmax); vacc2x4567 = _mm_min_ps(vacc2x4567, vmax); const __m128 vmin = _mm_load_ps(params->sse.min); vacc0x0123 = _mm_max_ps(vacc0x0123, vmin); vacc1x0123 = _mm_max_ps(vacc1x0123, vmin); vacc2x0123 = _mm_max_ps(vacc2x0123, vmin); vacc0x4567 = _mm_max_ps(vacc0x4567, vmin); vacc1x4567 = _mm_max_ps(vacc1x4567, vmin); vacc2x4567 = _mm_max_ps(vacc2x4567, vmin); if XNN_LIKELY(nc >= 8) { _mm_storeu_ps(c2, vacc2x0123); _mm_storeu_ps(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); _mm_storeu_ps(c1, vacc1x0123); _mm_storeu_ps(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); _mm_storeu_ps(c0, vacc0x0123); _mm_storeu_ps(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { _mm_storeu_ps(c2, vacc2x0123); _mm_storeu_ps(c1, vacc1x0123); _mm_storeu_ps(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = 
vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { _mm_storel_pi((__m64*) c2, vacc2x0123); _mm_storel_pi((__m64*) c1, vacc1x0123); _mm_storel_pi((__m64*) c0, vacc0x0123); vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123); vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123); vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { _mm_store_ss(c2, vacc2x0123); _mm_store_ss(c1, vacc1x0123); _mm_store_ss(c0, vacc0x0123); } nc = 0; } } while (nc != 0); }
8,871
35.813278
116
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8-minmax-wasmrelaxedsimd-fma-loadsplat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-loadsplat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_3x8__wasmrelaxedsimd_fma_loadsplat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567); k -= sizeof(float); } while (k != 0); p -= 3 * sizeof(void*); } while (p != 0); vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567); vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567); if XNN_LIKELY(nc >= 8) { 
wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
5,462
30.947368
80
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8-minmax-wasmrelaxedsimd-fma-splat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-splat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_3x8__wasmrelaxedsimd_fma_splat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 4 * sizeof(float)) { const v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t va1 = wasm_v128_load(a1); a1 += 4; const v128_t va2 = wasm_v128_load(a2); a2 += 4; const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0); const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0); const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567); const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1); const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1); const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567); vacc2x4567 = 
__builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567); const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2); const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2); const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567); const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3); const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3); const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567); k -= sizeof(float); } while (k != 0); } p -= 3 * sizeof(void*); } while (p != 0); vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567); vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a 
= (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
9,165
37.675105
84
c
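For reference, the kernel above computes, for each of up to three rows m and eight output channels n, c[m][n] = clamp(bias[n] + sum_k a_m[k]*w_k[n], min, max), where the row pointers come from the indirection buffer `a` and `w` is packed as 8 biases followed by 8 weights per k element. Below is a minimal scalar sketch of that contract; it is illustrative only — the helper name and the assumption nc == 8 are mine, and the packed layout is inferred from the kernel's loads.

#include <stddef.h>
#include <stdint.h>

// Hypothetical scalar reference for the 3x8 minmax IGEMM above; assumes nc == 8 and mr == 3.
static void igemm_3x8_minmax_ref(
    size_t kc /* bytes */, size_t ks /* bytes */,
    const float** a, const float* w, float* c, size_t cm_stride /* bytes */,
    size_t a_offset /* bytes */, const float* zero, float vmin, float vmax)
{
  float acc[3][8];
  for (size_t m = 0; m < 3; m++) {
    for (size_t n = 0; n < 8; n++) {
      acc[m][n] = w[n];  // packed biases come first
    }
  }
  const float* wk = w + 8;  // then 8 weights per k element
  for (size_t p = 0; p < ks / (3 * sizeof(void*)); p++) {
    const float* am[3];
    for (size_t m = 0; m < 3; m++) {
      am[m] = a[p * 3 + m];
      if (am[m] != zero) {  // rows pointing at the zero buffer are not offset
        am[m] = (const float*) ((uintptr_t) am[m] + a_offset);
      }
    }
    for (size_t k = 0; k < kc / sizeof(float); k++) {
      for (size_t m = 0; m < 3; m++) {
        for (size_t n = 0; n < 8; n++) {
          acc[m][n] += am[m][k] * wk[n];
        }
      }
      wk += 8;
    }
  }
  for (size_t m = 0; m < 3; m++) {
    float* cm = (float*) ((uintptr_t) c + m * cm_stride);
    for (size_t n = 0; n < 8; n++) {
      float v = acc[m][n];
      v = v < vmin ? vmin : v;
      v = v > vmax ? vmax : v;
      cm[n] = v;
    }
  }
}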
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8-minmax-wasmrelaxedsimd-loadsplat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-loadsplat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_3x8__wasmrelaxedsimd_loadsplat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567); k -= sizeof(float); } while (k != 0); p -= 3 * sizeof(void*); } while (p != 0); vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567); vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); 
wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
5,440
30.818713
77
c
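Both minmax variants above fetch their clamping bounds with wasm_v128_load64_splat, which reads 64 bits — here a pair of equal floats — and repeats that pair across the whole vector, so every f32 lane ends up holding the same bound. A tiny illustration, using a local array that stands in for params->wasmsimd.min (whose exact declaration is not part of this file):

#include <wasm_simd128.h>

// Illustrative only: how a duplicated float pair becomes a 4-lane broadcast.
static v128_t splat_min_bound(const float bound_pair[2]) {
  // result = { bound_pair[0], bound_pair[1], bound_pair[0], bound_pair[1] };
  // with bound_pair[0] == bound_pair[1], all four f32 lanes are identical.
  return wasm_v128_load64_splat(bound_pair);
}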
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8-minmax-wasmrelaxedsimd-splat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-splat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_3x8__wasmrelaxedsimd_splat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 4 * sizeof(float)) { const v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t va1 = wasm_v128_load(a1); a1 += 4; const v128_t va2 = wasm_v128_load(a2); a2 += 4; const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0); const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0); const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567); const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1); const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1); const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, 
vb4567c1), vacc2x4567); const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2); const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2); const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567); const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3); const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3); const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567); k -= sizeof(float); } while (k != 0); } p -= 3 * sizeof(void*); } while (p != 0); vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567); vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { 
wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
9,071
37.278481
81
c
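The `-splat` kernels above amortize activation loads by reading four floats per row at once and then broadcasting one lane per multiply round with wasm_v32x4_shuffle(va, va, c, c, c, c); the `-loadsplat` kernels instead issue one wasm_v128_load32_splat per element. A minimal check of the shuffle-broadcast idiom (names are illustrative):

#include <assert.h>
#include <wasm_simd128.h>

// Illustrative only: lane broadcast via shuffle, as used for va0c0..va0c3 above.
static void splat_demo(void) {
  const float a[4] = { 1.0f, 2.0f, 3.0f, 4.0f };
  const v128_t va = wasm_v128_load(a);
  // Broadcast lane 2 to every lane: { 3, 3, 3, 3 }.
  const v128_t va_c2 = wasm_v32x4_shuffle(va, va, 2, 2, 2, 2);
  assert(wasm_f32x4_extract_lane(va_c2, 0) == 3.0f);
  assert(wasm_f32x4_extract_lane(va_c2, 3) == 3.0f);
}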
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8-minmax-wasmsimd-arm-loadsplat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-loadsplat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567); k -= sizeof(float); } while (k != 0); p -= 3 * sizeof(void*); } while (p != 0); vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123); vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123); vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123); vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567); vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567); vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567); vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123); vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123); vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123); vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567); vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567); vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, 
vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
5,221
29.538012
77
c
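This `_arm_` variant differs from the `_x86_` variants further down only in the clamp: wasm_f32x4_max/min follow the fully specified WebAssembly semantics (a NaN in either operand produces NaN), which lower to single instructions on ARM but to longer sequences on x86, whereas wasm_f32x4_pmax/pmin mirror x86's maxps/minps operand ordering. Scalar models of the two, ignoring signed-zero details (illustrative):

#include <math.h>

// Illustrative scalar models of the two clamp flavours used by these kernels.
static float wasm_max_model(float a, float b) {
  // wasm_f32x4_max: a NaN in either operand propagates to the result.
  if (isnan(a) || isnan(b)) return NAN;
  return a > b ? a : b;
}

static float wasm_pmax_model(float a, float b) {
  // wasm_f32x4_pmax(a, b) is (b < a) ? a : b, i.e. x86 maxps ordering;
  // with a == vmin, a NaN accumulator b is passed through unchanged.
  return (b < a) ? a : b;
}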
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8-relu-wasmsimd-splat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-splat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_relu_ukernel_3x8__wasmsimd_splat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 4 * sizeof(float)) { const v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t va1 = wasm_v128_load(a1); a1 += 4; const v128_t va2 = wasm_v128_load(a2); a2 += 4; const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0); const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0); const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567); const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1); const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1); const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567); const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2); const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 
2); const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567); const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3); const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3); const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567); k -= sizeof(float); } while (k != 0); } p -= 3 * sizeof(void*); } while (p != 0); const v128_t vzero = wasm_i32x4_const_splat(0); vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero); vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero); vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero); vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero); vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero); vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { 
wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
8,459
35.943231
81
c
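The `relu` kernels clamp with wasm_i32x4_max against an all-zero vector instead of a floating-point max: any float whose sign bit is set (negative values, -0.0f) reinterprets as a negative int32 and is replaced by the bit pattern of +0.0f, while non-negative floats are non-negative as int32 and pass through bit-exactly. A scalar sketch of the trick (illustrative):

#include <stdint.h>
#include <string.h>

// Illustrative scalar model of the i32x4_max-with-zero ReLU used above.
static float relu_via_int_max(float x) {
  int32_t bits;
  memcpy(&bits, &x, sizeof(bits));
  // Sign bit set => negative as signed int32 => replaced by the bits of +0.0f;
  // non-negative values are preserved exactly.
  if (bits < 0) bits = 0;
  float y;
  memcpy(&y, &bits, sizeof(y));
  return y;
}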
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8-minmax-wasmsimd-arm-splat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-splat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 4 * sizeof(float)) { const v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t va1 = wasm_v128_load(a1); a1 += 4; const v128_t va2 = wasm_v128_load(a2); a2 += 4; const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0); const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0); const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567); const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1); const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1); const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, 
vb4567c1), vacc2x4567); const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2); const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2); const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567); const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3); const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3); const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567); k -= sizeof(float); } while (k != 0); } p -= 3 * sizeof(void*); } while (p != 0); vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123); vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123); vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123); vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567); vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567); vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567); vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123); vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123); vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123); vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567); vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567); vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { 
wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
8,852
36.35443
81
c
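One detail shared by every kernel in this group: when mr < 3, the unused output-row pointers are aliased onto a lower row rather than guarded by branches in the store path. Because rows are stored in the order c2, c1, c0, the genuine row 0 (and row 1, when present) is written last and overwrites whatever the aliased extra rows stored at the same address. The setup, repeated from the kernels above with explanatory comments (illustrative):

  // mr == 1: c1 and c2 both alias c0; mr == 2: c2 aliases c1.
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;  // row 1 not requested: redirect its stores onto row 0
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;  // row 2 not requested: redirect its stores onto row 1 (or row 0)
  }
  // Stores later happen in the order c2, c1, c0, so the valid rows win.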
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8-minmax-wasmsimd-x86-loadsplat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-loadsplat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567); k -= sizeof(float); } while (k != 0); p -= 3 * sizeof(void*); } while (p != 0); vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123); vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123); vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123); vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567); vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567); vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567); vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123); vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123); vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123); vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567); vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567); vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); 
wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
5,233
29.608187
77
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8-minmax-wasmsimd-x86-splat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-splat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 4 * sizeof(float)) { const v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t va1 = wasm_v128_load(a1); a1 += 4; const v128_t va2 = wasm_v128_load(a2); a2 += 4; const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0); const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0); const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567); const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1); const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1); const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, 
vb4567c1), vacc2x4567); const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2); const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2); const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567); const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3); const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3); const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567); k -= sizeof(float); } while (k != 0); } p -= 3 * sizeof(void*); } while (p != 0); vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123); vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123); vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123); vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567); vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567); vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567); vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123); vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123); vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123); vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567); vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567); vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { 
wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
8,864
36.405063
81
c
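All of these kernels finish a column block with the same tail-store cascade when fewer than eight output channels remain: store four lanes, then two, then one, shifting the surviving values down after each partial store. A scalar equivalent for a single row (illustrative):

#include <stddef.h>
#include <string.h>

// Illustrative only: scalar equivalent of the nc & 4 / nc & 2 / nc & 1 stores
// for one output row holding 8 accumulated values.
static void store_row_tail(float* c, const float acc[8], size_t nc) {
  float lo[4], hi[4];
  memcpy(lo, acc, sizeof(lo));
  memcpy(hi, acc + 4, sizeof(hi));
  if (nc & 4) {
    memcpy(c, lo, 4 * sizeof(float));   // wasm_v128_store
    memcpy(lo, hi, sizeof(lo));         // vacc..0123 = vacc..4567
    c += 4;
  }
  if (nc & 2) {
    memcpy(c, lo, 2 * sizeof(float));   // wasm_v128_store64_lane(..., 0)
    lo[0] = lo[2]; lo[1] = lo[3];       // shift upper half down (wasm_v64x2_shuffle)
    c += 2;
  }
  if (nc & 1) {
    c[0] = lo[0];                       // wasm_v128_store32_lane(..., 0)
  }
}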
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8-relu-wasmrelaxedsimd-fma-loadsplat.c
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>


void xnn_f32_igemm_relu_ukernel_3x8__wasmrelaxedsimd_fma_loadsplat(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float** restrict a,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  do {
    v128_t vacc0x0123 = wasm_v128_load(w);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    w += 8;

    size_t p = ks;
    do {
      const float* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const float*) ((uintptr_t) a0 + a_offset);
      }
      const float* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const float*) ((uintptr_t) a1 + a_offset);
      }
      const float* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const float*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;
      do {
        const v128_t vb0123 = wasm_v128_load(w);
        const v128_t vb4567 = wasm_v128_load(w + 4);
        w += 8;

        const v128_t va0 = wasm_v128_load32_splat(a0);
        a0 += 1;
        const v128_t va1 = wasm_v128_load32_splat(a1);
        a1 += 1;
        const v128_t va2 = wasm_v128_load32_splat(a2);
        a2 += 1;

        vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
        vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
        vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
        vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
        vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
        vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
        k -= sizeof(float);
      } while (k != 0);
      p -= 3 * sizeof(void*);
    } while (p != 0);

    const v128_t vzero = wasm_i32x4_const_splat(0);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
    vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
    vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
    vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);

    if XNN_LIKELY(nc >= 8) {
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a = (const float**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      if (nc & 4) {
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        wasm_v128_store64_lane(c2, vacc2x0123, 0);
        wasm_v128_store64_lane(c1, vacc1x0123, 0);
        wasm_v128_store64_lane(c0, vacc0x0123, 0);

        vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
        vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
        vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);

        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        wasm_v128_store32_lane(c2, vacc2x0123, 0);
        wasm_v128_store32_lane(c1, vacc1x0123, 0);
        wasm_v128_store32_lane(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
4,857
28.803681
80
c
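A hedged end-to-end sketch of how one of these micro-kernels might be driven directly, using the ReLU load-splat variant above. The packed-weight layout (8 biases, then 8 weights per k element) and the indirection-buffer shape are inferred from the kernel's loads, and the params argument is passed but never read by this kernel; in XNNPACK proper the packing and dispatch are handled by the library's operator setup, so treat this as illustrative only.

#include <stddef.h>

#include <xnnpack/igemm.h>

// Illustrative driver, not how XNNPACK invokes its micro-kernels internally.
// mr = 3 rows, nc = 8 output channels, kc = 2 input channels, a single
// indirection step (ks = 3 pointers).
void igemm_relu_example(float out[3][8]) {
  static const float a0[2] = { 1.0f, 2.0f };
  static const float a1[2] = { 3.0f, 4.0f };
  static const float a2[2] = { 5.0f, 6.0f };
  const float* a[3] = { a0, a1, a2 };
  static const float zero_buf[2] = { 0.0f, 0.0f };  // stands in for padded rows
  static const float w[8 + 2 * 8] = {
    /* bias  */ -10.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    /* k = 0 */   1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
    /* k = 1 */   1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
  };
  union xnn_f32_relu_params params;  // never read by this kernel, but must be non-NULL
  xnn_f32_igemm_relu_ukernel_3x8__wasmrelaxedsimd_fma_loadsplat(
      /*mr=*/3, /*nc=*/8, /*kc=*/2 * sizeof(float), /*ks=*/3 * sizeof(void*),
      a, w, &out[0][0],
      /*cm_stride=*/8 * sizeof(float), /*cn_stride=*/8 * sizeof(float),
      /*a_offset=*/0, zero_buf, &params);
  // out[m][n] == max(0, bias[n] + a_m[0]*w0[n] + a_m[1]*w1[n]); channel 0 of
  // row 0 demonstrates the ReLU: -10 + 1 + 2 = -7 -> 0.
}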
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8-relu-wasmrelaxedsimd-fma-splat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-splat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_relu_ukernel_3x8__wasmrelaxedsimd_fma_splat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 4 * sizeof(float)) { const v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t va1 = wasm_v128_load(a1); a1 += 4; const v128_t va2 = wasm_v128_load(a2); a2 += 4; const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0); const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0); const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567); const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1); const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1); const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567); const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2); const v128_t 
va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2); const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567); const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3); const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3); const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567); k -= sizeof(float); } while (k != 0); } p -= 3 * sizeof(void*); } while (p != 0); const v128_t vzero = wasm_i32x4_const_splat(0); vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero); vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero); vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero); vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero); vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero); vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = 
wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
8,560
36.384279
84
c
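The relu kernels in these records clamp their accumulators with wasm_i32x4_max against an integer zero vector rather than a float max. That works because, for any non-NaN IEEE-754 float, taking the signed-integer maximum of its bit pattern and 0 yields the bit pattern of max(x, +0.0f): non-negative floats keep their bits, and anything with the sign bit set (including -0.0f) collapses to +0.0f. A scalar sketch of the same trick, for illustration only (the helper name is mine, not XNNPACK API):

#include <stdint.h>
#include <string.h>

static float relu_via_integer_max(float x) {
  int32_t bits;
  memcpy(&bits, &x, sizeof(bits));   // reinterpret the float as a signed 32-bit integer
  if (bits < 0) {
    bits = 0;                        // sign bit set (negative or -0.0f) -> +0.0f
  }
  float y;
  memcpy(&y, &bits, sizeof(y));
  return y;                          // matches max(x, 0) for non-NaN x, with -0.0f mapped to +0.0f
}

For example, relu_via_integer_max(-3.5f) returns 0.0f and relu_via_integer_max(2.25f) returns 2.25f, which is what the vectorized wasm_i32x4_max(vacc, vzero) computes lane by lane.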
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8-relu-wasmsimd-loadsplat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-loadsplat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_relu_ukernel_3x8__wasmsimd_loadsplat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567); k -= sizeof(float); } while (k != 0); p -= 3 * sizeof(void*); } while (p != 0); const v128_t vzero = wasm_i32x4_const_splat(0); vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero); vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero); vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero); vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero); vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero); vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 
+= 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
4,828
28.625767
77
c
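Every kernel in these records resolves its input rows the same way: an entry of the indirection buffer a[] that equals the shared zero pointer is used as-is (it points at a caller-supplied row of zeros), while every other entry has a_offset added to it. A minimal scalar helper expressing that rule, for illustration only (not an XNNPACK API):

#include <stddef.h>
#include <stdint.h>

static inline const float* resolve_row(
    const float* entry,    // one pointer from the indirection buffer a[]
    const float* zero,     // the shared zero row passed to the kernel
    size_t a_offset)       // byte offset applied to real rows only
{
  return (entry == zero) ? entry
                         : (const float*) ((uintptr_t) entry + a_offset);
}

This mirrors the `if XNN_UNPREDICTABLE(aN != zero) { aN = ... + a_offset; }` blocks at the top of each indirection step in the kernels above and below.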
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8-wasmrelaxedsimd-fma-loadsplat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-loadsplat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_ukernel_3x8__wasmrelaxedsimd_fma_loadsplat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567); k -= sizeof(float); } while (k != 0); p -= 3 * sizeof(void*); } while (p != 0); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = 
wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
4,491
27.794872
80
c
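The wasmrelaxedsimd_fma records differ from their plain wasmsimd counterparts only in the accumulation step: __builtin_wasm_relaxed_madd_f32x4 is allowed to lower to a fused multiply-add (one rounding), whereas wasm_f32x4_add(wasm_f32x4_mul(...), ...) specifies two roundings, so results may differ in the last bit. A scalar analogue of the two formulations, purely illustrative (a compiler may still contract the unfused form unless FP contraction is disabled):

#include <math.h>

static inline float acc_fused(float acc, float a, float b) {
  return fmaf(a, b, acc);      // single rounding, like relaxed madd when it fuses
}

static inline float acc_unfused(float acc, float a, float b) {
  float p = a * b;             // first rounding
  return p + acc;              // second rounding, like f32x4_mul followed by f32x4_add
}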
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8-wasmrelaxedsimd-fma-splat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-splat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_ukernel_3x8__wasmrelaxedsimd_fma_splat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 4 * sizeof(float)) { const v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t va1 = wasm_v128_load(a1); a1 += 4; const v128_t va2 = wasm_v128_load(a2); a2 += 4; const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0); const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0); const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567); const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1); const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1); const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567); const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2); const v128_t 
va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2); const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567); const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3); const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3); const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567); k -= sizeof(float); } while (k != 0); } p -= 3 * sizeof(void*); } while (p != 0); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
8,194
35.914414
84
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8-wasmsimd-loadsplat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-loadsplat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_ukernel_3x8__wasmsimd_loadsplat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567); k -= sizeof(float); } while (k != 0); p -= 3 * sizeof(void*); } while (p != 0); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 
1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
4,462
27.608974
77
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8-wasmsimd-splat.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-splat.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_ukernel_3x8__wasmsimd_splat( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 4 * sizeof(float)) { const v128_t va0 = wasm_v128_load(a0); a0 += 4; const v128_t va1 = wasm_v128_load(a1); a1 += 4; const v128_t va2 = wasm_v128_load(a2); a2 += 4; const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0); const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0); const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567); const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1); const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1); const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567); const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2); const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2); 
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567); const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3); const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3); const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { do { const v128_t vb0123 = wasm_v128_load(w); const v128_t vb4567 = wasm_v128_load(w + 4); w += 8; const v128_t va0 = wasm_v128_load32_splat(a0); a0 += 1; const v128_t va1 = wasm_v128_load32_splat(a1); a1 += 1; const v128_t va2 = wasm_v128_load32_splat(a2); a2 += 1; vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567); k -= sizeof(float); } while (k != 0); } p -= 3 * sizeof(void*); } while (p != 0); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
8,093
35.459459
81
c
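All of the 3x8 loadsplat/splat micro-kernels in these records compute the same tile; what varies is only how the multiply-adds are scheduled. Restricted to a single indirection step (ks == 3 pointers) and ignoring the ReLU/minmax epilogues, the tile can be modelled in scalar C as below. The packed weight block starts with 8 bias values followed by one 8-wide B panel per k step; the function and all names are illustrative, not XNNPACK API:

#include <stddef.h>

static void igemm_3x8_tile_reference(
    size_t kc_floats,        // kc / sizeof(float)
    const float* a[3],       // the three (already resolved) input rows
    const float* w,          // packed weights: 8 biases, then kc_floats panels of 8
    float acc[3][8])         // output tile before any epilogue
{
  for (size_t m = 0; m < 3; m++) {
    for (size_t n = 0; n < 8; n++) {
      acc[m][n] = w[n];                    // bias panel
    }
  }
  const float* b = w + 8;
  for (size_t k = 0; k < kc_floats; k++) {
    for (size_t m = 0; m < 3; m++) {
      for (size_t n = 0; n < 8; n++) {
        acc[m][n] += a[m][k] * b[n];       // one broadcast A element times an 8-wide B panel
      }
    }
    b += 8;
  }
}

The loadsplat variants walk this k loop one element at a time (one load32_splat per row per k), while the splat variants unroll it by four, replaying a single 4-float load per row through lane shuffles and falling back to a one-element remainder loop.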
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8s4-minmax-sse.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/sse-shuffle.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xmmintrin.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_3x8s4__sse( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { __m128 vacc0x0123 = _mm_load_ps(w); __m128 vacc0x4567 = _mm_load_ps(w + 4); __m128 vacc1x0123 = vacc0x0123; __m128 vacc1x4567 = vacc0x4567; __m128 vacc2x0123 = vacc0x0123; __m128 vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 4 * sizeof(float)) { __m128 va0 = _mm_loadu_ps(a0); a0 += 4; __m128 va1 = _mm_loadu_ps(a1); a1 += 4; __m128 va2 = _mm_loadu_ps(a2); a2 += 4; const __m128 vb0123c0 = _mm_load_ps(w + 0); const __m128 vb4567c0 = _mm_load_ps(w + 4); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0)); vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0)); vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0)); vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0)); vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0)); va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1)); va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1)); va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1)); const __m128 vb0123c1 = _mm_load_ps(w + 8); const __m128 vb4567c1 = _mm_load_ps(w + 12); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1)); vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1)); vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1)); vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1)); vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1)); va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1)); va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1)); va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1)); const __m128 vb0123c2 = _mm_load_ps(w + 16); const __m128 vb4567c2 = _mm_load_ps(w + 20); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2)); vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2)); vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2)); vacc0x4567 = _mm_add_ps(vacc0x4567, 
_mm_mul_ps(va0, vb4567c2)); vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2)); vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2)); va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1)); va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1)); va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1)); const __m128 vb0123c3 = _mm_load_ps(w + 24); const __m128 vb4567c3 = _mm_load_ps(w + 28); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3)); vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3)); vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3)); vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3)); vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3)); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { __m128 va0 = _mm_loadu_ps(a0); a0 = (const float*) ((uintptr_t) a0 + k); __m128 va1 = _mm_loadu_ps(a1); a1 = (const float*) ((uintptr_t) a1 + k); __m128 va2 = _mm_loadu_ps(a2); a2 = (const float*) ((uintptr_t) a2 + k); const __m128 vb0123c0 = _mm_load_ps(w + 0); const __m128 vb4567c0 = _mm_load_ps(w + 4); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0)); vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va1), vb0123c0)); vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va2), vb0123c0)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0)); vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va1), vb4567c0)); vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va2), vb4567c0)); va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1)); va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1)); va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1)); const __m128 vb0123c1 = _mm_load_ps(w + 8); const __m128 vb4567c1 = _mm_load_ps(w + 12); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1)); vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va1), vb0123c1)); vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va2), vb0123c1)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1)); vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va1), vb4567c1)); vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va2), vb4567c1)); va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1)); va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1)); va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1)); const __m128 vb0123c2 = _mm_load_ps(w + 16); const __m128 vb4567c2 = _mm_load_ps(w + 20); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2)); vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va1), vb0123c2)); vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va2), vb0123c2)); vacc0x4567 = _mm_add_ps(vacc0x4567, 
_mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), vb4567c2)); vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va1), vb4567c2)); vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va2), vb4567c2)); va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1)); va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1)); va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1)); const __m128 vb0123c3 = _mm_load_ps(w + 24); const __m128 vb4567c3 = _mm_load_ps(w + 28); vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3)); vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va1), vb0123c3)); vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va2), vb0123c3)); vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3)); vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va1), vb4567c3)); vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va2), vb4567c3)); w += 32; } p -= 3 * sizeof(void*); } while (p != 0); const __m128 vmax = _mm_load_ps(params->sse.max); vacc0x0123 = _mm_min_ps(vacc0x0123, vmax); vacc1x0123 = _mm_min_ps(vacc1x0123, vmax); vacc2x0123 = _mm_min_ps(vacc2x0123, vmax); vacc0x4567 = _mm_min_ps(vacc0x4567, vmax); vacc1x4567 = _mm_min_ps(vacc1x4567, vmax); vacc2x4567 = _mm_min_ps(vacc2x4567, vmax); const __m128 vmin = _mm_load_ps(params->sse.min); vacc0x0123 = _mm_max_ps(vacc0x0123, vmin); vacc1x0123 = _mm_max_ps(vacc1x0123, vmin); vacc2x0123 = _mm_max_ps(vacc2x0123, vmin); vacc0x4567 = _mm_max_ps(vacc0x4567, vmin); vacc1x4567 = _mm_max_ps(vacc1x4567, vmin); vacc2x4567 = _mm_max_ps(vacc2x4567, vmin); if XNN_LIKELY(nc >= 8) { _mm_storeu_ps(c2, vacc2x0123); _mm_storeu_ps(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); _mm_storeu_ps(c1, vacc1x0123); _mm_storeu_ps(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); _mm_storeu_ps(c0, vacc0x0123); _mm_storeu_ps(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { _mm_storeu_ps(c2, vacc2x0123); _mm_storeu_ps(c1, vacc1x0123); _mm_storeu_ps(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { _mm_storel_pi((__m64*) c2, vacc2x0123); _mm_storel_pi((__m64*) c1, vacc1x0123); _mm_storel_pi((__m64*) c0, vacc0x0123); vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123); vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123); vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { _mm_store_ss(c2, vacc2x0123); _mm_store_ss(c1, vacc1x0123); _mm_store_ss(c0, vacc0x0123); } nc = 0; } } while (nc != 0); }
11,612
40.475
128
c
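The s4 records rotate the loaded A vector by one lane between the four sub-steps (the _MM_SHUFFLE(0, 3, 2, 1) and wasm_v32x4_shuffle(va, va, 1, 2, 3, 0) lines) instead of broadcasting, so each sub-step pairs a different A lane with its matching packed-B panel. In their k-remainder path the A registers are loaded past the end of the row, so before each multiply-add the A lanes are zeroed wherever the corresponding B lane holds the 0.0f padding value; that way out-of-bounds garbage (possibly NaN or infinity) never reaches the accumulator. A sketch of that masking step for the SSE variant, using only SSE1 intrinsics (the helper name is mine):

#include <xmmintrin.h>

static inline __m128 masked_madd_sse(__m128 vacc, __m128 va, __m128 vb) {
  // lanes where the packed B panel is 0.0f are padding -> zero the A lane there
  const __m128 vpad = _mm_cmpeq_ps(_mm_setzero_ps(), vb);
  return _mm_add_ps(vacc, _mm_mul_ps(_mm_andnot_ps(vpad, va), vb));
}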
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8s4-minmax-wasmrelaxedsimd-fma.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_3x8s4__wasmrelaxedsimd_fma( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; v128_t va1 = wasm_v128_load(a1); a1 += 4; v128_t va2 = wasm_v128_load(a2); a2 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t 
vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); v128_t va1 = wasm_v128_load(a1); a1 = (const float*) ((uintptr_t) a1 + k); v128_t va2 = wasm_v128_load(a2); a2 = (const float*) ((uintptr_t) a2 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t 
vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567); w += 32; } p -= 3 * sizeof(void*); } while (p != 0); vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567); vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = 
wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
12,345
42.935943
132
c
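The minmax records clamp each accumulator lane to [output_min, output_max] taken from the params, applying a relaxed max against the lower bound and then a relaxed min against the upper bound; the relaxed forms may deviate from IEEE min/max semantics only when a lane is NaN. The per-element effect, for reference (illustrative helper, not an XNNPACK function):

#include <math.h>

static inline float clamp_output(float x, float output_min, float output_max) {
  return fminf(fmaxf(x, output_min), output_max);
}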
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8s4-minmax-wasmrelaxedsimd.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_3x8s4__wasmrelaxedsimd( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; v128_t va1 = wasm_v128_load(a1); a1 += 4; v128_t va2 = wasm_v128_load(a2); a2 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); 
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); v128_t va1 = wasm_v128_load(a1); a1 = (const float*) ((uintptr_t) a1 + k); v128_t va2 = wasm_v128_load(a2); a2 = (const float*) ((uintptr_t) a2 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, 
wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567); w += 32; } p -= 3 * sizeof(void*); } while (p != 0); vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567); vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); 
wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
12,197
42.409253
129
c
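The kernels in this group all share the same "s4" inner loop: a 4-lane activation vector is multiplied against one panel of packed weights, then rotated one lane (the wasm_v32x4_shuffle(va, va, 1, 2, 3, 0) calls) so the next panel sees the next k value. The scalar model below is an illustrative aside, not part of the dataset and not an XNNPACK routine; the names (gemm_1x4_s4_model, lanes, panel, acc) and the tiny 1x4, kc=4 shape are invented for the sketch, and the weight packing shown is only what this model assumes.

/*
 * Illustrative scalar model of the "s4" lane rotation used by the kernels
 * above.  Not part of the dataset or of XNNPACK.
 */
#include <stdio.h>

/* One output row, four columns, kc = 4: multiply the current lane rotation by
 * a packed weight panel, then rotate lanes (1, 2, 3, 0) exactly as
 * wasm_v32x4_shuffle(va, va, 1, 2, 3, 0) does in the SIMD code. */
static void gemm_1x4_s4_model(const float a[4], const float w[4][4], float acc[4]) {
  float lanes[4] = { a[0], a[1], a[2], a[3] };
  for (int panel = 0; panel < 4; panel++) {
    for (int n = 0; n < 4; n++) {
      acc[n] += lanes[n] * w[panel][n];
    }
    const float first = lanes[0];
    lanes[0] = lanes[1];
    lanes[1] = lanes[2];
    lanes[2] = lanes[3];
    lanes[3] = first;
  }
}

int main(void) {
  const float a[4] = { 1.0f, 2.0f, 3.0f, 4.0f };
  /* For this model, panel p / column n holds B[(n + p) % 4][n], so the rotated
   * lane and the weight always refer to the same k.  B is the identity here. */
  const float b[4][4] = {
    { 1.0f, 0.0f, 0.0f, 0.0f },
    { 0.0f, 1.0f, 0.0f, 0.0f },
    { 0.0f, 0.0f, 1.0f, 0.0f },
    { 0.0f, 0.0f, 0.0f, 1.0f },
  };
  float w[4][4];
  for (int p = 0; p < 4; p++) {
    for (int n = 0; n < 4; n++) {
      w[p][n] = b[(n + p) % 4][n];
    }
  }
  float acc[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
  gemm_1x4_s4_model(a, w, acc);
  printf("%g %g %g %g\n", acc[0], acc[1], acc[2], acc[3]);  /* with identity B: 1 2 3 4 */
  return 0;
}

Summing the per-panel products over all four rotations covers every k exactly once per output column, which is why the SIMD kernels can keep the activations in registers and only stream the packed weights.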
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8s4-minmax-wasmsimd-arm.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_3x8s4__wasmsimd_arm( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; v128_t va1 = wasm_v128_load(a1); a1 += 4; v128_t va2 = wasm_v128_load(a2); a2 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = 
wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); v128_t va1 = wasm_v128_load(a1); a1 = (const float*) ((uintptr_t) a1 + k); v128_t va2 = wasm_v128_load(a2); a2 = (const float*) ((uintptr_t) a2 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, 
vzero)), vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567); w += 32; } p -= 3 * sizeof(void*); } while (p != 0); vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123); vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123); vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123); vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567); vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567); vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567); vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123); vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123); vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123); vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567); vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567); vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
11,978
41.629893
129
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8s4-minmax-wasmsimd-x86.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_3x8s4__wasmsimd_x86( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; v128_t va1 = wasm_v128_load(a1); a1 += 4; v128_t va2 = wasm_v128_load(a2); a2 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = 
wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); v128_t va1 = wasm_v128_load(a1); a1 = (const float*) ((uintptr_t) a1 + k); v128_t va2 = wasm_v128_load(a2); a2 = (const float*) ((uintptr_t) a2 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, 
vzero)), vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567); w += 32; } p -= 3 * sizeof(void*); } while (p != 0); vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123); vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123); vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123); vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567); vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567); vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567); vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123); vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123); vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123); vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567); vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567); vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
11,990
41.672598
129
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8s4-relu-wasmrelaxedsimd-fma.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_relu_ukernel_3x8s4__wasmrelaxedsimd_fma( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; v128_t va1 = wasm_v128_load(a1); a1 += 4; v128_t va2 = wasm_v128_load(a2); a2 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123); vacc1x0123 = 
__builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); v128_t va1 = wasm_v128_load(a1); a1 = (const float*) ((uintptr_t) a1 + k); v128_t va2 = wasm_v128_load(a2); a2 = (const float*) ((uintptr_t) a2 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, 
vzero)), vb0123c2, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567); w += 32; } p -= 3 * sizeof(void*); } while (p != 0); const v128_t vzero = wasm_i32x4_const_splat(0); vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero); vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero); vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero); vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero); vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero); vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
11,740
42.007326
132
c
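A short aside on the ReLU epilogue in the two _relu_ kernels above: the accumulators are clamped with wasm_i32x4_max against an integer zero rather than a float max. Any float with the sign bit set compares below zero as a signed 32-bit integer, so integer max-with-0 maps every negative value (and -0.0f) to +0.0f while leaving non-negative values untouched. The scalar sketch below is illustrative only; relu_via_i32_max is an invented name, not an XNNPACK function.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static float relu_via_i32_max(float x) {
  int32_t bits;
  memcpy(&bits, &x, sizeof bits);  /* reinterpret the float as a signed integer */
  if (bits < 0) bits = 0;          /* integer max(bits, 0), as wasm_i32x4_max(vacc, vzero) does */
  float y;
  memcpy(&y, &bits, sizeof y);
  return y;
}

int main(void) {
  printf("%g %g %g\n",
         relu_via_i32_max(1.5f),   /* 1.5 */
         relu_via_i32_max(-2.0f),  /* 0   */
         relu_via_i32_max(-0.0f)); /* 0   */
  return 0;
}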
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8s4-relu-wasmsimd.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_relu_ukernel_3x8s4__wasmsimd( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; v128_t va1 = wasm_v128_load(a1); a1 += 4; v128_t va2 = wasm_v128_load(a2); a2 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123); 
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); v128_t va1 = wasm_v128_load(a1); a1 = (const float*) ((uintptr_t) a1 + k); v128_t va2 = wasm_v128_load(a2); a2 = (const float*) ((uintptr_t) a2 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), 
vb0123c2), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567); w += 32; } p -= 3 * sizeof(void*); } while (p != 0); const v128_t vzero = wasm_i32x4_const_splat(0); vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero); vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero); vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero); vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero); vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero); vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
11,585
41.43956
129
c
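One more illustrative note, on the remainder path (k not a multiple of 4) shared by all of these kernels: a full 4-float activation vector is loaded even when fewer than 4 values remain (hence the XNN_OOB_READS annotation), and each lane is masked with wasm_v128_andnot(va, wasm_f32x4_eq(vb, vzero)) so that lanes whose packed weight is exactly 0.0f contribute nothing. Relying on the multiply alone would not be enough, because 0 * NaN is NaN. The scalar model below mimics one such masked multiply-add; masked_madd is a made-up name used only for this sketch.

#include <math.h>
#include <stdio.h>

static float masked_madd(float va, float vb, float acc) {
  /* Keep va only where vb != 0.0f; mirrors andnot(va, (vb == 0)) followed by va * vb + acc. */
  const float masked_a = (vb == 0.0f) ? 0.0f : va;
  return masked_a * vb + acc;
}

int main(void) {
  const float garbage = NAN;  /* stands in for an over-read activation lane */
  float acc = 1.0f;
  acc = masked_madd(garbage, 0.0f, acc);  /* zero-padded weight: accumulator unchanged */
  acc = masked_madd(2.0f, 3.0f, acc);     /* normal lane */
  printf("%g\n", acc);  /* 7 */
  return 0;
}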
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8s4-wasmrelaxedsimd-fma.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_ukernel_3x8s4__wasmrelaxedsimd_fma( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; v128_t va1 = wasm_v128_load(a1); a1 += 4; v128_t va2 = wasm_v128_load(a2); a2 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123); vacc1x0123 = 
__builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); v128_t va1 = wasm_v128_load(a1); a1 = (const float*) ((uintptr_t) a1 + k); v128_t va2 = wasm_v128_load(a2); a2 = (const float*) ((uintptr_t) a2 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, 
vzero)), vb0123c2, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123); vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123); vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123); vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567); vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567); vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567); w += 32; } p -= 3 * sizeof(void*); } while (p != 0); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
11,374
41.763158
132
c
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-3x8s4-wasmsimd.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/wasmsimd-s4.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_ukernel_3x8s4__wasmsimd( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc0x4567 = wasm_v128_load(w + 4); v128_t vacc1x0123 = vacc0x0123; v128_t vacc1x4567 = vacc0x4567; v128_t vacc2x0123 = vacc0x0123; v128_t vacc2x4567 = vacc0x4567; w += 8; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 4 * sizeof(float)) { v128_t va0 = wasm_v128_load(a0); a0 += 4; v128_t va1 = wasm_v128_load(a1); a1 += 4; v128_t va2 = wasm_v128_load(a2); a2 += 4; const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123); vacc2x0123 
= wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567); w += 32; k -= 4 * sizeof(float); } if XNN_UNLIKELY(k != 0) { v128_t va0 = wasm_v128_load(a0); a0 = (const float*) ((uintptr_t) a0 + k); v128_t va1 = wasm_v128_load(a1); a1 = (const float*) ((uintptr_t) a1 + k); v128_t va2 = wasm_v128_load(a2); a2 = (const float*) ((uintptr_t) a2 + k); const v128_t vzero = wasm_f32x4_const_splat(0.0f); const v128_t vb0123c0 = wasm_v128_load(w + 0); const v128_t vb4567c0 = wasm_v128_load(w + 4); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c1 = wasm_v128_load(w + 8); const v128_t vb4567c1 = wasm_v128_load(w + 12); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c2 = wasm_v128_load(w + 16); const v128_t vb4567c2 = wasm_v128_load(w + 20); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), 
vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567); va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0); va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0); va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0); const v128_t vb0123c3 = wasm_v128_load(w + 24); const v128_t vb4567c3 = wasm_v128_load(w + 28); vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123); vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123); vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123); vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567); vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567); vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567); w += 32; } p -= 3 * sizeof(void*); } while (p != 0); if XNN_LIKELY(nc >= 8) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c2 + 4, vacc2x4567); c2 = (float*) ((uintptr_t) c2 + cn_stride); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c1 + 4, vacc1x4567); c1 = (float*) ((uintptr_t) c1 + cn_stride); wasm_v128_store(c0, vacc0x0123); wasm_v128_store(c0 + 4, vacc0x4567); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { wasm_v128_store(c2, vacc2x0123); wasm_v128_store(c1, vacc1x0123); wasm_v128_store(c0, vacc0x0123); vacc2x0123 = vacc2x4567; vacc1x0123 = vacc1x4567; vacc0x0123 = vacc0x4567; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { wasm_v128_store64_lane(c2, vacc2x0123, 0); wasm_v128_store64_lane(c1, vacc1x0123, 0); wasm_v128_store64_lane(c0, vacc0x0123, 0); vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1); vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1); vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1); c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { wasm_v128_store32_lane(c2, vacc2x0123, 0); wasm_v128_store32_lane(c1, vacc1x0123, 0); wasm_v128_store32_lane(c0, vacc0x0123, 0); } nc = 0; } } while (nc != 0); }
11,219
41.180451
129
c
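The "s4" in this kernel's name refers to the four shuffle passes in the main loop: each 4-element slice of A is loaded once and rotated with wasm_v32x4_shuffle(va, va, 1, 2, 3, 0) between passes, so every A element eventually meets every output column through a pre-rotated B panel. In the remainder path, A lanes are masked off wherever the padded B panel holds zeros (the andnot/eq trick), so out-of-range k positions contribute nothing. Below is a minimal scalar model of one 4-element k step for a single output column; the packing convention assumed for w (pass c, column j holding B[k + ((j + c) mod 4)][j]) is inferred from the shuffles in this kernel, not quoted from the packing code.

// Scalar model of one s4 k-step for output column j of one row (illustrative).
// a[0..3] are four consecutive k values; b[c][j] is the pre-rotated weight
// panel used in pass c, assumed packed as B[k + ((j + c) % 4)][column j].
static float s4_step(const float a[4], const float b[4][8], int j, float acc) {
  int lane = j & 3;                 // lane of va that column j sees in pass 0
  for (int c = 0; c < 4; c++) {
    acc += a[lane] * b[c][j];       // one wasm_f32x4_mul / wasm_f32x4_add pass
    lane = (lane + 1) & 3;          // the (1, 2, 3, 0) lane rotation between passes
  }
  return acc;
}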
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x16-minmax-avx-broadcast.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/avx-broadcast.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_4x16__avx_broadcast( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (4 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } float* c3 = (float*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { c3 = c2; } do { __m256 vacc0x01234567 = _mm256_load_ps(w); __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8); __m256 vacc1x01234567 = vacc0x01234567; __m256 vacc1x89ABCDEF = vacc0x89ABCDEF; __m256 vacc2x01234567 = vacc0x01234567; __m256 vacc2x89ABCDEF = vacc0x89ABCDEF; __m256 vacc3x01234567 = vacc0x01234567; __m256 vacc3x89ABCDEF = vacc0x89ABCDEF; w += 16; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } const float* restrict a3 = a[3]; assert(a3 != NULL); if XNN_UNPREDICTABLE(a3 != zero) { a3 = (const float*) ((uintptr_t) a3 + a_offset); } a += 4; size_t k = kc; do { const __m256 vb01234567 = _mm256_load_ps(w); const __m256 vb89ABCDEF = _mm256_load_ps(w + 8); w += 16; const __m256 va0 = _mm256_broadcast_ss(a0); a0 += 1; const __m256 va1 = _mm256_broadcast_ss(a1); a1 += 1; const __m256 va2 = _mm256_broadcast_ss(a2); a2 += 1; const __m256 va3 = _mm256_broadcast_ss(a3); a3 += 1; vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567)); vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF)); vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567)); vacc1x89ABCDEF = _mm256_add_ps(vacc1x89ABCDEF, _mm256_mul_ps(va1, vb89ABCDEF)); vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567)); vacc2x89ABCDEF = _mm256_add_ps(vacc2x89ABCDEF, _mm256_mul_ps(va2, vb89ABCDEF)); vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567)); vacc3x89ABCDEF = _mm256_add_ps(vacc3x89ABCDEF, _mm256_mul_ps(va3, vb89ABCDEF)); k -= sizeof(float); } while (k != 0); p -= 4 * sizeof(void*); } while (p != 0); const __m256 vmin = _mm256_load_ps(params->avx.min); vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567); vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567); vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567); vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567); vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF); vacc1x89ABCDEF = _mm256_max_ps(vmin, 
vacc1x89ABCDEF); vacc2x89ABCDEF = _mm256_max_ps(vmin, vacc2x89ABCDEF); vacc3x89ABCDEF = _mm256_max_ps(vmin, vacc3x89ABCDEF); const __m256 vmax = _mm256_load_ps(params->avx.max); vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567); vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567); vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567); vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567); vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF); vacc1x89ABCDEF = _mm256_min_ps(vmax, vacc1x89ABCDEF); vacc2x89ABCDEF = _mm256_min_ps(vmax, vacc2x89ABCDEF); vacc3x89ABCDEF = _mm256_min_ps(vmax, vacc3x89ABCDEF); if XNN_LIKELY(nc >= 16) { _mm256_storeu_ps(c3, vacc3x01234567); _mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF); c3 = (float*) ((uintptr_t) c3 + cn_stride); _mm256_storeu_ps(c2, vacc2x01234567); _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF); c2 = (float*) ((uintptr_t) c2 + cn_stride); _mm256_storeu_ps(c1, vacc1x01234567); _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF); c1 = (float*) ((uintptr_t) c1 + cn_stride); _mm256_storeu_ps(c0, vacc0x01234567); _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 16; } else { if (nc & 8) { _mm256_storeu_ps(c3, vacc3x01234567); _mm256_storeu_ps(c2, vacc2x01234567); _mm256_storeu_ps(c1, vacc1x01234567); _mm256_storeu_ps(c0, vacc0x01234567); vacc3x01234567 = vacc3x89ABCDEF; vacc2x01234567 = vacc2x89ABCDEF; vacc1x01234567 = vacc1x89ABCDEF; vacc0x01234567 = vacc0x89ABCDEF; c3 += 8; c2 += 8; c1 += 8; c0 += 8; } __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567); __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567); __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567); __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567); if (nc & 4) { _mm_storeu_ps(c3, vacc3x0123); _mm_storeu_ps(c2, vacc2x0123); _mm_storeu_ps(c1, vacc1x0123); _mm_storeu_ps(c0, vacc0x0123); vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1); vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1); vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1); vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1); c3 += 4; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { _mm_storel_pi((__m64*) c3, vacc3x0123); _mm_storel_pi((__m64*) c2, vacc2x0123); _mm_storel_pi((__m64*) c1, vacc1x0123); _mm_storel_pi((__m64*) c0, vacc0x0123); vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123); vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123); vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123); vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123); c3 += 2; c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { _mm_store_ss(c3, vacc3x0123); _mm_store_ss(c2, vacc2x0123); _mm_store_ss(c1, vacc1x0123); _mm_store_ss(c0, vacc0x0123); } nc = 0; } } while (nc != 0); }
7,377
32.536364
87
c
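Unlike a plain gemm, this igemm reads its left-hand rows through the `a` indirection array: each tile consumes ks / (4 * sizeof(void*)) groups of four row pointers, entries equal to `zero` are left un-offset (so padding taps read a shared zero buffer), and every other entry gets `a_offset` added inside the kernel. The fragment below is a hypothetical sketch of how a caller might compute one such pointer for a padded convolution; the names and layout are illustrative, not XNNPACK's actual indirection-initialisation code, and the real buffer interleaves the pointers of all mr output pixels per tap.

// Hypothetical: input pointer for one output pixel (oy, ox) and one tap (ky, kx).
const size_t iy = oy * stride_h + ky - pad_top;    // size_t wrap-around marks "above the image"
const size_t ix = ox * stride_w + kx - pad_left;
const float* ptr = (iy < input_h && ix < input_w)
    ? input + (iy * input_w + ix) * channels       // in bounds: kernel adds a_offset to this later
    : zero_buffer;                                 // out of bounds: equals `zero`, never offset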
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x16-minmax-avx512f-broadcast.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/avx512-broadcast.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/igemm.h> #include <xnnpack/intrinsics-polyfill.h> void xnn_f32_igemm_minmax_ukernel_4x16__avx512f_broadcast( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (4 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } float* c3 = (float*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { c3 = c2; } do { __m512 vacc0x0123456789ABCDEF = _mm512_load_ps(w); __m512 vacc1x0123456789ABCDEF = vacc0x0123456789ABCDEF; __m512 vacc2x0123456789ABCDEF = vacc0x0123456789ABCDEF; __m512 vacc3x0123456789ABCDEF = vacc0x0123456789ABCDEF; w += 16; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } const float* restrict a3 = a[3]; assert(a3 != NULL); if XNN_UNPREDICTABLE(a3 != zero) { a3 = (const float*) ((uintptr_t) a3 + a_offset); } a += 4; size_t k = kc; do { const __m512 vb0123456789ABCDEF = _mm512_load_ps(w); w += 16; const __m512 va0 = _mm512_set1_ps(*a0); vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF); const __m512 va1 = _mm512_set1_ps(*a1); vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF); const __m512 va2 = _mm512_set1_ps(*a2); vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF); const __m512 va3 = _mm512_set1_ps(*a3); vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF); a0 += 1; a1 += 1; a2 += 1; a3 += 1; k -= sizeof(float); } while (k != 0); p -= 4 * sizeof(void*); } while (p != 0); const __m512 vmin = _mm512_set1_ps(params->scalar.min); vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF); vacc1x0123456789ABCDEF = _mm512_max_ps(vmin, vacc1x0123456789ABCDEF); vacc2x0123456789ABCDEF = _mm512_max_ps(vmin, vacc2x0123456789ABCDEF); vacc3x0123456789ABCDEF = _mm512_max_ps(vmin, vacc3x0123456789ABCDEF); const __m512 vmax = _mm512_set1_ps(params->scalar.max); vacc0x0123456789ABCDEF = _mm512_min_ps(vmax, vacc0x0123456789ABCDEF); vacc1x0123456789ABCDEF = _mm512_min_ps(vmax, vacc1x0123456789ABCDEF); vacc2x0123456789ABCDEF = _mm512_min_ps(vmax, vacc2x0123456789ABCDEF); vacc3x0123456789ABCDEF = _mm512_min_ps(vmax, vacc3x0123456789ABCDEF); if XNN_LIKELY(nc >= 16) { _mm512_storeu_ps(c3, vacc3x0123456789ABCDEF); c3 = (float*) 
((uintptr_t) c3 + cn_stride); _mm512_storeu_ps(c2, vacc2x0123456789ABCDEF); c2 = (float*) ((uintptr_t) c2 + cn_stride); _mm512_storeu_ps(c1, vacc1x0123456789ABCDEF); c1 = (float*) ((uintptr_t) c1 + cn_stride); _mm512_storeu_ps(c0, vacc0x0123456789ABCDEF); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 16; } else { if (nc & 15) { // Prepare mask for valid 32-bit elements (depends on nc). const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1))); _mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF); _mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF); _mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF); _mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF); } nc = 0; } } while (nc != 0); }
4,978
31.756579
106
c
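The AVX-512 remainder path handles any nc in 1..15 with a single masked store per row instead of the cascade of 8/4/2/1 partial stores the AVX kernels use. A small host-side check of the mask expression it relies on:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  for (uint32_t nc = 1; nc < 16; nc++) {
    // Same expression as the kernel: low nc bits set, so exactly nc lanes are written.
    const uint16_t mask = (uint16_t) ((UINT32_C(1) << nc) - UINT32_C(1));
    printf("nc=%2u -> mask=0x%04x\n", (unsigned) nc, (unsigned) mask);
  }
  return 0;
}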
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x16-minmax-fma3-broadcast.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/avx-broadcast.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_4x16__fma3_broadcast( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (4 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } float* c3 = (float*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { c3 = c2; } do { __m256 vacc0x01234567 = _mm256_load_ps(w); __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8); __m256 vacc1x01234567 = vacc0x01234567; __m256 vacc1x89ABCDEF = vacc0x89ABCDEF; __m256 vacc2x01234567 = vacc0x01234567; __m256 vacc2x89ABCDEF = vacc0x89ABCDEF; __m256 vacc3x01234567 = vacc0x01234567; __m256 vacc3x89ABCDEF = vacc0x89ABCDEF; w += 16; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } const float* restrict a3 = a[3]; assert(a3 != NULL); if XNN_UNPREDICTABLE(a3 != zero) { a3 = (const float*) ((uintptr_t) a3 + a_offset); } a += 4; size_t k = kc; do { const __m256 vb01234567 = _mm256_load_ps(w); const __m256 vb89ABCDEF = _mm256_load_ps(w + 8); w += 16; const __m256 va0 = _mm256_broadcast_ss(a0); a0 += 1; const __m256 va1 = _mm256_broadcast_ss(a1); a1 += 1; const __m256 va2 = _mm256_broadcast_ss(a2); a2 += 1; const __m256 va3 = _mm256_broadcast_ss(a3); a3 += 1; vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567); vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF); vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567); vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF); vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567); vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF); vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567); vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF); k -= sizeof(float); } while (k != 0); p -= 4 * sizeof(void*); } while (p != 0); const __m256 vmin = _mm256_load_ps(params->avx.min); vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567); vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567); vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567); vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567); vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF); vacc1x89ABCDEF = _mm256_max_ps(vmin, vacc1x89ABCDEF); vacc2x89ABCDEF = _mm256_max_ps(vmin, vacc2x89ABCDEF); vacc3x89ABCDEF = 
_mm256_max_ps(vmin, vacc3x89ABCDEF); const __m256 vmax = _mm256_load_ps(params->avx.max); vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567); vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567); vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567); vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567); vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF); vacc1x89ABCDEF = _mm256_min_ps(vmax, vacc1x89ABCDEF); vacc2x89ABCDEF = _mm256_min_ps(vmax, vacc2x89ABCDEF); vacc3x89ABCDEF = _mm256_min_ps(vmax, vacc3x89ABCDEF); if XNN_LIKELY(nc >= 16) { _mm256_storeu_ps(c3, vacc3x01234567); _mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF); c3 = (float*) ((uintptr_t) c3 + cn_stride); _mm256_storeu_ps(c2, vacc2x01234567); _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF); c2 = (float*) ((uintptr_t) c2 + cn_stride); _mm256_storeu_ps(c1, vacc1x01234567); _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF); c1 = (float*) ((uintptr_t) c1 + cn_stride); _mm256_storeu_ps(c0, vacc0x01234567); _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 16; } else { if (nc & 8) { _mm256_storeu_ps(c3, vacc3x01234567); _mm256_storeu_ps(c2, vacc2x01234567); _mm256_storeu_ps(c1, vacc1x01234567); _mm256_storeu_ps(c0, vacc0x01234567); vacc3x01234567 = vacc3x89ABCDEF; vacc2x01234567 = vacc2x89ABCDEF; vacc1x01234567 = vacc1x89ABCDEF; vacc0x01234567 = vacc0x89ABCDEF; c3 += 8; c2 += 8; c1 += 8; c0 += 8; } __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567); __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567); __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567); __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567); if (nc & 4) { _mm_storeu_ps(c3, vacc3x0123); _mm_storeu_ps(c2, vacc2x0123); _mm_storeu_ps(c1, vacc1x0123); _mm_storeu_ps(c0, vacc0x0123); vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1); vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1); vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1); vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1); c3 += 4; c2 += 4; c1 += 4; c0 += 4; } if (nc & 2) { _mm_storel_pi((__m64*) c3, vacc3x0123); _mm_storel_pi((__m64*) c2, vacc2x0123); _mm_storel_pi((__m64*) c1, vacc1x0123); _mm_storel_pi((__m64*) c0, vacc0x0123); vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123); vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123); vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123); vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123); c3 += 2; c2 += 2; c1 += 2; c0 += 2; } if (nc & 1) { _mm_store_ss(c3, vacc3x0123); _mm_store_ss(c2, vacc2x0123); _mm_store_ss(c1, vacc1x0123); _mm_store_ss(c0, vacc0x0123); } nc = 0; } } while (nc != 0); }
7,274
32.068182
75
c
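The only arithmetic change from the avx-broadcast kernel above is that multiply and add are fused: _mm256_fmadd_ps rounds once where _mm256_add_ps(_mm256_mul_ps(...)) rounds twice, so the two kernels can legitimately differ in the last bits of the result. A tiny scalar demonstration with fmaf, with values chosen so cancellation exposes the extra rounding:

#include <math.h>
#include <stdio.h>

int main(void) {
  const float a = 1.0000001f, b = 3.0f, acc = -3.0000002f;
  const float two_step = a * b + acc;      // rounds the product, then the sum
  const float fused    = fmaf(a, b, acc);  // single rounding, like _mm256_fmadd_ps
  printf("two-step=%.9g fused=%.9g\n", two_step, fused);  // the two values should differ here
  return 0;
}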
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x2-minmax-aarch64-neonfma-lane-ld64.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/MRx2-neon-ld64.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_4x2__aarch64_neonfma_lane_ld64( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (4 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } float* c3 = (float*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { c3 = c2; } do { float32x2_t vacc0x01 = vld1_f32(w); w += 2; float32x2_t vacc1x01 = vacc0x01; float32x2_t vacc2x01 = vacc0x01; float32x2_t vacc3x01 = vacc0x01; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } const float* restrict a3 = a[3]; assert(a3 != NULL); if XNN_UNPREDICTABLE(a3 != zero) { a3 = (const float*) ((uintptr_t) a3 + a_offset); } a += 4; size_t k = kc; for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) { const float32x2_t va0 = vld1_f32(a0); a0 += 2; const float32x2_t va1 = vld1_f32(a1); a1 += 2; const float32x2_t va2 = vld1_f32(a2); a2 += 2; const float32x2_t va3 = vld1_f32(a3); a3 += 2; const float32x2_t vb01c0 = vld1_f32(w); w += 2; #if XNN_ARCH_ARM64 vacc0x01 = vfma_lane_f32(vacc0x01, vb01c0, va0, 0); vacc1x01 = vfma_lane_f32(vacc1x01, vb01c0, va1, 0); vacc2x01 = vfma_lane_f32(vacc2x01, vb01c0, va2, 0); vacc3x01 = vfma_lane_f32(vacc3x01, vb01c0, va3, 0); #else const float32x2_t va0c0 = vdup_lane_f32(va0, 0); const float32x2_t va1c0 = vdup_lane_f32(va1, 0); const float32x2_t va2c0 = vdup_lane_f32(va2, 0); const float32x2_t va3c0 = vdup_lane_f32(va3, 0); vacc0x01 = vfma_f32(vacc0x01, va0c0, vb01c0); vacc1x01 = vfma_f32(vacc1x01, va1c0, vb01c0); vacc2x01 = vfma_f32(vacc2x01, va2c0, vb01c0); vacc3x01 = vfma_f32(vacc3x01, va3c0, vb01c0); #endif const float32x2_t vb01c1 = vld1_f32(w); w += 2; #if XNN_ARCH_ARM64 vacc0x01 = vfma_lane_f32(vacc0x01, vb01c1, va0, 1); vacc1x01 = vfma_lane_f32(vacc1x01, vb01c1, va1, 1); vacc2x01 = vfma_lane_f32(vacc2x01, vb01c1, va2, 1); vacc3x01 = vfma_lane_f32(vacc3x01, vb01c1, va3, 1); #else const float32x2_t va0c1 = vdup_lane_f32(va0, 1); const float32x2_t va1c1 = vdup_lane_f32(va1, 1); const float32x2_t va2c1 = vdup_lane_f32(va2, 1); const float32x2_t va3c1 = vdup_lane_f32(va3, 1); vacc0x01 = vfma_f32(vacc0x01, va0c1, vb01c1); vacc1x01 = vfma_f32(vacc1x01, va1c1, vb01c1); vacc2x01 = vfma_f32(vacc2x01, va2c1, vb01c1); vacc3x01 = vfma_f32(vacc3x01, 
va3c1, vb01c1); #endif } if XNN_UNLIKELY(k != 0) { const float32x2_t va0 = vld1_dup_f32(a0); const float32x2_t va1 = vld1_dup_f32(a1); const float32x2_t va2 = vld1_dup_f32(a2); const float32x2_t va3 = vld1_dup_f32(a3); const float32x2_t vb01 = vld1_f32(w); w += 2; vacc0x01 = vfma_f32(vacc0x01, va0, vb01); vacc1x01 = vfma_f32(vacc1x01, va1, vb01); vacc2x01 = vfma_f32(vacc2x01, va2, vb01); vacc3x01 = vfma_f32(vacc3x01, va3, vb01); } p -= 4 * sizeof(void*); } while (p != 0); const float32x2_t vmax = vld1_dup_f32(&params->scalar.max); vacc0x01 = vmin_f32(vacc0x01, vmax); vacc1x01 = vmin_f32(vacc1x01, vmax); vacc2x01 = vmin_f32(vacc2x01, vmax); vacc3x01 = vmin_f32(vacc3x01, vmax); const float32x2_t vmin = vld1_dup_f32(&params->scalar.min); vacc0x01 = vmax_f32(vacc0x01, vmin); vacc1x01 = vmax_f32(vacc1x01, vmin); vacc2x01 = vmax_f32(vacc2x01, vmin); vacc3x01 = vmax_f32(vacc3x01, vmin); if XNN_LIKELY(nc >= 2) { vst1_f32(c3, vacc3x01); c3 = (float*) ((uintptr_t) c3 + cn_stride); vst1_f32(c2, vacc2x01); c2 = (float*) ((uintptr_t) c2 + cn_stride); vst1_f32(c1, vacc1x01); c1 = (float*) ((uintptr_t) c1 + cn_stride); vst1_f32(c0, vacc0x01); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 2; } else { assert(nc == 1); vst1_lane_f32(c3, vacc3x01, 0); vst1_lane_f32(c2, vacc2x01, 0); vst1_lane_f32(c1, vacc1x01, 0); vst1_lane_f32(c0, vacc0x01, 0); nc = 0; } } while (nc != 0); }
5,871
31.441989
75
c
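The #if XNN_ARCH_ARM64 split in the k-loop above is only about which fused-multiply-add form is available; both branches compute the same update:

//   vacc0x01 = vfma_lane_f32(vacc0x01, vb01c0, va0, 0);            // AArch64: multiply by lane 0 of va0 inside the FMA
//   vacc0x01 = vfma_f32(vacc0x01, vdup_lane_f32(va0, 0), vb01c0);  // other NEON FMA targets: broadcast the lane first
// The results are identical; the lane form merely avoids materialising the duplicated register.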
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x2-minmax-neon-lane-ld64.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/MRx2-neon-ld64.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/igemm.h> void xnn_f32_igemm_minmax_ukernel_4x2__neon_lane_ld64( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (4 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } float* c3 = (float*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { c3 = c2; } do { float32x2_t vacc0x01 = vld1_f32(w); w += 2; float32x2_t vacc1x01 = vacc0x01; float32x2_t vacc2x01 = vacc0x01; float32x2_t vacc3x01 = vacc0x01; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } const float* restrict a3 = a[3]; assert(a3 != NULL); if XNN_UNPREDICTABLE(a3 != zero) { a3 = (const float*) ((uintptr_t) a3 + a_offset); } a += 4; size_t k = kc; for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) { const float32x2_t va0 = vld1_f32(a0); a0 += 2; const float32x2_t va1 = vld1_f32(a1); a1 += 2; const float32x2_t va2 = vld1_f32(a2); a2 += 2; const float32x2_t va3 = vld1_f32(a3); a3 += 2; const float32x2_t vb01c0 = vld1_f32(w); w += 2; vacc0x01 = vmla_lane_f32(vacc0x01, vb01c0, va0, 0); vacc1x01 = vmla_lane_f32(vacc1x01, vb01c0, va1, 0); vacc2x01 = vmla_lane_f32(vacc2x01, vb01c0, va2, 0); vacc3x01 = vmla_lane_f32(vacc3x01, vb01c0, va3, 0); const float32x2_t vb01c1 = vld1_f32(w); w += 2; vacc0x01 = vmla_lane_f32(vacc0x01, vb01c1, va0, 1); vacc1x01 = vmla_lane_f32(vacc1x01, vb01c1, va1, 1); vacc2x01 = vmla_lane_f32(vacc2x01, vb01c1, va2, 1); vacc3x01 = vmla_lane_f32(vacc3x01, vb01c1, va3, 1); } if XNN_UNLIKELY(k != 0) { const float32x2_t va0 = vld1_dup_f32(a0); const float32x2_t va1 = vld1_dup_f32(a1); const float32x2_t va2 = vld1_dup_f32(a2); const float32x2_t va3 = vld1_dup_f32(a3); const float32x2_t vb01 = vld1_f32(w); w += 2; vacc0x01 = vmla_f32(vacc0x01, va0, vb01); vacc1x01 = vmla_f32(vacc1x01, va1, vb01); vacc2x01 = vmla_f32(vacc2x01, va2, vb01); vacc3x01 = vmla_f32(vacc3x01, va3, vb01); } p -= 4 * sizeof(void*); } while (p != 0); const float32x2_t vmax = vld1_dup_f32(&params->scalar.max); vacc0x01 = vmin_f32(vacc0x01, vmax); vacc1x01 = vmin_f32(vacc1x01, vmax); vacc2x01 = vmin_f32(vacc2x01, vmax); vacc3x01 = vmin_f32(vacc3x01, vmax); const float32x2_t vmin = vld1_dup_f32(&params->scalar.min); vacc0x01 = vmax_f32(vacc0x01, vmin); vacc1x01 = vmax_f32(vacc1x01, vmin); vacc2x01 = 
vmax_f32(vacc2x01, vmin); vacc3x01 = vmax_f32(vacc3x01, vmin); if XNN_LIKELY(nc >= 2) { vst1_f32(c3, vacc3x01); c3 = (float*) ((uintptr_t) c3 + cn_stride); vst1_f32(c2, vacc2x01); c2 = (float*) ((uintptr_t) c2 + cn_stride); vst1_f32(c1, vacc1x01); c1 = (float*) ((uintptr_t) c1 + cn_stride); vst1_f32(c0, vacc0x01); c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 2; } else { assert(nc == 1); vst1_lane_f32(c3, vacc3x01, 0); vst1_lane_f32(c2, vacc2x01, 0); vst1_lane_f32(c1, vacc1x01, 0); vst1_lane_f32(c0, vacc0x01, 0); nc = 0; } } while (nc != 0); }
4,812
29.27044
75
c
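kc only has to be a multiple of sizeof(float) here, so an odd kc leaves exactly one column for the tail after the pairwise main loop, handled by broadcasting a single A value per row. A scalar view of what that vld1_dup_f32 tail computes for one row (illustrative, names are mine):

if (k != 0) {                 // exactly one float left per row at this point
  const float a_last = *a0;   // vld1_dup_f32(a0) replicates this value into both lanes
  acc0 += a_last * w[0];      // column 0 of the final weight pair (lane 0 of vb01)
  acc1 += a_last * w[1];      // column 1 of the final weight pair (lane 1 of vb01)
}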
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x2-minmax-scalar.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/igemm.h> #include <xnnpack/math.h> void xnn_f32_igemm_minmax_ukernel_4x2__scalar( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (4 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } float* c3 = (float*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { c3 = c2; } const float vmin = params->scalar.min; const float vmax = params->scalar.max; do { float vacc00 = w[0]; float vacc01 = w[1]; float vacc10 = vacc00; float vacc11 = vacc01; float vacc20 = vacc00; float vacc21 = vacc01; float vacc30 = vacc00; float vacc31 = vacc01; w += 2; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } const float* restrict a3 = a[3]; assert(a3 != NULL); if XNN_UNPREDICTABLE(a3 != zero) { a3 = (const float*) ((uintptr_t) a3 + a_offset); } a += 4; size_t k = kc; do { const float va0 = *a0++; const float va1 = *a1++; const float va2 = *a2++; const float va3 = *a3++; const float vb0 = w[0]; const float vb1 = w[1]; w += 2; vacc00 = math_muladd_f32(va0, vb0, vacc00); vacc01 = math_muladd_f32(va0, vb1, vacc01); vacc10 = math_muladd_f32(va1, vb0, vacc10); vacc11 = math_muladd_f32(va1, vb1, vacc11); vacc20 = math_muladd_f32(va2, vb0, vacc20); vacc21 = math_muladd_f32(va2, vb1, vacc21); vacc30 = math_muladd_f32(va3, vb0, vacc30); vacc31 = math_muladd_f32(va3, vb1, vacc31); k -= sizeof(float); } while (k != 0); p -= 4 * sizeof(void*); } while (p != 0); vacc00 = math_max_f32(vacc00, vmin); vacc01 = math_max_f32(vacc01, vmin); vacc10 = math_max_f32(vacc10, vmin); vacc11 = math_max_f32(vacc11, vmin); vacc20 = math_max_f32(vacc20, vmin); vacc21 = math_max_f32(vacc21, vmin); vacc30 = math_max_f32(vacc30, vmin); vacc31 = math_max_f32(vacc31, vmin); vacc00 = math_min_f32(vacc00, vmax); vacc01 = math_min_f32(vacc01, vmax); vacc10 = math_min_f32(vacc10, vmax); vacc11 = math_min_f32(vacc11, vmax); vacc20 = math_min_f32(vacc20, vmax); vacc21 = math_min_f32(vacc21, vmax); vacc30 = math_min_f32(vacc30, vmax); vacc31 = math_min_f32(vacc31, vmax); if XNN_LIKELY(nc >= 2) { c3[0] = vacc30; c3[1] = vacc31; c3 = (float*) ((uintptr_t) c3 + cn_stride); c2[0] = vacc20; c2[1] = vacc21; c2 = (float*) ((uintptr_t) c2 + cn_stride); c1[0] = vacc10; c1[1] = vacc11; c1 = (float*) ((uintptr_t) c1 + cn_stride); c0[0] = vacc00; c0[1] = vacc01; c0 = (float*) 
((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 2; } else { if (nc & 1) { c3[0] = vacc30; c2[0] = vacc20; c1[0] = vacc10; c0[0] = vacc00; } nc = 0; } } while (nc != 0); }
4,435
26.04878
75
c
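A naive reference for what one tile of this kernel computes is the kind of thing a unit test might compare against. The sketch below is illustrative only: all names are mine, and the packed-weight layout (two biases, then two weights per k step running continuously across indirection steps) is read off the kernel's loads rather than taken from XNNPACK's packing helpers.

#include <math.h>
#include <stddef.h>
#include <stdint.h>

// Reference for one mr x 2 output tile of the minmax igemm; out is a dense mr x 2 buffer.
static void igemm_4x2_reference(
    size_t mr, size_t kc, size_t ks, const float** a, const float* w,
    float out[][2], size_t a_offset, const float* zero, float vmin, float vmax) {
  const size_t K = kc / sizeof(float);
  const size_t P = ks / (4 * sizeof(void*));   // indirection steps, 4 row pointers each
  for (size_t m = 0; m < mr; m++) {
    for (size_t n = 0; n < 2; n++) {
      float acc = w[n];                        // packed weights start with the two biases
      for (size_t p = 0; p < P; p++) {
        const float* row = a[p * 4 + m];
        if (row != zero) row = (const float*) ((uintptr_t) row + a_offset);
        for (size_t k = 0; k < K; k++) {
          acc += row[k] * w[2 + (p * K + k) * 2 + n];   // two weights per k step
        }
      }
      out[m][n] = fminf(fmaxf(acc, vmin), vmax);        // same clamp order as the kernel
    }
  }
}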
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x2-minmax-wasm.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/igemm.h> #include <xnnpack/math.h> void xnn_f32_igemm_minmax_ukernel_4x2__wasm( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (4 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } float* c3 = (float*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { c3 = c2; } const float vmin = params->scalar.min; const float vmax = params->scalar.max; do { float vacc00 = w[0]; float vacc01 = w[1]; float vacc10 = vacc00; float vacc11 = vacc01; float vacc20 = vacc00; float vacc21 = vacc01; float vacc30 = vacc00; float vacc31 = vacc01; w += 2; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } const float* restrict a3 = a[3]; assert(a3 != NULL); if XNN_UNPREDICTABLE(a3 != zero) { a3 = (const float*) ((uintptr_t) a3 + a_offset); } a += 4; size_t k = kc; do { const float va0 = *a0++; const float va1 = *a1++; const float va2 = *a2++; const float va3 = *a3++; const float vb0 = w[0]; const float vb1 = w[1]; w += 2; vacc00 = math_muladd_f32(va0, vb0, vacc00); vacc01 = math_muladd_f32(va0, vb1, vacc01); vacc10 = math_muladd_f32(va1, vb0, vacc10); vacc11 = math_muladd_f32(va1, vb1, vacc11); vacc20 = math_muladd_f32(va2, vb0, vacc20); vacc21 = math_muladd_f32(va2, vb1, vacc21); vacc30 = math_muladd_f32(va3, vb0, vacc30); vacc31 = math_muladd_f32(va3, vb1, vacc31); k -= sizeof(float); } while (k != 0); p -= 4 * sizeof(void*); } while (p != 0); vacc00 = __builtin_wasm_max_f32(vacc00, vmin); vacc01 = __builtin_wasm_max_f32(vacc01, vmin); vacc10 = __builtin_wasm_max_f32(vacc10, vmin); vacc11 = __builtin_wasm_max_f32(vacc11, vmin); vacc20 = __builtin_wasm_max_f32(vacc20, vmin); vacc21 = __builtin_wasm_max_f32(vacc21, vmin); vacc30 = __builtin_wasm_max_f32(vacc30, vmin); vacc31 = __builtin_wasm_max_f32(vacc31, vmin); vacc00 = __builtin_wasm_min_f32(vacc00, vmax); vacc01 = __builtin_wasm_min_f32(vacc01, vmax); vacc10 = __builtin_wasm_min_f32(vacc10, vmax); vacc11 = __builtin_wasm_min_f32(vacc11, vmax); vacc20 = __builtin_wasm_min_f32(vacc20, vmax); vacc21 = __builtin_wasm_min_f32(vacc21, vmax); vacc30 = __builtin_wasm_min_f32(vacc30, vmax); vacc31 = __builtin_wasm_min_f32(vacc31, vmax); if XNN_LIKELY(nc >= 2) { c3[0] = vacc30; c3[1] = vacc31; c3 = (float*) ((uintptr_t) c3 + cn_stride); c2[0] = vacc20; c2[1] = vacc21; c2 = 
(float*) ((uintptr_t) c2 + cn_stride); c1[0] = vacc10; c1[1] = vacc11; c1 = (float*) ((uintptr_t) c1 + cn_stride); c0[0] = vacc00; c0[1] = vacc01; c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 2; } else { if (nc & 1) { c3[0] = vacc30; c2[0] = vacc20; c1[0] = vacc10; c0[0] = vacc00; } nc = 0; } } while (nc != 0); }
4,593
27.012195
75
c
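This is the same kernel as the __scalar minmax variant above except for how the clamp is expressed:

//   vacc00 = __builtin_wasm_max_f32(vacc00, vmin);   // lowers to a single wasm f32.max
//   vacc00 = __builtin_wasm_min_f32(vacc00, vmax);   // lowers to a single wasm f32.min
// math_max_f32 / math_min_f32 in the scalar variant typically compile to a
// compare-and-select sequence on targets without a dedicated min/max instruction.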
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x2-relu-scalar.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/igemm.h> #include <xnnpack/math.h> void xnn_f32_igemm_relu_ukernel_4x2__scalar( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (4 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } float* c3 = (float*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { c3 = c2; } do { float vacc00 = w[0]; float vacc01 = w[1]; float vacc10 = vacc00; float vacc11 = vacc01; float vacc20 = vacc00; float vacc21 = vacc01; float vacc30 = vacc00; float vacc31 = vacc01; w += 2; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } const float* restrict a3 = a[3]; assert(a3 != NULL); if XNN_UNPREDICTABLE(a3 != zero) { a3 = (const float*) ((uintptr_t) a3 + a_offset); } a += 4; size_t k = kc; do { const float va0 = *a0++; const float va1 = *a1++; const float va2 = *a2++; const float va3 = *a3++; const float vb0 = w[0]; const float vb1 = w[1]; w += 2; vacc00 = math_muladd_f32(va0, vb0, vacc00); vacc01 = math_muladd_f32(va0, vb1, vacc01); vacc10 = math_muladd_f32(va1, vb0, vacc10); vacc11 = math_muladd_f32(va1, vb1, vacc11); vacc20 = math_muladd_f32(va2, vb0, vacc20); vacc21 = math_muladd_f32(va2, vb1, vacc21); vacc30 = math_muladd_f32(va3, vb0, vacc30); vacc31 = math_muladd_f32(va3, vb1, vacc31); k -= sizeof(float); } while (k != 0); p -= 4 * sizeof(void*); } while (p != 0); vacc00 = math_max_f32(vacc00, 0.0f); vacc01 = math_max_f32(vacc01, 0.0f); vacc10 = math_max_f32(vacc10, 0.0f); vacc11 = math_max_f32(vacc11, 0.0f); vacc20 = math_max_f32(vacc20, 0.0f); vacc21 = math_max_f32(vacc21, 0.0f); vacc30 = math_max_f32(vacc30, 0.0f); vacc31 = math_max_f32(vacc31, 0.0f); if XNN_LIKELY(nc >= 2) { c3[0] = vacc30; c3[1] = vacc31; c3 = (float*) ((uintptr_t) c3 + cn_stride); c2[0] = vacc20; c2[1] = vacc21; c2 = (float*) ((uintptr_t) c2 + cn_stride); c1[0] = vacc10; c1[1] = vacc11; c1 = (float*) ((uintptr_t) c1 + cn_stride); c0[0] = vacc00; c0[1] = vacc01; c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 2; } else { if (nc & 1) { c3[0] = vacc30; c2[0] = vacc20; c1[0] = vacc10; c0[0] = vacc00; } nc = 0; } } while (nc != 0); }
4,020
25.281046
73
c
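The relu variant is the minmax kernel with the clamp hard-wired: only a lower bound of zero is applied, and the params argument is never read.

//   vacc00 = math_max_f32(vacc00, 0.0f);   // ReLU: max(x, 0), no upper bound
// equivalent to calling the minmax kernel with min = 0 and max = +infinity.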
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x2-relu-wasm.c
// Auto-generated file. Do not edit! // Template: src/f32-igemm/scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/igemm.h> #include <xnnpack/math.h> void xnn_f32_igemm_relu_ukernel_4x2__wasm( size_t mr, size_t nc, size_t kc, size_t ks, const float** restrict a, const float* restrict w, float* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const float* zero, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(float) == 0); assert(ks != 0); assert(ks % (4 * sizeof(void*)) == 0); assert(a_offset % sizeof(float) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); float* c0 = c; float* c1 = (float*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } float* c2 = (float*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } float* c3 = (float*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { c3 = c2; } do { float vacc00 = w[0]; float vacc01 = w[1]; float vacc10 = vacc00; float vacc11 = vacc01; float vacc20 = vacc00; float vacc21 = vacc01; float vacc30 = vacc00; float vacc31 = vacc01; w += 2; size_t p = ks; do { const float* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const float*) ((uintptr_t) a0 + a_offset); } const float* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const float*) ((uintptr_t) a1 + a_offset); } const float* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const float*) ((uintptr_t) a2 + a_offset); } const float* restrict a3 = a[3]; assert(a3 != NULL); if XNN_UNPREDICTABLE(a3 != zero) { a3 = (const float*) ((uintptr_t) a3 + a_offset); } a += 4; size_t k = kc; do { const float va0 = *a0++; const float va1 = *a1++; const float va2 = *a2++; const float va3 = *a3++; const float vb0 = w[0]; const float vb1 = w[1]; w += 2; vacc00 = math_muladd_f32(va0, vb0, vacc00); vacc01 = math_muladd_f32(va0, vb1, vacc01); vacc10 = math_muladd_f32(va1, vb0, vacc10); vacc11 = math_muladd_f32(va1, vb1, vacc11); vacc20 = math_muladd_f32(va2, vb0, vacc20); vacc21 = math_muladd_f32(va2, vb1, vacc21); vacc30 = math_muladd_f32(va3, vb0, vacc30); vacc31 = math_muladd_f32(va3, vb1, vacc31); k -= sizeof(float); } while (k != 0); p -= 4 * sizeof(void*); } while (p != 0); vacc00 = __builtin_wasm_max_f32(vacc00, 0.0f); vacc01 = __builtin_wasm_max_f32(vacc01, 0.0f); vacc10 = __builtin_wasm_max_f32(vacc10, 0.0f); vacc11 = __builtin_wasm_max_f32(vacc11, 0.0f); vacc20 = __builtin_wasm_max_f32(vacc20, 0.0f); vacc21 = __builtin_wasm_max_f32(vacc21, 0.0f); vacc30 = __builtin_wasm_max_f32(vacc30, 0.0f); vacc31 = __builtin_wasm_max_f32(vacc31, 0.0f); if XNN_LIKELY(nc >= 2) { c3[0] = vacc30; c3[1] = vacc31; c3 = (float*) ((uintptr_t) c3 + cn_stride); c2[0] = vacc20; c2[1] = vacc21; c2 = (float*) ((uintptr_t) c2 + cn_stride); c1[0] = vacc10; c1[1] = vacc11; c1 = (float*) ((uintptr_t) c1 + cn_stride); c0[0] = vacc00; c0[1] = vacc01; c0 = (float*) ((uintptr_t) c0 + cn_stride); a = (const float**restrict) ((uintptr_t) a - ks); nc -= 2; } else { if (nc & 1) { c3[0] = vacc30; c2[0] = vacc20; c1[0] = vacc10; c0[0] = vacc00; } nc = 0; } } while (nc != 0); }
4,098
25.79085
73
c
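As with the scalar pair, this wasm relu variant differs from the wasm minmax variant only in the clamp:

//   vacc00 = __builtin_wasm_max_f32(vacc00, 0.0f);   // f32.max against constant zero
// The upper clamp and the params->scalar.min/max loads are gone entirely;
// params is kept only for the uniform ukernel signature.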
XNNPACK
XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x2-scalar.c
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>


void xnn_f32_igemm_ukernel_4x2__scalar(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float** restrict a,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    float vacc00 = w[0];
    float vacc01 = w[1];
    float vacc10 = vacc00;
    float vacc11 = vacc01;
    float vacc20 = vacc00;
    float vacc21 = vacc01;
    float vacc30 = vacc00;
    float vacc31 = vacc01;
    w += 2;

    size_t p = ks;
    do {
      const float* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const float*) ((uintptr_t) a0 + a_offset);
      }
      const float* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const float*) ((uintptr_t) a1 + a_offset);
      }
      const float* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const float*) ((uintptr_t) a2 + a_offset);
      }
      const float* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const float*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      do {
        const float va0 = *a0++;
        const float va1 = *a1++;
        const float va2 = *a2++;
        const float va3 = *a3++;

        const float vb0 = w[0];
        const float vb1 = w[1];
        w += 2;

        vacc00 = math_muladd_f32(va0, vb0, vacc00);
        vacc01 = math_muladd_f32(va0, vb1, vacc01);
        vacc10 = math_muladd_f32(va1, vb0, vacc10);
        vacc11 = math_muladd_f32(va1, vb1, vacc11);
        vacc20 = math_muladd_f32(va2, vb0, vacc20);
        vacc21 = math_muladd_f32(va2, vb1, vacc21);
        vacc30 = math_muladd_f32(va3, vb0, vacc30);
        vacc31 = math_muladd_f32(va3, vb1, vacc31);

        k -= sizeof(float);
      } while (k != 0);
      p -= 4 * sizeof(void*);
    } while (p != 0);

    if XNN_LIKELY(nc >= 2) {
      c3[0] = vacc30;
      c3[1] = vacc31;
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      c2[0] = vacc20;
      c2[1] = vacc21;
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      c1[0] = vacc10;
      c1[1] = vacc11;
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      c0[0] = vacc00;
      c0[1] = vacc01;
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a = (const float**restrict) ((uintptr_t) a - ks);

      nc -= 2;
    } else {
      if (nc & 1) {
        c3[0] = vacc30;
        c2[0] = vacc20;
        c1[0] = vacc10;
        c0[0] = vacc00;
      }

      nc = 0;
    }
  } while (nc != 0);
}
3,690
24.455172
76
c
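A small, self-contained driver can make the calling convention of the kernel above concrete. Everything here is an illustrative sketch: the packed-weight layout (two biases, then two weights per k step) is read off the kernel's loads rather than produced by XNNPACK's packing helpers, and passing NULL for the unused zero and params arguments is my assumption, not documented API.

#include <stdio.h>
#include <xnnpack/igemm.h>   // assumed to declare xnn_f32_igemm_ukernel_4x2__scalar

int main(void) {
  // 4 rows x 2 columns, kc = 3 floats, a single indirection step (ks = 4 pointers).
  const float row0[3] = {1, 2, 3}, row1[3] = {4, 5, 6}, row2[3] = {7, 8, 9}, row3[3] = {1, 0, 1};
  const float* a[4] = { row0, row1, row2, row3 };
  const float w[2 + 3 * 2] = { 0.5f, -0.5f,        // biases for the two output columns
                               1, 0,  0, 1,  1, 1 };  // k = 0..2, two weights per step
  float c[4][2];
  xnn_f32_igemm_ukernel_4x2__scalar(
      /*mr=*/4, /*nc=*/2, /*kc=*/3 * sizeof(float), /*ks=*/4 * sizeof(void*),
      a, w, &c[0][0], /*cm_stride=*/2 * sizeof(float), /*cn_stride=*/2 * sizeof(float),
      /*a_offset=*/0, /*zero=*/NULL, /*params=*/NULL);   // zero/params unused here (assumption)
  for (int m = 0; m < 4; m++) {
    printf("%g %g\n", c[m][0], c[m][1]);   // expect: 4.5 4.5 / 10.5 10.5 / 16.5 16.5 / 2.5 0.5
  }
  return 0;
}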