repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
|---|---|---|---|---|---|---|
XNNPACK
|
XNNPACK-master/src/s8-ibilinear/gen/s8-ibilinear-scalar-c4.c
|
// Auto-generated file. Do not edit!
// Template: src/s8-ibilinear/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/ibilinear.h>
#include <xnnpack/math.h>
// Bilinear interpolation micro-kernel for signed 8-bit samples (scalar,
// 4 channels per main-loop iteration).
//
// For each output pixel, four row pointers (top-left, top-right, bottom-left,
// bottom-right corners) are read from `input` and blended with the pixel's
// Q11 fixed-point weights from `weights` (weights[0] = horizontal alpha,
// weights[1] = vertical alpha). `input_offset` is a byte offset applied to
// every input pointer; `output_increment` is the byte stride added to
// `output` after each pixel's channels are written.
void xnn_s8_ibilinear_ukernel__scalar_c4(
    size_t output_pixels,
    size_t channels,
    const int8_t** restrict input,
    size_t input_offset,
    const int16_t* restrict weights,
    int8_t* restrict output,
    size_t output_increment)
{
  assert(output_pixels != 0);
  assert(channels != 0);

  do {
    // Corner rows for this output pixel.
    const int8_t* itl = (const int8_t*) ((uintptr_t) input[0] + input_offset);
    const int8_t* itr = (const int8_t*) ((uintptr_t) input[1] + input_offset);
    const int8_t* ibl = (const int8_t*) ((uintptr_t) input[2] + input_offset);
    const int8_t* ibr = (const int8_t*) ((uintptr_t) input[3] + input_offset);
    input += 4;

    // Per-pixel interpolation weights, zero-extended to 32 bits (Q11).
    const int32_t alpha_h = (int32_t) (uint32_t) (uint16_t) weights[0];
    const int32_t alpha_v = (int32_t) (uint32_t) (uint16_t) weights[1];
    weights += 2;

    // 0.5 in Q22, for round-to-nearest before the final shift.
    const int32_t rounding = INT32_C(0x00200000);

    size_t c = channels;
    // Main loop: 4 channels per iteration.
    for (; c >= 4 * sizeof(int8_t); c -= 4 * sizeof(int8_t)) {
      for (size_t k = 0; k < 4; k++) {
        const int32_t tl = (int32_t) itl[k];
        const int32_t tr = (int32_t) itr[k];
        const int32_t bl = (int32_t) ibl[k];
        const int32_t br = (int32_t) ibr[k];
        // Horizontal blend of each row (result in Q11).
        const int32_t top = (int32_t) ((uint32_t) tl << 11) + (tr - tl) * alpha_h;
        const int32_t bottom = (int32_t) ((uint32_t) bl << 11) + (br - bl) * alpha_h;
        // Vertical blend of the two rows (result in Q22).
        const int32_t acc = (int32_t) ((uint32_t) top << 11) + (bottom - top) * alpha_v;
        // Round to nearest and drop the 22 fractional bits.
        output[k] = (int8_t) math_asr_s32(acc + rounding, 22);
      }
      itl += 4;
      itr += 4;
      ibl += 4;
      ibr += 4;
      output += 4;
    }
    // Remainder: one channel at a time.
    for (; c >= sizeof(int8_t); c -= sizeof(int8_t)) {
      const int32_t tl = (int32_t) *itl++;
      const int32_t tr = (int32_t) *itr++;
      const int32_t bl = (int32_t) *ibl++;
      const int32_t br = (int32_t) *ibr++;
      const int32_t top = (int32_t) ((uint32_t) tl << 11) + (tr - tl) * alpha_h;
      const int32_t bottom = (int32_t) ((uint32_t) bl << 11) + (br - bl) * alpha_h;
      const int32_t acc = (int32_t) ((uint32_t) top << 11) + (bottom - top) * alpha_v;
      *output++ = (int8_t) math_asr_s32(acc + rounding, 22);
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}
| 4,633
| 35.488189
| 77
|
c
|
XNNPACK
|
XNNPACK-master/src/s8-ibilinear/gen/s8-ibilinear-sse2-c16.c
|
// Auto-generated file. Do not edit!
// Template: src/s8-ibilinear/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>
#include <xnnpack/unaligned.h>
// Bilinear interpolation micro-kernel for signed 8-bit samples, SSE2,
// 16 channels per main-loop iteration.
//
// Per output pixel, `input` supplies four corner row pointers (top-left,
// top-right, bottom-left, bottom-right) and `weights` supplies two Q11
// fixed-point alphas (horizontal, then vertical). `input_offset` is a byte
// offset applied to each input pointer; `output_increment` is added to
// `output` after each pixel. XNN_OOB_READS: the tail path loads full 8-byte
// groups even when fewer than 8 channels remain — the caller guarantees
// those out-of-bounds reads are harmless.
void xnn_s8_ibilinear_ukernel__sse2_c16(
    size_t output_pixels,
    size_t channels,
    const int8_t** restrict input,
    size_t input_offset,
    const int16_t* restrict weights,
    int8_t* restrict output,
    size_t output_increment) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(channels != 0);

  do {
    const int8_t* i0 = (const int8_t*) ((uintptr_t) input[0] + input_offset);
    const int8_t* i1 = (const int8_t*) ((uintptr_t) input[1] + input_offset);
    const int8_t* i2 = (const int8_t*) ((uintptr_t) input[2] + input_offset);
    const int8_t* i3 = (const int8_t*) ((uintptr_t) input[3] + input_offset);
    input += 4;

    // Load both 16-bit weights with one 32-bit load: lane 0 = alpha_h,
    // lane 1 = alpha_v.
    const __m128i valpha = _mm_cvtsi32_si128(*((const int*) weights));
    weights += 2;
    __m128i valphah = _mm_shufflelo_epi16(valpha, _MM_SHUFFLE(0, 0, 0, 0));
    valphah = _mm_unpacklo_epi64(valphah, valphah);
    __m128i valphav = _mm_shufflelo_epi16(valpha, _MM_SHUFFLE(1, 1, 1, 1));
    valphav = _mm_unpacklo_epi64(valphav, valphav);
    // Rewrite each broadcast 16-bit pair (alpha_h, alpha_h) into
    // (alpha_h, 0x0800 - alpha_h): the XOR one's-complements the odd lanes
    // and the 16-bit add of 0x0801 completes the two's-complement
    // subtraction. With _mm_madd_epi16 over interleaved (right, left)
    // samples this yields right*alpha_h + left*(0x0800 - alpha_h) per lane.
    valphah = _mm_xor_si128(valphah, _mm_set1_epi32(0xFFFF0000));
    valphah = _mm_add_epi16(valphah, _mm_set1_epi32(0x08010000));
    // 0.5 in Q22, for round-to-nearest before the final shift.
    const __m128i vrounding = _mm_set1_epi32(0x00200000);

    size_t c = channels;
    // Main loop: 16 channels per iteration.
    for (; c >= 16 * sizeof(int8_t); c -= 16 * sizeof(int8_t)) {
      __m128i vtl01234567 = _mm_loadl_epi64((const __m128i*) i0);
      __m128i vtr01234567 = _mm_loadl_epi64((const __m128i*) i1);
      __m128i vbl01234567 = _mm_loadl_epi64((const __m128i*) i2);
      __m128i vbr01234567 = _mm_loadl_epi64((const __m128i*) i3);
      __m128i vtl89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
      __m128i vtr89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
      __m128i vbl89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
      __m128i vbr89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
      i0 += 16;
      i1 += 16;
      i2 += 16;
      i3 += 16;

      // Sign-extend int8 -> int16 (SSE2 has no pmovsxbw): duplicate each
      // byte into a 16-bit lane, then arithmetic-shift right by 8.
      vtl01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vtl01234567, vtl01234567), 8);
      vtr01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vtr01234567, vtr01234567), 8);
      vbl01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vbl01234567, vbl01234567), 8);
      vbr01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vbr01234567, vbr01234567), 8);
      vtl89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vtl89ABCDEF, vtl89ABCDEF), 8);
      vtr89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vtr89ABCDEF, vtr89ABCDEF), 8);
      vbl89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vbl89ABCDEF, vbl89ABCDEF), 8);
      vbr89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vbr89ABCDEF, vbr89ABCDEF), 8);

      // Horizontal pass: vt* = top row blended in Q11 via madd with the
      // (alpha_h, 0x0800 - alpha_h) weight pairs; vd* = the same blend of
      // the bottom-minus-top differences.
      const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
      const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
      const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdr89ABCDEF = _mm_sub_epi16(vbr89ABCDEF, vtr89ABCDEF);
      const __m128i vt89AB = _mm_madd_epi16(_mm_unpacklo_epi16(vtr89ABCDEF, vtl89ABCDEF), valphah);
      const __m128i vdl89ABCDEF = _mm_sub_epi16(vbl89ABCDEF, vtl89ABCDEF);
      const __m128i vtCDEF = _mm_madd_epi16(_mm_unpackhi_epi16(vtr89ABCDEF, vtl89ABCDEF), valphah);
      const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd89AB = _mm_madd_epi16(_mm_unpacklo_epi16(vdr89ABCDEF, vdl89ABCDEF), valphah);
      const __m128i vdCDEF = _mm_madd_epi16(_mm_unpackhi_epi16(vdr89ABCDEF, vdl89ABCDEF), valphah);

      // Vertical pass. SSE2 lacks a 32-bit low multiply, so vd * alpha_v is
      // reconstructed from 16-bit partial products: high halves of the
      // unsigned 16-bit products shifted up by 16, plus the low halves —
      // equivalent to _mm_mullo_epi32(vd, valphav).
      __m128i vacc0123 = _mm_slli_epi32(_mm_mulhi_epu16(vd0123, valphav), 16);
      __m128i vacc4567 = _mm_slli_epi32(_mm_mulhi_epu16(vd4567, valphav), 16);
      __m128i vacc89AB = _mm_slli_epi32(_mm_mulhi_epu16(vd89AB, valphav), 16);
      __m128i vaccCDEF = _mm_slli_epi32(_mm_mulhi_epu16(vdCDEF, valphav), 16);
      vacc0123 = _mm_add_epi16(_mm_mullo_epi16(vd0123, valphav), vacc0123);
      vacc4567 = _mm_add_epi16(_mm_mullo_epi16(vd4567, valphav), vacc4567);
      vacc89AB = _mm_add_epi16(_mm_mullo_epi16(vd89AB, valphav), vacc89AB);
      vaccCDEF = _mm_add_epi16(_mm_mullo_epi16(vdCDEF, valphav), vaccCDEF);
      vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
      vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
      vacc89AB = _mm_add_epi32(_mm_slli_epi32(vt89AB, 11), vacc89AB);
      vaccCDEF = _mm_add_epi32(_mm_slli_epi32(vtCDEF, 11), vaccCDEF);

      // Round-to-nearest and rescale from Q22. The 16-bit add is safe here:
      // the rounding constant's low 16 bits are zero, so no carry would
      // ever cross a 16-bit lane boundary.
      vacc0123 = _mm_srai_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
      vacc4567 = _mm_srai_epi32(_mm_add_epi16(vacc4567, vrounding), 22);
      vacc89AB = _mm_srai_epi32(_mm_add_epi16(vacc89AB, vrounding), 22);
      vaccCDEF = _mm_srai_epi32(_mm_add_epi16(vaccCDEF, vrounding), 22);

      // Narrow int32 -> int16 -> int8 with signed saturation and store.
      const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
      const __m128i vacc89ABCDEF = _mm_packs_epi32(vacc89AB, vaccCDEF);
      const __m128i vo0123456789ABCDEF = _mm_packs_epi16(vacc01234567, vacc89ABCDEF);

      _mm_storeu_si128((__m128i*) output, vo0123456789ABCDEF);
      output += 16;
    }
    // Secondary loop: 8 channels at a time (same algorithm, half width).
    for (; c >= 8 * sizeof(int8_t); c -= 8 * sizeof(int8_t)) {
      __m128i vtl01234567 = _mm_loadl_epi64((const __m128i*) i0);
      i0 += 8;
      __m128i vtr01234567 = _mm_loadl_epi64((const __m128i*) i1);
      i1 += 8;
      __m128i vbl01234567 = _mm_loadl_epi64((const __m128i*) i2);
      i2 += 8;
      __m128i vbr01234567 = _mm_loadl_epi64((const __m128i*) i3);
      i3 += 8;

      vtl01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vtl01234567, vtl01234567), 8);
      vtr01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vtr01234567, vtr01234567), 8);
      vbl01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vbl01234567, vbl01234567), 8);
      vbr01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vbr01234567, vbr01234567), 8);

      const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
      const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
      const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);

      __m128i vacc0123 = _mm_slli_epi32(_mm_mulhi_epu16(vd0123, valphav), 16);
      __m128i vacc4567 = _mm_slli_epi32(_mm_mulhi_epu16(vd4567, valphav), 16);
      vacc0123 = _mm_add_epi16(_mm_mullo_epi16(vd0123, valphav), vacc0123);
      vacc4567 = _mm_add_epi16(_mm_mullo_epi16(vd4567, valphav), vacc4567);
      vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
      vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
      vacc0123 = _mm_srai_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
      vacc4567 = _mm_srai_epi32(_mm_add_epi16(vacc4567, vrounding), 22);

      const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
      const __m128i vo01234567 = _mm_packs_epi16(vacc01234567, vacc01234567);

      _mm_storel_epi64((__m128i*) output, vo01234567);
      output += 8;
    }
    // Tail: 1-7 channels. Loads full 8-byte groups (XNN_OOB_READS) and
    // computes 8 results, but stores only `c` bytes via 4/2/1-byte pieces.
    if XNN_UNLIKELY(c != 0) {
      __m128i vtl01234567 = _mm_loadl_epi64((const __m128i*) i0);
      __m128i vtr01234567 = _mm_loadl_epi64((const __m128i*) i1);
      __m128i vbl01234567 = _mm_loadl_epi64((const __m128i*) i2);
      __m128i vbr01234567 = _mm_loadl_epi64((const __m128i*) i3);

      vtl01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vtl01234567, vtl01234567), 8);
      vtr01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vtr01234567, vtr01234567), 8);
      vbl01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vbl01234567, vbl01234567), 8);
      vbr01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vbr01234567, vbr01234567), 8);

      const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
      const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
      const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);

      __m128i vacc0123 = _mm_slli_epi32(_mm_mulhi_epu16(vd0123, valphav), 16);
      __m128i vacc4567 = _mm_slli_epi32(_mm_mulhi_epu16(vd4567, valphav), 16);
      vacc0123 = _mm_add_epi16(_mm_mullo_epi16(vd0123, valphav), vacc0123);
      vacc4567 = _mm_add_epi16(_mm_mullo_epi16(vd4567, valphav), vacc4567);
      vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
      vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
      vacc0123 = _mm_srai_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
      vacc4567 = _mm_srai_epi32(_mm_add_epi16(vacc4567, vrounding), 22);

      const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
      __m128i vo01234567 = _mm_packs_epi16(vacc01234567, vacc01234567);

      if (c & (4 * sizeof(int8_t))) {
        unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vo01234567));
        output += 4;
        vo01234567 = _mm_srli_epi64(vo01234567, 32);
      }
      uint32_t vo0123 = (uint32_t) _mm_cvtsi128_si32(vo01234567);
      if (c & (2 * sizeof(int8_t))) {
        unaligned_store_u16(output, (uint16_t) vo0123);
        output += 2;
        vo0123 >>= 16;
      }
      if (c & (1 * sizeof(int8_t))) {
        *output++ = (uint8_t) vo0123;
      }
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}
| 10,240
| 47.306604
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/s8-ibilinear/gen/s8-ibilinear-sse2-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/s8-ibilinear/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>
#include <xnnpack/unaligned.h>
// Bilinear interpolation micro-kernel for signed 8-bit samples, SSE2,
// 8 channels per main-loop iteration.
//
// Same algorithm as the c16 variant: per output pixel, four corner row
// pointers and two Q11 weights (horizontal, vertical). XNN_OOB_READS: the
// tail path loads full 8-byte groups past the `c` remaining channels; the
// caller guarantees those reads are safe.
void xnn_s8_ibilinear_ukernel__sse2_c8(
    size_t output_pixels,
    size_t channels,
    const int8_t** restrict input,
    size_t input_offset,
    const int16_t* restrict weights,
    int8_t* restrict output,
    size_t output_increment) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(channels != 0);

  do {
    const int8_t* i0 = (const int8_t*) ((uintptr_t) input[0] + input_offset);
    const int8_t* i1 = (const int8_t*) ((uintptr_t) input[1] + input_offset);
    const int8_t* i2 = (const int8_t*) ((uintptr_t) input[2] + input_offset);
    const int8_t* i3 = (const int8_t*) ((uintptr_t) input[3] + input_offset);
    input += 4;

    // One 32-bit load grabs both 16-bit weights (alpha_h in lane 0,
    // alpha_v in lane 1).
    const __m128i valpha = _mm_cvtsi32_si128(*((const int*) weights));
    weights += 2;
    __m128i valphah = _mm_shufflelo_epi16(valpha, _MM_SHUFFLE(0, 0, 0, 0));
    valphah = _mm_unpacklo_epi64(valphah, valphah);
    __m128i valphav = _mm_shufflelo_epi16(valpha, _MM_SHUFFLE(1, 1, 1, 1));
    valphav = _mm_unpacklo_epi64(valphav, valphav);
    // Rewrite each (alpha_h, alpha_h) pair into (alpha_h, 0x0800 - alpha_h):
    // XOR one's-complements the odd lanes, the 16-bit +0x0801 finishes the
    // subtraction. _mm_madd_epi16 over interleaved (right, left) samples
    // then gives right*alpha_h + left*(0x0800 - alpha_h).
    valphah = _mm_xor_si128(valphah, _mm_set1_epi32(0xFFFF0000));
    valphah = _mm_add_epi16(valphah, _mm_set1_epi32(0x08010000));
    // 0.5 in Q22 for round-to-nearest.
    const __m128i vrounding = _mm_set1_epi32(0x00200000);

    size_t c = channels;
    for (; c >= 8 * sizeof(int8_t); c -= 8 * sizeof(int8_t)) {
      __m128i vtl01234567 = _mm_loadl_epi64((const __m128i*) i0);
      i0 += 8;
      __m128i vtr01234567 = _mm_loadl_epi64((const __m128i*) i1);
      i1 += 8;
      __m128i vbl01234567 = _mm_loadl_epi64((const __m128i*) i2);
      i2 += 8;
      __m128i vbr01234567 = _mm_loadl_epi64((const __m128i*) i3);
      i3 += 8;

      // Sign-extend int8 -> int16 (no pmovsxbw in SSE2): duplicate bytes,
      // then arithmetic-shift right by 8.
      vtl01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vtl01234567, vtl01234567), 8);
      vtr01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vtr01234567, vtr01234567), 8);
      vbl01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vbl01234567, vbl01234567), 8);
      vbr01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vbr01234567, vbr01234567), 8);

      // Horizontal pass: top row blend (vt*, Q11) and the blend of the
      // bottom-minus-top differences (vd*).
      const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
      const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
      const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);

      // Vertical pass: vd * alpha_v reconstructed from 16-bit partial
      // products (SSE2 has no 32-bit low multiply) — equivalent to
      // _mm_mullo_epi32(vd, valphav).
      __m128i vacc0123 = _mm_slli_epi32(_mm_mulhi_epu16(vd0123, valphav), 16);
      __m128i vacc4567 = _mm_slli_epi32(_mm_mulhi_epu16(vd4567, valphav), 16);
      vacc0123 = _mm_add_epi16(_mm_mullo_epi16(vd0123, valphav), vacc0123);
      vacc4567 = _mm_add_epi16(_mm_mullo_epi16(vd4567, valphav), vacc4567);
      vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
      vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
      // Round and rescale from Q22; the 16-bit add is carry-safe because
      // the rounding constant's low 16 bits are zero.
      vacc0123 = _mm_srai_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
      vacc4567 = _mm_srai_epi32(_mm_add_epi16(vacc4567, vrounding), 22);

      // Narrow with signed saturation and store 8 bytes.
      const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
      const __m128i vo01234567 = _mm_packs_epi16(vacc01234567, vacc01234567);

      _mm_storel_epi64((__m128i*) output, vo01234567);
      output += 8;
    }
    // Tail: 1-7 channels; compute 8 results, store only `c` bytes.
    if XNN_UNLIKELY(c != 0) {
      __m128i vtl01234567 = _mm_loadl_epi64((const __m128i*) i0);
      __m128i vtr01234567 = _mm_loadl_epi64((const __m128i*) i1);
      __m128i vbl01234567 = _mm_loadl_epi64((const __m128i*) i2);
      __m128i vbr01234567 = _mm_loadl_epi64((const __m128i*) i3);

      vtl01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vtl01234567, vtl01234567), 8);
      vtr01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vtr01234567, vtr01234567), 8);
      vbl01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vbl01234567, vbl01234567), 8);
      vbr01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vbr01234567, vbr01234567), 8);

      const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
      const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
      const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);

      __m128i vacc0123 = _mm_slli_epi32(_mm_mulhi_epu16(vd0123, valphav), 16);
      __m128i vacc4567 = _mm_slli_epi32(_mm_mulhi_epu16(vd4567, valphav), 16);
      vacc0123 = _mm_add_epi16(_mm_mullo_epi16(vd0123, valphav), vacc0123);
      vacc4567 = _mm_add_epi16(_mm_mullo_epi16(vd4567, valphav), vacc4567);
      vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
      vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
      vacc0123 = _mm_srai_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
      vacc4567 = _mm_srai_epi32(_mm_add_epi16(vacc4567, vrounding), 22);

      const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
      __m128i vo01234567 = _mm_packs_epi16(vacc01234567, vacc01234567);

      if (c & (4 * sizeof(int8_t))) {
        unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vo01234567));
        output += 4;
        vo01234567 = _mm_srli_epi64(vo01234567, 32);
      }
      uint32_t vo0123 = (uint32_t) _mm_cvtsi128_si32(vo01234567);
      if (c & (2 * sizeof(int8_t))) {
        unaligned_store_u16(output, (uint16_t) vo0123);
        output += 2;
        vo0123 >>= 16;
      }
      if (c & (1 * sizeof(int8_t))) {
        *output++ = (uint8_t) vo0123;
      }
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}
| 6,265
| 41.62585
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/s8-ibilinear/gen/s8-ibilinear-sse41-c16.c
|
// Auto-generated file. Do not edit!
// Template: src/s8-ibilinear/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>
#include <xnnpack/unaligned.h>
// Bilinear interpolation micro-kernel for signed 8-bit samples, SSE4.1,
// 16 channels per main-loop iteration.
//
// Per output pixel, `input` supplies four corner row pointers (top-left,
// top-right, bottom-left, bottom-right) and `weights` supplies two Q11
// fixed-point alphas (horizontal, then vertical). SSE4.1 allows direct
// sign extension (pmovsxbw) and a true 32-bit multiply (pmulld), so this
// variant avoids the SSE2 workarounds. XNN_OOB_READS: the tail path loads
// full 8-byte groups past the `c` remaining channels.
void xnn_s8_ibilinear_ukernel__sse41_c16(
    size_t output_pixels,
    size_t channels,
    const int8_t** restrict input,
    size_t input_offset,
    const int16_t* restrict weights,
    int8_t* restrict output,
    size_t output_increment) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(channels != 0);

  do {
    const int8_t* i0 = (const int8_t*) ((uintptr_t) input[0] + input_offset);
    const int8_t* i1 = (const int8_t*) ((uintptr_t) input[1] + input_offset);
    const int8_t* i2 = (const int8_t*) ((uintptr_t) input[2] + input_offset);
    const int8_t* i3 = (const int8_t*) ((uintptr_t) input[3] + input_offset);
    input += 4;

    // One 32-bit load grabs both weights: alpha_h in 16-bit lane 0,
    // alpha_v in lane 1.
    const __m128i valpha = _mm_cvtsi32_si128(*((const int*) weights));
    weights += 2;
    __m128i valphah = _mm_shufflelo_epi16(valpha, _MM_SHUFFLE(0, 0, 0, 0));
    valphah = _mm_unpacklo_epi64(valphah, valphah);
    // alpha_v zero-extended into each 32-bit lane for _mm_mullo_epi32.
    __m128i valphav = _mm_srli_epi32(valpha, 16);
    valphav = _mm_shuffle_epi32(valphav, _MM_SHUFFLE(0, 0, 0, 0));
    // Blend the odd 16-bit lanes with (0x0800 - alpha_h), so each 32-bit
    // lane holds the pair (alpha_h, 0x0800 - alpha_h); _mm_madd_epi16 over
    // interleaved (right, left) samples then computes
    // right*alpha_h + left*(0x0800 - alpha_h) in one instruction.
    valphah = _mm_blend_epi16(valphah, _mm_sub_epi16(_mm_set1_epi32(0x08000000), valphah), 0xAA);
    // 0.5 in Q22 for round-to-nearest.
    const __m128i vrounding = _mm_set1_epi32(0x00200000);

    size_t c = channels;
    // Main loop: 16 channels per iteration; pmovsxbw sign-extends the
    // int8 samples directly.
    for (; c >= 16 * sizeof(int8_t); c -= 16 * sizeof(int8_t)) {
      const __m128i vtl01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
      const __m128i vtr01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
      const __m128i vbl01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
      const __m128i vbr01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));
      const __m128i vtl89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
      const __m128i vtr89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
      const __m128i vbl89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
      const __m128i vbr89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
      i0 += 16;
      i1 += 16;
      i2 += 16;
      i3 += 16;

      // Horizontal pass: vt* = top row blended in Q11; vd* = the same
      // blend applied to the bottom-minus-top differences.
      const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
      const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
      const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdr89ABCDEF = _mm_sub_epi16(vbr89ABCDEF, vtr89ABCDEF);
      const __m128i vt89AB = _mm_madd_epi16(_mm_unpacklo_epi16(vtr89ABCDEF, vtl89ABCDEF), valphah);
      const __m128i vdl89ABCDEF = _mm_sub_epi16(vbl89ABCDEF, vtl89ABCDEF);
      const __m128i vtCDEF = _mm_madd_epi16(_mm_unpackhi_epi16(vtr89ABCDEF, vtl89ABCDEF), valphah);
      const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd89AB = _mm_madd_epi16(_mm_unpacklo_epi16(vdr89ABCDEF, vdl89ABCDEF), valphah);
      const __m128i vdCDEF = _mm_madd_epi16(_mm_unpackhi_epi16(vdr89ABCDEF, vdl89ABCDEF), valphah);

      // Vertical pass: direct 32-bit multiply (pmulld), then add the Q22
      // top-row contribution.
      __m128i vacc0123 = _mm_mullo_epi32(vd0123, valphav);
      __m128i vacc4567 = _mm_mullo_epi32(vd4567, valphav);
      __m128i vacc89AB = _mm_mullo_epi32(vd89AB, valphav);
      __m128i vaccCDEF = _mm_mullo_epi32(vdCDEF, valphav);
      vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
      vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
      vacc89AB = _mm_add_epi32(_mm_slli_epi32(vt89AB, 11), vacc89AB);
      vaccCDEF = _mm_add_epi32(_mm_slli_epi32(vtCDEF, 11), vaccCDEF);

      // Round-to-nearest and rescale from Q22. The 16-bit add is carry-safe
      // because the rounding constant's low 16 bits are zero.
      vacc0123 = _mm_srai_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
      vacc4567 = _mm_srai_epi32(_mm_add_epi16(vacc4567, vrounding), 22);
      vacc89AB = _mm_srai_epi32(_mm_add_epi16(vacc89AB, vrounding), 22);
      vaccCDEF = _mm_srai_epi32(_mm_add_epi16(vaccCDEF, vrounding), 22);

      // Narrow int32 -> int16 -> int8 with signed saturation and store.
      const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
      const __m128i vacc89ABCDEF = _mm_packs_epi32(vacc89AB, vaccCDEF);
      const __m128i vo0123456789ABCDEF = _mm_packs_epi16(vacc01234567, vacc89ABCDEF);

      _mm_storeu_si128((__m128i*) output, vo0123456789ABCDEF);
      output += 16;
    }
    // Secondary loop: 8 channels at a time.
    for (; c >= 8 * sizeof(int8_t); c -= 8 * sizeof(int8_t)) {
      const __m128i vtl01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
      i0 += 8;
      const __m128i vtr01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
      i1 += 8;
      const __m128i vbl01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
      i2 += 8;
      const __m128i vbr01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));
      i3 += 8;

      const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
      const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
      const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);

      __m128i vacc0123 = _mm_mullo_epi32(vd0123, valphav);
      __m128i vacc4567 = _mm_mullo_epi32(vd4567, valphav);
      vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
      vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
      vacc0123 = _mm_srai_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
      vacc4567 = _mm_srai_epi32(_mm_add_epi16(vacc4567, vrounding), 22);

      const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
      const __m128i vo01234567 = _mm_packs_epi16(vacc01234567, vacc01234567);

      _mm_storel_epi64((__m128i*) output, vo01234567);
      output += 8;
    }
    // Tail: 1-7 channels; compute 8 results (XNN_OOB_READS covers the
    // over-read), store only `c` bytes in 4/2/1-byte pieces.
    if XNN_UNLIKELY(c != 0) {
      const __m128i vtl01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
      const __m128i vtr01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
      const __m128i vbl01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
      const __m128i vbr01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));

      const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
      const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
      const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);

      __m128i vacc0123 = _mm_mullo_epi32(vd0123, valphav);
      __m128i vacc4567 = _mm_mullo_epi32(vd4567, valphav);
      vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
      vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
      vacc0123 = _mm_srai_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
      vacc4567 = _mm_srai_epi32(_mm_add_epi16(vacc4567, vrounding), 22);

      const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
      __m128i vo01234567 = _mm_packs_epi16(vacc01234567, vacc01234567);

      if (c & (4 * sizeof(int8_t))) {
        unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vo01234567));
        output += 4;
        vo01234567 = _mm_srli_epi64(vo01234567, 32);
      }
      if (c & (2 * sizeof(int8_t))) {
        unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vo01234567, 0));
        output += 2;
        vo01234567 = _mm_srli_epi32(vo01234567, 16);
      }
      if (c & (1 * sizeof(int8_t))) {
        *output++ = (uint8_t) _mm_extract_epi8(vo01234567, 0);
      }
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}
| 8,496
| 45.431694
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/s8-ibilinear/gen/s8-ibilinear-sse41-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/s8-ibilinear/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>
#include <xnnpack/unaligned.h>
// Bilinear interpolation micro-kernel for signed 8-bit samples, SSE4.1,
// 8 channels per main-loop iteration.
//
// Same algorithm as the sse41 c16 variant: per output pixel, four corner
// row pointers and two Q11 weights (horizontal, vertical). XNN_OOB_READS:
// the tail path loads full 8-byte groups past the `c` remaining channels;
// the caller guarantees those reads are safe.
void xnn_s8_ibilinear_ukernel__sse41_c8(
    size_t output_pixels,
    size_t channels,
    const int8_t** restrict input,
    size_t input_offset,
    const int16_t* restrict weights,
    int8_t* restrict output,
    size_t output_increment) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(channels != 0);

  do {
    const int8_t* i0 = (const int8_t*) ((uintptr_t) input[0] + input_offset);
    const int8_t* i1 = (const int8_t*) ((uintptr_t) input[1] + input_offset);
    const int8_t* i2 = (const int8_t*) ((uintptr_t) input[2] + input_offset);
    const int8_t* i3 = (const int8_t*) ((uintptr_t) input[3] + input_offset);
    input += 4;

    // One 32-bit load grabs both weights: alpha_h in 16-bit lane 0,
    // alpha_v in lane 1.
    const __m128i valpha = _mm_cvtsi32_si128(*((const int*) weights));
    weights += 2;
    __m128i valphah = _mm_shufflelo_epi16(valpha, _MM_SHUFFLE(0, 0, 0, 0));
    valphah = _mm_unpacklo_epi64(valphah, valphah);
    // alpha_v zero-extended into each 32-bit lane for _mm_mullo_epi32.
    __m128i valphav = _mm_srli_epi32(valpha, 16);
    valphav = _mm_shuffle_epi32(valphav, _MM_SHUFFLE(0, 0, 0, 0));
    // Blend the odd 16-bit lanes with (0x0800 - alpha_h), so each 32-bit
    // lane holds the pair (alpha_h, 0x0800 - alpha_h); _mm_madd_epi16 over
    // interleaved (right, left) samples then computes
    // right*alpha_h + left*(0x0800 - alpha_h) in one instruction.
    valphah = _mm_blend_epi16(valphah, _mm_sub_epi16(_mm_set1_epi32(0x08000000), valphah), 0xAA);
    // 0.5 in Q22 for round-to-nearest.
    const __m128i vrounding = _mm_set1_epi32(0x00200000);

    size_t c = channels;
    for (; c >= 8 * sizeof(int8_t); c -= 8 * sizeof(int8_t)) {
      // pmovsxbw sign-extends the 8 int8 samples of each corner to int16.
      const __m128i vtl01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
      i0 += 8;
      const __m128i vtr01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
      i1 += 8;
      const __m128i vbl01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
      i2 += 8;
      const __m128i vbr01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));
      i3 += 8;

      // Horizontal pass: vt* = top row blended in Q11; vd* = the same
      // blend applied to the bottom-minus-top differences.
      const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
      const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
      const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);

      // Vertical pass: direct 32-bit multiply, add the Q22 top-row term.
      __m128i vacc0123 = _mm_mullo_epi32(vd0123, valphav);
      __m128i vacc4567 = _mm_mullo_epi32(vd4567, valphav);
      vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
      vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
      // Round and rescale from Q22; the 16-bit add is carry-safe because
      // the rounding constant's low 16 bits are zero.
      vacc0123 = _mm_srai_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
      vacc4567 = _mm_srai_epi32(_mm_add_epi16(vacc4567, vrounding), 22);

      // Narrow with signed saturation and store 8 bytes.
      const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
      const __m128i vo01234567 = _mm_packs_epi16(vacc01234567, vacc01234567);

      _mm_storel_epi64((__m128i*) output, vo01234567);
      output += 8;
    }
    // Tail: 1-7 channels; compute 8 results (XNN_OOB_READS covers the
    // over-read), store only `c` bytes in 4/2/1-byte pieces.
    if XNN_UNLIKELY(c != 0) {
      const __m128i vtl01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
      const __m128i vtr01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
      const __m128i vbl01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
      const __m128i vbr01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));

      const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
      const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
      const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);

      __m128i vacc0123 = _mm_mullo_epi32(vd0123, valphav);
      __m128i vacc4567 = _mm_mullo_epi32(vd4567, valphav);
      vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
      vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
      vacc0123 = _mm_srai_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
      vacc4567 = _mm_srai_epi32(_mm_add_epi16(vacc4567, vrounding), 22);

      const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
      __m128i vo01234567 = _mm_packs_epi16(vacc01234567, vacc01234567);

      if (c & (4 * sizeof(int8_t))) {
        unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vo01234567));
        output += 4;
        vo01234567 = _mm_srli_epi64(vo01234567, 32);
      }
      if (c & (2 * sizeof(int8_t))) {
        unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vo01234567, 0));
        output += 2;
        vo01234567 = _mm_srli_epi32(vo01234567, 16);
      }
      if (c & (1 * sizeof(int8_t))) {
        *output++ = (uint8_t) _mm_extract_epi8(vo01234567, 0);
      }
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}
| 5,378
| 40.061069
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/s8-ibilinear/gen/s8-ibilinear-wasmsimd-dot16x2-c16.c
|
// Auto-generated file. Do not edit!
// Template: src/s8-ibilinear/wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>
// Bilinear interpolation (resize) for signed 8-bit data, WAsm SIMD
// "dot16x2" variant, 16 channels per main-loop iteration.
//
// Per output pixel, input[] supplies four corner row pointers
// (top-left, top-right, bottom-left, bottom-right) and weights[] supplies a
// (horizontal, vertical) pair of Q11 fixed-point coefficients. Each channel
// computes a horizontal lerp of the top row and of the (bottom - top) delta,
// then a vertical lerp, and rounds away the 22 fractional bits.
// Marked XNN_OOB_READS: the channel tail loads full 8-byte groups and may
// read past the end of each input row.
void xnn_s8_ibilinear_ukernel__wasmsimd_dot16x2_c16(
    size_t output_pixels,
    size_t channels,
    const int8_t** restrict input,
    size_t input_offset,
    const int16_t* restrict weights,
    int8_t* restrict output,
    size_t output_increment) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(channels != 0);

  do {
    // Corner row pointers for this output pixel, shifted by the common offset.
    const int8_t* i0 = (const int8_t*) ((uintptr_t) input[0] + input_offset);
    const int8_t* i1 = (const int8_t*) ((uintptr_t) input[1] + input_offset);
    const int8_t* i2 = (const int8_t*) ((uintptr_t) input[2] + input_offset);
    const int8_t* i3 = (const int8_t*) ((uintptr_t) input[3] + input_offset);
    input += 4;

    // Build i16 lane pairs (ah, 0x800 - ah) from the Q11 horizontal weight:
    // the splat puts ah in every i16 lane; XOR with 0xFFFF in the odd lanes
    // gives ~ah there, and adding 0x0801 turns it into 0x800 - ah
    // (since ~ah + 1 == -ah). Each i32 lane is then dotted against a
    // (right, left) i16 pair, computing the horizontal lerp in one
    // wasm_i32x4_dot_i16x8.
    const v128_t valphah =
      wasm_i16x8_add(
        wasm_v128_xor(
          wasm_v128_load16_splat(weights),
          wasm_i32x4_const_splat(0xFFFF0000)),
        wasm_i32x4_const_splat(0x08010000));
    // Vertical Q11 weight, one copy per i32 lane.
    const v128_t valphav = wasm_i32x4_extend_low_i16x8(wasm_v128_load16_splat(weights + 1));
    weights += 2;

    // Rounding term 2^21 for the final arithmetic shift by 22
    // (11 horizontal + 11 vertical fractional bits).
    const v128_t vrounding = wasm_i32x4_const_splat(0x00200000);

    size_t c = channels;
    for (; c >= 16 * sizeof(int8_t); c -= 16 * sizeof(int8_t)) {
      // Load and sign-extend 16 channels of each corner to i16.
      const v128_t vtl01234567 = wasm_i16x8_load8x8(i0);
      const v128_t vtr01234567 = wasm_i16x8_load8x8(i1);
      const v128_t vbl01234567 = wasm_i16x8_load8x8(i2);
      const v128_t vbr01234567 = wasm_i16x8_load8x8(i3);
      const v128_t vtl89ABCDEF = wasm_i16x8_load8x8(i0 + 8);
      const v128_t vtr89ABCDEF = wasm_i16x8_load8x8(i1 + 8);
      const v128_t vbl89ABCDEF = wasm_i16x8_load8x8(i2 + 8);
      const v128_t vbr89ABCDEF = wasm_i16x8_load8x8(i3 + 8);
      i0 += 16;
      i1 += 16;
      i2 += 16;
      i3 += 16;

      // vt* = horizontal lerp of the top row: tr*ah + tl*(0x800 - ah)  (Q11).
      // vd* = horizontal lerp of the vertical delta (bottom - top)     (Q11).
      const v128_t vdr01234567 = wasm_i16x8_sub(vbr01234567, vtr01234567);
      const v128_t vt0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vdl01234567 = wasm_i16x8_sub(vbl01234567, vtl01234567);
      const v128_t vt4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
      const v128_t vdr89ABCDEF = wasm_i16x8_sub(vbr89ABCDEF, vtr89ABCDEF);
      const v128_t vt89AB = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr89ABCDEF, vtl89ABCDEF, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vdl89ABCDEF = wasm_i16x8_sub(vbl89ABCDEF, vtl89ABCDEF);
      const v128_t vtCDEF = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr89ABCDEF, vtl89ABCDEF, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
      const v128_t vd0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vd4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
      const v128_t vd89AB = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr89ABCDEF, vdl89ABCDEF, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vdCDEF = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr89ABCDEF, vdl89ABCDEF, 4, 12, 5, 13, 6, 14, 7, 15), valphah);

      // Vertical lerp in Q22: acc = (vt << 11) + vd * av.
      v128_t vacc0123 = wasm_i32x4_mul(vd0123, valphav);
      v128_t vacc4567 = wasm_i32x4_mul(vd4567, valphav);
      v128_t vacc89AB = wasm_i32x4_mul(vd89AB, valphav);
      v128_t vaccCDEF = wasm_i32x4_mul(vdCDEF, valphav);
      vacc0123 = wasm_i32x4_add(wasm_i32x4_shl(vt0123, 11), vacc0123);
      vacc4567 = wasm_i32x4_add(wasm_i32x4_shl(vt4567, 11), vacc4567);
      vacc89AB = wasm_i32x4_add(wasm_i32x4_shl(vt89AB, 11), vacc89AB);
      vaccCDEF = wasm_i32x4_add(wasm_i32x4_shl(vtCDEF, 11), vaccCDEF);

      // Round to nearest and drop 22 fractional bits. The i16x8 add is
      // intentional: the low 16 bits of 0x00200000 are zero, so no carries
      // cross lane halves and it equals a 32-bit add of the rounding term.
      vacc0123 = wasm_i32x4_shr(wasm_i16x8_add(vacc0123, vrounding), 22);
      vacc4567 = wasm_i32x4_shr(wasm_i16x8_add(vacc4567, vrounding), 22);
      vacc89AB = wasm_i32x4_shr(wasm_i16x8_add(vacc89AB, vrounding), 22);
      vaccCDEF = wasm_i32x4_shr(wasm_i16x8_add(vaccCDEF, vrounding), 22);

      // Saturating narrow i32 -> i16 -> i8 and store 16 output channels.
      const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
      const v128_t vacc89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
      const v128_t vo0123456789ABCDEF = wasm_i8x16_narrow_i16x8(vacc01234567, vacc89ABCDEF);

      wasm_v128_store(output, vo0123456789ABCDEF);
      output += 16;
    }
    // Secondary loop: same computation on 8 channels at a time.
    for (; c >= 8 * sizeof(int8_t); c -= 8 * sizeof(int8_t)) {
      const v128_t vtl01234567 = wasm_i16x8_load8x8(i0);
      i0 += 8;
      const v128_t vtr01234567 = wasm_i16x8_load8x8(i1);
      i1 += 8;
      const v128_t vbl01234567 = wasm_i16x8_load8x8(i2);
      i2 += 8;
      const v128_t vbr01234567 = wasm_i16x8_load8x8(i3);
      i3 += 8;

      const v128_t vdr01234567 = wasm_i16x8_sub(vbr01234567, vtr01234567);
      const v128_t vt0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vdl01234567 = wasm_i16x8_sub(vbl01234567, vtl01234567);
      const v128_t vt4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
      const v128_t vd0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vd4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);

      v128_t vacc0123 = wasm_i32x4_mul(vd0123, valphav);
      v128_t vacc4567 = wasm_i32x4_mul(vd4567, valphav);
      vacc0123 = wasm_i32x4_add(wasm_i32x4_shl(vt0123, 11), vacc0123);
      vacc4567 = wasm_i32x4_add(wasm_i32x4_shl(vt4567, 11), vacc4567);

      vacc0123 = wasm_i32x4_shr(wasm_i16x8_add(vacc0123, vrounding), 22);
      vacc4567 = wasm_i32x4_shr(wasm_i16x8_add(vacc4567, vrounding), 22);

      const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
      const v128_t vo01234567 = wasm_i8x16_narrow_i16x8(vacc01234567, vacc01234567);

      wasm_v128_store64_lane(output, vo01234567, 0);
      output += 8;
    }
    if XNN_UNLIKELY(c != 0) {
      // Channel tail (1..7): compute a full 8-channel group (over-reading the
      // inputs, permitted by XNN_OOB_READS) and store 4/2/1 bytes as needed,
      // shifting consumed lanes out of the vector between partial stores.
      const v128_t vtl01234567 = wasm_i16x8_load8x8(i0);
      const v128_t vtr01234567 = wasm_i16x8_load8x8(i1);
      const v128_t vbl01234567 = wasm_i16x8_load8x8(i2);
      const v128_t vbr01234567 = wasm_i16x8_load8x8(i3);

      const v128_t vdr01234567 = wasm_i16x8_sub(vbr01234567, vtr01234567);
      const v128_t vt0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vdl01234567 = wasm_i16x8_sub(vbl01234567, vtl01234567);
      const v128_t vt4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
      const v128_t vd0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vd4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);

      v128_t vacc0123 = wasm_i32x4_mul(vd0123, valphav);
      v128_t vacc4567 = wasm_i32x4_mul(vd4567, valphav);
      vacc0123 = wasm_i32x4_add(wasm_i32x4_shl(vt0123, 11), vacc0123);
      vacc4567 = wasm_i32x4_add(wasm_i32x4_shl(vt4567, 11), vacc4567);

      vacc0123 = wasm_i32x4_shr(wasm_i16x8_add(vacc0123, vrounding), 22);
      vacc4567 = wasm_i32x4_shr(wasm_i16x8_add(vacc4567, vrounding), 22);

      const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
      v128_t vo01234567 = wasm_i8x16_narrow_i16x8(vacc01234567, vacc01234567);

      if (c & (4 * sizeof(int8_t))) {
        wasm_v128_store32_lane(output, vo01234567, 0);
        vo01234567 = wasm_u64x2_shr(vo01234567, 32);
        output += 4;
      }
      if (c & (2 * sizeof(int8_t))) {
        wasm_v128_store16_lane(output, vo01234567, 0);
        vo01234567 = wasm_u32x4_shr(vo01234567, 16);
        output += 2;
      }
      if (c & (1 * sizeof(int8_t))) {
        wasm_v128_store8_lane(output, vo01234567, 0);
        output += 1;
      }
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}
| 8,337
| 45.322222
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/s8-ibilinear/gen/s8-ibilinear-wasmsimd-dot16x2-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/s8-ibilinear/wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>
// Bilinear interpolation (resize) for signed 8-bit data, WAsm SIMD
// "dot16x2" variant, 8 channels per main-loop iteration.
// Same algorithm as the c16 kernel: per output pixel, four corner row
// pointers from input[] and a (horizontal, vertical) Q11 weight pair from
// weights[]; horizontal lerp via i16 dot products, then vertical lerp,
// then round away 22 fractional bits.
// Marked XNN_OOB_READS: the channel tail loads a full 8-byte group and may
// read past the end of each input row.
void xnn_s8_ibilinear_ukernel__wasmsimd_dot16x2_c8(
    size_t output_pixels,
    size_t channels,
    const int8_t** restrict input,
    size_t input_offset,
    const int16_t* restrict weights,
    int8_t* restrict output,
    size_t output_increment) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(channels != 0);

  do {
    // Corner row pointers (top-left, top-right, bottom-left, bottom-right).
    const int8_t* i0 = (const int8_t*) ((uintptr_t) input[0] + input_offset);
    const int8_t* i1 = (const int8_t*) ((uintptr_t) input[1] + input_offset);
    const int8_t* i2 = (const int8_t*) ((uintptr_t) input[2] + input_offset);
    const int8_t* i3 = (const int8_t*) ((uintptr_t) input[3] + input_offset);
    input += 4;

    // i16 lane pairs (ah, 0x800 - ah): XOR makes odd lanes ~ah, and adding
    // 0x0801 there gives 0x800 - ah (~ah + 1 == -ah). Consumed by
    // wasm_i32x4_dot_i16x8 against (right, left) pairs.
    const v128_t valphah =
      wasm_i16x8_add(
        wasm_v128_xor(
          wasm_v128_load16_splat(weights),
          wasm_i32x4_const_splat(0xFFFF0000)),
        wasm_i32x4_const_splat(0x08010000));
    // Vertical Q11 weight, one copy per i32 lane.
    const v128_t valphav = wasm_i32x4_extend_low_i16x8(wasm_v128_load16_splat(weights + 1));
    weights += 2;

    // Rounding term 2^21 for the final arithmetic shift by 22.
    const v128_t vrounding = wasm_i32x4_const_splat(0x00200000);

    size_t c = channels;
    for (; c >= 8 * sizeof(int8_t); c -= 8 * sizeof(int8_t)) {
      const v128_t vtl01234567 = wasm_i16x8_load8x8(i0);
      i0 += 8;
      const v128_t vtr01234567 = wasm_i16x8_load8x8(i1);
      i1 += 8;
      const v128_t vbl01234567 = wasm_i16x8_load8x8(i2);
      i2 += 8;
      const v128_t vbr01234567 = wasm_i16x8_load8x8(i3);
      i3 += 8;

      // vt* = horizontal lerp of the top row; vd* = horizontal lerp of the
      // vertical delta (bottom - top); both in Q11.
      const v128_t vdr01234567 = wasm_i16x8_sub(vbr01234567, vtr01234567);
      const v128_t vt0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vdl01234567 = wasm_i16x8_sub(vbl01234567, vtl01234567);
      const v128_t vt4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
      const v128_t vd0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vd4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);

      // Vertical lerp in Q22: acc = (vt << 11) + vd * av.
      v128_t vacc0123 = wasm_i32x4_mul(vd0123, valphav);
      v128_t vacc4567 = wasm_i32x4_mul(vd4567, valphav);
      vacc0123 = wasm_i32x4_add(wasm_i32x4_shl(vt0123, 11), vacc0123);
      vacc4567 = wasm_i32x4_add(wasm_i32x4_shl(vt4567, 11), vacc4567);

      // Round and drop 22 fractional bits; the i16x8 add is carry-safe
      // because the low 16 bits of the rounding constant are zero.
      vacc0123 = wasm_i32x4_shr(wasm_i16x8_add(vacc0123, vrounding), 22);
      vacc4567 = wasm_i32x4_shr(wasm_i16x8_add(vacc4567, vrounding), 22);

      // Saturating narrow i32 -> i16 -> i8, then store 8 channels.
      const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
      const v128_t vo01234567 = wasm_i8x16_narrow_i16x8(vacc01234567, vacc01234567);

      wasm_v128_store64_lane(output, vo01234567, 0);
      output += 8;
    }
    if XNN_UNLIKELY(c != 0) {
      // Channel tail (1..7): compute a full 8-channel group (over-reads
      // allowed by XNN_OOB_READS) and store 4/2/1 bytes, shifting consumed
      // lanes out of the vector between partial stores.
      const v128_t vtl01234567 = wasm_i16x8_load8x8(i0);
      const v128_t vtr01234567 = wasm_i16x8_load8x8(i1);
      const v128_t vbl01234567 = wasm_i16x8_load8x8(i2);
      const v128_t vbr01234567 = wasm_i16x8_load8x8(i3);

      const v128_t vdr01234567 = wasm_i16x8_sub(vbr01234567, vtr01234567);
      const v128_t vt0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vdl01234567 = wasm_i16x8_sub(vbl01234567, vtl01234567);
      const v128_t vt4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
      const v128_t vd0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vd4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);

      v128_t vacc0123 = wasm_i32x4_mul(vd0123, valphav);
      v128_t vacc4567 = wasm_i32x4_mul(vd4567, valphav);
      vacc0123 = wasm_i32x4_add(wasm_i32x4_shl(vt0123, 11), vacc0123);
      vacc4567 = wasm_i32x4_add(wasm_i32x4_shl(vt4567, 11), vacc4567);

      vacc0123 = wasm_i32x4_shr(wasm_i16x8_add(vacc0123, vrounding), 22);
      vacc4567 = wasm_i32x4_shr(wasm_i16x8_add(vacc4567, vrounding), 22);

      const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
      v128_t vo01234567 = wasm_i8x16_narrow_i16x8(vacc01234567, vacc01234567);

      if (c & (4 * sizeof(int8_t))) {
        wasm_v128_store32_lane(output, vo01234567, 0);
        vo01234567 = wasm_u64x2_shr(vo01234567, 32);
        output += 4;
      }
      if (c & (2 * sizeof(int8_t))) {
        wasm_v128_store16_lane(output, vo01234567, 0);
        vo01234567 = wasm_u32x4_shr(vo01234567, 16);
        output += 2;
      }
      if (c & (1 * sizeof(int8_t))) {
        wasm_v128_store8_lane(output, vo01234567, 0);
        output += 1;
      }
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}
| 5,235
| 39.589147
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/s8-maxpool/s8-maxpool-2p2x-minmax-neon-c16.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/maxpool.h>
// Signed 8-bit max-pooling with output clamping, NEON, 16 channels per
// vector iteration. "2p2x": the first pass reduces the first 2 pooling
// elements directly into the output row; each subsequent pass folds 2 more
// elements into the partially-reduced output (read back through `o`).
// Marked XNN_OOB_READS: the channel tail loads full 16-byte vectors and may
// read past the end of each input row.
void xnn_s8_maxpool_minmax_ukernel_2p2x__neon_c16(
    size_t output_pixels,
    size_t kernel_elements,
    size_t channels,
    const int8_t** input,
    size_t input_offset,
    int8_t* output,
    size_t input_increment,
    size_t output_increment,
    const union xnn_s8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(kernel_elements != 0);
  assert(channels != 0);

  // Output clamping bounds, broadcast across all 16 lanes.
  const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.max);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.min);
  do {
    int8_t* o = output;
    {
      // First pass: max of pooling elements 0 and 1 into the output row.
      const int8_t* i0 = *input++;
      const int8_t* i1 = *input++;
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      if (kernel_elements < 2) {
        i1 = i0;  // single-element kernel: alias so the max is a no-op
      }

      size_t c = channels;
      for (; c >= 16; c -= 16) {
        const int8x16_t vi0 = vld1q_s8(i0); i0 += 16;
        const int8x16_t vi1 = vld1q_s8(i1); i1 += 16;

        int8x16_t vout = vmaxq_s8(vi0, vi1);
        vout = vmaxq_s8(vout, voutput_min);
        vout = vminq_s8(vout, voutput_max);

        vst1q_s8(o, vout); o += 16;
      }
      if (c != 0) {
        // Channel tail (1..15): compute a full 16-lane result (over-reads
        // allowed by XNN_OOB_READS), then store 8/4/2/1 bytes, rotating
        // stored lanes out with vext between partial stores.
        const int8x16_t vi0 = vld1q_s8(i0);
        const int8x16_t vi1 = vld1q_s8(i1);

        int8x16_t vout = vmaxq_s8(vi0, vi1);
        vout = vmaxq_s8(vout, voutput_min);
        vout = vminq_s8(vout, voutput_max);

        int8x8_t vout_lo = vget_low_s8(vout);
        if (c & 8) {
          vst1_s8(o, vout_lo); o += 8;
          vout_lo = vget_high_s8(vout);
        }
        if (c & 4) {
          vst1_lane_u32((void*) o, vreinterpret_u32_s8(vout_lo), 0); o += 4;
          vout_lo = vext_s8(vout_lo, vout_lo, 4);
        }
        if (c & 2) {
          vst1_lane_u16((void*) o, vreinterpret_u16_s8(vout_lo), 0); o += 2;
          vout_lo = vext_s8(vout_lo, vout_lo, 2);
        }
        if (c & 1) {
          vst1_lane_s8(o, vout_lo, 0); o += 1;
        }
      }
    }

    // Remaining passes: fold 2 more elements into the partial output each
    // time; `k` counts elements not yet folded.
    for (ptrdiff_t k = (ptrdiff_t) kernel_elements - 2; k > 0; k -= 2) {
      const int8_t* i0 = *input++;
      const int8_t* i1 = *input++;
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      if (k < 2) {
        i1 = i0;  // odd remainder: alias the unused second row
      }

      o = output;
      size_t c = channels;
      for (; c >= 16; c -= 16) {
        const int8x16_t vi0 = vld1q_s8(i0); i0 += 16;
        const int8x16_t vi1 = vld1q_s8(i1); i1 += 16;
        const int8x16_t vo = vld1q_s8(o);  // partial max from earlier passes

        const int8x16_t vmax01 = vmaxq_s8(vi0, vi1);
        int8x16_t vout = vmaxq_s8(vo, vmax01);
        vout = vmaxq_s8(vout, voutput_min);
        vout = vminq_s8(vout, voutput_max);

        vst1q_s8(o, vout); o += 16;
      }
      if (c != 0) {
        const int8x16_t vi0 = vld1q_s8(i0);
        const int8x16_t vi1 = vld1q_s8(i1);
        const int8x16_t vo = vld1q_s8(o);

        const int8x16_t vmax01 = vmaxq_s8(vi0, vi1);
        int8x16_t vout = vmaxq_s8(vo, vmax01);
        vout = vmaxq_s8(vout, voutput_min);
        vout = vminq_s8(vout, voutput_max);

        int8x8_t vout_lo = vget_low_s8(vout);
        if (c & 8) {
          vst1_s8(o, vout_lo); o += 8;
          vout_lo = vget_high_s8(vout);
        }
        if (c & 4) {
          vst1_lane_u32((void*) o, vreinterpret_u32_s8(vout_lo), 0); o += 4;
          vout_lo = vext_s8(vout_lo, vout_lo, 4);
        }
        if (c & 2) {
          vst1_lane_u16((void*) o, vreinterpret_u16_s8(vout_lo), 0); o += 2;
          vout_lo = vext_s8(vout_lo, vout_lo, 2);
        }
        if (c & 1) {
          vst1_lane_s8(o, vout_lo, 0); o += 1;
        }
      }
    }

    input = (const int8_t**) ((uintptr_t) input + input_increment);
    output = (int8_t*) ((uintptr_t) o + output_increment);
  } while (--output_pixels != 0);
}
| 4,081
| 29.462687
| 88
|
c
|
XNNPACK
|
XNNPACK-master/src/s8-maxpool/s8-maxpool-4p3x-minmax-neon-c16.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/maxpool.h>
// Signed 8-bit max-pooling with output clamping, NEON, 16 channels per
// vector iteration. "4p3x": the first pass reduces the first 4 pooling
// elements into the output row; each subsequent pass folds 3 more elements
// plus the partial output (read back through `o`).
// Rows beyond the kernel extent are aliased to row 0 so they never change
// the max. Marked XNN_OOB_READS: the channel tail loads full 16-byte
// vectors and may read past the end of each input row.
void xnn_s8_maxpool_minmax_ukernel_4p3x__neon_c16(
    size_t output_pixels,
    size_t kernel_elements,
    size_t channels,
    const int8_t** input,
    size_t input_offset,
    int8_t* output,
    size_t input_increment,
    size_t output_increment,
    const union xnn_s8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(kernel_elements != 0);
  assert(channels != 0);

  // Output clamping bounds, broadcast across all 16 lanes.
  const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.max);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.min);
  do {
    int8_t* o = output;
    {
      // First pass: max of pooling elements 0..3 into the output row.
      const int8_t* i0 = *input++;
      const int8_t* i1 = *input++;
      const int8_t* i2 = *input++;
      const int8_t* i3 = *input++;
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      // Clamp short kernels: rows past the kernel extent alias row 0.
      if (kernel_elements < 2) {
        i1 = i0;
      }
      if (kernel_elements <= 2) {
        i2 = i0;
      }
      if (kernel_elements < 4) {
        i3 = i0;
      }

      size_t c = channels;
      for (; c >= 16; c -= 16) {
        const int8x16_t vi0 = vld1q_s8(i0); i0 += 16;
        const int8x16_t vi1 = vld1q_s8(i1); i1 += 16;
        const int8x16_t vi2 = vld1q_s8(i2); i2 += 16;
        const int8x16_t vi3 = vld1q_s8(i3); i3 += 16;

        // Pairwise max tree, then clamp to [min, max].
        const int8x16_t vmax01 = vmaxq_s8(vi0, vi1);
        const int8x16_t vmax23 = vmaxq_s8(vi2, vi3);
        int8x16_t vout = vmaxq_s8(vmax01, vmax23);
        vout = vmaxq_s8(vout, voutput_min);
        vout = vminq_s8(vout, voutput_max);

        vst1q_s8(o, vout); o += 16;
      }
      if (c != 0) {
        // Channel tail (1..15): full 16-lane compute (over-reads allowed by
        // XNN_OOB_READS); store 8/4/2/1 bytes, rotating with vext.
        const int8x16_t vi0 = vld1q_s8(i0);
        const int8x16_t vi1 = vld1q_s8(i1);
        const int8x16_t vi2 = vld1q_s8(i2);
        const int8x16_t vi3 = vld1q_s8(i3);

        const int8x16_t vmax01 = vmaxq_s8(vi0, vi1);
        const int8x16_t vmax23 = vmaxq_s8(vi2, vi3);
        int8x16_t vout = vmaxq_s8(vmax01, vmax23);
        vout = vmaxq_s8(vout, voutput_min);
        vout = vminq_s8(vout, voutput_max);

        int8x8_t vout_lo = vget_low_s8(vout);
        if (c & 8) {
          vst1_s8(o, vout_lo); o += 8;
          vout_lo = vget_high_s8(vout);
        }
        if (c & 4) {
          vst1_lane_u32((void*) o, vreinterpret_u32_s8(vout_lo), 0); o += 4;
          vout_lo = vext_s8(vout_lo, vout_lo, 4);
        }
        if (c & 2) {
          vst1_lane_u16((void*) o, vreinterpret_u16_s8(vout_lo), 0); o += 2;
          vout_lo = vext_s8(vout_lo, vout_lo, 2);
        }
        if (c & 1) {
          vst1_lane_s8(o, vout_lo, 0); o += 1;
        }
      }
    }

    // Remaining passes: fold 3 more elements plus the partial output.
    for (ptrdiff_t k = (ptrdiff_t) kernel_elements - 4; k > 0; k -= 3) {
      const int8_t* i0 = *input++;
      const int8_t* i1 = *input++;
      const int8_t* i2 = *input++;
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      // Clamp the remainder: unused rows alias row 0.
      if (k < 2) {
        i1 = i0;
      }
      if (k <= 2) {
        i2 = i0;
      }

      o = output;
      size_t c = channels;
      for (; c >= 16; c -= 16) {
        const int8x16_t vi0 = vld1q_s8(i0); i0 += 16;
        const int8x16_t vi1 = vld1q_s8(i1); i1 += 16;
        const int8x16_t vi2 = vld1q_s8(i2); i2 += 16;
        const int8x16_t vo = vld1q_s8(o);  // partial max from earlier passes

        const int8x16_t vmax01 = vmaxq_s8(vi0, vi1);
        const int8x16_t vmax2o = vmaxq_s8(vi2, vo);
        int8x16_t vout = vmaxq_s8(vmax01, vmax2o);
        vout = vmaxq_s8(vout, voutput_min);
        vout = vminq_s8(vout, voutput_max);

        vst1q_s8(o, vout); o += 16;
      }
      if (c != 0) {
        const int8x16_t vi0 = vld1q_s8(i0);
        const int8x16_t vi1 = vld1q_s8(i1);
        const int8x16_t vi2 = vld1q_s8(i2);
        const int8x16_t vo = vld1q_s8(o);

        const int8x16_t vmax01 = vmaxq_s8(vi0, vi1);
        const int8x16_t vmax2o = vmaxq_s8(vi2, vo);
        int8x16_t vout = vmaxq_s8(vmax01, vmax2o);
        vout = vmaxq_s8(vout, voutput_min);
        vout = vminq_s8(vout, voutput_max);

        int8x8_t vout_lo = vget_low_s8(vout);
        if (c & 8) {
          vst1_s8(o, vout_lo); o += 8;
          vout_lo = vget_high_s8(vout);
        }
        if (c & 4) {
          vst1_lane_u32((void*) o, vreinterpret_u32_s8(vout_lo), 0); o += 4;
          vout_lo = vext_s8(vout_lo, vout_lo, 4);
        }
        if (c & 2) {
          vst1_lane_u16((void*) o, vreinterpret_u16_s8(vout_lo), 0); o += 2;
          vout_lo = vext_s8(vout_lo, vout_lo, 2);
        }
        if (c & 1) {
          vst1_lane_s8(o, vout_lo, 0); o += 1;
        }
      }
    }

    input = (const int8_t**) ((uintptr_t) input + input_increment);
    output = (int8_t*) ((uintptr_t) o + output_increment);
  } while (--output_pixels != 0);
}
| 5,158
| 31.043478
| 88
|
c
|
XNNPACK
|
XNNPACK-master/src/s8-maxpool/s8-maxpool-9p8x-minmax-neon-c16.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/maxpool.h>
// Signed 8-bit max-pooling with output clamping, NEON, 16 channels per
// vector iteration. "9p8x": the first pass reduces the first 9 pooling
// elements into the output row; each subsequent pass folds 8 more elements
// plus the partial output (read back through `o`).
// Rows beyond the kernel extent are aliased to row 0 so they never change
// the max. Marked XNN_OOB_READS: the channel tail loads full 16-byte
// vectors and may read past the end of each input row.
void xnn_s8_maxpool_minmax_ukernel_9p8x__neon_c16(
    size_t output_pixels,
    size_t kernel_elements,
    size_t channels,
    const int8_t** input,
    size_t input_offset,
    int8_t* output,
    size_t input_increment,
    size_t output_increment,
    const union xnn_s8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(kernel_elements != 0);
  assert(channels != 0);

  // Output clamping bounds, broadcast across all 16 lanes.
  const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.max);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.min);
  do {
    int8_t* o = output;
    {
      // First pass: max of pooling elements 0..8 into the output row.
      const int8_t* i0 = *input++;
      const int8_t* i1 = *input++;
      const int8_t* i2 = *input++;
      const int8_t* i3 = *input++;
      const int8_t* i4 = *input++;
      const int8_t* i5 = *input++;
      const int8_t* i6 = *input++;
      const int8_t* i7 = *input++;
      const int8_t* i8 = *input++;
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
      // Clamp short kernels: each row past the kernel extent aliases row 0
      // (the < / <= alternation matches rows 1..8 to extents 1..8).
      if (kernel_elements < 2) {
        i1 = i0;
      }
      if (kernel_elements <= 2) {
        i2 = i0;
      }
      if (kernel_elements < 4) {
        i3 = i0;
      }
      if (kernel_elements <= 4) {
        i4 = i0;
      }
      if (kernel_elements < 6) {
        i5 = i0;
      }
      if (kernel_elements <= 6) {
        i6 = i0;
      }
      if (kernel_elements < 8) {
        i7 = i0;
      }
      if (kernel_elements <= 8) {
        i8 = i0;
      }

      size_t c = channels;
      for (; c >= 16; c -= 16) {
        const int8x16_t vi0 = vld1q_s8(i0); i0 += 16;
        const int8x16_t vi1 = vld1q_s8(i1); i1 += 16;
        const int8x16_t vi2 = vld1q_s8(i2); i2 += 16;
        const int8x16_t vi3 = vld1q_s8(i3); i3 += 16;
        const int8x16_t vi4 = vld1q_s8(i4); i4 += 16;
        const int8x16_t vi5 = vld1q_s8(i5); i5 += 16;
        const int8x16_t vi6 = vld1q_s8(i6); i6 += 16;
        const int8x16_t vi7 = vld1q_s8(i7); i7 += 16;
        const int8x16_t vi8 = vld1q_s8(i8); i8 += 16;

        // Balanced max-reduction tree over the 9 rows, then clamp.
        const int8x16_t vmax018 = vmaxq_s8(vmaxq_s8(vi0, vi1), vi8);
        const int8x16_t vmax23 = vmaxq_s8(vi2, vi3);
        const int8x16_t vmax45 = vmaxq_s8(vi4, vi5);
        const int8x16_t vmax67 = vmaxq_s8(vi6, vi7);

        const int8x16_t vmax2345 = vmaxq_s8(vmax23, vmax45);
        const int8x16_t vmax01678 = vmaxq_s8(vmax018, vmax67);
        int8x16_t vout = vmaxq_s8(vmax2345, vmax01678);
        vout = vmaxq_s8(vout, voutput_min);
        vout = vminq_s8(vout, voutput_max);

        vst1q_s8(o, vout); o += 16;
      }
      if (c != 0) {
        // Channel tail (1..15): full 16-lane compute (over-reads allowed by
        // XNN_OOB_READS); store 8/4/2/1 bytes, rotating with vext.
        const int8x16_t vi0 = vld1q_s8(i0);
        const int8x16_t vi1 = vld1q_s8(i1);
        const int8x16_t vi2 = vld1q_s8(i2);
        const int8x16_t vi3 = vld1q_s8(i3);
        const int8x16_t vi4 = vld1q_s8(i4);
        const int8x16_t vi5 = vld1q_s8(i5);
        const int8x16_t vi6 = vld1q_s8(i6);
        const int8x16_t vi7 = vld1q_s8(i7);
        const int8x16_t vi8 = vld1q_s8(i8);

        const int8x16_t vmax018 = vmaxq_s8(vmaxq_s8(vi0, vi1), vi8);
        const int8x16_t vmax23 = vmaxq_s8(vi2, vi3);
        const int8x16_t vmax45 = vmaxq_s8(vi4, vi5);
        const int8x16_t vmax67 = vmaxq_s8(vi6, vi7);

        const int8x16_t vmax2345 = vmaxq_s8(vmax23, vmax45);
        const int8x16_t vmax01678 = vmaxq_s8(vmax018, vmax67);
        int8x16_t vout = vmaxq_s8(vmax2345, vmax01678);
        vout = vmaxq_s8(vout, voutput_min);
        vout = vminq_s8(vout, voutput_max);

        int8x8_t vout_lo = vget_low_s8(vout);
        if (c & 8) {
          vst1_s8(o, vout_lo); o += 8;
          vout_lo = vget_high_s8(vout);
        }
        if (c & 4) {
          vst1_lane_u32((void*) o, vreinterpret_u32_s8(vout_lo), 0); o += 4;
          vout_lo = vext_s8(vout_lo, vout_lo, 4);
        }
        if (c & 2) {
          vst1_lane_u16((void*) o, vreinterpret_u16_s8(vout_lo), 0); o += 2;
          vout_lo = vext_s8(vout_lo, vout_lo, 2);
        }
        if (c & 1) {
          vst1_lane_s8(o, vout_lo, 0); o += 1;
        }
      }
    }

    // Remaining passes: fold 8 more elements plus the partial output each
    // time; `k` counts elements not yet folded.
    for (ptrdiff_t k = (ptrdiff_t) kernel_elements - 9; k > 0; k -= 8) {
      const int8_t* i0 = *input++;
      const int8_t* i1 = *input++;
      const int8_t* i2 = *input++;
      const int8_t* i3 = *input++;
      const int8_t* i4 = *input++;
      const int8_t* i5 = *input++;
      const int8_t* i6 = *input++;
      const int8_t* i7 = *input++;
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      // Clamp the remainder: unused rows alias row 0.
      if (k < 2) {
        i1 = i0;
      }
      if (k <= 2) {
        i2 = i0;
      }
      if (k < 4) {
        i3 = i0;
      }
      if (k <= 4) {
        i4 = i0;
      }
      if (k < 6) {
        i5 = i0;
      }
      if (k <= 6) {
        i6 = i0;
      }
      if (k < 8) {
        i7 = i0;
      }

      o = output;
      size_t c = channels;
      for (; c >= 16; c -= 16) {
        const int8x16_t vi0 = vld1q_s8(i0); i0 += 16;
        const int8x16_t vi1 = vld1q_s8(i1); i1 += 16;
        const int8x16_t vi2 = vld1q_s8(i2); i2 += 16;
        const int8x16_t vi3 = vld1q_s8(i3); i3 += 16;
        const int8x16_t vi4 = vld1q_s8(i4); i4 += 16;
        const int8x16_t vi5 = vld1q_s8(i5); i5 += 16;
        const int8x16_t vi6 = vld1q_s8(i6); i6 += 16;
        const int8x16_t vi7 = vld1q_s8(i7); i7 += 16;
        const int8x16_t vo = vld1q_s8(o);  // partial max from earlier passes

        const int8x16_t vmax01 = vmaxq_s8(vmaxq_s8(vi0, vi1), vo);
        const int8x16_t vmax23 = vmaxq_s8(vi2, vi3);
        const int8x16_t vmax45 = vmaxq_s8(vi4, vi5);
        const int8x16_t vmax67 = vmaxq_s8(vi6, vi7);

        const int8x16_t vmax2345 = vmaxq_s8(vmax23, vmax45);
        const int8x16_t vmax0167 = vmaxq_s8(vmax01, vmax67);
        int8x16_t vout = vmaxq_s8(vmax2345, vmax0167);
        vout = vmaxq_s8(vout, voutput_min);
        vout = vminq_s8(vout, voutput_max);

        vst1q_s8(o, vout); o += 16;
      }
      if (c != 0) {
        const int8x16_t vi0 = vld1q_s8(i0);
        const int8x16_t vi1 = vld1q_s8(i1);
        const int8x16_t vi2 = vld1q_s8(i2);
        const int8x16_t vi3 = vld1q_s8(i3);
        const int8x16_t vi4 = vld1q_s8(i4);
        const int8x16_t vi5 = vld1q_s8(i5);
        const int8x16_t vi6 = vld1q_s8(i6);
        const int8x16_t vi7 = vld1q_s8(i7);
        const int8x16_t vo = vld1q_s8(o);

        const int8x16_t vmax01 = vmaxq_s8(vmaxq_s8(vi0, vi1), vo);
        const int8x16_t vmax23 = vmaxq_s8(vi2, vi3);
        const int8x16_t vmax45 = vmaxq_s8(vi4, vi5);
        const int8x16_t vmax67 = vmaxq_s8(vi6, vi7);

        const int8x16_t vmax2345 = vmaxq_s8(vmax23, vmax45);
        const int8x16_t vmax0167 = vmaxq_s8(vmax01, vmax67);
        int8x16_t vout = vmaxq_s8(vmax2345, vmax0167);
        vout = vmaxq_s8(vout, voutput_min);
        vout = vminq_s8(vout, voutput_max);

        int8x8_t vout_lo = vget_low_s8(vout);
        if (c & 8) {
          vst1_s8(o, vout_lo); o += 8;
          vout_lo = vget_high_s8(vout);
        }
        if (c & 4) {
          vst1_lane_u32((void*) o, vreinterpret_u32_s8(vout_lo), 0); o += 4;
          vout_lo = vext_s8(vout_lo, vout_lo, 4);
        }
        if (c & 2) {
          vst1_lane_u16((void*) o, vreinterpret_u16_s8(vout_lo), 0); o += 2;
          vout_lo = vext_s8(vout_lo, vout_lo, 2);
        }
        if (c & 1) {
          vst1_lane_s8(o, vout_lo, 0); o += 1;
        }
      }
    }

    input = (const int8_t**) ((uintptr_t) input + input_increment);
    output = (int8_t*) ((uintptr_t) o + output_increment);
  } while (--output_pixels != 0);
}
| 8,603
| 33.278884
| 88
|
c
|
XNNPACK
|
XNNPACK-master/src/s8-maxpool/s8-maxpool-9p8x-minmax-scalar-c1.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/maxpool.h>
// Signed 8-bit max-pooling with output clamping, scalar reference kernel,
// one channel per inner iteration. "9p8x": the first pass reduces the first
// 9 pooling elements into the output row; each subsequent pass folds 8 more
// elements together with the partially-reduced output.
void xnn_s8_maxpool_minmax_ukernel_9p8x__scalar_c1(
    size_t output_pixels,
    size_t kernel_elements,
    size_t channels,
    const int8_t** input,
    size_t input_offset,
    int8_t* output,
    size_t input_increment,
    size_t output_increment,
    const union xnn_s8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(output_pixels != 0);
  assert(kernel_elements != 0);
  assert(channels != 0);

  // Saturation bounds for the pooled result.
  const int32_t voutput_max = params->scalar.max;
  const int32_t voutput_min = params->scalar.min;
  do {
    int8_t* o = output;
    {
      // First pass: reduce pooling elements 0..8 into the output row.
      const int8_t* in[9];
      for (size_t j = 0; j < 9; j++) {
        in[j] = (const int8_t*) ((uintptr_t) *input++ + input_offset);
      }
      // Rows beyond the kernel extent alias row 0 so they never win the max.
      for (size_t j = 1; j < 9; j++) {
        if (kernel_elements <= j) {
          in[j] = in[0];
        }
      }

      size_t c = channels;
      do {
        // Linear max fold over the 9 rows (max is associative/commutative).
        int32_t vresult = (int32_t) *in[0]++;
        for (size_t j = 1; j < 9; j++) {
          vresult = math_max_s32(vresult, (int32_t) *in[j]++);
        }
        // Clamp to [voutput_min, voutput_max] and narrow back to int8.
        vresult = math_min_s32(vresult, voutput_max);
        vresult = math_max_s32(vresult, voutput_min);

        *o++ = (int8_t) vresult;
      } while (--c != 0);
    }

    // Remaining passes: fold 8 more rows into the partial results, reading
    // the running max back out of the output row.
    for (ptrdiff_t k = (ptrdiff_t) kernel_elements - 9; k > 0; k -= 8) {
      const int8_t* in[8];
      for (size_t j = 0; j < 8; j++) {
        in[j] = (const int8_t*) ((uintptr_t) *input++ + input_offset);
      }
      // Rows past the remaining extent alias row 0.
      for (size_t j = 1; j < 8; j++) {
        if (k <= (ptrdiff_t) j) {
          in[j] = in[0];
        }
      }

      o = output;
      size_t c = channels;
      do {
        // Seed with the partial max, then fold the 8 new rows.
        int32_t vresult = (int32_t) *o;
        for (size_t j = 0; j < 8; j++) {
          vresult = math_max_s32(vresult, (int32_t) *in[j]++);
        }
        vresult = math_min_s32(vresult, voutput_max);
        vresult = math_max_s32(vresult, voutput_min);

        *o++ = (int8_t) vresult;
      } while (--c != 0);
    }

    input = (const int8_t**) ((uintptr_t) input + input_increment);
    output = (int8_t*) ((uintptr_t) o + output_increment);
  } while (--output_pixels != 0);
}
| 5,588
| 30.755682
| 74
|
c
|
XNNPACK
|
XNNPACK-master/src/s8-maxpool/s8-maxpool-9p8x-minmax-sse2-c16.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/maxpool.h>
#include <xnnpack/unaligned.h>
// S8 max-pooling microkernel, "9p8x" schedule: the first pass reduces up to
// 9 kernel taps per output pixel, and each subsequent pass folds 8 more taps
// into the partial maxima already stored in `output`. 16 channels are
// processed per inner-loop iteration ("c16").
//
// SSE2 has no signed-byte min/max instruction, so every byte is XORed with a
// 0x80 bias (params->sse2.bias) to remap signed values onto unsigned ones,
// reduced/clamped with _mm_max_epu8/_mm_min_epu8 against pre-biased limits,
// then XORed back before being stored.
//
// output_pixels  - number of output pixels to produce (must be non-zero)
// kernel_elements- number of pooling taps per output pixel (must be non-zero)
// channels       - number of channels per pixel (must be non-zero)
// input          - array of pointers to the input rows for each tap
// input_offset   - byte offset added to every input row pointer
// XNN_OOB_READS  - remainder handling loads full 16-byte vectors, so reads may
//                  extend past the last channel; the attribute declares this.
void xnn_s8_maxpool_minmax_ukernel_9p8x__sse2_c16(
    size_t output_pixels,
    size_t kernel_elements,
    size_t channels,
    const int8_t** input,
    size_t input_offset,
    int8_t* output,
    size_t input_increment,
    size_t output_increment,
    const union xnn_s8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(kernel_elements != 0);
  assert(channels != 0);
  const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
  const __m128i voutput_max_with_bias = _mm_load_si128((const __m128i*) params->sse2.max_with_bias);
  const __m128i voutput_min_with_bias = _mm_load_si128((const __m128i*) params->sse2.min_with_bias);
  do {
    int8_t* o = output;
    {
      // First pass: reduce kernel taps 0-8.
      const int8_t* i0 = *input++;
      const int8_t* i1 = *input++;
      const int8_t* i2 = *input++;
      const int8_t* i3 = *input++;
      const int8_t* i4 = *input++;
      const int8_t* i5 = *input++;
      const int8_t* i6 = *input++;
      const int8_t* i7 = *input++;
      const int8_t* i8 = *input++;
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
      // Taps beyond kernel_elements alias i0: duplicated inputs do not change
      // the maximum, so no per-tap branching is needed in the inner loop.
      if (kernel_elements < 2) {
        i1 = i0;
      }
      if (kernel_elements <= 2) {
        i2 = i0;
      }
      if (kernel_elements < 4) {
        i3 = i0;
      }
      if (kernel_elements <= 4) {
        i4 = i0;
      }
      if (kernel_elements < 6) {
        i5 = i0;
      }
      if (kernel_elements <= 6) {
        i6 = i0;
      }
      if (kernel_elements < 8) {
        i7 = i0;
      }
      if (kernel_elements <= 8) {
        i8 = i0;
      }
      size_t c = channels;
      // Main loop: 16 channels per iteration.
      for (; c >= 16; c -= 16) {
        const __m128i vi0 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i0), vbias);
        i0 += 16;
        const __m128i vi1 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i1), vbias);
        i1 += 16;
        const __m128i vi2 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i2), vbias);
        i2 += 16;
        const __m128i vi3 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i3), vbias);
        i3 += 16;
        const __m128i vi4 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i4), vbias);
        i4 += 16;
        const __m128i vi5 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i5), vbias);
        i5 += 16;
        const __m128i vi6 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i6), vbias);
        i6 += 16;
        const __m128i vi7 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i7), vbias);
        i7 += 16;
        const __m128i vi8 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i8), vbias);
        i8 += 16;
        // Balanced reduction tree over the 9 biased inputs.
        const __m128i vmax018 = _mm_max_epu8(_mm_max_epu8(vi0, vi1), vi8);
        const __m128i vmax23 = _mm_max_epu8(vi2, vi3);
        const __m128i vmax45 = _mm_max_epu8(vi4, vi5);
        const __m128i vmax67 = _mm_max_epu8(vi6, vi7);
        const __m128i vmax2345 = _mm_max_epu8(vmax23, vmax45);
        const __m128i vmax01678 = _mm_max_epu8(vmax018, vmax67);
        __m128i vout = _mm_max_epu8(vmax2345, vmax01678);
        // Clamp in biased space, then undo the bias before storing.
        vout = _mm_max_epu8(vout, voutput_min_with_bias);
        vout = _mm_min_epu8(vout, voutput_max_with_bias);
        vout = _mm_xor_si128(vout, vbias);
        _mm_storeu_si128((__m128i*) o, vout); o += 16;
      }
      // Remainder of 1-15 channels: compute a full vector, store it piecewise.
      if (c != 0) {
        const __m128i vi0 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i0), vbias);
        const __m128i vi1 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i1), vbias);
        const __m128i vi2 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i2), vbias);
        const __m128i vi3 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i3), vbias);
        const __m128i vi4 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i4), vbias);
        const __m128i vi5 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i5), vbias);
        const __m128i vi6 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i6), vbias);
        const __m128i vi7 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i7), vbias);
        const __m128i vi8 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i8), vbias);
        const __m128i vmax018 = _mm_max_epu8(_mm_max_epu8(vi0, vi1), vi8);
        const __m128i vmax23 = _mm_max_epu8(vi2, vi3);
        const __m128i vmax45 = _mm_max_epu8(vi4, vi5);
        const __m128i vmax67 = _mm_max_epu8(vi6, vi7);
        const __m128i vmax2345 = _mm_max_epu8(vmax23, vmax45);
        const __m128i vmax01678 = _mm_max_epu8(vmax018, vmax67);
        __m128i vout = _mm_max_epu8(vmax2345, vmax01678);
        vout = _mm_max_epu8(vout, voutput_min_with_bias);
        vout = _mm_min_epu8(vout, voutput_max_with_bias);
        vout = _mm_xor_si128(vout, vbias);
        // Store 8/4/2/1 bytes, shifting consumed lanes out of vout.
        if (c & 8) {
          _mm_storel_epi64((__m128i*) o, vout);
          vout = _mm_unpackhi_epi64(vout, vout);
          o += 8;
        }
        if (c & 4) {
          unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vout));
          vout = _mm_srli_epi64(vout, 32);
          o += 4;
        }
        if (c & 2) {
          unaligned_store_u16(o, (uint16_t) _mm_extract_epi16(vout, 0));
          vout = _mm_srli_epi32(vout, 16);
          o += 2;
        }
        if (c & 1) {
          *((int8_t*) o) = (int8_t) _mm_cvtsi128_si32(vout);
          o += 1;
        }
      }
    }
    // Accumulation passes: fold 8 more taps per pass into the partial maxima
    // already written to `output` (read back through `o`).
    for (ptrdiff_t k = (ptrdiff_t) kernel_elements - 9; k > 0; k -= 8) {
      const int8_t* i0 = *input++;
      const int8_t* i1 = *input++;
      const int8_t* i2 = *input++;
      const int8_t* i3 = *input++;
      const int8_t* i4 = *input++;
      const int8_t* i5 = *input++;
      const int8_t* i6 = *input++;
      const int8_t* i7 = *input++;
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      // `k` is the number of taps remaining in this pass; alias unused taps to i0.
      if (k < 2) {
        i1 = i0;
      }
      if (k <= 2) {
        i2 = i0;
      }
      if (k < 4) {
        i3 = i0;
      }
      if (k <= 4) {
        i4 = i0;
      }
      if (k < 6) {
        i5 = i0;
      }
      if (k <= 6) {
        i6 = i0;
      }
      if (k < 8) {
        i7 = i0;
      }
      o = output;
      size_t c = channels;
      for (; c >= 16; c -= 16) {
        const __m128i vi0 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i0), vbias);
        i0 += 16;
        const __m128i vi1 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i1), vbias);
        i1 += 16;
        const __m128i vi2 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i2), vbias);
        i2 += 16;
        const __m128i vi3 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i3), vbias);
        i3 += 16;
        const __m128i vi4 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i4), vbias);
        i4 += 16;
        const __m128i vi5 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i5), vbias);
        i5 += 16;
        const __m128i vi6 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i6), vbias);
        i6 += 16;
        const __m128i vi7 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i7), vbias);
        i7 += 16;
        // Reload the running maximum produced by the previous pass.
        const __m128i vo = _mm_xor_si128(_mm_loadu_si128((const __m128i*) o), vbias);
        const __m128i vmax01 = _mm_max_epu8(_mm_max_epu8(vi0, vi1), vo);
        const __m128i vmax23 = _mm_max_epu8(vi2, vi3);
        const __m128i vmax45 = _mm_max_epu8(vi4, vi5);
        const __m128i vmax67 = _mm_max_epu8(vi6, vi7);
        const __m128i vmax2345 = _mm_max_epu8(vmax23, vmax45);
        const __m128i vmax0167 = _mm_max_epu8(vmax01, vmax67);
        __m128i vout = _mm_max_epu8(vmax2345, vmax0167);
        vout = _mm_max_epu8(vout, voutput_min_with_bias);
        vout = _mm_min_epu8(vout, voutput_max_with_bias);
        vout = _mm_xor_si128(vout, vbias);
        _mm_storeu_si128((__m128i*) o, vout);
        o += 16;
      }
      if (c != 0) {
        const __m128i vi0 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i0), vbias);
        const __m128i vi1 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i1), vbias);
        const __m128i vi2 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i2), vbias);
        const __m128i vi3 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i3), vbias);
        const __m128i vi4 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i4), vbias);
        const __m128i vi5 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i5), vbias);
        const __m128i vi6 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i6), vbias);
        const __m128i vi7 = _mm_xor_si128(_mm_loadu_si128((const __m128i*) i7), vbias);
        const __m128i vo = _mm_xor_si128(_mm_loadu_si128((const __m128i*) o), vbias);
        const __m128i vmax01 = _mm_max_epu8(_mm_max_epu8(vi0, vi1), vo);
        const __m128i vmax23 = _mm_max_epu8(vi2, vi3);
        const __m128i vmax45 = _mm_max_epu8(vi4, vi5);
        const __m128i vmax67 = _mm_max_epu8(vi6, vi7);
        const __m128i vmax2345 = _mm_max_epu8(vmax23, vmax45);
        const __m128i vmax0167 = _mm_max_epu8(vmax01, vmax67);
        __m128i vout = _mm_max_epu8(vmax2345, vmax0167);
        vout = _mm_max_epu8(vout, voutput_min_with_bias);
        vout = _mm_min_epu8(vout, voutput_max_with_bias);
        vout = _mm_xor_si128(vout, vbias);
        if (c & 8) {
          _mm_storel_epi64((__m128i*) o, vout);
          vout = _mm_unpackhi_epi64(vout, vout);
          o += 8;
        }
        if (c & 4) {
          unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vout));
          vout = _mm_srli_epi64(vout, 32);
          o += 4;
        }
        if (c & 2) {
          unaligned_store_u16(o, (uint16_t) _mm_extract_epi16(vout, 0));
          vout = _mm_srli_epi32(vout, 16);
          o += 2;
        }
        if (c & 1) {
          *o = (int8_t) _mm_cvtsi128_si32(vout);
          o += 1;
        }
      }
    }
    // Advance to the next output pixel's tap pointers and output position.
    input = (const int8_t**) ((uintptr_t) input + input_increment);
    output = (int8_t*) ((uintptr_t) o + output_increment);
  } while (--output_pixels != 0);
}
| 10,960
| 37.868794
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/s8-maxpool/s8-maxpool-9p8x-minmax-sse41-c16.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/maxpool.h>
#include <xnnpack/unaligned.h>
// S8 max-pooling microkernel, "9p8x" schedule: the first pass reduces up to
// 9 kernel taps per output pixel, and each subsequent pass folds 8 more taps
// into the partial maxima already stored in `output`. 16 channels are
// processed per inner-loop iteration ("c16").
//
// SSE4.1 provides native signed-byte min/max (_mm_max_epi8/_mm_min_epi8), so
// unlike the SSE2 variant no sign-flip bias is needed.
//
// XNN_OOB_READS: remainder handling loads full 16-byte vectors, so reads may
// extend past the last channel; the attribute declares this as permitted.
void xnn_s8_maxpool_minmax_ukernel_9p8x__sse41_c16(
    size_t output_pixels,
    size_t kernel_elements,
    size_t channels,
    const int8_t** input,
    size_t input_offset,
    int8_t* output,
    size_t input_increment,
    size_t output_increment,
    const union xnn_s8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(kernel_elements != 0);
  assert(channels != 0);
  const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4.max);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4.min);
  do {
    int8_t* o = output;
    {
      // First pass: reduce kernel taps 0-8.
      const int8_t* i0 = *input++;
      const int8_t* i1 = *input++;
      const int8_t* i2 = *input++;
      const int8_t* i3 = *input++;
      const int8_t* i4 = *input++;
      const int8_t* i5 = *input++;
      const int8_t* i6 = *input++;
      const int8_t* i7 = *input++;
      const int8_t* i8 = *input++;
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
      // Taps beyond kernel_elements alias i0: duplicated inputs do not change
      // the maximum, so no per-tap branching is needed in the inner loop.
      if (kernel_elements < 2) {
        i1 = i0;
      }
      if (kernel_elements <= 2) {
        i2 = i0;
      }
      if (kernel_elements < 4) {
        i3 = i0;
      }
      if (kernel_elements <= 4) {
        i4 = i0;
      }
      if (kernel_elements < 6) {
        i5 = i0;
      }
      if (kernel_elements <= 6) {
        i6 = i0;
      }
      if (kernel_elements < 8) {
        i7 = i0;
      }
      if (kernel_elements <= 8) {
        i8 = i0;
      }
      size_t c = channels;
      // Main loop: 16 channels per iteration.
      for (; c >= 16; c -= 16) {
        const __m128i vi0 = _mm_loadu_si128((const __m128i*) i0); i0 += 16;
        const __m128i vi1 = _mm_loadu_si128((const __m128i*) i1); i1 += 16;
        const __m128i vi2 = _mm_loadu_si128((const __m128i*) i2); i2 += 16;
        const __m128i vi3 = _mm_loadu_si128((const __m128i*) i3); i3 += 16;
        const __m128i vi4 = _mm_loadu_si128((const __m128i*) i4); i4 += 16;
        const __m128i vi5 = _mm_loadu_si128((const __m128i*) i5); i5 += 16;
        const __m128i vi6 = _mm_loadu_si128((const __m128i*) i6); i6 += 16;
        const __m128i vi7 = _mm_loadu_si128((const __m128i*) i7); i7 += 16;
        const __m128i vi8 = _mm_loadu_si128((const __m128i*) i8); i8 += 16;
        // Balanced reduction tree over the 9 inputs.
        const __m128i vmax018 = _mm_max_epi8(_mm_max_epi8(vi0, vi1), vi8);
        const __m128i vmax23 = _mm_max_epi8(vi2, vi3);
        const __m128i vmax45 = _mm_max_epi8(vi4, vi5);
        const __m128i vmax67 = _mm_max_epi8(vi6, vi7);
        const __m128i vmax2345 = _mm_max_epi8(vmax23, vmax45);
        const __m128i vmax01678 = _mm_max_epi8(vmax018, vmax67);
        __m128i vout = _mm_max_epi8(vmax2345, vmax01678);
        // Clamp to the requested output range.
        vout = _mm_max_epi8(vout, voutput_min);
        vout = _mm_min_epi8(vout, voutput_max);
        _mm_storeu_si128((__m128i*) o, vout); o += 16;
      }
      // Remainder of 1-15 channels: compute a full vector, store it piecewise.
      if (c != 0) {
        const __m128i vi0 = _mm_loadu_si128((const __m128i*) i0);
        const __m128i vi1 = _mm_loadu_si128((const __m128i*) i1);
        const __m128i vi2 = _mm_loadu_si128((const __m128i*) i2);
        const __m128i vi3 = _mm_loadu_si128((const __m128i*) i3);
        const __m128i vi4 = _mm_loadu_si128((const __m128i*) i4);
        const __m128i vi5 = _mm_loadu_si128((const __m128i*) i5);
        const __m128i vi6 = _mm_loadu_si128((const __m128i*) i6);
        const __m128i vi7 = _mm_loadu_si128((const __m128i*) i7);
        const __m128i vi8 = _mm_loadu_si128((const __m128i*) i8);
        const __m128i vmax018 = _mm_max_epi8(_mm_max_epi8(vi0, vi1), vi8);
        const __m128i vmax23 = _mm_max_epi8(vi2, vi3);
        const __m128i vmax45 = _mm_max_epi8(vi4, vi5);
        const __m128i vmax67 = _mm_max_epi8(vi6, vi7);
        const __m128i vmax2345 = _mm_max_epi8(vmax23, vmax45);
        const __m128i vmax01678 = _mm_max_epi8(vmax018, vmax67);
        __m128i vout = _mm_max_epi8(vmax2345, vmax01678);
        vout = _mm_max_epi8(vout, voutput_min);
        vout = _mm_min_epi8(vout, voutput_max);
        // Store 8/4/2/1 bytes, shifting consumed lanes out of vout.
        if (c & 8) {
          _mm_storel_epi64((__m128i*) o, vout);
          vout = _mm_unpackhi_epi64(vout, vout);
          o += 8;
        }
        if (c & 4) {
          unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vout));
          vout = _mm_srli_epi64(vout, 32);
          o += 4;
        }
        if (c & 2) {
          unaligned_store_u16(o, (uint16_t) _mm_extract_epi16(vout, 0));
          vout = _mm_srli_epi32(vout, 16);
          o += 2;
        }
        if (c & 1) {
          *o = (int8_t) _mm_cvtsi128_si32(vout);
          o += 1;
        }
      }
    }
    // Accumulation passes: fold 8 more taps per pass into the partial maxima
    // already written to `output` (read back through `o`).
    for (ptrdiff_t k = (ptrdiff_t) kernel_elements - 9; k > 0; k -= 8) {
      const int8_t* i0 = *input++;
      const int8_t* i1 = *input++;
      const int8_t* i2 = *input++;
      const int8_t* i3 = *input++;
      const int8_t* i4 = *input++;
      const int8_t* i5 = *input++;
      const int8_t* i6 = *input++;
      const int8_t* i7 = *input++;
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      // `k` is the number of taps remaining in this pass; alias unused taps to i0.
      if (k < 2) {
        i1 = i0;
      }
      if (k <= 2) {
        i2 = i0;
      }
      if (k < 4) {
        i3 = i0;
      }
      if (k <= 4) {
        i4 = i0;
      }
      if (k < 6) {
        i5 = i0;
      }
      if (k <= 6) {
        i6 = i0;
      }
      if (k < 8) {
        i7 = i0;
      }
      o = output;
      size_t c = channels;
      for (; c >= 16; c -= 16) {
        const __m128i vi0 = _mm_loadu_si128((const __m128i*) i0); i0 += 16;
        const __m128i vi1 = _mm_loadu_si128((const __m128i*) i1); i1 += 16;
        const __m128i vi2 = _mm_loadu_si128((const __m128i*) i2); i2 += 16;
        const __m128i vi3 = _mm_loadu_si128((const __m128i*) i3); i3 += 16;
        const __m128i vi4 = _mm_loadu_si128((const __m128i*) i4); i4 += 16;
        const __m128i vi5 = _mm_loadu_si128((const __m128i*) i5); i5 += 16;
        const __m128i vi6 = _mm_loadu_si128((const __m128i*) i6); i6 += 16;
        const __m128i vi7 = _mm_loadu_si128((const __m128i*) i7); i7 += 16;
        // Reload the running maximum produced by the previous pass.
        const __m128i vo = _mm_loadu_si128((const __m128i*) o);
        const __m128i vmax01 = _mm_max_epi8(_mm_max_epi8(vi0, vi1), vo);
        const __m128i vmax23 = _mm_max_epi8(vi2, vi3);
        const __m128i vmax45 = _mm_max_epi8(vi4, vi5);
        const __m128i vmax67 = _mm_max_epi8(vi6, vi7);
        const __m128i vmax2345 = _mm_max_epi8(vmax23, vmax45);
        const __m128i vmax0167 = _mm_max_epi8(vmax01, vmax67);
        __m128i vout = _mm_max_epi8(vmax2345, vmax0167);
        vout = _mm_max_epi8(vout, voutput_min);
        vout = _mm_min_epi8(vout, voutput_max);
        _mm_storeu_si128((__m128i*) o, vout);
        o += 16;
      }
      if (c != 0) {
        const __m128i vi0 = _mm_loadu_si128((const __m128i*) i0);
        const __m128i vi1 = _mm_loadu_si128((const __m128i*) i1);
        const __m128i vi2 = _mm_loadu_si128((const __m128i*) i2);
        const __m128i vi3 = _mm_loadu_si128((const __m128i*) i3);
        const __m128i vi4 = _mm_loadu_si128((const __m128i*) i4);
        const __m128i vi5 = _mm_loadu_si128((const __m128i*) i5);
        const __m128i vi6 = _mm_loadu_si128((const __m128i*) i6);
        const __m128i vi7 = _mm_loadu_si128((const __m128i*) i7);
        const __m128i vo = _mm_loadu_si128((const __m128i*) o);
        const __m128i vmax01 = _mm_max_epi8(_mm_max_epi8(vi0, vi1), vo);
        const __m128i vmax23 = _mm_max_epi8(vi2, vi3);
        const __m128i vmax45 = _mm_max_epi8(vi4, vi5);
        const __m128i vmax67 = _mm_max_epi8(vi6, vi7);
        const __m128i vmax2345 = _mm_max_epi8(vmax23, vmax45);
        const __m128i vmax0167 = _mm_max_epi8(vmax01, vmax67);
        __m128i vout = _mm_max_epi8(vmax2345, vmax0167);
        vout = _mm_max_epi8(vout, voutput_min);
        vout = _mm_min_epi8(vout, voutput_max);
        if (c & 8) {
          _mm_storel_epi64((__m128i*) o, vout);
          vout = _mm_unpackhi_epi64(vout, vout);
          o += 8;
        }
        if (c & 4) {
          unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vout));
          vout = _mm_srli_epi64(vout, 32);
          o += 4;
        }
        if (c & 2) {
          unaligned_store_u16(o, (uint16_t) _mm_extract_epi16(vout, 0));
          vout = _mm_srli_epi32(vout, 16);
          o += 2;
        }
        if (c & 1) {
          *o = (int8_t) _mm_cvtsi128_si32(vout);
          o += 1;
        }
      }
    }
    // Advance to the next output pixel's tap pointers and output position.
    input = (const int8_t**) ((uintptr_t) input + input_increment);
    output = (int8_t*) ((uintptr_t) o + output_increment);
  } while (--output_pixels != 0);
}
| 9,653
| 36.130769
| 88
|
c
|
XNNPACK
|
XNNPACK-master/src/s8-maxpool/s8-maxpool-9p8x-minmax-wasmsimd-c16.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/maxpool.h>
// S8 max-pooling microkernel, "9p8x" schedule: the first pass reduces up to
// 9 kernel taps per output pixel, and each subsequent pass folds 8 more taps
// into the partial maxima already stored in `output`. 16 channels are
// processed per inner-loop iteration ("c16"), using WAsm SIMD128
// wasm_i8x16_max/wasm_i8x16_min on signed bytes.
//
// XNN_OOB_READS: remainder handling loads full 16-byte vectors, so reads may
// extend past the last channel; the attribute declares this as permitted.
void xnn_s8_maxpool_minmax_ukernel_9p8x__wasmsimd_c16(
    size_t output_pixels,
    size_t kernel_elements,
    size_t channels,
    const int8_t** input,
    size_t input_offset,
    int8_t* output,
    size_t input_increment,
    size_t output_increment,
    const union xnn_s8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(kernel_elements != 0);
  assert(channels != 0);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
  do {
    int8_t* o = output;
    {
      // First pass: reduce kernel taps 0-8.
      const int8_t* i0 = *input++;
      const int8_t* i1 = *input++;
      const int8_t* i2 = *input++;
      const int8_t* i3 = *input++;
      const int8_t* i4 = *input++;
      const int8_t* i5 = *input++;
      const int8_t* i6 = *input++;
      const int8_t* i7 = *input++;
      const int8_t* i8 = *input++;
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
      // Taps beyond kernel_elements alias i0: duplicated inputs do not change
      // the maximum, so no per-tap branching is needed in the inner loop.
      if (kernel_elements < 2) {
        i1 = i0;
      }
      if (kernel_elements <= 2) {
        i2 = i0;
      }
      if (kernel_elements < 4) {
        i3 = i0;
      }
      if (kernel_elements <= 4) {
        i4 = i0;
      }
      if (kernel_elements < 6) {
        i5 = i0;
      }
      if (kernel_elements <= 6) {
        i6 = i0;
      }
      if (kernel_elements < 8) {
        i7 = i0;
      }
      if (kernel_elements <= 8) {
        i8 = i0;
      }
      size_t c = channels;
      // Main loop: 16 channels per iteration.
      for (; c >= 16; c -= 16) {
        const v128_t vi0 = wasm_v128_load(i0);
        i0 += 16;
        const v128_t vi1 = wasm_v128_load(i1);
        i1 += 16;
        const v128_t vi2 = wasm_v128_load(i2);
        i2 += 16;
        const v128_t vi3 = wasm_v128_load(i3);
        i3 += 16;
        const v128_t vi4 = wasm_v128_load(i4);
        i4 += 16;
        const v128_t vi5 = wasm_v128_load(i5);
        i5 += 16;
        const v128_t vi6 = wasm_v128_load(i6);
        i6 += 16;
        const v128_t vi7 = wasm_v128_load(i7);
        i7 += 16;
        const v128_t vi8 = wasm_v128_load(i8);
        i8 += 16;
        // Balanced reduction tree over the 9 inputs.
        const v128_t vmax018 = wasm_i8x16_max(wasm_i8x16_max(vi0, vi1), vi8);
        const v128_t vmax23 = wasm_i8x16_max(vi2, vi3);
        const v128_t vmax45 = wasm_i8x16_max(vi4, vi5);
        const v128_t vmax67 = wasm_i8x16_max(vi6, vi7);
        const v128_t vmax2345 = wasm_i8x16_max(vmax23, vmax45);
        const v128_t vmax01678 = wasm_i8x16_max(vmax018, vmax67);
        v128_t vout = wasm_i8x16_max(vmax2345, vmax01678);
        // Clamp to the requested output range.
        vout = wasm_i8x16_min(vout, voutput_max);
        vout = wasm_i8x16_max(vout, voutput_min);
        wasm_v128_store(o, vout); o += 16;
      }
      // Remainder of 1-15 channels: compute a full vector, store it piecewise.
      if (c != 0) {
        const v128_t vi0 = wasm_v128_load(i0);
        const v128_t vi1 = wasm_v128_load(i1);
        const v128_t vi2 = wasm_v128_load(i2);
        const v128_t vi3 = wasm_v128_load(i3);
        const v128_t vi4 = wasm_v128_load(i4);
        const v128_t vi5 = wasm_v128_load(i5);
        const v128_t vi6 = wasm_v128_load(i6);
        const v128_t vi7 = wasm_v128_load(i7);
        const v128_t vi8 = wasm_v128_load(i8);
        const v128_t vmax018 = wasm_i8x16_max(wasm_i8x16_max(vi0, vi1), vi8);
        const v128_t vmax23 = wasm_i8x16_max(vi2, vi3);
        const v128_t vmax45 = wasm_i8x16_max(vi4, vi5);
        const v128_t vmax67 = wasm_i8x16_max(vi6, vi7);
        const v128_t vmax2345 = wasm_i8x16_max(vmax23, vmax45);
        const v128_t vmax01678 = wasm_i8x16_max(vmax018, vmax67);
        v128_t vout = wasm_i8x16_max(vmax2345, vmax01678);
        vout = wasm_i8x16_min(vout, voutput_max);
        vout = wasm_i8x16_max(vout, voutput_min);
        // Store 8/4/2/1 bytes, shifting consumed lanes out of vout.
        if (c & 8) {
          wasm_v128_store64_lane(o, vout, 0);
          vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
          o += 8;
        }
        if (c & 4) {
          wasm_v128_store32_lane(o, vout, 0);
          vout = wasm_u64x2_shr(vout, 32);
          o += 4;
        }
        if (c & 2) {
          wasm_v128_store16_lane(o, vout, 0);
          vout = wasm_u32x4_shr(vout, 16);
          o += 2;
        }
        if (c & 1) {
          wasm_v128_store8_lane(o, vout, 0);
          o += 1;
        }
      }
    }
    // Accumulation passes: fold 8 more taps per pass into the partial maxima
    // already written to `output` (read back through `o`).
    for (ptrdiff_t k = (ptrdiff_t) kernel_elements - 9; k > 0; k -= 8) {
      const int8_t* i0 = *input++;
      const int8_t* i1 = *input++;
      const int8_t* i2 = *input++;
      const int8_t* i3 = *input++;
      const int8_t* i4 = *input++;
      const int8_t* i5 = *input++;
      const int8_t* i6 = *input++;
      const int8_t* i7 = *input++;
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      // `k` is the number of taps remaining in this pass; alias unused taps to i0.
      if (k < 2) {
        i1 = i0;
      }
      if (k <= 2) {
        i2 = i0;
      }
      if (k < 4) {
        i3 = i0;
      }
      if (k <= 4) {
        i4 = i0;
      }
      if (k < 6) {
        i5 = i0;
      }
      if (k <= 6) {
        i6 = i0;
      }
      if (k < 8) {
        i7 = i0;
      }
      o = output;
      size_t c = channels;
      for (; c >= 16; c -= 16) {
        const v128_t vi0 = wasm_v128_load(i0);
        i0 += 16;
        const v128_t vi1 = wasm_v128_load(i1);
        i1 += 16;
        const v128_t vi2 = wasm_v128_load(i2);
        i2 += 16;
        const v128_t vi3 = wasm_v128_load(i3);
        i3 += 16;
        const v128_t vi4 = wasm_v128_load(i4);
        i4 += 16;
        const v128_t vi5 = wasm_v128_load(i5);
        i5 += 16;
        const v128_t vi6 = wasm_v128_load(i6);
        i6 += 16;
        const v128_t vi7 = wasm_v128_load(i7);
        i7 += 16;
        // Reload the running maximum produced by the previous pass.
        const v128_t vo = wasm_v128_load(o);
        const v128_t vmax01 = wasm_i8x16_max(wasm_i8x16_max(vi0, vi1), vo);
        const v128_t vmax23 = wasm_i8x16_max(vi2, vi3);
        const v128_t vmax45 = wasm_i8x16_max(vi4, vi5);
        const v128_t vmax67 = wasm_i8x16_max(vi6, vi7);
        const v128_t vmax2345 = wasm_i8x16_max(vmax23, vmax45);
        const v128_t vmax0167 = wasm_i8x16_max(vmax01, vmax67);
        v128_t vout = wasm_i8x16_max(vmax2345, vmax0167);
        vout = wasm_i8x16_min(vout, voutput_max);
        vout = wasm_i8x16_max(vout, voutput_min);
        wasm_v128_store(o, vout);
        o += 16;
      }
      if (c != 0) {
        const v128_t vi0 = wasm_v128_load(i0);
        const v128_t vi1 = wasm_v128_load(i1);
        const v128_t vi2 = wasm_v128_load(i2);
        const v128_t vi3 = wasm_v128_load(i3);
        const v128_t vi4 = wasm_v128_load(i4);
        const v128_t vi5 = wasm_v128_load(i5);
        const v128_t vi6 = wasm_v128_load(i6);
        const v128_t vi7 = wasm_v128_load(i7);
        const v128_t vo = wasm_v128_load(o);
        const v128_t vmax01 = wasm_i8x16_max(wasm_i8x16_max(vi0, vi1), vo);
        const v128_t vmax23 = wasm_i8x16_max(vi2, vi3);
        const v128_t vmax45 = wasm_i8x16_max(vi4, vi5);
        const v128_t vmax67 = wasm_i8x16_max(vi6, vi7);
        const v128_t vmax2345 = wasm_i8x16_max(vmax23, vmax45);
        const v128_t vmax0167 = wasm_i8x16_max(vmax01, vmax67);
        v128_t vout = wasm_i8x16_max(vmax2345, vmax0167);
        vout = wasm_i8x16_min(vout, voutput_max);
        vout = wasm_i8x16_max(vout, voutput_min);
        if (c & 8) {
          wasm_v128_store64_lane(o, vout, 0);
          vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
          o += 8;
        }
        if (c & 4) {
          wasm_v128_store32_lane(o, vout, 0);
          vout = wasm_u64x2_shr(vout, 32);
          o += 4;
        }
        if (c & 2) {
          wasm_v128_store16_lane(o, vout, 0);
          vout = wasm_u32x4_shr(vout, 16);
          o += 2;
        }
        if (c & 1) {
          wasm_v128_store8_lane(o, vout, 0);
          o += 1;
        }
      }
    }
    // Advance to the next output pixel's tap pointers and output position.
    input = (const int8_t**) ((uintptr_t) input + input_increment);
    output = (int8_t*) ((uintptr_t) o + output_increment);
  } while (--output_pixels != 0);
}
| 8,994
| 31.59058
| 88
|
c
|
XNNPACK
|
XNNPACK-master/src/s8-vclamp/s8-vclamp-neon-x64.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
// Clamps a stream of signed 8-bit values to [params->neon.min, params->neon.max].
// Processes 64 bytes per main-loop iteration, then 8 bytes at a time, and
// finishes a 1-7 byte remainder with lane-wise stores. The remainder path
// still issues a full 8-byte load, which may read past the end of `input`;
// XNN_OOB_READS declares that as permitted.
void xnn_s8_vclamp_ukernel__neon_x64(
    size_t batch,
    const int8_t* input,
    int8_t* output,
    const union xnn_s8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(int8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the clamping limits across all 16 lanes, and keep 8-lane
  // halves handy for the narrow loops.
  const int8x16_t vmax = vld1q_dup_s8(&params->neon.max);
  const int8x16_t vmin = vld1q_dup_s8(&params->neon.min);
  const int8x8_t vmax_lo = vget_low_s8(vmax);
  const int8x8_t vmin_lo = vget_low_s8(vmin);
  // Main loop: four 16-byte vectors (64 bytes) per iteration.
  for (; batch >= 64; batch -= 64) {
    int8x16_t vt0 = vld1q_s8(input); input += 16;
    int8x16_t vt1 = vld1q_s8(input); input += 16;
    int8x16_t vt2 = vld1q_s8(input); input += 16;
    int8x16_t vt3 = vld1q_s8(input); input += 16;
    vt0 = vminq_s8(vmaxq_s8(vt0, vmin), vmax);
    vt1 = vminq_s8(vmaxq_s8(vt1, vmin), vmax);
    vt2 = vminq_s8(vmaxq_s8(vt2, vmin), vmax);
    vt3 = vminq_s8(vmaxq_s8(vt3, vmin), vmax);
    vst1q_s8(output, vt0); output += 16;
    vst1q_s8(output, vt1); output += 16;
    vst1q_s8(output, vt2); output += 16;
    vst1q_s8(output, vt3); output += 16;
  }
  // Narrow loop: 8 bytes at a time.
  for (; batch >= 8; batch -= 8) {
    int8x8_t vt = vld1_s8(input); input += 8;
    vt = vmin_s8(vt, vmax_lo);
    vt = vmax_s8(vt, vmin_lo);
    vst1_s8(output, vt); output += 8;
  }
  // Remainder of 1-7 bytes: clamp a full 8-byte vector, then store the valid
  // prefix 4/2/1 bytes at a time, rotating consumed lanes out with vext.
  if XNN_UNLIKELY(batch != 0) {
    int8x8_t vt = vld1_s8(input); input += 8;
    vt = vmin_s8(vt, vmax_lo);
    vt = vmax_s8(vt, vmin_lo);
    if (batch & 4) {
      vst1_lane_u32((void*) output, vreinterpret_u32_s8(vt), 0); output += 4;
      vt = vext_s8(vt, vt, 4);
    }
    if (batch & 2) {
      vst1_lane_u16((void*) output, vreinterpret_u16_s8(vt), 0); output += 2;
      vt = vext_s8(vt, vt, 2);
    }
    if (batch & 1) {
      vst1_lane_s8(output, vt, 0);
    }
  }
}
| 2,212
| 28.506667
| 88
|
c
|
XNNPACK
|
XNNPACK-master/src/s8-vclamp/s8-vclamp-scalar-x4.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Clamps a stream of signed 8-bit values to [params->scalar.min,
// params->scalar.max], four elements per iteration with a scalar tail.
// Arithmetic is done in int32 so the comparisons never overflow.
void xnn_s8_vclamp_ukernel__scalar_x4(
    size_t batch,
    const int8_t* input,
    int8_t* output,
    const union xnn_s8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(int8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  const int32_t vmax = params->scalar.max;
  const int32_t vmin = params->scalar.min;
  // Unrolled loop: clamp four values per iteration.
  while (batch >= 4 * sizeof(int8_t)) {
    int32_t vacc0 = (int32_t) input[0];
    int32_t vacc1 = (int32_t) input[1];
    int32_t vacc2 = (int32_t) input[2];
    int32_t vacc3 = (int32_t) input[3];
    input += 4;
    // Raise to the lower bound first, then cap at the upper bound.
    vacc0 = math_min_s32(math_max_s32(vacc0, vmin), vmax);
    vacc1 = math_min_s32(math_max_s32(vacc1, vmin), vmax);
    vacc2 = math_min_s32(math_max_s32(vacc2, vmin), vmax);
    vacc3 = math_min_s32(math_max_s32(vacc3, vmin), vmax);
    output[0] = (int8_t) vacc0;
    output[1] = (int8_t) vacc1;
    output[2] = (int8_t) vacc2;
    output[3] = (int8_t) vacc3;
    output += 4;
    batch -= 4 * sizeof(int8_t);
  }
  // Scalar tail: 1-3 leftover elements.
  while (batch != 0) {
    int32_t vacc = (int32_t) *input++;
    vacc = math_min_s32(math_max_s32(vacc, vmin), vmax);
    *output++ = (int8_t) vacc;
    batch -= sizeof(int8_t);
  }
}
| 1,612
| 25.442623
| 74
|
c
|
XNNPACK
|
XNNPACK-master/src/s8-vclamp/s8-vclamp-sse2-x64.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vunary.h>
// Clamps a stream of signed 8-bit values, 64 bytes per main-loop iteration.
//
// SSE2 has no signed-byte min/max, so each byte is XORed with a 0x80 bias
// (params->sse2.bias) to remap signed values onto unsigned ones, clamped with
// _mm_max_epu8/_mm_min_epu8 against pre-biased limits, and XORed back before
// being stored. The remainder path loads a full 16-byte vector, so reads may
// extend past the end of `input`; XNN_OOB_READS declares this as permitted.
void xnn_s8_vclamp_ukernel__sse2_x64(
    size_t batch,
    const int8_t* input,
    int8_t* output,
    const union xnn_s8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(int8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
  const __m128i voutput_max_with_bias = _mm_load_si128((const __m128i*) params->sse2.max_with_bias);
  const __m128i voutput_min_with_bias = _mm_load_si128((const __m128i*) params->sse2.min_with_bias);
  // Main loop: four 16-byte vectors (64 bytes) per iteration.
  for (; batch >= 64; batch -= 64) {
    __m128i vacc0 = _mm_loadu_si128((const __m128i*) input);
    __m128i vacc1 = _mm_loadu_si128((const __m128i*) input + 1);
    __m128i vacc2 = _mm_loadu_si128((const __m128i*) input + 2);
    __m128i vacc3 = _mm_loadu_si128((const __m128i*) input + 3);
    input += 64;
    // Bias into unsigned space, clamp, then undo the bias.
    vacc0 = _mm_xor_si128(vacc0, vbias);
    vacc1 = _mm_xor_si128(vacc1, vbias);
    vacc2 = _mm_xor_si128(vacc2, vbias);
    vacc3 = _mm_xor_si128(vacc3, vbias);
    vacc0 = _mm_max_epu8(vacc0, voutput_min_with_bias);
    vacc1 = _mm_max_epu8(vacc1, voutput_min_with_bias);
    vacc2 = _mm_max_epu8(vacc2, voutput_min_with_bias);
    vacc3 = _mm_max_epu8(vacc3, voutput_min_with_bias);
    vacc0 = _mm_min_epu8(vacc0, voutput_max_with_bias);
    vacc1 = _mm_min_epu8(vacc1, voutput_max_with_bias);
    vacc2 = _mm_min_epu8(vacc2, voutput_max_with_bias);
    vacc3 = _mm_min_epu8(vacc3, voutput_max_with_bias);
    vacc0 = _mm_xor_si128(vacc0, vbias);
    vacc1 = _mm_xor_si128(vacc1, vbias);
    vacc2 = _mm_xor_si128(vacc2, vbias);
    vacc3 = _mm_xor_si128(vacc3, vbias);
    _mm_storeu_si128((__m128i*) output, vacc0);
    _mm_storeu_si128((__m128i*) output + 1, vacc1);
    _mm_storeu_si128((__m128i*) output + 2, vacc2);
    _mm_storeu_si128((__m128i*) output + 3, vacc3);
    output += 64;
  }
  // Single-vector loop: 16 bytes at a time.
  for (; batch >= 16; batch -= 16) {
    __m128i vacc = _mm_loadu_si128((const __m128i*) input);
    input += 16;
    vacc = _mm_xor_si128(vacc, vbias);
    vacc = _mm_min_epu8(vacc, voutput_max_with_bias);
    vacc = _mm_max_epu8(vacc, voutput_min_with_bias);
    vacc = _mm_xor_si128(vacc, vbias);
    _mm_storeu_si128((__m128i*) output, vacc);
    output += 16;
  }
  // Remainder of 1-15 bytes: clamp a full vector, then store the valid
  // prefix 8/4/2/1 bytes at a time, shifting consumed lanes out of vacc.
  if XNN_UNLIKELY(batch != 0) {
    __m128i vacc = _mm_loadu_si128((const __m128i*) input);
    vacc = _mm_xor_si128(vacc, vbias);
    vacc = _mm_min_epu8(vacc, voutput_max_with_bias);
    vacc = _mm_max_epu8(vacc, voutput_min_with_bias);
    vacc = _mm_xor_si128(vacc, vbias);
    if (batch & 8) {
      _mm_storel_epi64((__m128i*) output, vacc);
      output += 8;
      vacc = _mm_unpackhi_epi64(vacc, vacc);
    }
    if (batch & 4) {
      unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vacc));
      output += 4;
      vacc = _mm_srli_epi64(vacc, 32);
    }
    if (batch & 2) {
      unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vacc));
      output += 2;
      vacc = _mm_srli_epi32(vacc, 16);
    }
    if (batch & 1) {
      *output = (int8_t) _mm_cvtsi128_si32(vacc);
    }
  }
}
| 3,357
| 32.247525
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/s8-vclamp/s8-vclamp-sse41-x64.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vunary.h>
// Clamp every int8 element of `input` into [params->sse4.min, params->sse4.max]
// using SSE4.1 packed signed-byte min/max, writing the result to `output`.
// `batch` is the number of elements; XNN_OOB_READS permits reading a full
// 16-byte vector past the logical end of `input` in the tail.
void xnn_s8_vclamp_ukernel__sse41_x64(
    size_t batch,
    const int8_t* input,
    int8_t* output,
    const union xnn_s8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(int8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Clamp bounds, replicated across all 16 byte lanes by the params layout.
  const __m128i vmax = _mm_load_si128((const __m128i*) params->sse4.max);
  const __m128i vmin = _mm_load_si128((const __m128i*) params->sse4.min);

  // Main loop: four 16-byte vectors (64 elements) per iteration.
  while (batch >= 64) {
    __m128i vx0 = _mm_loadu_si128((const __m128i*) input);
    __m128i vx1 = _mm_loadu_si128((const __m128i*) input + 1);
    __m128i vx2 = _mm_loadu_si128((const __m128i*) input + 2);
    __m128i vx3 = _mm_loadu_si128((const __m128i*) input + 3);
    input += 64;

    vx0 = _mm_min_epi8(_mm_max_epi8(vx0, vmin), vmax);
    vx1 = _mm_min_epi8(_mm_max_epi8(vx1, vmin), vmax);
    vx2 = _mm_min_epi8(_mm_max_epi8(vx2, vmin), vmax);
    vx3 = _mm_min_epi8(_mm_max_epi8(vx3, vmin), vmax);

    _mm_storeu_si128((__m128i*) output, vx0);
    _mm_storeu_si128((__m128i*) output + 1, vx1);
    _mm_storeu_si128((__m128i*) output + 2, vx2);
    _mm_storeu_si128((__m128i*) output + 3, vx3);
    output += 64;
    batch -= 64;
  }

  // Secondary loop: one 16-byte vector at a time.
  while (batch >= 16) {
    __m128i vx = _mm_loadu_si128((const __m128i*) input);
    input += 16;
    vx = _mm_max_epi8(_mm_min_epi8(vx, vmax), vmin);
    _mm_storeu_si128((__m128i*) output, vx);
    output += 16;
    batch -= 16;
  }

  // Tail (1-15 remaining elements): clamp a full over-read vector, then store
  // the surviving bytes piecewise by walking the set bits of `batch`.
  if XNN_UNLIKELY(batch != 0) {
    __m128i vx = _mm_loadu_si128((const __m128i*) input);
    vx = _mm_max_epi8(_mm_min_epi8(vx, vmax), vmin);
    if (batch & 8) {
      _mm_storel_epi64((__m128i*) output, vx);
      output += 8;
      vx = _mm_unpackhi_epi64(vx, vx);  // move high 8 bytes into the low half
    }
    if (batch & 4) {
      unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vx));
      output += 4;
      vx = _mm_srli_epi64(vx, 32);
    }
    if (batch & 2) {
      unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vx));
      output += 2;
      vx = _mm_srli_epi32(vx, 16);
    }
    if (batch & 1) {
      *output = (int8_t) _mm_cvtsi128_si32(vx);
    }
  }
}
| 2,636
| 29.662791
| 88
|
c
|
XNNPACK
|
XNNPACK-master/src/s8-vclamp/s8-vclamp-wasmsimd-x64.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
// Clamps every int8 element of `input` into [params->wasmsimd.min,
// params->wasmsimd.max] with WASM SIMD byte min/max and writes to `output`.
// `batch` counts elements; XNN_OOB_READS allows the tail to load a full
// 16-byte vector past the logical end of `input`.
void xnn_s8_vclamp_ukernel__wasmsimd_x64(
    size_t batch,
    const int8_t* input,
    int8_t* output,
    const union xnn_s8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(int8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Bounds are stored as 8 replicated bytes; splat the 64-bit pattern to 128 bits.
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
  // Main loop: four 16-byte vectors (64 elements) per iteration.
  for (; batch >= 64; batch -= 64) {
    v128_t vacc0 = wasm_v128_load(input);
    v128_t vacc1 = wasm_v128_load(input + 16);
    v128_t vacc2 = wasm_v128_load(input + 32);
    v128_t vacc3 = wasm_v128_load(input + 48);
    input += 64;
    vacc0 = wasm_i8x16_max(vacc0, voutput_min);
    vacc1 = wasm_i8x16_max(vacc1, voutput_min);
    vacc2 = wasm_i8x16_max(vacc2, voutput_min);
    vacc3 = wasm_i8x16_max(vacc3, voutput_min);
    vacc0 = wasm_i8x16_min(vacc0, voutput_max);
    vacc1 = wasm_i8x16_min(vacc1, voutput_max);
    vacc2 = wasm_i8x16_min(vacc2, voutput_max);
    vacc3 = wasm_i8x16_min(vacc3, voutput_max);
    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 16, vacc1);
    wasm_v128_store(output + 32, vacc2);
    wasm_v128_store(output + 48, vacc3);
    output += 64;
  }
  // Secondary loop: one vector (16 elements) at a time.
  for (; batch >= 16; batch -= 16) {
    v128_t vacc = wasm_v128_load(input);
    input += 16;
    vacc = wasm_i8x16_min(vacc, voutput_max);
    vacc = wasm_i8x16_max(vacc, voutput_min);
    wasm_v128_store(output, vacc);
    output += 16;
  }
  // Tail (1-15 elements): clamp a full over-read vector, then store the
  // surviving bytes piecewise, shifting consumed lanes out after each store.
  if XNN_UNLIKELY(batch != 0) {
    v128_t vacc = wasm_v128_load(input);
    vacc = wasm_i8x16_min(vacc, voutput_max);
    vacc = wasm_i8x16_max(vacc, voutput_min);
    if (batch & 8) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);  // bring high 8 bytes to lane 0
      output += 8;
    }
    if (batch & 4) {
      wasm_v128_store32_lane(output, vacc, 0);
      vacc = wasm_u64x2_shr(vacc, 32);
      output += 4;
    }
    if (batch & 2) {
      wasm_v128_store16_lane(output, vacc, 0);
      vacc = wasm_u32x4_shr(vacc, 16);
      output += 2;
    }
    if (batch & 1) {
      wasm_v128_store8_lane(output, vacc, 0);
    }
  }
}
| 2,407
| 27.329412
| 88
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/abs.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Instantiates the unary Abs operator for a subgraph node, selecting the f32
// or f16 kernel from the node's compute type. On success, caches the batch
// size (product of all non-channel dimensions) in opdata for the reshape stage.
// code_cache and weights_cache are unused by this element-wise operator.
static enum xnn_status create_abs_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  assert(node->num_outputs == 1);
  // The last dimension is the channel count; a zero-rank (scalar) input is
  // treated as a single channel.
  const size_t num_input_dims = values[input_id].shape.num_dims;
  const size_t channel_dim = num_input_dims == 0 ? 1 : values[input_id].shape.dim[num_input_dims - 1];
  enum xnn_status status;
  switch (node->compute_type) {
    case xnn_compute_type_fp32:
      status = xnn_create_abs_nc_f32(
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    case xnn_compute_type_fp16:
      status = xnn_create_abs_nc_f16(
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    default:
      XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    opdata->batch_size = xnn_shape_multiply_non_channel_dims(&values[input_id].shape);
  }
  return status;
}
// Propagates the cached batch size to the underlying Abs operator, dispatching
// on the concrete operator type that create_abs_operator instantiated.
static enum xnn_status reshape_abs_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  if (opdata->operator_objects[0]->type == xnn_operator_type_abs_nc_f32) {
    return xnn_reshape_abs_nc_f32(
      opdata->operator_objects[0], opdata->batch_size, threadpool);
  }
  if (opdata->operator_objects[0]->type == xnn_operator_type_abs_nc_f16) {
    return xnn_reshape_abs_nc_f16(
      opdata->operator_objects[0], opdata->batch_size, threadpool);
  }
  XNN_UNREACHABLE;
}
// Binds the input and output tensor buffers to the previously created Abs
// operator, dispatching on its concrete (f32 or f16) type.
static enum xnn_status setup_abs_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  // Resolve the tensor IDs recorded at node creation time.
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);

  // Backing buffers must have been allocated before setup is called.
  const void* input_data = values[input_id].data;
  assert(input_data != NULL);
  void* output_data = values[output_id].data;
  assert(output_data != NULL);

  if (opdata->operator_objects[0]->type == xnn_operator_type_abs_nc_f32) {
    return xnn_setup_abs_nc_f32(
      opdata->operator_objects[0], input_data, output_data);
  }
  if (opdata->operator_objects[0]->type == xnn_operator_type_abs_nc_f16) {
    return xnn_setup_abs_nc_f16(
      opdata->operator_objects[0], input_data, output_data);
  }
  XNN_UNREACHABLE;
}
// Defines an Abs node in the subgraph: validates the input/output value IDs,
// types, datatypes, and shapes, then appends a node wired to the
// create/reshape/setup callbacks above. Returns xnn_status_success or the
// first validation error encountered (check order determines which error is
// reported).
enum xnn_status xnn_define_abs(
  xnn_subgraph_t subgraph,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_abs)) != xnn_status_success) {
    return status;
  }
  if ((status = xnn_subgraph_check_input_node_id(xnn_node_type_abs, input_id, subgraph->num_values)) != xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_abs, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  // Only fp32 inputs are accepted at definition time.
  // NOTE(review): create_abs_operator also handles xnn_compute_type_fp16 —
  // presumably the fp16 compute type is assigned by a later internal graph
  // transformation rather than here; confirm before extending this switch.
  switch (input_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_abs), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(xnn_node_type_abs, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_abs, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  // Abs is element-wise: input and output shapes must match exactly.
  status = xnn_subgraph_check_all_dims_match(xnn_node_type_abs, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_abs), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  // All checks passed: allocate the node and populate it.
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->type = xnn_node_type_abs;
  node->compute_type = xnn_compute_type_fp32;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_abs_operator;
  node->reshape = reshape_abs_operator;
  node->setup = setup_abs_operator;
  return xnn_status_success;
}
| 5,931
| 29.111675
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/add2.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/requantization.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Instantiates the N-dimensional Add operator for a 2-input Add node
// (f16/f32/qs8/qu8, chosen from the node's compute type) and records the
// broadcast shapes of both inputs in opdata for the reshape stage. For NCHW
// layout the stored dims are permuted from the NHWC-ordered value shape:
// dim[0]=N, dim[1]=C (the last NHWC dim), dim[2...]=spatial dims.
// code_cache and weights_cache are unused by this operator.
static enum xnn_status create_add_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 2);
  const uint32_t input1_id = node->inputs[0];
  assert(input1_id != XNN_INVALID_VALUE_ID);
  assert(input1_id < num_values);
  const uint32_t input2_id = node->inputs[1];
  assert(input2_id != XNN_INVALID_VALUE_ID);
  assert(input2_id < num_values);
  assert(node->num_outputs == 1);
  const uint32_t output_id = node->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  enum xnn_status status;
  switch (node->compute_type) {
    case xnn_compute_type_fp16:
      status = xnn_create_add_nd_f16(
        node->activation.output_min,
        node->activation.output_max,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    case xnn_compute_type_fp32:
      status = xnn_create_add_nd_f32(
        node->activation.output_min,
        node->activation.output_max,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    case xnn_compute_type_qs8:
    {
      // Quantize the float activation bounds into the output's int8 domain.
      const float output_scale = values[output_id].quantization.scale;
      const int32_t output_zero_point = values[output_id].quantization.zero_point;
      const int8_t output_min = xnn_qs8_quantize(node->activation.output_min, output_scale, output_zero_point);
      const int8_t output_max = xnn_qs8_quantize(node->activation.output_max, output_scale, output_zero_point);
      status = xnn_create_add_nd_qs8(
        (int8_t) values[input1_id].quantization.zero_point,
        values[input1_id].quantization.scale,
        (int8_t) values[input2_id].quantization.zero_point,
        values[input2_id].quantization.scale,
        (int8_t) output_zero_point,
        output_scale, output_min, output_max, node->flags,
        &opdata->operator_objects[0]);
      break;
    }
    case xnn_compute_type_qu8:
    {
      // Quantize the float activation bounds into the output's uint8 domain.
      const float output_scale = values[output_id].quantization.scale;
      const int32_t output_zero_point = values[output_id].quantization.zero_point;
      const uint8_t output_min = xnn_qu8_quantize(node->activation.output_min, output_scale, output_zero_point);
      const uint8_t output_max = xnn_qu8_quantize(node->activation.output_max, output_scale, output_zero_point);
      status = xnn_create_add_nd_qu8(
        (uint8_t) values[input1_id].quantization.zero_point,
        values[input1_id].quantization.scale,
        (uint8_t) values[input2_id].quantization.zero_point,
        values[input2_id].quantization.scale,
        (uint8_t) output_zero_point,
        output_scale, output_min, output_max, node->flags,
        &opdata->operator_objects[0]);
      break;
    }
    default:
      XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    opdata->shape1.num_dims = values[input1_id].shape.num_dims;
    opdata->shape2.num_dims = values[input2_id].shape.num_dims;
    if (values[output_id].layout == xnn_layout_type_nchw) {
      assert(values[input1_id].layout == xnn_layout_type_nchw);
      assert(values[input2_id].layout == xnn_layout_type_nchw);
      // Permute NHWC-ordered dims to NCHW order: N, C, then spatial dims.
      opdata->shape1.dim[0] = values[input1_id].shape.dim[0];
      opdata->shape1.dim[1] = values[input1_id].shape.dim[values[input1_id].shape.num_dims - 1];
      if (values[input1_id].shape.num_dims > 2) {
        memcpy(&opdata->shape1.dim[2], &values[input1_id].shape.dim[1], (values[input1_id].shape.num_dims - 2) * sizeof(size_t));
      }
      opdata->shape2.dim[0] = values[input2_id].shape.dim[0];
      opdata->shape2.dim[1] = values[input2_id].shape.dim[values[input2_id].shape.num_dims - 1];
      // Bug fix: this guard previously tested input1's rank; when the two
      // inputs have different ranks that over- or under-copied shape2's
      // spatial dimensions. It must follow input2's own rank, matching the
      // memcpy length below.
      if (values[input2_id].shape.num_dims > 2) {
        memcpy(&opdata->shape2.dim[2], &values[input2_id].shape.dim[1], (values[input2_id].shape.num_dims - 2) * sizeof(size_t));
      }
    } else {
      // NHWC (or rank-agnostic) path: copy the shapes through unchanged.
      assert(values[output_id].layout == xnn_layout_type_nhwc);
      assert(values[input1_id].layout == xnn_layout_type_nhwc);
      assert(values[input2_id].layout == xnn_layout_type_nhwc);
      memcpy(opdata->shape1.dim, values[input1_id].shape.dim, values[input1_id].shape.num_dims * sizeof(size_t));
      memcpy(opdata->shape2.dim, values[input2_id].shape.dim, values[input2_id].shape.num_dims * sizeof(size_t));
    }
  }
  return status;
}
// Feeds the broadcast shapes captured in opdata back into the underlying Add
// operator. All four datatype variants take identical shape arguments; only
// the entry point differs, selected by the concrete operator type.
static enum xnn_status reshape_add_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  if (opdata->operator_objects[0]->type == xnn_operator_type_add_nd_f32) {
    return xnn_reshape_add_nd_f32(
      opdata->operator_objects[0],
      opdata->shape1.num_dims, opdata->shape1.dim,
      opdata->shape2.num_dims, opdata->shape2.dim,
      threadpool);
  }
  if (opdata->operator_objects[0]->type == xnn_operator_type_add_nd_f16) {
    return xnn_reshape_add_nd_f16(
      opdata->operator_objects[0],
      opdata->shape1.num_dims, opdata->shape1.dim,
      opdata->shape2.num_dims, opdata->shape2.dim,
      threadpool);
  }
  if (opdata->operator_objects[0]->type == xnn_operator_type_add_nd_qs8) {
    return xnn_reshape_add_nd_qs8(
      opdata->operator_objects[0],
      opdata->shape1.num_dims, opdata->shape1.dim,
      opdata->shape2.num_dims, opdata->shape2.dim,
      threadpool);
  }
  if (opdata->operator_objects[0]->type == xnn_operator_type_add_nd_qu8) {
    return xnn_reshape_add_nd_qu8(
      opdata->operator_objects[0],
      opdata->shape1.num_dims, opdata->shape1.dim,
      opdata->shape2.num_dims, opdata->shape2.dim,
      threadpool);
  }
  XNN_UNREACHABLE;
}
// Binds both input buffers and the output buffer to the previously created
// Add operator, dispatching on its concrete datatype variant.
static enum xnn_status setup_add_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  // Resolve the tensor IDs recorded at node creation time.
  const uint32_t input1_id = opdata->inputs[0];
  assert(input1_id != XNN_INVALID_VALUE_ID);
  assert(input1_id < num_values);
  const uint32_t input2_id = opdata->inputs[1];
  assert(input2_id != XNN_INVALID_VALUE_ID);
  assert(input2_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);

  // Backing buffers must have been allocated before setup is called.
  const void* input1_data = values[input1_id].data;
  assert(input1_data != NULL);
  const void* input2_data = values[input2_id].data;
  assert(input2_data != NULL);
  void* output_data = values[output_id].data;
  assert(output_data != NULL);

  if (opdata->operator_objects[0]->type == xnn_operator_type_add_nd_f32) {
    return xnn_setup_add_nd_f32(
      opdata->operator_objects[0], input1_data, input2_data, output_data);
  }
  if (opdata->operator_objects[0]->type == xnn_operator_type_add_nd_f16) {
    return xnn_setup_add_nd_f16(
      opdata->operator_objects[0], input1_data, input2_data, output_data);
  }
  if (opdata->operator_objects[0]->type == xnn_operator_type_add_nd_qs8) {
    return xnn_setup_add_nd_qs8(
      opdata->operator_objects[0], input1_data, input2_data, output_data);
  }
  if (opdata->operator_objects[0]->type == xnn_operator_type_add_nd_qu8) {
    return xnn_setup_add_nd_qu8(
      opdata->operator_objects[0], input1_data, input2_data, output_data);
  }
  XNN_UNREACHABLE;
}
// Defines a 2-input Add node: validates the activation range, both input
// value IDs/types/datatypes, and the output, derives the compute type from
// the output datatype (fp32/qs8/qu8), then appends a node wired to the
// create/reshape/setup callbacks above. The order of checks determines which
// validation error is reported first.
enum xnn_status xnn_define_add2(
  xnn_subgraph_t subgraph,
  float output_min,
  float output_max,
  uint32_t input1_id,
  uint32_t input2_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_add2)) != xnn_status_success) {
    return status;
  }
  status = xnn_subgraph_check_output_min_max(xnn_node_type_add2, output_min, output_max);
  if (status != xnn_status_success) {
    return status;
  }
  // Validate the first input: ID range, dense tensor type, supported datatype.
  if ((status = xnn_subgraph_check_nth_input_node_id(xnn_node_type_add2, input1_id, subgraph->num_values, 1)) !=
      xnn_status_success) {
    return status;
  }
  const struct xnn_value* input1_value = &subgraph->values[input1_id];
  status = xnn_subgraph_check_nth_input_type_dense(xnn_node_type_add2, input1_id, input1_value, 1);
  if (status != xnn_status_success) {
    return status;
  }
  switch (input1_value->datatype) {
    case xnn_datatype_fp32:
    case xnn_datatype_qint8:
    case xnn_datatype_quint8:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with the first input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_add2), input1_id,
        xnn_datatype_to_string(input1_value->datatype), input1_value->datatype);
      return xnn_status_invalid_parameter;
  }
  // Validate the second input the same way.
  if ((status = xnn_subgraph_check_nth_input_node_id(xnn_node_type_add2, input2_id, subgraph->num_values, 2)) !=
      xnn_status_success) {
    return status;
  }
  const struct xnn_value* input2_value = &subgraph->values[input2_id];
  status = xnn_subgraph_check_nth_input_type_dense(xnn_node_type_add2, input2_id, input2_value, 2);
  if (status != xnn_status_success) {
    return status;
  }
  switch (input2_value->datatype) {
    case xnn_datatype_fp32:
    case xnn_datatype_qint8:
    case xnn_datatype_quint8:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with the second input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_add2), input2_id,
        xnn_datatype_to_string(input2_value->datatype), input2_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(xnn_node_type_add2, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_add2, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  // The output datatype selects the compute type used at operator creation.
  enum xnn_compute_type compute_type = xnn_compute_type_invalid;
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      compute_type = xnn_compute_type_fp32;
      break;
    case xnn_datatype_qint8:
      compute_type = xnn_compute_type_qs8;
      break;
    case xnn_datatype_quint8:
      compute_type = xnn_compute_type_qu8;
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_add2), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  // Both inputs and the output must agree on datatype.
  status = xnn_subgraph_check_datatype_matches_two_inputs(
    xnn_node_type_add2, input1_id, input1_value, input2_id, input2_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  // All checks passed: allocate the node and populate it.
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->type = xnn_node_type_add2;
  node->compute_type = compute_type;
  node->activation.output_min = output_min;
  node->activation.output_max = output_max;
  node->num_inputs = 2;
  node->inputs[0] = input1_id;
  node->inputs[1] = input2_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_add_operator;
  node->reshape = reshape_add_operator;
  node->setup = setup_add_operator;
  return xnn_status_success;
}
| 11,965
| 34.090909
| 129
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/argmax-pooling-2d.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Instantiates the f32 NHWC argmax-pooling operator for a subgraph node.
// The node has one input (NHWC tensor) and two outputs (pooled values and
// argmax indices), all sharing the same channel count. On success, caches the
// input's batch/height/width in opdata for the reshape stage. code_cache and
// weights_cache are unused.
static enum xnn_status create_argmax_pooling_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->compute_type == xnn_compute_type_fp32);
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  assert(node->num_outputs == 2);
  // NHWC: dim[3] is the channel dimension; both outputs must match it.
  const size_t channel_dim = values[input_id].shape.dim[3];
  assert(channel_dim == values[node->outputs[0]].shape.dim[3]);
  assert(channel_dim == values[node->outputs[1]].shape.dim[3]);
  const enum xnn_status status = xnn_create_argmax_pooling2d_nhwc_f32(
    node->params.pooling_2d.padding_top,
    node->params.pooling_2d.padding_right,
    node->params.pooling_2d.padding_bottom,
    node->params.pooling_2d.padding_left,
    node->params.pooling_2d.pooling_height,
    node->params.pooling_2d.pooling_width,
    channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
    node->flags,
    &opdata->operator_objects[0]);
  if (status == xnn_status_success) {
    opdata->batch_size = values[input_id].shape.dim[0];
    opdata->input_height = values[input_id].shape.dim[1];
    opdata->input_width = values[input_id].shape.dim[2];
  }
  return status;
}
// Propagates the cached input geometry to the argmax-pooling operator.
// Only the f32 NHWC variant exists, so no type dispatch is needed.
static enum xnn_status reshape_argmax_pooling_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const size_t batch_size = opdata->batch_size;
  const size_t input_height = opdata->input_height;
  const size_t input_width = opdata->input_width;
  return xnn_reshape_argmax_pooling2d_nhwc_f32(
    opdata->operator_objects[0], batch_size, input_height, input_width, threadpool);
}
// Binds the input buffer and both output buffers (pooled values, argmax
// indices) to the previously created argmax-pooling operator.
static enum xnn_status setup_argmax_pooling_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  // Resolve the tensor IDs recorded at node creation time.
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t output_value_id = opdata->outputs[0];
  assert(output_value_id != XNN_INVALID_VALUE_ID);
  assert(output_value_id < num_values);
  const uint32_t output_index_id = opdata->outputs[1];
  assert(output_index_id != XNN_INVALID_VALUE_ID);
  assert(output_index_id < num_values);

  // Backing buffers must have been allocated before setup is called.
  const void* input_data = values[input_id].data;
  assert(input_data != NULL);
  void* output_value_data = values[output_value_id].data;
  assert(output_value_data != NULL);
  void* output_index_data = values[output_index_id].data;
  assert(output_index_data != NULL);

  return xnn_setup_argmax_pooling2d_nhwc_f32(
    opdata->operator_objects[0], input_data, output_value_data, output_index_data);
}
// Defines an argmax-pooling-2d node: validates the pooling window, the input
// value, and both outputs (pooled values and argmax indices), then appends a
// node wired to the create/reshape/setup callbacks above. The order of checks
// determines which validation error is reported first.
enum xnn_status xnn_define_argmax_pooling_2d(
  xnn_subgraph_t subgraph,
  uint32_t input_padding_top,
  uint32_t input_padding_right,
  uint32_t input_padding_bottom,
  uint32_t input_padding_left,
  uint32_t pooling_height,
  uint32_t pooling_width,
  uint32_t input_id,
  uint32_t output_value_id,
  uint32_t output_index_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_argmax_pooling_2d)) != xnn_status_success) {
    return status;
  }
  // The pooling window must be non-degenerate: at least one element, and 1x1
  // is rejected because it would be an identity operation.
  const uint32_t pooling_size = pooling_height * pooling_width;
  if (pooling_size == 0) {
    xnn_log_error(
      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " pooling size: "
      "pooling size dimensions must be non-zero",
      xnn_node_type_to_string(xnn_node_type_argmax_pooling_2d), pooling_width, pooling_height);
    return xnn_status_invalid_parameter;
  }
  if (pooling_size == 1) {
    xnn_log_error(
      "failed to define %s operator with 1 pooling element: 1x1 pooling is meaningless",
      xnn_node_type_to_string(xnn_node_type_argmax_pooling_2d));
    return xnn_status_invalid_parameter;
  }
  if ((status = xnn_subgraph_check_input_node_id(xnn_node_type_argmax_pooling_2d, input_id, subgraph->num_values))
      != xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_argmax_pooling_2d, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  switch (input_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_argmax_pooling_2d), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }
  // Validate the pooled-values output (must be a dense fp32 tensor).
  if (output_value_id >= subgraph->num_values) {
    xnn_log_error(
      "failed to define %s operator with output value ID #%" PRIu32 ": invalid Value ID",
      xnn_node_type_to_string(xnn_node_type_argmax_pooling_2d), output_value_id);
    return xnn_status_invalid_parameter;
  }
  const struct xnn_value* output_value_value = &subgraph->values[output_value_id];
  if (output_value_value->type != xnn_value_type_dense_tensor) {
    xnn_log_error(
      "failed to define %s operator with output value ID #%" PRIu32 ": unsupported Value type %d (expected dense tensor)",
      xnn_node_type_to_string(xnn_node_type_argmax_pooling_2d), output_value_id, output_value_value->type);
    return xnn_status_invalid_parameter;
  }
  switch (output_value_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output value ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_argmax_pooling_2d), output_value_id,
        xnn_datatype_to_string(output_value_value->datatype), output_value_value->datatype);
      return xnn_status_invalid_parameter;
  }
  // Validate the argmax-indices output (dense tensor; no datatype check here).
  if (output_index_id >= subgraph->num_values) {
    xnn_log_error(
      "failed to define %s operator with output index ID #%" PRIu32 ": invalid Value ID",
      xnn_node_type_to_string(xnn_node_type_argmax_pooling_2d), output_index_id);
    return xnn_status_invalid_parameter;
  }
  const struct xnn_value* output_index_value = &subgraph->values[output_index_id];
  if (output_index_value->type != xnn_value_type_dense_tensor) {
    xnn_log_error(
      "failed to define %s operator with output index ID #%" PRIu32 ": unsupported Value type %d (expected dense tensor)",
      xnn_node_type_to_string(xnn_node_type_argmax_pooling_2d), output_index_id, output_index_value->type);
    return xnn_status_invalid_parameter;
  }
  // All checks passed: allocate the node and populate it.
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->type = xnn_node_type_argmax_pooling_2d;
  node->compute_type = xnn_compute_type_fp32;
  node->params.pooling_2d.padding_top = input_padding_top;
  node->params.pooling_2d.padding_right = input_padding_right;
  node->params.pooling_2d.padding_bottom = input_padding_bottom;
  node->params.pooling_2d.padding_left = input_padding_left;
  node->params.pooling_2d.pooling_height = pooling_height;
  node->params.pooling_2d.pooling_width = pooling_width;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 2;
  node->outputs[0] = output_value_id;
  node->outputs[1] = output_index_id;
  node->flags = flags;
  node->create = create_argmax_pooling_operator;
  node->reshape = reshape_argmax_pooling_operator;
  node->setup = setup_argmax_pooling_operator;
  return xnn_status_success;
}
| 8,203
| 34.362069
| 122
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/average-pooling-2d.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Instantiates the NHWC average-pooling operator for a subgraph node,
// selecting the f16 or f32 kernel from the node's compute type. On success,
// caches the input's batch/height/width in opdata for the reshape stage.
// code_cache and weights_cache are unused.
static enum xnn_status create_average_pooling_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  assert(node->num_outputs == 1);
  // NHWC: dim[3] is the channel dimension; the output must match it.
  const size_t channel_dim = values[input_id].shape.dim[3];
  assert(channel_dim == values[node->outputs[0]].shape.dim[3]);
  enum xnn_status status;
  switch (node->compute_type) {
    case xnn_compute_type_fp16:
      status = xnn_create_average_pooling2d_nhwc_f16(
        node->params.pooling_2d.padding_top,
        node->params.pooling_2d.padding_right,
        node->params.pooling_2d.padding_bottom,
        node->params.pooling_2d.padding_left,
        node->params.pooling_2d.pooling_height,
        node->params.pooling_2d.pooling_width,
        node->params.pooling_2d.stride_height,
        node->params.pooling_2d.stride_width,
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->activation.output_min,
        node->activation.output_max,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    case xnn_compute_type_fp32:
      status = xnn_create_average_pooling2d_nhwc_f32(
        node->params.pooling_2d.padding_top,
        node->params.pooling_2d.padding_right,
        node->params.pooling_2d.padding_bottom,
        node->params.pooling_2d.padding_left,
        node->params.pooling_2d.pooling_height,
        node->params.pooling_2d.pooling_width,
        node->params.pooling_2d.stride_height,
        node->params.pooling_2d.stride_width,
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->activation.output_min,
        node->activation.output_max,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    default:
      XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    opdata->batch_size = values[input_id].shape.dim[0];
    opdata->input_height = values[input_id].shape.dim[1];
    opdata->input_width = values[input_id].shape.dim[2];
  }
  return status;
}
// Propagates the cached input geometry to the average-pooling operator,
// dispatching on its concrete (f16 or f32) type. The computed output
// dimensions are discarded (NULL out-parameters).
static enum xnn_status reshape_average_pooling_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  if (opdata->operator_objects[0]->type == xnn_operator_type_average_pooling_nhwc_f16) {
    return xnn_reshape_average_pooling2d_nhwc_f16(
      opdata->operator_objects[0],
      opdata->batch_size,
      opdata->input_height,
      opdata->input_width,
      /*output_height_out=*/NULL,
      /*output_width_out=*/NULL,
      threadpool);
  }
  if (opdata->operator_objects[0]->type == xnn_operator_type_average_pooling_nhwc_f32) {
    return xnn_reshape_average_pooling2d_nhwc_f32(
      opdata->operator_objects[0],
      opdata->batch_size,
      opdata->input_height,
      opdata->input_width,
      /*output_height_out=*/NULL,
      /*output_width_out=*/NULL,
      threadpool);
  }
  XNN_UNREACHABLE;
}
// Binds the input and output buffers to the previously created average-pooling
// operator, dispatching on its concrete (f16 or f32) type.
static enum xnn_status setup_average_pooling_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  // Resolve the tensor IDs recorded at node creation time.
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);

  // Backing buffers must have been allocated before setup is called.
  const void* input_data = values[input_id].data;
  assert(input_data != NULL);
  void* output_data = values[output_id].data;
  assert(output_data != NULL);

  if (opdata->operator_objects[0]->type == xnn_operator_type_average_pooling_nhwc_f16) {
    return xnn_setup_average_pooling2d_nhwc_f16(
      opdata->operator_objects[0], input_data, output_data);
  }
  if (opdata->operator_objects[0]->type == xnn_operator_type_average_pooling_nhwc_f32) {
    return xnn_setup_average_pooling2d_nhwc_f32(
      opdata->operator_objects[0], input_data, output_data);
  }
  XNN_UNREACHABLE;
}
// Defines an Average Pooling 2D node in the subgraph: validates pooling/stride
// parameters, padding flags, and input/output tensors, then records the node.
// Returns xnn_status_success on success or an error status on invalid input.
//
// Fix: the stride-vs-pooling error messages previously reported the operator
// as Max Pooling (xnn_node_type_max_pooling_2d) — a copy-paste defect; they
// now correctly report Average Pooling.
enum xnn_status xnn_define_average_pooling_2d(
  xnn_subgraph_t subgraph,
  uint32_t input_padding_top,
  uint32_t input_padding_right,
  uint32_t input_padding_bottom,
  uint32_t input_padding_left,
  uint32_t pooling_height,
  uint32_t pooling_width,
  uint32_t stride_height,
  uint32_t stride_width,
  float output_min,
  float output_max,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_average_pooling_2d)) != xnn_status_success) {
    return status;
  }

  // Pooling window must be non-empty and larger than 1x1 (1x1 is an identity).
  const uint32_t pooling_size = pooling_height * pooling_width;
  if (pooling_size == 0) {
    xnn_log_error(
      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " pooling size: "
      "pooling size dimensions must be non-zero",
      xnn_node_type_to_string(xnn_node_type_average_pooling_2d), pooling_width, pooling_height);
    return xnn_status_invalid_parameter;
  }

  if (pooling_size == 1) {
    xnn_log_error(
      "failed to define %s operator with 1 pooling element: 1x1 pooling is meaningless",
      xnn_node_type_to_string(xnn_node_type_average_pooling_2d));
    return xnn_status_invalid_parameter;
  }

  if (stride_height == 0 || stride_width == 0) {
    xnn_log_error(
      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " stride: "
      "stride dimensions must be non-zero",
      xnn_node_type_to_string(xnn_node_type_average_pooling_2d), stride_width, stride_height);
    return xnn_status_invalid_parameter;
  }

  // Strides larger than the pooling window would skip input elements.
  if (stride_height > pooling_height) {
    xnn_log_error(
      "failed to define %s operator with %" PRIu32 " stride height: must be less than pooling height %" PRIu32,
      xnn_node_type_to_string(xnn_node_type_average_pooling_2d), stride_height, pooling_height);
    return xnn_status_invalid_parameter;
  }

  if (stride_width > pooling_width) {
    xnn_log_error(
      "failed to define %s operator with %" PRIu32 " stride width: must be less than pooling width %" PRIu32,
      xnn_node_type_to_string(xnn_node_type_average_pooling_2d), stride_width, pooling_width);
    return xnn_status_invalid_parameter;
  }

  status = xnn_subgraph_check_output_min_max(xnn_node_type_average_pooling_2d, output_min, output_max);
  if (status != xnn_status_success) {
    return status;
  }

  // TF SAME padding is computed dynamically and excludes explicit padding.
  const bool any_padding = (input_padding_left | input_padding_top | input_padding_right | input_padding_bottom) != 0;
  if ((flags & XNN_FLAG_TENSORFLOW_SAME_PADDING) != 0) {
    if (any_padding) {
      xnn_log_error(
        "failed to define %s operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
        "TensorFlow SAME padding can't be combined with explicit padding specification",
        xnn_node_type_to_string(xnn_node_type_average_pooling_2d),
        input_padding_top, input_padding_left, input_padding_bottom, input_padding_right);
      return xnn_status_invalid_parameter;
    }
  }

  if ((status = xnn_subgraph_check_input_node_id(xnn_node_type_average_pooling_2d, input_id, subgraph->num_values)) !=
      xnn_status_success) {
    return status;
  }

  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_average_pooling_2d, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }

  switch (input_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_average_pooling_2d), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }

  status = xnn_subgraph_check_output_node_id(xnn_node_type_average_pooling_2d, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }

  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_average_pooling_2d, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }

  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_average_pooling_2d), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }

  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }

  node->type = xnn_node_type_average_pooling_2d;
  node->compute_type = xnn_compute_type_fp32;
  node->params.pooling_2d.padding_top = input_padding_top;
  node->params.pooling_2d.padding_right = input_padding_right;
  node->params.pooling_2d.padding_bottom = input_padding_bottom;
  node->params.pooling_2d.padding_left = input_padding_left;
  node->params.pooling_2d.pooling_height = pooling_height;
  node->params.pooling_2d.pooling_width = pooling_width;
  node->params.pooling_2d.stride_height = stride_height;
  node->params.pooling_2d.stride_width = stride_width;
  node->activation.output_min = output_min;
  node->activation.output_max = output_max;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;

  node->create = create_average_pooling_operator;
  node->reshape = reshape_average_pooling_operator;
  node->setup = setup_average_pooling_operator;

  return xnn_status_success;
}
| 10,495
| 34.221477
| 118
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/bankers-rounding.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Creates the NC round-to-nearest-even operator for this node and records
// the flattened batch size on the operator data.
static enum xnn_status create_bankers_rounding_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  assert(node->num_outputs == 1);

  // The innermost dimension acts as channels; a rank-0 tensor has 1 channel.
  const size_t rank = values[input_id].shape.num_dims;
  const size_t channels = rank == 0 ? 1 : values[input_id].shape.dim[rank - 1];

  enum xnn_status status;
  if (node->compute_type == xnn_compute_type_fp32) {
    status = xnn_create_bankers_rounding_nc_f32(
      channels /* channels */, channels /* input stride */, channels /* output stride */,
      node->flags,
      &opdata->operator_objects[0]);
  } else if (node->compute_type == xnn_compute_type_fp16) {
    status = xnn_create_bankers_rounding_nc_f16(
      channels /* channels */, channels /* input stride */, channels /* output stride */,
      node->flags,
      &opdata->operator_objects[0]);
  } else {
    XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    opdata->batch_size = xnn_shape_multiply_non_channel_dims(&values[input_id].shape);
  }
  return status;
}
// Propagates the recorded batch size to the concrete rounding operator.
static enum xnn_status reshape_bankers_rounding_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  xnn_operator_t op = opdata->operator_objects[0];
  if (op->type == xnn_operator_type_bankers_rounding_nc_f32) {
    return xnn_reshape_bankers_rounding_nc_f32(op, opdata->batch_size, threadpool);
  }
  if (op->type == xnn_operator_type_bankers_rounding_nc_f16) {
    return xnn_reshape_bankers_rounding_nc_f16(op, opdata->batch_size, threadpool);
  }
  XNN_UNREACHABLE;
}
// Binds the runtime input/output buffers to the rounding operator.
static enum xnn_status setup_bankers_rounding_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t in_id = opdata->inputs[0];
  assert(in_id != XNN_INVALID_VALUE_ID);
  assert(in_id < num_values);

  const uint32_t out_id = opdata->outputs[0];
  assert(out_id != XNN_INVALID_VALUE_ID);
  assert(out_id < num_values);

  const void* in_ptr = values[in_id].data;
  assert(in_ptr != NULL);
  void* out_ptr = values[out_id].data;
  assert(out_ptr != NULL);

  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_bankers_rounding_nc_f32:
      return xnn_setup_bankers_rounding_nc_f32(
        opdata->operator_objects[0], in_ptr, out_ptr);
    case xnn_operator_type_bankers_rounding_nc_f16:
      return xnn_setup_bankers_rounding_nc_f16(
        opdata->operator_objects[0], in_ptr, out_ptr);
    default:
      XNN_UNREACHABLE;
  }
}
// Defines a Bankers' Rounding (round-to-nearest-even) node in the subgraph.
// Validates input/output tensor IDs, types, and shapes, then records the node
// with its create/reshape/setup callbacks. Returns xnn_status_success on
// success or an error status on invalid parameters.
enum xnn_status xnn_define_bankers_rounding(
  xnn_subgraph_t subgraph,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_bankers_rounding)) != xnn_status_success) {
    return status;
  }

  // Validate the input tensor: valid ID, dense layout, FP32 datatype.
  if ((status = xnn_subgraph_check_input_node_id(xnn_node_type_bankers_rounding, input_id, subgraph->num_values)) !=
      xnn_status_success) {
    return status;
  }

  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_bankers_rounding, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }

  switch (input_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_bankers_rounding), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }

  // Validate the output tensor and require it to match the input shape
  // exactly (element-wise operator).
  status = xnn_subgraph_check_output_node_id(xnn_node_type_bankers_rounding, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }

  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_bankers_rounding, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }

  status = xnn_subgraph_check_all_dims_match(xnn_node_type_bankers_rounding, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }

  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_bankers_rounding), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }

  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }

  // Record node metadata; fp16 support is selected later at create time
  // (see create_bankers_rounding_operator), the node itself is typed fp32.
  node->type = xnn_node_type_bankers_rounding;
  node->compute_type = xnn_compute_type_fp32;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;

  node->create = create_bankers_rounding_operator;
  node->reshape = reshape_bankers_rounding_operator;
  node->setup = setup_bankers_rounding_operator;

  return xnn_status_success;
}
| 6,276
| 30.542714
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/batch-matrix-multiply.c
|
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Creates the batch matrix-multiply operator for this node and caches the
// problem dimensions (batch, M, K, N) on the operator data for reshape.
static enum xnn_status create_batch_matrix_multiply_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 2);
  const uint32_t input1_id = node->inputs[0];
  assert(input1_id != XNN_INVALID_VALUE_ID);
  assert(input1_id < num_values);
  const uint32_t input2_id = node->inputs[1];
  assert(input2_id != XNN_INVALID_VALUE_ID);
  assert(input2_id < num_values);

  assert(node->num_outputs == 1);

  // input1: b x m x k
  // input2: b x n x k
  const struct xnn_value* input1 = values + input1_id;
  const struct xnn_value* input2 = values + input2_id;
  // Extract M and K from input1's trailing dims, N from input2's
  // second-to-last dim; all leading dims are folded into the batch size.
  const size_t m = input1->shape.dim[input1->shape.num_dims - 2];
  const size_t k = input1->shape.dim[input1->shape.num_dims - 1];
  const size_t n = input2->shape.dim[input2->shape.num_dims - 2];
  const size_t batch_size = xnn_shape_multiply_batch_dims(&input1->shape, 2);

  enum xnn_status status;
  switch (node->compute_type) {
    case xnn_compute_type_fp32:
      status = xnn_create_batch_matrix_multiply_nc_f32(node->flags, &opdata->operator_objects[0]);
      break;
    default:
      XNN_UNREACHABLE;
  }

  if (status == xnn_status_success) {
    // Stash dimensions in the generic opdata fields: height=M, width=K,
    // output_channels=N; consumed by reshape_batch_matrix_multiply_operator.
    opdata->batch_size = batch_size;
    opdata->input_height = m;
    opdata->input_width = k;
    opdata->output_channels = n;
  }
  return status;
}
// Reshapes the batch matrix-multiply operator using the dimensions cached at
// create time, and reports the required workspace size/alignment.
static enum xnn_status reshape_batch_matrix_multiply_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  xnn_operator_t op = opdata->operator_objects[0];
  if (op->type != xnn_operator_type_batch_matrix_multiply_nc_f32) {
    XNN_UNREACHABLE;
  }
  return xnn_reshape_batch_matrix_multiply_nc_f32(
    op,
    opdata->batch_size,
    opdata->input_height, opdata->input_width, opdata->output_channels,
    &opdata->workspace_size, &opdata->workspace_alignment,
    threadpool);
}
// Binds both input buffers, the workspace, and the output buffer to the
// batch matrix-multiply operator.
static enum xnn_status setup_batch_matrix_multiply_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t a_id = opdata->inputs[0];
  assert(a_id != XNN_INVALID_VALUE_ID);
  assert(a_id < num_values);
  const uint32_t b_id = opdata->inputs[1];
  assert(b_id != XNN_INVALID_VALUE_ID);
  assert(b_id < num_values);
  const uint32_t out_id = opdata->outputs[0];
  assert(out_id != XNN_INVALID_VALUE_ID);
  assert(out_id < num_values);

  const void* a_data = values[a_id].data;
  assert(a_data != NULL);
  const void* b_data = values[b_id].data;
  assert(b_data != NULL);
  void* out_data = values[out_id].data;
  assert(out_data != NULL);

  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_batch_matrix_multiply_nc_f32:
      return xnn_setup_batch_matrix_multiply_nc_f32(
        opdata->operator_objects[0],
        opdata->workspace, a_data, b_data, out_data);
    default:
      XNN_UNREACHABLE;
  }
}
// Maps the (input1, input2, output) datatype triple to a compute type.
// Only all-FP32 is supported; any other consistent triple yields
// xnn_compute_type_invalid, and an unexpected input2 datatype is a
// programming error.
static inline enum xnn_compute_type validate_datatypes(
  enum xnn_datatype input1_datatype,
  enum xnn_datatype input2_datatype,
  enum xnn_datatype output_datatype)
{
  if (input2_datatype != xnn_datatype_fp32) {
    XNN_UNREACHABLE;
  }
  const bool all_fp32 =
    input1_datatype == xnn_datatype_fp32 && output_datatype == xnn_datatype_fp32;
  return all_fp32 ? xnn_compute_type_fp32 : xnn_compute_type_invalid;
}
// Defines a Batch Matrix Multiply node: input1 is [batch..., M, K], input2 is
// [batch..., N, K], output is [batch..., M, N]. Validates IDs, datatypes,
// ranks, and dimension agreement, then records the node.
//
// Fix: the dense-type check for input2 previously validated input1_value
// (copy-paste defect); it now validates input2_value.
enum xnn_status xnn_define_batch_matrix_multiply(
  xnn_subgraph_t subgraph,
  uint32_t input1_id,
  uint32_t input2_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_batch_matrix_multiply);
  if (status != xnn_status_success) {
    return status;
  }

  // Validate input1: valid ID, dense, FP32, rank >= 3.
  status = xnn_subgraph_check_input_node_id(xnn_node_type_batch_matrix_multiply, input1_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }

  const struct xnn_value* input1_value = &subgraph->values[input1_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_batch_matrix_multiply, input1_id, input1_value);
  if (status != xnn_status_success) {
    return status;
  }

  switch (input1_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input1 ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_batch_matrix_multiply), input1_id,
        xnn_datatype_to_string(input1_value->datatype), input1_value->datatype);
      return xnn_status_invalid_parameter;
  }

  if (input1_value->shape.num_dims < 3) {
    xnn_log_error(
      "failed to define %s operator with input1 ID #%" PRIu32
      ": unsupported number of dimension %zu, must be at least 3",
      xnn_node_type_to_string(xnn_node_type_batch_matrix_multiply), input1_id, input1_value->shape.num_dims);
    return xnn_status_invalid_parameter;
  }

  // Validate input2 the same way.
  status = xnn_subgraph_check_input_node_id(xnn_node_type_batch_matrix_multiply, input2_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }

  const struct xnn_value* input2_value = &subgraph->values[input2_id];
  // Bug fix: validate input2_value here, not input1_value.
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_batch_matrix_multiply, input2_id, input2_value);
  if (status != xnn_status_success) {
    return status;
  }

  switch (input2_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input2 ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_batch_matrix_multiply), input2_id,
        xnn_datatype_to_string(input2_value->datatype), input2_value->datatype);
      return xnn_status_invalid_parameter;
  }

  if (input2_value->shape.num_dims < 3) {
    xnn_log_error(
      "failed to define %s operator with input2 ID #%" PRIu32
      ": unsupported number of dimension %zu, must be at least 3",
      xnn_node_type_to_string(xnn_node_type_batch_matrix_multiply), input2_id, input2_value->shape.num_dims);
    return xnn_status_invalid_parameter;
  }

  if (input1_value->shape.num_dims != input2_value->shape.num_dims) {
    xnn_log_error(
      "failed to define %s operator with input1 ID #%" PRIu32 " and input2 ID #%" PRIu32
      ": mismatch number of dimension %zu != %zu",
      xnn_node_type_to_string(xnn_node_type_batch_matrix_multiply), input1_id, input2_id, input1_value->shape.num_dims,
      input2_value->shape.num_dims);
    return xnn_status_invalid_parameter;
  }

  // Validate the output tensor: valid ID, dense, FP32, same rank as inputs.
  status = xnn_subgraph_check_output_node_id(xnn_node_type_batch_matrix_multiply, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }

  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_batch_matrix_multiply, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }

  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_batch_matrix_multiply), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }

  if (output_value->shape.num_dims < 3) {
    xnn_log_error(
      "failed to define %s operator with output ID #%" PRIu32
      ": unsupported number of dimension %zu, must be at least 3",
      xnn_node_type_to_string(xnn_node_type_batch_matrix_multiply), output_id, output_value->shape.num_dims);
    return xnn_status_invalid_parameter;
  }

  if (input1_value->shape.num_dims != output_value->shape.num_dims) {
    xnn_log_error(
      "failed to define %s operator with input1 ID #%" PRIu32 " and output ID #%" PRIu32
      ": mismatch number of dimension %zu != %zu",
      xnn_node_type_to_string(xnn_node_type_batch_matrix_multiply), input1_id, output_id, input1_value->shape.num_dims,
      output_value->shape.num_dims);
    return xnn_status_invalid_parameter;
  }

  // Check that all batch dimensions match.
  for (size_t i = 0; i < input1_value->shape.num_dims - 2; i++) {
    if (input1_value->shape.dim[i] != input2_value->shape.dim[i]) {
      xnn_log_error(
        "failed to define %s operator with input1 ID #%" PRIu32 " and input2 ID #%" PRIu32
        ": mismatch at dimension %zu (%zu != %zu)",
        xnn_node_type_to_string(xnn_node_type_batch_matrix_multiply), input1_id, input2_id, i,
        input1_value->shape.dim[i], input2_value->shape.dim[i]);
      return xnn_status_invalid_parameter;
    }
    if (input1_value->shape.dim[i] != output_value->shape.dim[i]) {
      xnn_log_error(
        "failed to define %s operator with input1 ID #%" PRIu32 " and output ID #%" PRIu32
        ": mismatch at dimension %zu (%zu != %zu)",
        xnn_node_type_to_string(xnn_node_type_batch_matrix_multiply), input1_id, output_id, i,
        input1_value->shape.dim[i], output_value->shape.dim[i]);
      return xnn_status_invalid_parameter;
    }
  }

  // Check that last dimension matches (shared K dimension of both inputs).
  size_t last_dimension = input1_value->shape.num_dims - 1;
  if (input1_value->shape.dim[last_dimension] != input2_value->shape.dim[last_dimension]) {
    xnn_log_error(
      "failed to define %s operator with input1 ID #%" PRIu32 " and input2 ID #%" PRIu32
      ": mismatch at last dimension (%zu != %zu)",
      xnn_node_type_to_string(xnn_node_type_batch_matrix_multiply), input1_id, input2_id,
      input1_value->shape.dim[last_dimension], input2_value->shape.dim[last_dimension]);
    return xnn_status_invalid_parameter;
  }

  // Check that output is [M x N].
  if (output_value->shape.dim[last_dimension - 1] != input1_value->shape.dim[last_dimension - 1]) {
    xnn_log_error(
      "failed to define %s operator with output ID #%" PRIu32 " and input1 ID #%" PRIu32
      ": mismatch at second last dimension of output (%zu != %zu)",
      xnn_node_type_to_string(xnn_node_type_batch_matrix_multiply), output_id, input1_id,
      output_value->shape.dim[last_dimension - 1], input1_value->shape.dim[last_dimension - 1]);
    return xnn_status_invalid_parameter;
  }
  if (output_value->shape.dim[last_dimension] != input2_value->shape.dim[last_dimension - 1]) {
    xnn_log_error(
      "failed to define %s operator with output ID #%" PRIu32 " and input2 ID #%" PRIu32
      ": mismatch at last dimension of output (%zu != %zu)",
      xnn_node_type_to_string(xnn_node_type_batch_matrix_multiply), output_id, input2_id,
      output_value->shape.dim[last_dimension], input2_value->shape.dim[last_dimension - 1]);
    return xnn_status_invalid_parameter;
  }

  enum xnn_compute_type compute_type = xnn_compute_type_invalid;
  compute_type = validate_datatypes(input1_value->datatype, input2_value->datatype, output_value->datatype);
  if (compute_type == xnn_compute_type_invalid) {
    xnn_log_error(
      "failed to define %s operator with input1 ID #%" PRIu32 ", input2 ID #%" PRIu32 ", and output ID #%" PRIu32
      ": mismatching datatypes across input1 (%s), input2 (%s), and output (%s)",
      xnn_node_type_to_string(xnn_node_type_batch_matrix_multiply), input1_id, input2_id, output_id,
      xnn_datatype_to_string(input1_value->datatype),
      xnn_datatype_to_string(input2_value->datatype),
      xnn_datatype_to_string(output_value->datatype));
    return xnn_status_invalid_parameter;
  }

  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }

  node->type = xnn_node_type_batch_matrix_multiply;
  node->compute_type = compute_type;
  node->num_inputs = 2;
  node->inputs[0] = input1_id;
  node->inputs[1] = input2_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;

  node->create = create_batch_matrix_multiply_operator;
  node->setup = setup_batch_matrix_multiply_operator;
  node->reshape = reshape_batch_matrix_multiply_operator;

  return xnn_status_success;
}
| 12,961
| 36.900585
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/ceiling.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Creates the NC ceiling operator for this node and records the flattened
// batch size on the operator data.
static enum xnn_status create_ceiling_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  assert(node->num_outputs == 1);

  // The innermost dimension acts as channels; a rank-0 tensor has 1 channel.
  const size_t rank = values[input_id].shape.num_dims;
  const size_t channels = rank == 0 ? 1 : values[input_id].shape.dim[rank - 1];

  enum xnn_status status;
  if (node->compute_type == xnn_compute_type_fp32) {
    status = xnn_create_ceiling_nc_f32(
      channels /* channels */, channels /* input stride */, channels /* output stride */,
      node->flags,
      &opdata->operator_objects[0]);
  } else if (node->compute_type == xnn_compute_type_fp16) {
    status = xnn_create_ceiling_nc_f16(
      channels /* channels */, channels /* input stride */, channels /* output stride */,
      node->flags,
      &opdata->operator_objects[0]);
  } else {
    XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    opdata->batch_size = xnn_shape_multiply_non_channel_dims(&values[input_id].shape);
  }
  return status;
}
// Propagates the recorded batch size to the concrete ceiling operator.
static enum xnn_status reshape_ceiling_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  xnn_operator_t op = opdata->operator_objects[0];
  if (op->type == xnn_operator_type_ceiling_nc_f32) {
    return xnn_reshape_ceiling_nc_f32(op, opdata->batch_size, threadpool);
  }
  if (op->type == xnn_operator_type_ceiling_nc_f16) {
    return xnn_reshape_ceiling_nc_f16(op, opdata->batch_size, threadpool);
  }
  XNN_UNREACHABLE;
}
// Binds the runtime input/output buffers to the ceiling operator.
static enum xnn_status setup_ceiling_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t in_id = opdata->inputs[0];
  assert(in_id != XNN_INVALID_VALUE_ID);
  assert(in_id < num_values);

  const uint32_t out_id = opdata->outputs[0];
  assert(out_id != XNN_INVALID_VALUE_ID);
  assert(out_id < num_values);

  const void* in_ptr = values[in_id].data;
  assert(in_ptr != NULL);
  void* out_ptr = values[out_id].data;
  assert(out_ptr != NULL);

  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_ceiling_nc_f32:
      return xnn_setup_ceiling_nc_f32(
        opdata->operator_objects[0], in_ptr, out_ptr);
    case xnn_operator_type_ceiling_nc_f16:
      return xnn_setup_ceiling_nc_f16(
        opdata->operator_objects[0], in_ptr, out_ptr);
    default:
      XNN_UNREACHABLE;
  }
}
// Defines a Ceiling (round-up) node in the subgraph. Validates input/output
// tensor IDs, types, and shapes, then records the node with its
// create/reshape/setup callbacks. Returns xnn_status_success on success or
// an error status on invalid parameters.
enum xnn_status xnn_define_ceiling(
  xnn_subgraph_t subgraph,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_ceiling)) != xnn_status_success) {
    return status;
  }

  // Validate the input tensor: valid ID, dense layout, FP32 datatype.
  if ((status = xnn_subgraph_check_input_node_id(xnn_node_type_ceiling, input_id, subgraph->num_values)) !=
      xnn_status_success) {
    return status;
  }

  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_ceiling, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }

  switch (input_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_ceiling), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }

  // Validate the output tensor and require it to match the input shape
  // exactly (element-wise operator).
  status = xnn_subgraph_check_output_node_id(xnn_node_type_ceiling, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }

  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_ceiling, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }

  status = xnn_subgraph_check_all_dims_match(xnn_node_type_ceiling, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }

  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_ceiling), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }

  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }

  // Record node metadata; fp16 support is selected later at create time
  // (see create_ceiling_operator), the node itself is typed fp32.
  node->type = xnn_node_type_ceiling;
  node->compute_type = xnn_compute_type_fp32;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;

  node->create = create_ceiling_operator;
  node->reshape = reshape_ceiling_operator;
  node->setup = setup_ceiling_operator;

  return xnn_status_success;
}
| 6,042
| 29.366834
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/clamp.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/requantization.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Creates the NC clamp operator matching this node's compute type. For the
// quantized paths the float activation bounds are requantized into the
// output tensor's integer domain before being handed to the operator.
static enum xnn_status create_clamp_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);

  assert(node->num_outputs == 1);
  const uint32_t output_id = node->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);

  // The innermost dimension acts as channels; a rank-0 tensor has 1 channel.
  const size_t num_input_dims = values[input_id].shape.num_dims;
  const size_t channel_dim = num_input_dims == 0 ? 1 : values[input_id].shape.dim[num_input_dims - 1];

  enum xnn_status status;
  switch (node->compute_type) {
    case xnn_compute_type_fp16:
      status = xnn_create_clamp_nc_f16(
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->activation.output_min,
        node->activation.output_max,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    case xnn_compute_type_fp32:
      status = xnn_create_clamp_nc_f32(
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->activation.output_min,
        node->activation.output_max,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    case xnn_compute_type_qs8:
    {
      // Requantize the float clamp bounds into signed 8-bit output space.
      const float output_scale = values[output_id].quantization.scale;
      const int32_t output_zero_point = values[output_id].quantization.zero_point;
      const int8_t output_min = xnn_qs8_quantize(node->activation.output_min, output_scale, output_zero_point);
      const int8_t output_max = xnn_qs8_quantize(node->activation.output_max, output_scale, output_zero_point);
      status = xnn_create_clamp_nc_s8(
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        output_min,
        output_max,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    }
    case xnn_compute_type_qu8:
    {
      // Requantize the float clamp bounds into unsigned 8-bit output space.
      const float output_scale = values[output_id].quantization.scale;
      const int32_t output_zero_point = values[output_id].quantization.zero_point;
      const uint8_t output_min = xnn_qu8_quantize(node->activation.output_min, output_scale, output_zero_point);
      const uint8_t output_max = xnn_qu8_quantize(node->activation.output_max, output_scale, output_zero_point);
      status = xnn_create_clamp_nc_u8(
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        output_min,
        output_max,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    }
    default:
      XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    opdata->batch_size = xnn_shape_multiply_non_channel_dims(&values[input_id].shape);
  }
  return status;
}
// Reshapes the clamp operator backing this node to the batch size recorded in
// opdata, dispatching on the concrete operator type chosen at create time.
// Fix: removed an unreachable `break;` that followed the `return` in the
// clamp_nc_u8 case (dead code, CERT MSC12-C).
static enum xnn_status reshape_clamp_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_clamp_nc_f16:
      return xnn_reshape_clamp_nc_f16(
        opdata->operator_objects[0],
        opdata->batch_size,
        threadpool);
    case xnn_operator_type_clamp_nc_f32:
      return xnn_reshape_clamp_nc_f32(
        opdata->operator_objects[0],
        opdata->batch_size,
        threadpool);
    case xnn_operator_type_clamp_nc_s8:
      return xnn_reshape_clamp_nc_s8(
        opdata->operator_objects[0],
        opdata->batch_size,
        threadpool);
    case xnn_operator_type_clamp_nc_u8:
      return xnn_reshape_clamp_nc_u8(
        opdata->operator_objects[0],
        opdata->batch_size,
        threadpool);
    default:
      XNN_UNREACHABLE;
  }
}
// Binds the node's input/output tensor pointers to the clamp operator created
// earlier, dispatching on the concrete operator type.
// Fix: removed an unreachable `break;` that followed the `return` in the
// clamp_nc_u8 case (dead code, CERT MSC12-C).
static enum xnn_status setup_clamp_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);

  const struct xnn_value* input_value = values + input_id;
  const void* input_data = input_value->data;
  assert(input_data != NULL);

  const struct xnn_value* output_value = values + output_id;
  void* output_data = output_value->data;
  assert(output_data != NULL);

  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_clamp_nc_f16:
      return xnn_setup_clamp_nc_f16(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_clamp_nc_f32:
      return xnn_setup_clamp_nc_f32(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_clamp_nc_s8:
      return xnn_setup_clamp_nc_s8(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_clamp_nc_u8:
      return xnn_setup_clamp_nc_u8(
        opdata->operator_objects[0],
        input_data,
        output_data);
    default:
      XNN_UNREACHABLE;
  }
}
// Defines a Clamp node: output = min(max(input, output_min), output_max),
// applied elementwise. Supported datatypes: fp32, qint8, quint8. For the
// quantized types the float bounds are quantized with the output's
// scale/zero point at operator-creation time (see create_clamp_operator).
// NOTE(review): create_clamp_operator also handles xnn_compute_type_fp16,
// but fp16 is rejected here — presumably fp16 clamp nodes are produced by a
// later rewrite pass rather than defined directly; confirm.
enum xnn_status xnn_define_clamp(
  xnn_subgraph_t subgraph,
  float output_min,
  float output_max,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  // Fail fast if XNNPACK was never initialized.
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_clamp)) != xnn_status_success) {
    return status;
  }

  // Validate the input: valid ID, dense tensor, supported datatype.
  if ((status = xnn_subgraph_check_input_node_id(xnn_node_type_clamp, input_id, subgraph->num_values)) !=
      xnn_status_success) {
    return status;
  }

  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_clamp, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }

  switch (input_value->datatype) {
    case xnn_datatype_fp32:
    case xnn_datatype_qint8:
    case xnn_datatype_quint8:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_clamp), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }

  // Validate the output and that it matches the input in shape.
  status = xnn_subgraph_check_output_node_id(xnn_node_type_clamp, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }

  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_clamp, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }

  status = xnn_subgraph_check_all_dims_match(xnn_node_type_clamp, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }

  // Map the output datatype to a compute type; clamp is elementwise, so the
  // input and output compute types coincide.
  enum xnn_compute_type compute_type = xnn_compute_type_invalid;
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      compute_type = xnn_compute_type_fp32;
      break;
    case xnn_datatype_qint8:
      compute_type = xnn_compute_type_qs8;
      break;
    case xnn_datatype_quint8:
      compute_type = xnn_compute_type_qu8;
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_clamp), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  assert(compute_type != xnn_compute_type_invalid);

  status = xnn_subgraph_check_datatype_matches(xnn_node_type_clamp, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }

  // Clamp does not requantize: quantized input and output must share
  // quantization parameters.
  status = xnn_subgraph_check_quantization_parameter_matches(
      xnn_node_type_clamp, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }

  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }

  node->type = xnn_node_type_clamp;
  node->compute_type = compute_type;
  node->activation.output_min = output_min;
  node->activation.output_max = output_max;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  // Lifecycle callbacks invoked by the runtime.
  node->create = create_clamp_operator;
  node->reshape = reshape_clamp_operator;
  node->setup = setup_clamp_operator;
  return xnn_status_success;
}
| 9,203
| 31.522968
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/concatenate.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
static enum xnn_status create_concatenate_operator_helper(
const struct xnn_node *node,
size_t channels,
size_t input_stride,
size_t output_stride,
struct xnn_operator_data *opdata,
size_t index)
{
switch (node->compute_type) {
case xnn_compute_type_fp16:
return xnn_create_copy_nc_x16(channels, input_stride, output_stride, node->flags, &opdata->operator_objects[index]);
case xnn_compute_type_fp32:
return xnn_create_copy_nc_x32(channels, input_stride, output_stride, node->flags, &opdata->operator_objects[index]);
case xnn_compute_type_qs8:
case xnn_compute_type_qu8:
return xnn_create_copy_nc_x8(channels, input_stride, output_stride, node->flags, &opdata->operator_objects[index]);
default:
XNN_UNREACHABLE;
}
}
static enum xnn_status create_concatenate2_operator(
const struct xnn_node* node,
const struct xnn_value* values,
size_t num_values,
struct xnn_operator_data* opdata,
struct xnn_code_cache* code_cache,
struct xnn_weights_cache* weights_cache)
{
assert(node->num_inputs == 2);
const uint32_t input1_id = node->inputs[0];
assert(input1_id != XNN_INVALID_VALUE_ID);
assert(input1_id < num_values);
const uint32_t input2_id = node->inputs[1];
assert(input2_id != XNN_INVALID_VALUE_ID);
assert(input2_id < num_values);
assert(node->num_outputs == 1);
const uint32_t output_id = node->outputs[0];
assert(output_id != XNN_INVALID_VALUE_ID);
assert(output_id < num_values);
const size_t axis = node->params.concatenate.axis;
size_t batch_size = 1, channels_1 = 1, channels_2 = 1;
for (size_t i = 0; i < axis; i++) {
batch_size *= values[output_id].shape.dim[i];
}
for (size_t i = axis; i < values[input1_id].shape.num_dims; i++) {
channels_1 *= values[input1_id].shape.dim[i];
channels_2 *= values[input2_id].shape.dim[i];
}
const size_t output_stride = channels_1 + channels_2;
enum xnn_status status;
status = create_concatenate_operator_helper(node, channels_1, channels_1, output_stride, opdata, 0);
if (status != xnn_status_success) {
return status;
}
status = create_concatenate_operator_helper(node, channels_2, channels_2, output_stride, opdata, 1);
if (status != xnn_status_success) {
return status;
}
opdata->batch_size = batch_size;
return status;
}
static enum xnn_status create_concatenate3_operator(
const struct xnn_node* node,
const struct xnn_value* values,
size_t num_values,
struct xnn_operator_data* opdata,
struct xnn_code_cache* code_cache,
struct xnn_weights_cache* weights_cache)
{
assert(node->num_inputs == 3);
const uint32_t input1_id = node->inputs[0];
assert(input1_id != XNN_INVALID_VALUE_ID);
assert(input1_id < num_values);
const uint32_t input2_id = node->inputs[1];
assert(input2_id != XNN_INVALID_VALUE_ID);
assert(input2_id < num_values);
const uint32_t input3_id = node->inputs[2];
assert(input3_id != XNN_INVALID_VALUE_ID);
assert(input3_id < num_values);
assert(node->num_outputs == 1);
const uint32_t output_id = node->outputs[0];
assert(output_id != XNN_INVALID_VALUE_ID);
assert(output_id < num_values);
const size_t axis = node->params.concatenate.axis;
size_t batch_size = 1, channels_1 = 1, channels_2 = 1, channels_3 = 1;
for (size_t i = 0; i < axis; i++) {
batch_size *= values[output_id].shape.dim[i];
}
for (size_t i = axis; i < values[input1_id].shape.num_dims; i++) {
channels_1 *= values[input1_id].shape.dim[i];
channels_2 *= values[input2_id].shape.dim[i];
channels_3 *= values[input3_id].shape.dim[i];
}
const size_t output_stride = channels_1 + channels_2 + channels_3;
enum xnn_status status;
status = create_concatenate_operator_helper(node, channels_1, channels_1, output_stride, opdata, 0);
if (status != xnn_status_success) {
return status;
}
status = create_concatenate_operator_helper(node, channels_2, channels_2, output_stride, opdata, 1);
if (status != xnn_status_success) {
return status;
}
status = create_concatenate_operator_helper(node, channels_3, channels_3, output_stride, opdata, 2);
if (status != xnn_status_success) {
return status;
}
opdata->batch_size = batch_size;
return status;
}
static enum xnn_status create_concatenate4_operator(
const struct xnn_node* node,
const struct xnn_value* values,
size_t num_values,
struct xnn_operator_data* opdata,
struct xnn_code_cache* code_cache,
struct xnn_weights_cache* weights_cache)
{
assert(node->num_inputs == 4);
const uint32_t input1_id = node->inputs[0];
assert(input1_id != XNN_INVALID_VALUE_ID);
assert(input1_id < num_values);
const uint32_t input2_id = node->inputs[1];
assert(input2_id != XNN_INVALID_VALUE_ID);
assert(input2_id < num_values);
const uint32_t input3_id = node->inputs[2];
assert(input3_id != XNN_INVALID_VALUE_ID);
assert(input3_id < num_values);
const uint32_t input4_id = node->inputs[3];
assert(input4_id != XNN_INVALID_VALUE_ID);
assert(input4_id < num_values);
assert(node->num_outputs == 1);
const uint32_t output_id = node->outputs[0];
assert(output_id != XNN_INVALID_VALUE_ID);
assert(output_id < num_values);
const size_t axis = node->params.concatenate.axis;
size_t batch_size = 1, channels_1 = 1, channels_2 = 1, channels_3 = 1, channels_4 = 1;
for (size_t i = 0; i < axis; i++) {
batch_size *= values[output_id].shape.dim[i];
}
for (size_t i = axis; i < values[input1_id].shape.num_dims; i++) {
channels_1 *= values[input1_id].shape.dim[i];
channels_2 *= values[input2_id].shape.dim[i];
channels_3 *= values[input3_id].shape.dim[i];
channels_4 *= values[input4_id].shape.dim[i];
}
const size_t output_stride = channels_1 + channels_2 + channels_3 + channels_4;
enum xnn_status status;
status = create_concatenate_operator_helper(node, channels_1, channels_1, output_stride, opdata, 0);
if (status != xnn_status_success) {
return status;
}
status = create_concatenate_operator_helper(node, channels_2, channels_2, output_stride, opdata, 1);
if (status != xnn_status_success) {
return status;
}
status = create_concatenate_operator_helper(node, channels_3, channels_3, output_stride, opdata, 2);
if (status != xnn_status_success) {
return status;
}
status = create_concatenate_operator_helper(node, channels_4, channels_4, output_stride, opdata, 3);
if (status != xnn_status_success) {
return status;
}
opdata->batch_size = batch_size;
return status;
}
// Reshapes the copy operator at `index` to the batch size recorded in opdata.
static enum xnn_status reshape_concatenate_operator_helper(
  const struct xnn_operator_data *opdata,
  size_t index,
  pthreadpool_t threadpool)
{
  xnn_operator_t op = opdata->operator_objects[index];
  const size_t batch_size = opdata->batch_size;
  switch (op->type) {
    case xnn_operator_type_copy_nc_x16:
      return xnn_reshape_copy_nc_x16(op, batch_size, threadpool);
    case xnn_operator_type_copy_nc_x32:
      return xnn_reshape_copy_nc_x32(op, batch_size, threadpool);
    case xnn_operator_type_copy_nc_x8:
      return xnn_reshape_copy_nc_x8(op, batch_size, threadpool);
    default:
      XNN_UNREACHABLE;
  }
}
// Reshapes both copy operators of a 2-input concatenation, stopping at the
// first failure.
static enum xnn_status reshape_concatenate2_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  enum xnn_status status = xnn_status_success;
  for (size_t i = 0; i < 2 && status == xnn_status_success; i++) {
    status = reshape_concatenate_operator_helper(opdata, i, threadpool);
  }
  return status;
}
// Reshapes all three copy operators of a 3-input concatenation, stopping at
// the first failure.
static enum xnn_status reshape_concatenate3_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  enum xnn_status status = xnn_status_success;
  for (size_t i = 0; i < 3 && status == xnn_status_success; i++) {
    status = reshape_concatenate_operator_helper(opdata, i, threadpool);
  }
  return status;
}
// Reshapes all four copy operators of a 4-input concatenation, stopping at
// the first failure.
static enum xnn_status reshape_concatenate4_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  enum xnn_status status = xnn_status_success;
  for (size_t i = 0; i < 4 && status == xnn_status_success; i++) {
    status = reshape_concatenate_operator_helper(opdata, i, threadpool);
  }
  return status;
}
// Runs setup for the copy operator at `index`. The output pointer is advanced
// past the channels written by operators 0..index-1, since each output row
// interleaves all inputs' channels.
static enum xnn_status setup_concatenate_operator_helper(
  const void* input_data,
  void* output_data,
  const struct xnn_operator_data *opdata,
  size_t index,
  pthreadpool_t threadpool)
{
  size_t channel_offset = 0;
  for (size_t i = 0; i < index; i++) {
    channel_offset += opdata->operator_objects[i]->channels;
  }

  xnn_operator_t op = opdata->operator_objects[index];
  switch (op->type) {
    case xnn_operator_type_copy_nc_x16:
      return xnn_setup_copy_nc_x16(op, input_data, (uint16_t*) output_data + channel_offset);
    case xnn_operator_type_copy_nc_x32:
      return xnn_setup_copy_nc_x32(op, input_data, (uint32_t*) output_data + channel_offset);
    case xnn_operator_type_copy_nc_x8:
      return xnn_setup_copy_nc_x8(op, input_data, (uint8_t*) output_data + channel_offset);
    default:
      XNN_UNREACHABLE;
  }
}
// Binds tensor pointers for a 2-input concatenation: gathers and validates
// all data pointers, then sets up each copy operator in order.
static enum xnn_status setup_concatenate2_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const void* inputs[2];
  for (size_t i = 0; i < 2; i++) {
    const uint32_t input_id = opdata->inputs[i];
    assert(input_id != XNN_INVALID_VALUE_ID);
    assert(input_id < num_values);
    inputs[i] = values[input_id].data;
    assert(inputs[i] != NULL);
  }

  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  void* output_data = values[output_id].data;
  assert(output_data != NULL);

  enum xnn_status status = xnn_status_success;
  for (size_t i = 0; i < 2 && status == xnn_status_success; i++) {
    status = setup_concatenate_operator_helper(inputs[i], output_data, opdata, i, threadpool);
  }
  return status;
}
// Binds tensor pointers for a 3-input concatenation: gathers and validates
// all data pointers, then sets up each copy operator in order.
static enum xnn_status setup_concatenate3_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const void* inputs[3];
  for (size_t i = 0; i < 3; i++) {
    const uint32_t input_id = opdata->inputs[i];
    assert(input_id != XNN_INVALID_VALUE_ID);
    assert(input_id < num_values);
    inputs[i] = values[input_id].data;
    assert(inputs[i] != NULL);
  }

  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  void* output_data = values[output_id].data;
  assert(output_data != NULL);

  enum xnn_status status = xnn_status_success;
  for (size_t i = 0; i < 3 && status == xnn_status_success; i++) {
    status = setup_concatenate_operator_helper(inputs[i], output_data, opdata, i, threadpool);
  }
  return status;
}
// Binds tensor pointers for a 4-input concatenation: gathers and validates
// all data pointers, then sets up each copy operator in order.
static enum xnn_status setup_concatenate4_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const void* inputs[4];
  for (size_t i = 0; i < 4; i++) {
    const uint32_t input_id = opdata->inputs[i];
    assert(input_id != XNN_INVALID_VALUE_ID);
    assert(input_id < num_values);
    inputs[i] = values[input_id].data;
    assert(inputs[i] != NULL);
  }

  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  void* output_data = values[output_id].data;
  assert(output_data != NULL);

  enum xnn_status status = xnn_status_success;
  for (size_t i = 0; i < 4 && status == xnn_status_success; i++) {
    status = setup_concatenate_operator_helper(inputs[i], output_data, opdata, i, threadpool);
  }
  return status;
}
// Validates the nth (1-based) input of a concatenation node against the
// node's output: the input ID must be valid, the input must be a dense
// tensor, and it must match the output in rank, datatype, and every
// dimension except `axis` (the concatenated dimension).
// NOTE(review): not declared static — confirm whether any other translation
// unit references it; if none does, it should be static to keep the global
// namespace clean.
enum xnn_status check_input_value(
  xnn_subgraph_t subgraph,
  size_t axis,
  uint32_t input_id,
  uint32_t output_id,
  size_t nth,
  enum xnn_node_type node_type)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_nth_input_node_id(node_type, input_id, subgraph->num_values, nth)) !=
      xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(node_type, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  // Input and output must have identical rank.
  if (input_value->shape.num_dims != output_value->shape.num_dims) {
    xnn_log_error(
      "failed to define %s operator with input %zu ID #%" PRIu32
      ": mismatch number of dimensions, input %zu has %zu, output has %zu",
      xnn_node_type_to_string(node_type), nth, input_id, nth, input_value->shape.num_dims,
      output_value->shape.num_dims);
    return xnn_status_invalid_parameter;
  }
  // Every dimension except the concatenation axis must agree with the output.
  for (size_t i = 0; i < input_value->shape.num_dims; i++) {
    if (i != axis && input_value->shape.dim[i] != output_value->shape.dim[i]) {
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32
        ": mismatch dimension %zu, input %zu has %zu, output has %zu",
        xnn_node_type_to_string(node_type), input_id, i, nth, input_value->shape.dim[i], output_value->shape.dim[i]);
      return xnn_status_invalid_parameter;
    }
  }
  status = xnn_subgraph_check_datatype_matches(node_type, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  return xnn_status_success;
}
// Checks that an input shares the output's quantization parameters (zero
// point and scale). `nth` is a human-readable ordinal ("first", "second", …)
// used only in error messages. Intended for quantized compute types, since
// the copy operators backing concatenation do not requantize.
// NOTE(review): not declared static — confirm whether any other translation
// unit references it; if none does, it should be static.
enum xnn_status check_input_compute_type(
  xnn_subgraph_t subgraph,
  uint32_t input_id,
  uint32_t output_id,
  const char* nth,
  enum xnn_node_type node_type)
{
  const struct xnn_value* input_value = &subgraph->values[input_id];
  const struct xnn_value* output_value = &subgraph->values[output_id];
  if (input_value->quantization.zero_point != output_value->quantization.zero_point) {
    xnn_log_error(
      "failed to define %s operator with input ID #%" PRIu32 " and output ID #%" PRIu32
      ": mismatching quantization zero point across the %s input (%" PRId32 ") and the output (%" PRId32 ")",
      xnn_node_type_to_string(node_type), input_id, output_id,
      nth, input_value->quantization.zero_point, output_value->quantization.zero_point);
    return xnn_status_invalid_parameter;
  }
  // Exact float comparison is intentional: the scales must be identical, not
  // merely close, for a bit-exact copy to be valid.
  if (input_value->quantization.scale != output_value->quantization.scale) {
    xnn_log_error(
      "failed to define %s operator with input ID #%" PRIu32 " and output ID #%" PRIu32
      ": mismatching quantization scale across the %s input (%.7g) and the output (%.7g)",
      xnn_node_type_to_string(node_type), input_id, output_id,
      nth, input_value->quantization.scale, output_value->quantization.scale);
    return xnn_status_invalid_parameter;
  }
  return xnn_status_success;
}
// Defines an N-way (2 <= N <= 4) concatenation node along `axis`.
// Validates that every input matches the output in rank, datatype, and all
// dimensions except `axis`, and that the output's `axis` dimension equals the
// sum of the inputs' `axis` dimensions. For quantized datatypes every input
// must additionally share the output's quantization parameters.
// Fix: the third/fourth-input quantization checks previously ran for ALL
// compute types (they sat outside the qs8/qu8 guard that wraps the
// first/second checks), inspecting quantization fields of float values; they
// are now inside the quantized branch, consistent with the first two inputs.
enum xnn_status xnn_define_concatenate_n(
  enum xnn_node_type node_type,
  xnn_subgraph_t subgraph,
  size_t axis,
  size_t num_inputs,
  uint32_t* input_ids,
  uint32_t output_id,
  uint32_t flags)
{
  assert(num_inputs >= 2);
  assert(num_inputs <= 4);

  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(node_type)) != xnn_status_success) {
    return status;
  }

  status = xnn_subgraph_check_output_node_id(node_type, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }

  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(node_type, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }

  if (axis >= output_value->shape.num_dims) {
    xnn_log_error(
      "failed to define %s operator with the output ID #%" PRIu32
      ": axis (%zu) exceeds the number of dimensions (%zu)",
      xnn_node_type_to_string(node_type), output_id, axis, output_value->shape.num_dims);
    return xnn_status_invalid_parameter;
  }

  for (size_t i = 0; i < num_inputs; i++) {
    status = check_input_value(subgraph, axis, input_ids[i], output_id, i+1, node_type);
    if (status != xnn_status_success) {
      return status;
    }
  }

  // The output's axis dimension must be exactly partitioned by the inputs.
  size_t input_axis_dimensions_sum = 0;
  for (size_t i = 0; i < num_inputs; i++) {
    const struct xnn_value* input_value = &subgraph->values[input_ids[i]];
    input_axis_dimensions_sum += input_value->shape.dim[axis];
  }
  if (output_value->shape.dim[axis] != input_axis_dimensions_sum) {
    xnn_log_error(
      "failed to define %s operator with output ID #%" PRIu32
      ": mismatch axis dimension %zu, output has %zu, sum of input dimensions is %zu",
      xnn_node_type_to_string(node_type), output_id, axis, output_value->shape.dim[axis], input_axis_dimensions_sum);
    return xnn_status_invalid_parameter;
  }

  enum xnn_compute_type compute_type = xnn_compute_type_invalid;
  switch (output_value->datatype) {
    case xnn_datatype_fp16:
      compute_type = xnn_compute_type_fp16;
      break;
    case xnn_datatype_fp32:
      compute_type = xnn_compute_type_fp32;
      break;
    case xnn_datatype_qint8:
      compute_type = xnn_compute_type_qs8;
      break;
    case xnn_datatype_quint8:
      compute_type = xnn_compute_type_qu8;
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(node_type), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }

  // Quantization-parameter checks are meaningful only for quantized types:
  // the copy operators backing concatenation do not requantize.
  if (compute_type == xnn_compute_type_qs8 || compute_type == xnn_compute_type_qu8) {
    status = check_input_compute_type(subgraph, input_ids[0], output_id, "first", node_type);
    if (status != xnn_status_success) {
      return status;
    }
    status = check_input_compute_type(subgraph, input_ids[1], output_id, "second", node_type);
    if (status != xnn_status_success) {
      return status;
    }
    if (num_inputs > 2) {
      status = check_input_compute_type(subgraph, input_ids[2], output_id, "third", node_type);
      if (status != xnn_status_success) {
        return status;
      }
    }
    if (num_inputs > 3) {
      status = check_input_compute_type(subgraph, input_ids[3], output_id, "fourth", node_type);
      if (status != xnn_status_success) {
        return status;
      }
    }
  }

  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }

  node->params.concatenate.axis = axis;
  node->type = node_type;
  node->compute_type = compute_type;
  node->num_inputs = num_inputs;
  node->inputs[0] = input_ids[0];
  node->inputs[1] = input_ids[1];
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;

  // Bind the N-specific lifecycle callbacks and the remaining input IDs.
  switch (num_inputs) {
    case 2:
      node->create = create_concatenate2_operator;
      node->reshape = reshape_concatenate2_operator;
      node->setup = setup_concatenate2_operator;
      break;
    case 3:
      node->create = create_concatenate3_operator;
      node->reshape = reshape_concatenate3_operator;
      node->setup = setup_concatenate3_operator;
      node->inputs[2] = input_ids[2];
      break;
    case 4:
      node->create = create_concatenate4_operator;
      node->reshape = reshape_concatenate4_operator;
      node->setup = setup_concatenate4_operator;
      node->inputs[2] = input_ids[2];
      node->inputs[3] = input_ids[3];
      break;
    default:
      XNN_UNREACHABLE;
  }
  return xnn_status_success;
}
// Public entry point: 2-input concatenation along `axis`.
enum xnn_status xnn_define_concatenate2(
  xnn_subgraph_t subgraph,
  size_t axis,
  uint32_t input1_id,
  uint32_t input2_id,
  uint32_t output_id,
  uint32_t flags)
{
  uint32_t ids[] = { input1_id, input2_id };
  return xnn_define_concatenate_n(
    xnn_node_type_concatenate2, subgraph, axis, XNN_COUNT_OF(ids), ids, output_id, flags);
}
// Public entry point: 3-input concatenation along `axis`.
enum xnn_status xnn_define_concatenate3(
  xnn_subgraph_t subgraph,
  size_t axis,
  uint32_t input1_id,
  uint32_t input2_id,
  uint32_t input3_id,
  uint32_t output_id,
  uint32_t flags)
{
  uint32_t ids[] = { input1_id, input2_id, input3_id };
  return xnn_define_concatenate_n(
    xnn_node_type_concatenate3, subgraph, axis, XNN_COUNT_OF(ids), ids, output_id, flags);
}
// Public entry point: 4-input concatenation along `axis`.
enum xnn_status xnn_define_concatenate4(
  xnn_subgraph_t subgraph,
  size_t axis,
  uint32_t input1_id,
  uint32_t input2_id,
  uint32_t input3_id,
  uint32_t input4_id,
  uint32_t output_id,
  uint32_t flags)
{
  uint32_t ids[] = { input1_id, input2_id, input3_id, input4_id };
  return xnn_define_concatenate_n(
    xnn_node_type_concatenate4, subgraph, axis, XNN_COUNT_OF(ids), ids, output_id, flags);
}
| 24,210
| 31.89538
| 122
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/copy.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
static enum xnn_status create_copy_operator(
const struct xnn_node* node,
const struct xnn_value* values,
size_t num_values,
struct xnn_operator_data* opdata,
struct xnn_code_cache* code_cache,
struct xnn_weights_cache* weights_cache)
{
assert(node->num_inputs == 1);
const uint32_t input_id = node->inputs[0];
assert(input_id != XNN_INVALID_VALUE_ID);
assert(input_id < num_values);
assert(node->num_outputs == 1);
enum xnn_status status;
switch (node->compute_type) {
case xnn_compute_type_fp16:
status = xnn_create_copy_nc_x16(
1 /* channels */, 1 /* input stride */, 1 /* output stride */,
node->flags,
&opdata->operator_objects[0]);
break;
case xnn_compute_type_fp32:
status = xnn_create_copy_nc_x32(
1 /* channels */, 1 /* input stride */, 1 /* output stride */,
node->flags,
&opdata->operator_objects[0]);
break;
case xnn_compute_type_qs8:
case xnn_compute_type_qu8:
status = xnn_create_copy_nc_x8(
1 /* channels */, 1 /* input stride */, 1 /* output stride */,
node->flags,
&opdata->operator_objects[0]);
break;
default:
XNN_UNREACHABLE;
}
if (status == xnn_status_success) {
opdata->batch_size = xnn_shape_multiply_all_dims(&values[input_id].shape);
}
return status;
}
// Reshapes the copy operator backing a Copy node to the batch size recorded
// in opdata, dispatching on the concrete operator type.
// Fix: removed three unreachable `break;` statements that followed the
// `return` in each case (dead code, CERT MSC12-C).
static enum xnn_status reshape_copy_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_copy_nc_x8:
      return xnn_reshape_copy_nc_x8(
        opdata->operator_objects[0],
        opdata->batch_size,
        threadpool);
    case xnn_operator_type_copy_nc_x16:
      return xnn_reshape_copy_nc_x16(
        opdata->operator_objects[0],
        opdata->batch_size,
        threadpool);
    case xnn_operator_type_copy_nc_x32:
      return xnn_reshape_copy_nc_x32(
        opdata->operator_objects[0],
        opdata->batch_size,
        threadpool);
    default:
      XNN_UNREACHABLE;
  }
}
// Binds the node's input/output tensor pointers to the copy operator created
// earlier, dispatching on the concrete operator type.
// Fix: removed three unreachable `break;` statements that followed the
// `return` in each case (dead code, CERT MSC12-C).
static enum xnn_status setup_copy_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);

  const struct xnn_value* input_value = values + input_id;
  const void* input_data = input_value->data;
  assert(input_data != NULL);

  const struct xnn_value* output_value = values + output_id;
  void* output_data = output_value->data;
  assert(output_data != NULL);

  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_copy_nc_x8:
      return xnn_setup_copy_nc_x8(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_copy_nc_x16:
      return xnn_setup_copy_nc_x16(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_copy_nc_x32:
      return xnn_setup_copy_nc_x32(
        opdata->operator_objects[0],
        input_data,
        output_data);
    default:
      XNN_UNREACHABLE;
  }
}
// Validates a Copy node's input/output values and appends the node to the
// subgraph. Validation order determines which error is reported first, so
// the sequence of checks below is intentional.
enum xnn_status xnn_define_copy(
  xnn_subgraph_t subgraph,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_copy)) != xnn_status_success) {
    return status;
  }
  status = xnn_subgraph_check_input_node_id(xnn_node_type_copy, input_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_copy, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  // Copy supports FP32 plus signed/unsigned 8-bit quantized inputs.
  switch (input_value->datatype) {
    case xnn_datatype_fp32:
    case xnn_datatype_qint8:
    case xnn_datatype_quint8:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_copy), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(xnn_node_type_copy, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_copy, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  if (input_value->shape.num_dims != output_value->shape.num_dims) {
    xnn_log_error(
      "failed to define %s operator with input ID #%" PRIu32 " and output ID #%" PRIu32
      ": number of dimensions of input, %zu, does not match number of dimensions of output %zu",
      xnn_node_type_to_string(xnn_node_type_copy), input_id, output_id, input_value->shape.num_dims,
      output_value->shape.num_dims);
    return xnn_status_invalid_parameter;
  }
  for (size_t i = 0; i < input_value->shape.num_dims; i++) {
    if (input_value->shape.dim[i] != output_value->shape.dim[i]) {
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 " and output ID #%" PRIu32
        ": size of dimension %zu of input, %zu, does not match output %zu",
        xnn_node_type_to_string(xnn_node_type_copy), input_id, output_id, i, input_value->shape.dim[i],
        output_value->shape.dim[i]);
      return xnn_status_invalid_parameter;
    }
  }
  // NOTE(review): the explicit rank/dimension checks above appear to duplicate
  // this helper; both are kept since their error messages differ — confirm
  // whether the manual checks can be dropped.
  status = xnn_subgraph_check_all_dims_match(xnn_node_type_copy, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  enum xnn_compute_type compute_type = xnn_compute_type_invalid;
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      compute_type = xnn_compute_type_fp32;
      break;
    case xnn_datatype_qint8:
      compute_type = xnn_compute_type_qs8;
      break;
    case xnn_datatype_quint8:
      compute_type = xnn_compute_type_qu8;
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_copy), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_datatype_matches(xnn_node_type_copy, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  // Quantized copies require identical scale/zero-point on both sides.
  status = xnn_subgraph_check_quantization_parameter_matches(
      xnn_node_type_copy, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->type = xnn_node_type_copy;
  node->compute_type = compute_type;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_copy_operator;
  node->reshape = reshape_copy_operator;
  node->setup = setup_copy_operator;
  return xnn_status_success;
}
| 7,974
| 30.031128
| 115
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/depth-to-space.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Creates the depth-to-space operator object for a node, selecting the
// NCHW->NHWC or NHWC->NHWC variant from the input value's layout and the
// element width from the node's compute type. On success, the input/output
// spatial sizes are cached in opdata for the later reshape step.
static enum xnn_status create_depth_to_space_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  assert(node->num_outputs == 1);
  const uint32_t output_id = node->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  // Channel counts come from dim[3]; the shapes are assumed 4-D here.
  const size_t input_channel_dim = values[input_id].shape.dim[3];
  const size_t output_channel_dim = values[output_id].shape.dim[3];
  enum xnn_status status;
  if (values[input_id].layout == xnn_layout_type_nchw) {
    // NCHW input always produces an NHWC output (layout conversion variant).
    assert(values[output_id].layout == xnn_layout_type_nhwc);
    switch (node->compute_type) {
      case xnn_compute_type_fp16:
        status = xnn_create_depth_to_space_nchw2nhwc_x16(
          output_channel_dim /* output channels */,
          input_channel_dim /* input stride */,
          output_channel_dim /* output stride */,
          node->params.depth_to_space.block_size,
          node->flags,
          &opdata->operator_objects[0]);
        break;
      case xnn_compute_type_fp32:
        status = xnn_create_depth_to_space_nchw2nhwc_x32(
          output_channel_dim /* output channels */,
          input_channel_dim /* input stride */,
          output_channel_dim /* output stride */,
          node->params.depth_to_space.block_size,
          node->flags,
          &opdata->operator_objects[0]);
        break;
      default:
        XNN_UNREACHABLE;
    }
  } else {
    assert(values[input_id].layout == xnn_layout_type_nhwc);
    assert(values[output_id].layout == xnn_layout_type_nhwc);
    switch (node->compute_type) {
      case xnn_compute_type_fp16:
        status = xnn_create_depth_to_space_nhwc_x16(
          output_channel_dim /* output channels */,
          input_channel_dim /* input stride */,
          output_channel_dim /* output stride */,
          node->params.depth_to_space.block_size,
          node->flags,
          &opdata->operator_objects[0]);
        break;
      case xnn_compute_type_fp32:
        status = xnn_create_depth_to_space_nhwc_x32(
          output_channel_dim /* output channels */,
          input_channel_dim /* input stride */,
          output_channel_dim /* output stride */,
          node->params.depth_to_space.block_size,
          node->flags,
          &opdata->operator_objects[0]);
        break;
      case xnn_compute_type_qs8:
      case xnn_compute_type_qu8:
        // Both 8-bit quantized flavors share the byte-wise x8 kernel.
        status = xnn_create_depth_to_space_nhwc_x8(
          output_channel_dim /* output channels */,
          input_channel_dim /* input stride */,
          output_channel_dim /* output stride */,
          node->params.depth_to_space.block_size,
          node->flags,
          &opdata->operator_objects[0]);
        break;
      default:
        XNN_UNREACHABLE;
    }
  }
  if (status == xnn_status_success) {
    // Cache shape info needed by reshape_depth_to_space_operator.
    opdata->batch_size = values[input_id].shape.dim[0];
    opdata->input_height = values[input_id].shape.dim[1];
    opdata->input_width = values[input_id].shape.dim[2];
    opdata->output_height = values[output_id].shape.dim[1];
    opdata->output_width = values[output_id].shape.dim[2];
  }
  return status;
}
// Re-runs shape inference for the depth-to-space operator using the input
// dimensions cached at create time; dispatches on the concrete operator type.
// The output-size out-parameters are not needed here, hence NULL.
static enum xnn_status reshape_depth_to_space_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_depth_to_space_nchw2nhwc_x16:
      return xnn_reshape_depth_to_space_nchw2nhwc_x16(
        opdata->operator_objects[0],
        opdata->batch_size,
        opdata->input_height,
        opdata->input_width,
        /*output_height_out=*/NULL,
        /*output_width_out=*/NULL,
        /*output_channels_out=*/NULL,
        threadpool);
    case xnn_operator_type_depth_to_space_nchw2nhwc_x32:
      return xnn_reshape_depth_to_space_nchw2nhwc_x32(
        opdata->operator_objects[0],
        opdata->batch_size,
        opdata->input_height,
        opdata->input_width,
        /*output_height_out=*/NULL,
        /*output_width_out=*/NULL,
        /*output_channels_out=*/NULL,
        threadpool);
    case xnn_operator_type_depth_to_space_nhwc_x16:
      return xnn_reshape_depth_to_space_nhwc_x16(
        opdata->operator_objects[0],
        opdata->batch_size,
        opdata->input_height,
        opdata->input_width,
        /*output_height_out=*/NULL,
        /*output_width_out=*/NULL,
        /*output_channels_out=*/NULL,
        threadpool);
    case xnn_operator_type_depth_to_space_nhwc_x32:
      return xnn_reshape_depth_to_space_nhwc_x32(
        opdata->operator_objects[0],
        opdata->batch_size,
        opdata->input_height,
        opdata->input_width,
        /*output_height_out=*/NULL,
        /*output_width_out=*/NULL,
        /*output_channels_out=*/NULL,
        threadpool);
    case xnn_operator_type_depth_to_space_nhwc_x8:
      return xnn_reshape_depth_to_space_nhwc_x8(
        opdata->operator_objects[0],
        opdata->batch_size,
        opdata->input_height,
        opdata->input_width,
        /*output_height_out=*/NULL,
        /*output_width_out=*/NULL,
        /*output_channels_out=*/NULL,
        threadpool);
    default:
      XNN_UNREACHABLE;
  }
}
// Binds the input/output tensor buffers to the depth-to-space operator;
// dispatches on the concrete operator type chosen at create time.
static enum xnn_status setup_depth_to_space_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  const struct xnn_value* input_value = values + input_id;
  const void* input_data = input_value->data;
  assert(input_data != NULL);
  const struct xnn_value* output_value = values + output_id;
  void* output_data = output_value->data;
  assert(output_data != NULL);
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_depth_to_space_nchw2nhwc_x16:
      return xnn_setup_depth_to_space_nchw2nhwc_x16(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_depth_to_space_nchw2nhwc_x32:
      return xnn_setup_depth_to_space_nchw2nhwc_x32(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_depth_to_space_nhwc_x16:
      return xnn_setup_depth_to_space_nhwc_x16(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_depth_to_space_nhwc_x32:
      return xnn_setup_depth_to_space_nhwc_x32(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_depth_to_space_nhwc_x8:
      return xnn_setup_depth_to_space_nhwc_x8(
        opdata->operator_objects[0],
        input_data,
        output_data);
    default:
      XNN_UNREACHABLE;
  }
}
// Validates a Depth-to-Space node (datatypes, quantization, block size) and
// appends it to the subgraph. Check order determines error precedence.
enum xnn_status xnn_define_depth_to_space(
  xnn_subgraph_t subgraph,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t block_size,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_depth_to_space)) != xnn_status_success) {
    return status;
  }
  if ((status = xnn_subgraph_check_input_node_id(xnn_node_type_depth_to_space, input_id, subgraph->num_values)) !=
      xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_depth_to_space, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  // FP32 plus signed/unsigned 8-bit quantized inputs are supported.
  switch (input_value->datatype) {
    case xnn_datatype_fp32:
    case xnn_datatype_qint8:
    case xnn_datatype_quint8:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_depth_to_space), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(xnn_node_type_depth_to_space, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_depth_to_space, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  enum xnn_compute_type compute_type = xnn_compute_type_invalid;
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      compute_type = xnn_compute_type_fp32;
      break;
    case xnn_datatype_qint8:
      compute_type = xnn_compute_type_qs8;
      break;
    case xnn_datatype_quint8:
      compute_type = xnn_compute_type_qu8;
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_depth_to_space), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  assert(compute_type != xnn_compute_type_invalid);
  status = xnn_subgraph_check_datatype_matches(
    xnn_node_type_depth_to_space, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  // Quantized depth-to-space requires identical scale/zero-point on both sides.
  status = xnn_subgraph_check_quantization_parameter_matches(
      xnn_node_type_depth_to_space, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  // A block size of 0 or 1 would be a no-op or degenerate; reject it.
  if (block_size < 2) {
    xnn_log_error(
      "failed to define %s operator with block size #%" PRIu32 ": invalid block_size",
      xnn_node_type_to_string(xnn_node_type_depth_to_space), block_size);
    return xnn_status_invalid_parameter;
  }
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->type = xnn_node_type_depth_to_space;
  node->compute_type = compute_type;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->params.depth_to_space.block_size = block_size;
  node->flags = flags;
  node->create = create_depth_to_space_operator;
  node->reshape = reshape_depth_to_space_operator;
  node->setup = setup_depth_to_space_operator;
  return xnn_status_success;
}
| 11,296
| 33.129909
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/divide.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Creates the N-dimensional divide operator for a node and snapshots both
// input shapes into opdata so that reshape can re-run broadcasting later.
// For NCHW values the stored shape is reordered: dim[0] stays, the trailing
// dimension moves to position 1, and the middle dimensions shift to 2+.
//
// Bug fix: the guard for copying the SECOND input's middle dimensions used
// to test values[input1_id].shape.num_dims. When the two ranks differed this
// either skipped the copy (leaving stale dims in shape2) or let the memcpy
// length (num_dims - 2) underflow for a rank-0/1 second input. The guard now
// tests input2's own rank.
static enum xnn_status create_divide_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 2);
  const uint32_t input1_id = node->inputs[0];
  assert(input1_id != XNN_INVALID_VALUE_ID);
  assert(input1_id < num_values);
  const uint32_t input2_id = node->inputs[1];
  assert(input2_id != XNN_INVALID_VALUE_ID);
  assert(input2_id < num_values);
  assert(node->num_outputs == 1);
  const uint32_t output_id = node->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  enum xnn_status status;
  switch (node->compute_type) {
    case xnn_compute_type_fp16:
      status = xnn_create_divide_nd_f16(
        node->activation.output_min,
        node->activation.output_max,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    case xnn_compute_type_fp32:
      status = xnn_create_divide_nd_f32(
        node->activation.output_min,
        node->activation.output_max,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    default:
      XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    opdata->shape1.num_dims = values[input1_id].shape.num_dims;
    opdata->shape2.num_dims = values[input2_id].shape.num_dims;
    if (values[output_id].layout == xnn_layout_type_nchw) {
      assert(values[input1_id].layout == xnn_layout_type_nchw);
      assert(values[input2_id].layout == xnn_layout_type_nchw);
      opdata->shape1.dim[0] = values[input1_id].shape.dim[0];
      opdata->shape1.dim[1] = values[input1_id].shape.dim[values[input1_id].shape.num_dims - 1];
      if (values[input1_id].shape.num_dims > 2) {
        memcpy(&opdata->shape1.dim[2], &values[input1_id].shape.dim[1], (values[input1_id].shape.num_dims - 2) * sizeof(size_t));
      }
      opdata->shape2.dim[0] = values[input2_id].shape.dim[0];
      opdata->shape2.dim[1] = values[input2_id].shape.dim[values[input2_id].shape.num_dims - 1];
      if (values[input2_id].shape.num_dims > 2) {  // was input1_id: copy-paste bug
        memcpy(&opdata->shape2.dim[2], &values[input2_id].shape.dim[1], (values[input2_id].shape.num_dims - 2) * sizeof(size_t));
      }
    } else {
      assert(values[output_id].layout == xnn_layout_type_nhwc);
      assert(values[input1_id].layout == xnn_layout_type_nhwc);
      assert(values[input2_id].layout == xnn_layout_type_nhwc);
      memcpy(opdata->shape1.dim, values[input1_id].shape.dim, values[input1_id].shape.num_dims * sizeof(size_t));
      memcpy(opdata->shape2.dim, values[input2_id].shape.dim, values[input2_id].shape.num_dims * sizeof(size_t));
    }
  }
  return status;
}
// Re-runs shape inference for the divide operator using the two input
// shapes captured into opdata at create time.
static enum xnn_status reshape_divide_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  xnn_operator_t divide_op = opdata->operator_objects[0];
  switch (divide_op->type) {
    case xnn_operator_type_divide_nd_f16:
      return xnn_reshape_divide_nd_f16(
        divide_op,
        opdata->shape1.num_dims, opdata->shape1.dim,
        opdata->shape2.num_dims, opdata->shape2.dim,
        threadpool);
    case xnn_operator_type_divide_nd_f32:
      return xnn_reshape_divide_nd_f32(
        divide_op,
        opdata->shape1.num_dims, opdata->shape1.dim,
        opdata->shape2.num_dims, opdata->shape2.dim,
        threadpool);
    default:
      XNN_UNREACHABLE;
  }
}
// Attaches the two input buffers and the output buffer to the divide
// operator created earlier.
static enum xnn_status setup_divide_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t id_a = opdata->inputs[0];
  assert(id_a != XNN_INVALID_VALUE_ID);
  assert(id_a < num_values);
  const uint32_t id_b = opdata->inputs[1];
  assert(id_b != XNN_INVALID_VALUE_ID);
  assert(id_b < num_values);
  const uint32_t id_out = opdata->outputs[0];
  assert(id_out != XNN_INVALID_VALUE_ID);
  assert(id_out < num_values);
  const void* data_a = values[id_a].data;
  assert(data_a != NULL);
  const void* data_b = values[id_b].data;
  assert(data_b != NULL);
  void* data_out = values[id_out].data;
  assert(data_out != NULL);
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_divide_nd_f16:
      return xnn_setup_divide_nd_f16(
        opdata->operator_objects[0], data_a, data_b, data_out);
    case xnn_operator_type_divide_nd_f32:
      return xnn_setup_divide_nd_f32(
        opdata->operator_objects[0], data_a, data_b, data_out);
    default:
      XNN_UNREACHABLE;
  }
}
// Validates a Divide node (FP32-only inputs/output, min/max clamp range) and
// appends it to the subgraph. Check order determines error precedence.
enum xnn_status xnn_define_divide(
  xnn_subgraph_t subgraph,
  float output_min,
  float output_max,
  uint32_t input1_id,
  uint32_t input2_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_divide)) != xnn_status_success) {
    return status;
  }
  status = xnn_subgraph_check_output_min_max(xnn_node_type_divide, output_min, output_max);
  if (status != xnn_status_success) {
    return status;
  }
  if ((status = xnn_subgraph_check_nth_input_node_id(xnn_node_type_divide, input1_id, subgraph->num_values, 1)) !=
      xnn_status_success) {
    return status;
  }
  const struct xnn_value* input1_value = &subgraph->values[input1_id];
  status = xnn_subgraph_check_nth_input_type_dense(xnn_node_type_divide, input1_id, input1_value, 1);
  if (status != xnn_status_success) {
    return status;
  }
  // Only FP32 division is defined at the subgraph level.
  switch (input1_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with the first input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_divide), input1_id,
        xnn_datatype_to_string(input1_value->datatype), input1_value->datatype);
      return xnn_status_invalid_parameter;
  }
  if ((status = xnn_subgraph_check_nth_input_node_id(
        xnn_node_type_divide, input2_id, subgraph->num_values, 2)) != xnn_status_success) {
    return status;
  }
  const struct xnn_value* input2_value = &subgraph->values[input2_id];
  status = xnn_subgraph_check_nth_input_type_dense(xnn_node_type_divide, input2_id, input2_value, 2);
  if (status != xnn_status_success) {
    return status;
  }
  switch (input2_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with the second input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_divide), input2_id,
        xnn_datatype_to_string(input2_value->datatype), input2_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(xnn_node_type_divide, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_divide, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_divide), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->type = xnn_node_type_divide;
  node->compute_type = xnn_compute_type_fp32;
  node->activation.output_min = output_min;
  node->activation.output_max = output_max;
  node->num_inputs = 2;
  node->inputs[0] = input1_id;
  node->inputs[1] = input2_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_divide_operator;
  node->reshape = reshape_divide_operator;
  node->setup = setup_divide_operator;
  return xnn_status_success;
}
| 8,905
| 32.607547
| 129
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/elu.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Creates the ELU operator object for a node. The channel count is the last
// input dimension (1 for a 0-D input); the remaining dimensions are folded
// into batch_size. The QS8 variant additionally takes the input/output
// quantization parameters.
static enum xnn_status create_elu_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  assert(node->num_outputs == 1);
  const uint32_t output_id = node->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  const size_t num_input_dims = values[input_id].shape.num_dims;
  // Treat a scalar (0-D) input as a single channel.
  const size_t channel_dim = num_input_dims == 0 ? 1 : values[input_id].shape.dim[num_input_dims - 1];
  enum xnn_status status;
  switch (node->compute_type) {
    case xnn_compute_type_fp16:
      status = xnn_create_elu_nc_f16(
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->params.elu.alpha,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    case xnn_compute_type_fp32:
      status = xnn_create_elu_nc_f32(
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->params.elu.alpha,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    case xnn_compute_type_qs8:
      status = xnn_create_elu_nc_qs8(
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->params.elu.alpha,
        (int8_t) values[input_id].quantization.zero_point,
        values[input_id].quantization.scale,
        (int8_t) values[output_id].quantization.zero_point,
        values[output_id].quantization.scale,
        INT8_MIN, INT8_MAX,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    default:
      XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    // Everything except the channel dimension becomes the batch.
    opdata->batch_size = xnn_shape_multiply_non_channel_dims(&values[input_id].shape);
  }
  return status;
}
// Propagates the batch size captured at create time to the ELU operator.
static enum xnn_status reshape_elu_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  xnn_operator_t elu_op = opdata->operator_objects[0];
  const size_t batch_size = opdata->batch_size;
  switch (elu_op->type) {
    case xnn_operator_type_elu_nc_f16:
      return xnn_reshape_elu_nc_f16(elu_op, batch_size, threadpool);
    case xnn_operator_type_elu_nc_f32:
      return xnn_reshape_elu_nc_f32(elu_op, batch_size, threadpool);
    case xnn_operator_type_elu_nc_qs8:
      return xnn_reshape_elu_nc_qs8(elu_op, batch_size, threadpool);
    default:
      XNN_UNREACHABLE;
  }
}
// Binds the input and output tensor memory to the ELU operator created
// earlier; dispatches on the concrete operator type.
static enum xnn_status setup_elu_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t in_id = opdata->inputs[0];
  assert(in_id != XNN_INVALID_VALUE_ID);
  assert(in_id < num_values);
  const uint32_t out_id = opdata->outputs[0];
  assert(out_id != XNN_INVALID_VALUE_ID);
  assert(out_id < num_values);
  const void* in_data = values[in_id].data;
  assert(in_data != NULL);
  void* out_data = values[out_id].data;
  assert(out_data != NULL);
  xnn_operator_t elu_op = opdata->operator_objects[0];
  switch (elu_op->type) {
    case xnn_operator_type_elu_nc_f16:
      return xnn_setup_elu_nc_f16(elu_op, in_data, out_data);
    case xnn_operator_type_elu_nc_f32:
      return xnn_setup_elu_nc_f32(elu_op, in_data, out_data);
    case xnn_operator_type_elu_nc_qs8:
      return xnn_setup_elu_nc_qs8(elu_op, in_data, out_data);
    default:
      XNN_UNREACHABLE;
  }
}
// Validates an ELU node (alpha parameter, datatypes, matching shapes) and
// appends it to the subgraph. Check order determines error precedence.
enum xnn_status xnn_define_elu(
  xnn_subgraph_t subgraph,
  float alpha,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_elu)) != xnn_status_success) {
    return status;
  }
  // isnormal() also rejects NaN, infinity, zero, and subnormals.
  if (alpha <= 0.0f || !isnormal(alpha)) {
    xnn_log_error(
      "failed to define %s operator with %.7g alpha parameter: alpha must be finite, normalized, and positive",
      xnn_node_type_to_string(xnn_node_type_elu), alpha);
    return xnn_status_invalid_parameter;
  }
  if ((status = xnn_subgraph_check_input_node_id(xnn_node_type_elu, input_id, subgraph->num_values)) != xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_elu, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  // ELU supports FP32 and signed 8-bit quantized inputs (no QU8).
  switch (input_value->datatype) {
    case xnn_datatype_fp32:
    case xnn_datatype_qint8:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_elu), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(xnn_node_type_elu, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_elu, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  status = xnn_subgraph_check_all_dims_match(xnn_node_type_elu, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  enum xnn_compute_type compute_type = xnn_compute_type_invalid;
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      compute_type = xnn_compute_type_fp32;
      break;
    case xnn_datatype_qint8:
      compute_type = xnn_compute_type_qs8;
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_elu), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_datatype_matches(xnn_node_type_elu, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->type = xnn_node_type_elu;
  node->compute_type = compute_type;
  node->params.elu.alpha = alpha;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_elu_operator;
  node->reshape = reshape_elu_operator;
  node->setup = setup_elu_operator;
  return xnn_status_success;
}
| 7,668
| 30.302041
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/even-split.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h> // For size_t.
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Product of every dimension in front of the split axis.
static size_t calculate_batch_size(const struct xnn_value* input, size_t axis)
{
  size_t product = 1;
  size_t dim = 0;
  while (dim < axis) {
    product *= input->shape.dim[dim];
    dim++;
  }
  return product;
}
// Product of the split axis dimension and every dimension after it.
static size_t calculate_input_stride(const struct xnn_value* input, size_t axis)
{
  size_t stride = 1;
  size_t dim = input->shape.num_dims;
  // Multiply the same factors as a forward loop from axis to num_dims - 1.
  while (dim > axis) {
    dim--;
    stride *= input->shape.dim[dim];
  }
  return stride;
}
static enum xnn_status create_even_split_operator_helper(
const uint32_t output_id,
const struct xnn_node* node,
size_t channels,
size_t input_stride,
size_t output_stride,
struct xnn_operator_data* opdata,
size_t index)
{
if (output_id == XNN_INVALID_VALUE_ID) {
// Node's output value has been optimized away, don't even create operator object.
return xnn_status_success;
}
switch (node->compute_type) {
case xnn_compute_type_fp16:
return xnn_create_copy_nc_x16(
channels, input_stride, output_stride, node->flags, &opdata->operator_objects[index]);
case xnn_compute_type_fp32:
return xnn_create_copy_nc_x32(
channels, input_stride, output_stride, node->flags, &opdata->operator_objects[index]);
case xnn_compute_type_qs8:
case xnn_compute_type_qu8:
return xnn_create_copy_nc_x8(
channels, input_stride, output_stride, node->flags, &opdata->operator_objects[index]);
default:
XNN_UNREACHABLE;
}
}
// Creates the two copy operators implementing a 2-way even split along the
// node's split axis. Optimized-away outputs are mapped to
// XNN_INVALID_VALUE_ID so the helper skips them.
static enum xnn_status create_even_split2_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  assert(node->num_outputs == 2);
  uint32_t output1_id = node->outputs[0];
  assert(output1_id != XNN_INVALID_VALUE_ID);
  assert(output1_id < num_values);
  if (values[output1_id].type == xnn_value_type_invalid) {
    output1_id = XNN_INVALID_VALUE_ID;
  }
  uint32_t output2_id = node->outputs[1];
  assert(output2_id != XNN_INVALID_VALUE_ID);
  assert(output2_id < num_values);
  if (values[output2_id].type == xnn_value_type_invalid) {
    output2_id = XNN_INVALID_VALUE_ID;
  }
  const size_t axis = node->params.even_split.axis;
  const size_t batch_size = calculate_batch_size(&values[input_id], axis);
  const size_t input_stride = calculate_input_stride(&values[input_id], axis);
  // The split axis extent must divide evenly in two.
  assert(input_stride % 2 == 0);
  const size_t channels = input_stride / 2;
  const size_t output_stride = channels;
  enum xnn_status status;
  status = create_even_split_operator_helper(output1_id, node, channels, input_stride, output_stride, opdata, 0);
  if (status != xnn_status_success) {
    return status;
  }
  status = create_even_split_operator_helper(output2_id, node, channels, input_stride, output_stride, opdata, 1);
  if (status != xnn_status_success) {
    return status;
  }
  opdata->batch_size = batch_size;
  return status;
}
// Creates the three copy operators implementing a 3-way even split along the
// node's split axis. Optimized-away outputs are mapped to
// XNN_INVALID_VALUE_ID so the helper skips them.
// Fix: added the outputN_id validity/bounds asserts that the parallel
// create_even_split2_operator performs — previously values[outputN_id] was
// indexed before any (debug) bound check.
static enum xnn_status create_even_split3_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  assert(node->num_outputs == 3);
  uint32_t output1_id = node->outputs[0];
  assert(output1_id != XNN_INVALID_VALUE_ID);
  assert(output1_id < num_values);
  if (values[output1_id].type == xnn_value_type_invalid) {
    output1_id = XNN_INVALID_VALUE_ID;
  }
  uint32_t output2_id = node->outputs[1];
  assert(output2_id != XNN_INVALID_VALUE_ID);
  assert(output2_id < num_values);
  if (values[output2_id].type == xnn_value_type_invalid) {
    output2_id = XNN_INVALID_VALUE_ID;
  }
  uint32_t output3_id = node->outputs[2];
  assert(output3_id != XNN_INVALID_VALUE_ID);
  assert(output3_id < num_values);
  if (values[output3_id].type == xnn_value_type_invalid) {
    output3_id = XNN_INVALID_VALUE_ID;
  }
  const size_t axis = node->params.even_split.axis;
  const size_t batch_size = calculate_batch_size(&values[input_id], axis);
  const size_t input_stride = calculate_input_stride(&values[input_id], axis);
  // The split axis extent must divide evenly in three.
  assert(input_stride % 3 == 0);
  const size_t channels = input_stride / 3;
  const size_t output_stride = channels;
  enum xnn_status status;
  status = create_even_split_operator_helper(output1_id, node, channels, input_stride, output_stride, opdata, 0);
  if (status != xnn_status_success) {
    return status;
  }
  status = create_even_split_operator_helper(output2_id, node, channels, input_stride, output_stride, opdata, 1);
  if (status != xnn_status_success) {
    return status;
  }
  status = create_even_split_operator_helper(output3_id, node, channels, input_stride, output_stride, opdata, 2);
  if (status != xnn_status_success) {
    return status;
  }
  opdata->batch_size = batch_size;
  return status;
}
static enum xnn_status create_even_split4_operator(
const struct xnn_node* node,
const struct xnn_value* values,
size_t num_values,
struct xnn_operator_data* opdata,
struct xnn_code_cache* code_cache,
struct xnn_weights_cache* weights_cache)
{
assert(node->num_inputs == 1);
const uint32_t input_id = node->inputs[0];
assert(input_id != XNN_INVALID_VALUE_ID);
assert(input_id < num_values);
assert(node->num_outputs == 4);
uint32_t output1_id = node->outputs[0];
if (values[output1_id].type == xnn_value_type_invalid) {
output1_id = XNN_INVALID_VALUE_ID;
}
uint32_t output2_id = node->outputs[1];
if (values[output2_id].type == xnn_value_type_invalid) {
output2_id = XNN_INVALID_VALUE_ID;
}
uint32_t output3_id = node->outputs[2];
if (values[output3_id].type == xnn_value_type_invalid) {
output3_id = XNN_INVALID_VALUE_ID;
}
uint32_t output4_id = node->outputs[3];
if (values[output4_id].type == xnn_value_type_invalid) {
output4_id = XNN_INVALID_VALUE_ID;
}
const size_t axis = node->params.even_split.axis;
const size_t batch_size = calculate_batch_size(&values[input_id], axis);
const size_t input_stride = calculate_input_stride(&values[input_id], axis);
assert(input_stride % 4 == 0);
const size_t channels = input_stride / 4;
const size_t output_stride = channels;
enum xnn_status status;
status = create_even_split_operator_helper(output1_id, node, channels, input_stride, output_stride, opdata, 0);
if (status != xnn_status_success) {
return status;
}
status = create_even_split_operator_helper(output2_id, node, channels, input_stride, output_stride, opdata, 1);
if (status != xnn_status_success) {
return status;
}
status = create_even_split_operator_helper(output3_id, node, channels, input_stride, output_stride, opdata, 2);
if (status != xnn_status_success) {
return status;
}
status = create_even_split_operator_helper(output4_id, node, channels, input_stride, output_stride, opdata, 3);
if (status != xnn_status_success) {
return status;
}
opdata->batch_size = batch_size;
return status;
}
// Reshapes the copy operator that produces output `index` of an even-split
// node. Returns success without doing anything when that output's value was
// removed during graph optimization (invalid allocation type).
static enum xnn_status reshape_even_split_operator_helper(
  const struct xnn_value* values,
  const uint32_t num_values,
  struct xnn_operator_data* opdata,
  size_t index,
  pthreadpool_t threadpool)
{
  const uint32_t output_id = opdata->outputs[index];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  if (values[output_id].allocation_type == xnn_allocation_type_invalid) {
    assert(opdata->operator_objects[index] == NULL);
    // output_id was removed during optimization.
    return xnn_status_success;
  }
  // Dispatch on the element width selected when the operator was created.
  switch (opdata->operator_objects[index]->type) {
    case xnn_operator_type_copy_nc_x16:
      return xnn_reshape_copy_nc_x16(
        opdata->operator_objects[index], opdata->batch_size, threadpool);
    case xnn_operator_type_copy_nc_x32:
      return xnn_reshape_copy_nc_x32(
        opdata->operator_objects[index], opdata->batch_size, threadpool);
    case xnn_operator_type_copy_nc_x8:
      return xnn_reshape_copy_nc_x8(
        opdata->operator_objects[index], opdata->batch_size, threadpool);
    default:
      XNN_UNREACHABLE;
  }
}
// Reshapes both copy operators of a 2-way even split, stopping at the first failure.
static enum xnn_status reshape_even_split2_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  enum xnn_status status = xnn_status_success;
  for (size_t i = 0; i < 2; i++) {
    status = reshape_even_split_operator_helper(values, num_values, opdata, i, threadpool);
    if (status != xnn_status_success) {
      return status;
    }
  }
  return status;
}
// Reshapes the three copy operators of a 3-way even split, stopping at the first failure.
static enum xnn_status reshape_even_split3_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  enum xnn_status status = xnn_status_success;
  for (size_t i = 0; i < 3; i++) {
    status = reshape_even_split_operator_helper(values, num_values, opdata, i, threadpool);
    if (status != xnn_status_success) {
      return status;
    }
  }
  return status;
}
// Reshapes the four copy operators of a 4-way even split, stopping at the first failure.
static enum xnn_status reshape_even_split4_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  enum xnn_status status = xnn_status_success;
  for (size_t i = 0; i < 4; i++) {
    status = reshape_even_split_operator_helper(values, num_values, opdata, i, threadpool);
    if (status != xnn_status_success) {
      return status;
    }
  }
  return status;
}
// Binds input/output pointers for the copy operator that produces output
// `index` of an even-split node. The input pointer is advanced by
// `index * channels` elements so each copy reads its own slice of the input.
// Returns success without doing anything when the output was removed during
// graph optimization.
static enum xnn_status setup_even_split_operator_helper(
  const struct xnn_value* values,
  const uint32_t num_values,
  const struct xnn_operator_data* opdata,
  size_t index,
  const void* input_data,
  pthreadpool_t threadpool)
{
  const uint32_t output_id = opdata->outputs[index];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  if (values[output_id].allocation_type == xnn_allocation_type_invalid) {
    assert(opdata->operator_objects[index] == NULL);
    // output_id was removed during optimization.
    return xnn_status_success;
  }
  const size_t channels = opdata->operator_objects[index]->channels;
  assert(output_id < num_values);
  const struct xnn_value* output_value = values + output_id;
  void* output_data = output_value->data;
  assert(output_data != NULL);
  // The pointer cast width must match the copy operator's element width.
  switch (opdata->operator_objects[index]->type) {
    case xnn_operator_type_copy_nc_x16:
      return xnn_setup_copy_nc_x16(
        opdata->operator_objects[index], (const uint16_t*) input_data + index * channels,
        output_data);
    case xnn_operator_type_copy_nc_x32:
      return xnn_setup_copy_nc_x32(
        opdata->operator_objects[index], (const uint32_t*) input_data + index * channels,
        output_data);
    case xnn_operator_type_copy_nc_x8:
      return xnn_setup_copy_nc_x8(
        opdata->operator_objects[index], (const uint8_t*) input_data + index * channels,
        output_data);
    default:
      XNN_UNREACHABLE;
  }
}
// Sets up both copy operators of a 2-way even split against the shared input
// buffer, stopping at the first failure.
static enum xnn_status setup_even_split2_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const void* input_data = values[input_id].data;
  assert(input_data != NULL);

  enum xnn_status status = xnn_status_success;
  for (size_t i = 0; i < 2; i++) {
    status = setup_even_split_operator_helper(values, num_values, opdata, i, input_data, threadpool);
    if (status != xnn_status_success) {
      return status;
    }
  }
  return status;
}
// Sets up the three copy operators of a 3-way even split against the shared
// input buffer, stopping at the first failure.
static enum xnn_status setup_even_split3_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const void* input_data = values[input_id].data;
  assert(input_data != NULL);

  enum xnn_status status = xnn_status_success;
  for (size_t i = 0; i < 3; i++) {
    status = setup_even_split_operator_helper(values, num_values, opdata, i, input_data, threadpool);
    if (status != xnn_status_success) {
      return status;
    }
  }
  return status;
}
// Sets up the four copy operators of a 4-way even split against the shared
// input buffer, stopping at the first failure.
static enum xnn_status setup_even_split4_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const void* input_data = values[input_id].data;
  assert(input_data != NULL);

  enum xnn_status status = xnn_status_success;
  for (size_t i = 0; i < 4; i++) {
    status = setup_even_split_operator_helper(values, num_values, opdata, i, input_data, threadpool);
    if (status != xnn_status_success) {
      return status;
    }
  }
  return status;
}
// Validates one even-split output against the node's input: the output must be
// a valid dense output value with the same rank and datatype as the input, and
// must match the input on every dimension except `split_dim`.
// `nth` ("first", "second", ...) is used only in diagnostic messages.
enum xnn_status check_output_value(
  xnn_subgraph_t subgraph,
  size_t split_dim,
  uint32_t input_id,
  uint32_t output_id,
  const char* nth,
  enum xnn_node_type node_type)
{
  const struct xnn_value* input_value = &subgraph->values[input_id];
  const struct xnn_value* output_value = &subgraph->values[output_id];
  enum xnn_status status;
  status = xnn_subgraph_check_output_node_id(node_type, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  status = xnn_subgraph_check_output_type_dense(node_type, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  if (input_value->shape.num_dims != output_value->shape.num_dims) {
    xnn_log_error(
      "failed to define %s operator with %s output ID #%" PRIu32
      ": mismatch number of dimensions, input has %zu, %s output has %zu",
      xnn_node_type_to_string(node_type), nth, output_id, input_value->shape.num_dims,
      nth, output_value->shape.num_dims);
    return xnn_status_invalid_parameter;
  }
  // All non-split dimensions must match exactly; the split dimension is
  // checked separately by the caller (sum of outputs must equal the input).
  for (size_t i = 0; i < input_value->shape.num_dims; i++) {
    if (i != split_dim && input_value->shape.dim[i] != output_value->shape.dim[i]) {
      xnn_log_error(
        "failed to define %s operator with %s output ID #%" PRIu32
        ": mismatch dimension %zu, %s output has %zu, input has %zu",
        xnn_node_type_to_string(node_type), nth, output_id, i, nth, output_value->shape.dim[i],
        input_value->shape.dim[i]);
      return xnn_status_invalid_parameter;
    }
  }
  status = xnn_subgraph_check_datatype_matches(node_type, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  return xnn_status_success;
}
// For quantized even splits: verifies that the output shares the input's
// quantization zero point and scale (the underlying copy operators cannot
// requantize). `nth` is used only in diagnostic messages.
enum xnn_status check_output_compute_type(
  xnn_subgraph_t subgraph,
  uint32_t input_id,
  uint32_t output_id,
  const char* nth,
  enum xnn_node_type node_type)
{
  const struct xnn_value* input_value = &subgraph->values[input_id];
  const struct xnn_value* output_value = &subgraph->values[output_id];
  if (input_value->quantization.zero_point != output_value->quantization.zero_point) {
    xnn_log_error(
      "failed to define %s operator with input ID #%" PRIu32 " and output ID #%" PRIu32
      ": mismatching quantization zero point across the input (%" PRId32 ") and the %s output (%" PRId32 ")",
      xnn_node_type_to_string(node_type), input_id, output_id,
      input_value->quantization.zero_point, nth, output_value->quantization.zero_point);
    return xnn_status_invalid_parameter;
  }
  if (input_value->quantization.scale != output_value->quantization.scale) {
    xnn_log_error(
      "failed to define %s operator with input ID #%" PRIu32 " and output ID #%" PRIu32
      ": mismatching quantization scale across the input (%.7g) and the %s output (%.7g)",
      xnn_node_type_to_string(node_type), input_id, output_id, input_value->quantization.scale,
      nth, output_value->quantization.scale);
    return xnn_status_invalid_parameter;
  }
  return xnn_status_success;
}
// Defines an N-way (2 <= N <= 4) even-split node: validates the input and all
// outputs, then records the node with the create/reshape/setup callbacks for
// the requested fan-out.
//
// Bug fix: the statuses returned by check_output_value() and
// check_output_compute_type() were previously computed and ignored, so an
// invalid output (rank/datatype/quantization mismatch) could be silently
// accepted; any failed check now aborts the definition. Also removed a stray
// semicolon after the function's closing brace.
enum xnn_status xnn_define_even_split_n(
  enum xnn_node_type node_type,
  xnn_subgraph_t subgraph,
  size_t split_dim,
  uint32_t input_id,
  size_t num_outputs,
  const uint32_t* output_ids,
  uint32_t flags)
{
  assert(num_outputs > 1);
  assert(num_outputs < 5);

  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(node_type)) != xnn_status_success) {
    return status;
  }
  if ((status = xnn_subgraph_check_input_node_id(node_type, input_id, subgraph->num_values)) != xnn_status_success) {
    return status;
  }

  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(node_type, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }

  // Ordinal names used only in diagnostic messages.
  static const char* const ordinals[4] = { "first", "second", "third", "fourth" };

  // Validate every output's shape and datatype against the input.
  for (size_t i = 0; i < num_outputs; i++) {
    status = check_output_value(subgraph, split_dim, input_id, output_ids[i], ordinals[i], node_type);
    if (status != xnn_status_success) {
      return status;
    }
  }

  // Check that the split dimension exists and can be evenly split into outputs.
  if (split_dim >= input_value->shape.num_dims) {
    xnn_log_error(
      "failed to define %s operator with the input ID #%" PRIu32
      ": split dimension (%zu) exceeds the number of dimensions (%zu)",
      xnn_node_type_to_string(node_type), input_id, split_dim, input_value->shape.num_dims);
    return xnn_status_invalid_parameter;
  }
  if (input_value->shape.dim[split_dim] % num_outputs != 0) {
    xnn_log_error(
      "failed to define %s operator with the input ID #%" PRIu32
      ": split dimension %zu has value %zu which cannot be evenly split into %zu",
      xnn_node_type_to_string(node_type), input_id, split_dim, input_value->shape.dim[split_dim], num_outputs);
    return xnn_status_invalid_parameter;
  }

  // Check that the split dimensions of the outputs add up to the input's.
  size_t output_dimensions_sum = 0;
  for (size_t i = 0; i < num_outputs; i++) {
    const struct xnn_value* output_value = &subgraph->values[output_ids[i]];
    output_dimensions_sum += output_value->shape.dim[split_dim];
  }
  if (output_dimensions_sum != input_value->shape.dim[split_dim]) {
    xnn_log_error(
      "failed to define %s operator with the input ID #%" PRIu32
      ": input split dimension value (%zu) does not match the sum of output split dimensions value %zu",
      xnn_node_type_to_string(node_type), input_id, input_value->shape.dim[split_dim], output_dimensions_sum);
    return xnn_status_invalid_parameter;
  }

  enum xnn_compute_type compute_type = xnn_compute_type_invalid;
  switch (input_value->datatype) {
    case xnn_datatype_fp16:
      compute_type = xnn_compute_type_fp16;
      break;
    case xnn_datatype_fp32:
      compute_type = xnn_compute_type_fp32;
      break;
    case xnn_datatype_qint8:
      compute_type = xnn_compute_type_qs8;
      break;
    case xnn_datatype_quint8:
      compute_type = xnn_compute_type_qu8;
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(node_type), input_id, xnn_datatype_to_string(input_value->datatype),
        input_value->datatype);
      return xnn_status_invalid_parameter;
  }

  // Quantized splits additionally require every output to share the input's
  // quantization parameters.
  if (compute_type == xnn_compute_type_qs8 || compute_type == xnn_compute_type_qu8) {
    for (size_t i = 0; i < num_outputs; i++) {
      status = check_output_compute_type(subgraph, input_id, output_ids[i], ordinals[i], node_type);
      if (status != xnn_status_success) {
        return status;
      }
    }
  }

  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }

  node->params.even_split.axis = split_dim;
  node->type = node_type;
  node->compute_type = compute_type;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = num_outputs;
  node->outputs[0] = output_ids[0];
  node->outputs[1] = output_ids[1];
  switch (num_outputs) {
    case 2:
      node->create = create_even_split2_operator;
      node->reshape = reshape_even_split2_operator;
      node->setup = setup_even_split2_operator;
      break;
    case 3:
      node->outputs[2] = output_ids[2];
      node->create = create_even_split3_operator;
      node->reshape = reshape_even_split3_operator;
      node->setup = setup_even_split3_operator;
      break;
    case 4:
      node->outputs[2] = output_ids[2];
      node->outputs[3] = output_ids[3];
      node->create = create_even_split4_operator;
      node->reshape = reshape_even_split4_operator;
      node->setup = setup_even_split4_operator;
      break;
    default:
      XNN_UNREACHABLE;
  }
  node->flags = flags;
  return xnn_status_success;
}
// Public entry point: defines a 2-way even split by delegating to the
// generic N-way helper with a stack array of output IDs.
enum xnn_status xnn_define_even_split2(
  xnn_subgraph_t subgraph,
  size_t split_dim,
  uint32_t input_id,
  uint32_t output1_id,
  uint32_t output2_id,
  uint32_t flags)
{
  const uint32_t outputs[] = { output1_id, output2_id };
  return xnn_define_even_split_n(
    xnn_node_type_even_split2, subgraph, split_dim, input_id, XNN_COUNT_OF(outputs), outputs, flags);
}
// Public entry point: defines a 3-way even split by delegating to the
// generic N-way helper with a stack array of output IDs.
enum xnn_status xnn_define_even_split3(
  xnn_subgraph_t subgraph,
  size_t split_dim,
  uint32_t input_id,
  uint32_t output1_id,
  uint32_t output2_id,
  uint32_t output3_id,
  uint32_t flags)
{
  const uint32_t outputs[] = { output1_id, output2_id, output3_id };
  return xnn_define_even_split_n(
    xnn_node_type_even_split3, subgraph, split_dim, input_id, XNN_COUNT_OF(outputs), outputs, flags);
}
// Public entry point: defines a 4-way even split by delegating to the
// generic N-way helper with a stack array of output IDs.
enum xnn_status xnn_define_even_split4(
  xnn_subgraph_t subgraph,
  size_t split_dim,
  uint32_t input_id,
  uint32_t output1_id,
  uint32_t output2_id,
  uint32_t output3_id,
  uint32_t output4_id,
  uint32_t flags)
{
  const uint32_t outputs[] = { output1_id, output2_id, output3_id, output4_id };
  return xnn_define_even_split_n(
    xnn_node_type_even_split4, subgraph, split_dim, input_id, XNN_COUNT_OF(outputs), outputs, flags);
}
| 24,490
| 32.50342
| 117
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/floor.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Creates the floor operator for a node. Channels are taken from the input's
// last dimension (1 for rank-0 inputs); all other dimensions are folded into
// the batch size, which is cached on success for later reshape calls.
static enum xnn_status create_floor_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  assert(node->num_outputs == 1);
  const size_t num_input_dims = values[input_id].shape.num_dims;
  const size_t channel_dim = num_input_dims == 0 ? 1 : values[input_id].shape.dim[num_input_dims - 1];
  enum xnn_status status;
  switch (node->compute_type) {
    case xnn_compute_type_fp32:
      status = xnn_create_floor_nc_f32(
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    case xnn_compute_type_fp16:
      status = xnn_create_floor_nc_f16(
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    default:
      XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    opdata->batch_size = xnn_shape_multiply_non_channel_dims(&values[input_id].shape);
  }
  return status;
}
// Reshapes the node's floor operator with the cached batch size.
// Bug fix: the f16 case previously dispatched to xnn_reshape_floor_nc_f32,
// passing an f16 operator to the f32 reshape routine; it now calls the
// matching f16 variant (mirroring the f32/f16 pairing used at creation).
static enum xnn_status reshape_floor_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_floor_nc_f32:
      return xnn_reshape_floor_nc_f32(
        opdata->operator_objects[0],
        opdata->batch_size,
        threadpool);
    case xnn_operator_type_floor_nc_f16:
      return xnn_reshape_floor_nc_f16(
        opdata->operator_objects[0],
        opdata->batch_size,
        threadpool);
    default:
      XNN_UNREACHABLE;
  }
}
// Binds the node's input and output buffers to its floor operator.
// Bug fix: the f16 case previously dispatched to xnn_setup_floor_nc_f32,
// passing an f16 operator to the f32 setup routine; it now calls the
// matching f16 variant (mirroring the f32/f16 pairing used at creation).
static enum xnn_status setup_floor_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  const struct xnn_value* input_value = values + input_id;
  const void* input_data = input_value->data;
  assert(input_data != NULL);
  const struct xnn_value* output_value = values + output_id;
  void* output_data = output_value->data;
  assert(output_data != NULL);
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_floor_nc_f32:
      return xnn_setup_floor_nc_f32(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_floor_nc_f16:
      return xnn_setup_floor_nc_f16(
        opdata->operator_objects[0],
        input_data,
        output_data);
    default:
      XNN_UNREACHABLE;
  }
}
// Defines a floor node: validates that both the input and output are dense
// fp32 values with matching shapes, then records the node with the floor
// create/reshape/setup callbacks.
// NOTE(review): only xnn_datatype_fp32 is accepted here, and the compute type
// is hard-wired to fp32, even though create_floor_operator can handle fp16 —
// presumably fp16 nodes are produced elsewhere (e.g. by graph rewriting);
// confirm against the rest of the file.
enum xnn_status xnn_define_floor(
  xnn_subgraph_t subgraph,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_floor)) != xnn_status_success) {
    return status;
  }
  if ((status = xnn_subgraph_check_input_node_id(xnn_node_type_floor, input_id, subgraph->num_values)) !=
      xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_floor, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  switch (input_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_floor), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(xnn_node_type_floor, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_floor, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  // Floor is elementwise: the output shape must match the input exactly.
  status = xnn_subgraph_check_all_dims_match(xnn_node_type_floor, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_floor), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->type = xnn_node_type_floor;
  node->compute_type = xnn_compute_type_fp32;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_floor_operator;
  node->reshape = reshape_floor_operator;
  node->setup = setup_floor_operator;
  return xnn_status_success;
}
| 5,990
| 29.105528
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/global-average-pooling.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/requantization.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Creates the global-average-pooling operator for a node. The NCW path is used
// for NCHW-layout inputs (fp32/fp16 only); otherwise the NWC path is used,
// which also supports qs8/qu8 with clamp bounds quantized from the node's
// float activation range. On success, caches the batch size and the pooled
// width (last spatial dim for 1D, product of the two spatial dims for 2D).
static enum xnn_status create_global_average_pooling_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  assert(node->num_outputs == 1);
  const uint32_t output_id = node->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  const size_t num_input_dims = values[input_id].shape.num_dims;
  assert(num_input_dims >= 1);
  // Channels come from the input's last dimension.
  const size_t channel_dim = values[input_id].shape.dim[num_input_dims - 1];
  enum xnn_status status;
  if (values[node->inputs[0]].layout == xnn_layout_type_nchw) {
    // NCHW layout: only floating-point NCW kernels are available.
    assert(node->compute_type == xnn_compute_type_fp32 || node->compute_type == xnn_compute_type_fp16);
    switch (node->compute_type) {
      case xnn_compute_type_fp32:
        status = xnn_create_global_average_pooling_ncw_f32(
          channel_dim /* channels */,
          node->activation.output_min,
          node->activation.output_max,
          node->flags,
          &opdata->operator_objects[0]);
        break;
      case xnn_compute_type_fp16:
        status = xnn_create_global_average_pooling_ncw_f16(
          channel_dim /* channels */,
          node->activation.output_min,
          node->activation.output_max,
          node->flags,
          &opdata->operator_objects[0]);
        break;
      default:
        XNN_UNREACHABLE;
    }
  } else {
    assert(values[node->inputs[0]].layout == xnn_layout_type_nhwc);
    assert(values[node->outputs[0]].layout == xnn_layout_type_nhwc);
    switch (node->compute_type) {
      case xnn_compute_type_fp32:
        status = xnn_create_global_average_pooling_nwc_f32(
          channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
          node->activation.output_min,
          node->activation.output_max,
          node->flags,
          &opdata->operator_objects[0]);
        break;
      case xnn_compute_type_fp16:
        status = xnn_create_global_average_pooling_nwc_f16(
          channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
          node->activation.output_min,
          node->activation.output_max,
          node->flags,
          &opdata->operator_objects[0]);
        break;
      case xnn_compute_type_qs8:
      {
        // Quantize the float activation bounds into the output's int8 domain.
        const float output_scale = values[output_id].quantization.scale;
        const int32_t output_zero_point = values[output_id].quantization.zero_point;
        const int8_t output_min = xnn_qs8_quantize(node->activation.output_min, output_scale, output_zero_point);
        const int8_t output_max = xnn_qs8_quantize(node->activation.output_max, output_scale, output_zero_point);
        status = xnn_create_global_average_pooling_nwc_qs8(
          channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
          (int8_t) values[input_id].quantization.zero_point, values[input_id].quantization.scale,
          (int8_t) values[output_id].quantization.zero_point, values[output_id].quantization.scale,
          output_min,
          output_max,
          node->flags,
          &opdata->operator_objects[0]);
        break;
      }
      case xnn_compute_type_qu8:
      {
        // Quantize the float activation bounds into the output's uint8 domain.
        const float output_scale = values[output_id].quantization.scale;
        const int32_t output_zero_point = values[output_id].quantization.zero_point;
        const uint8_t output_min = xnn_qu8_quantize(node->activation.output_min, output_scale, output_zero_point);
        const uint8_t output_max = xnn_qu8_quantize(node->activation.output_max, output_scale, output_zero_point);
        status = xnn_create_global_average_pooling_nwc_qu8(
          channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
          (uint8_t) values[input_id].quantization.zero_point, values[input_id].quantization.scale,
          (uint8_t) values[output_id].quantization.zero_point, values[output_id].quantization.scale,
          output_min,
          output_max,
          node->flags,
          &opdata->operator_objects[0]);
        break;
      }
      default:
        XNN_UNREACHABLE;
    }
  }
  if (status == xnn_status_success) {
    // Cache batch size and pooled width for reshape: 1D pools one trailing
    // spatial dim, 2D pools the product of the two trailing spatial dims.
    switch (node->type) {
      case xnn_node_type_global_average_pooling_1d:
        opdata->batch_size = xnn_shape_multiply_batch_dims(&values[input_id].shape, 2);
        opdata->input_width = values[input_id].shape.dim[num_input_dims - 2];
        break;
      case xnn_node_type_global_average_pooling_2d:
        opdata->batch_size = xnn_shape_multiply_batch_dims(&values[input_id].shape, 3);
        opdata->input_width = values[input_id].shape.dim[num_input_dims - 3] * values[input_id].shape.dim[num_input_dims - 2];
        break;
      default:
        XNN_UNREACHABLE;
    }
  }
  return status;
}
// Reshapes the node's global-average-pooling operator, dispatching on the
// concrete operator type created for this node and forwarding the cached
// batch size and pooled width.
static enum xnn_status reshape_global_average_pooling_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  xnn_operator_t op = opdata->operator_objects[0];
  const size_t batch_size = opdata->batch_size;
  const size_t input_width = opdata->input_width;
  switch (op->type) {
    case xnn_operator_type_global_average_pooling_ncw_f32:
      return xnn_reshape_global_average_pooling_ncw_f32(op, batch_size, input_width, threadpool);
    case xnn_operator_type_global_average_pooling_ncw_f16:
      return xnn_reshape_global_average_pooling_ncw_f16(op, batch_size, input_width, threadpool);
    case xnn_operator_type_global_average_pooling_nwc_f32:
      return xnn_reshape_global_average_pooling_nwc_f32(op, batch_size, input_width, threadpool);
    case xnn_operator_type_global_average_pooling_nwc_f16:
      return xnn_reshape_global_average_pooling_nwc_f16(op, batch_size, input_width, threadpool);
    case xnn_operator_type_global_average_pooling_nwc_qs8:
      return xnn_reshape_global_average_pooling_nwc_qs8(op, batch_size, input_width, threadpool);
    case xnn_operator_type_global_average_pooling_nwc_qu8:
      return xnn_reshape_global_average_pooling_nwc_qu8(op, batch_size, input_width, threadpool);
    default:
      XNN_UNREACHABLE;
  }
}
// Binds the node's resolved input/output buffers to its global-average-pooling
// operator, dispatching on the concrete operator type created for this node.
static enum xnn_status setup_global_average_pooling_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);

  const void* input_data = values[input_id].data;
  assert(input_data != NULL);
  void* output_data = values[output_id].data;
  assert(output_data != NULL);

  xnn_operator_t op = opdata->operator_objects[0];
  switch (op->type) {
    case xnn_operator_type_global_average_pooling_ncw_f32:
      return xnn_setup_global_average_pooling_ncw_f32(op, input_data, output_data);
    case xnn_operator_type_global_average_pooling_ncw_f16:
      return xnn_setup_global_average_pooling_ncw_f16(op, input_data, output_data);
    case xnn_operator_type_global_average_pooling_nwc_f32:
      return xnn_setup_global_average_pooling_nwc_f32(op, input_data, output_data);
    case xnn_operator_type_global_average_pooling_nwc_f16:
      return xnn_setup_global_average_pooling_nwc_f16(op, input_data, output_data);
    case xnn_operator_type_global_average_pooling_nwc_qs8:
      return xnn_setup_global_average_pooling_nwc_qs8(op, input_data, output_data);
    case xnn_operator_type_global_average_pooling_nwc_qu8:
      return xnn_setup_global_average_pooling_nwc_qu8(op, input_data, output_data);
    default:
      XNN_UNREACHABLE;
  }
}
// Shared implementation behind xnn_define_global_average_pooling_{1d,2d}.
//
// Validates the subgraph state, the [output_min, output_max] clamp range, and
// the input/output value IDs and datatypes, then appends a node wired to the
// create/reshape/setup callbacks defined above.
//
// Returns xnn_status_success on success, the first failing validation status,
// or xnn_status_out_of_memory if the node cannot be allocated.
static enum xnn_status define_global_average_pooling_nd(
  xnn_subgraph_t subgraph,
  enum xnn_node_type node_type,
  float output_min,
  float output_max,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(node_type)) != xnn_status_success) {
    return status;
  }
  status = xnn_subgraph_check_output_min_max(node_type, output_min, output_max);
  if (status != xnn_status_success) {
    return status;
  }
  status = xnn_subgraph_check_input_node_id(node_type, input_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(node_type, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  // Only fp32 and 8-bit quantized inputs are accepted by this node.
  switch (input_value->datatype) {
    case xnn_datatype_fp32:
    case xnn_datatype_qint8:
    case xnn_datatype_quint8:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(node_type), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(node_type, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(node_type, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  // The node's compute type is derived from the OUTPUT datatype; the
  // datatype-match check below guarantees it agrees with the input.
  enum xnn_compute_type compute_type = xnn_compute_type_invalid;
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      compute_type = xnn_compute_type_fp32;
      break;
    case xnn_datatype_qint8:
      compute_type = xnn_compute_type_qs8;
      break;
    case xnn_datatype_quint8:
      compute_type = xnn_compute_type_qu8;
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(node_type), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_datatype_matches(
    node_type, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  // Populate the node; actual operator creation is deferred to node->create.
  node->type = node_type;
  node->compute_type = compute_type;
  node->activation.output_min = output_min;
  node->activation.output_max = output_max;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_global_average_pooling_operator;
  node->reshape = reshape_global_average_pooling_operator;
  node->setup = setup_global_average_pooling_operator;
  return xnn_status_success;
}
// Public API: define a 1D global-average-pooling node in the subgraph.
// Thin wrapper that forwards to the shared N-dimensional implementation.
enum xnn_status xnn_define_global_average_pooling_1d(
  xnn_subgraph_t subgraph,
  float output_min,
  float output_max,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  const enum xnn_node_type node_type = xnn_node_type_global_average_pooling_1d;
  return define_global_average_pooling_nd(
    subgraph, node_type, output_min, output_max, input_id, output_id, flags);
}
// Public API: define a 2D global-average-pooling node in the subgraph.
// Thin wrapper that forwards to the shared N-dimensional implementation.
enum xnn_status xnn_define_global_average_pooling_2d(
  xnn_subgraph_t subgraph,
  float output_min,
  float output_max,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  const enum xnn_node_type node_type = xnn_node_type_global_average_pooling_2d;
  return define_global_average_pooling_nd(
    subgraph, node_type, output_min, output_max, input_id, output_id, flags);
}
| 13,108
| 33.497368
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/global-sum-pooling.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/requantization.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Node `create` callback: instantiates the underlying global-sum-pooling
// operator object for this node.
//
// The channel count is taken from the last input dimension; both NHWC layouts
// (asserted below) use it for channels and for the input/output strides.
// On success, also caches the batch size and the flattened spatial width that
// the reshape callback will later forward to the operator.
static enum xnn_status create_global_sum_pooling_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  assert(node->num_outputs == 1);
  const size_t num_input_dims = values[input_id].shape.num_dims;
  assert(num_input_dims >= 1);
  // Channels = innermost (last) dimension of the input shape.
  const size_t channel_dim = values[input_id].shape.dim[num_input_dims - 1];
  enum xnn_status status;
  assert(values[node->inputs[0]].layout == xnn_layout_type_nhwc);
  assert(values[node->outputs[0]].layout == xnn_layout_type_nhwc);
  switch (node->compute_type) {
    case xnn_compute_type_fp32:
      status = xnn_create_global_sum_pooling_nwc_f32(
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->activation.output_min,
        node->activation.output_max,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    case xnn_compute_type_fp16:
      status = xnn_create_global_sum_pooling_nwc_f16(
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->activation.output_min,
        node->activation.output_max,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    default:
      XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    switch (node->type) {
      case xnn_node_type_global_sum_pooling_1d:
        // 1D: batch collapses all dims except the trailing (width, channels);
        // the reduced "width" is the second-to-last dimension.
        opdata->batch_size = xnn_shape_multiply_batch_dims(&values[input_id].shape, 2);
        opdata->input_width = values[input_id].shape.dim[num_input_dims - 2];
        break;
      case xnn_node_type_global_sum_pooling_2d:
        // 2D: batch collapses all dims except (height, width, channels);
        // height and width are flattened into a single reduction width.
        opdata->batch_size = xnn_shape_multiply_batch_dims(&values[input_id].shape, 3);
        opdata->input_width = values[input_id].shape.dim[num_input_dims - 3] * values[input_id].shape.dim[num_input_dims - 2];
        break;
      default:
        XNN_UNREACHABLE;
    }
  }
  return status;
}
// Reshapes a previously created global-sum-pooling operator by dispatching on
// the concrete operator type, forwarding the batch size and input width
// captured at create time. `values`/`num_values` are unused but required by
// the common node-callback signature.
//
// Fix: removed the unreachable `break;` statements that followed each
// `return` (dead code; CERT MSC12-C).
static enum xnn_status reshape_global_sum_pooling_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_global_sum_pooling_nwc_f32:
      return xnn_reshape_global_sum_pooling_nwc_f32(
        opdata->operator_objects[0],
        opdata->batch_size,
        opdata->input_width,
        threadpool);
    case xnn_operator_type_global_sum_pooling_nwc_f16:
      return xnn_reshape_global_sum_pooling_nwc_f16(
        opdata->operator_objects[0],
        opdata->batch_size,
        opdata->input_width,
        threadpool);
    default:
      XNN_UNREACHABLE;
  }
}
// Binds input/output tensor data pointers to a previously created
// global-sum-pooling operator, after asserting that both buffers were
// allocated.
//
// Fix: removed the unreachable `break;` statements that followed each
// `return` (dead code; CERT MSC12-C).
static enum xnn_status setup_global_sum_pooling_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  const struct xnn_value* input_value = values + input_id;
  const void* input_data = input_value->data;
  assert(input_data != NULL);
  const struct xnn_value* output_value = values + output_id;
  void* output_data = output_value->data;
  assert(output_data != NULL);
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_global_sum_pooling_nwc_f32:
      return xnn_setup_global_sum_pooling_nwc_f32(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_global_sum_pooling_nwc_f16:
      return xnn_setup_global_sum_pooling_nwc_f16(
        opdata->operator_objects[0],
        input_data,
        output_data);
    default:
      XNN_UNREACHABLE;
  }
}
// Shared implementation behind xnn_define_global_sum_pooling_{1d,2d}.
//
// Validates the subgraph state, the [output_min, output_max] clamp range, and
// the input/output value IDs and datatypes (fp32 only), then appends a node
// wired to the create/reshape/setup callbacks defined above.
//
// Returns xnn_status_success on success, the first failing validation status,
// or xnn_status_out_of_memory if the node cannot be allocated.
static enum xnn_status define_global_sum_pooling_nd(
  xnn_subgraph_t subgraph,
  enum xnn_node_type node_type,
  float output_min,
  float output_max,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(node_type)) != xnn_status_success) {
    return status;
  }
  status = xnn_subgraph_check_output_min_max(node_type, output_min, output_max);
  if (status != xnn_status_success) {
    return status;
  }
  status = xnn_subgraph_check_input_node_id(node_type, input_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(node_type, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  // Unlike average pooling, sum pooling only accepts fp32 input at define
  // time (the fp16 compute path is selected later by graph rewriting).
  switch (input_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(node_type), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(node_type, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(node_type, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  enum xnn_compute_type compute_type = xnn_compute_type_invalid;
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      compute_type = xnn_compute_type_fp32;
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(node_type), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_datatype_matches(
    node_type, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  // Populate the node; actual operator creation is deferred to node->create.
  node->type = node_type;
  node->compute_type = compute_type;
  node->activation.output_min = output_min;
  node->activation.output_max = output_max;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_global_sum_pooling_operator;
  node->reshape = reshape_global_sum_pooling_operator;
  node->setup = setup_global_sum_pooling_operator;
  return xnn_status_success;
}
// Public API: define a 1D global-sum-pooling node in the subgraph.
// Thin wrapper that forwards to the shared N-dimensional implementation.
enum xnn_status xnn_define_global_sum_pooling_1d(
  xnn_subgraph_t subgraph,
  float output_min,
  float output_max,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  const enum xnn_node_type node_type = xnn_node_type_global_sum_pooling_1d;
  return define_global_sum_pooling_nd(
    subgraph, node_type, output_min, output_max, input_id, output_id, flags);
}
// Public API: define a 2D global-sum-pooling node in the subgraph.
// Thin wrapper that forwards to the shared N-dimensional implementation.
enum xnn_status xnn_define_global_sum_pooling_2d(
  xnn_subgraph_t subgraph,
  float output_min,
  float output_max,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  const enum xnn_node_type node_type = xnn_node_type_global_sum_pooling_2d;
  return define_global_sum_pooling_nd(
    subgraph, node_type, output_min, output_max, input_id, output_id, flags);
}
| 8,145
| 30.210728
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/hardswish.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Node `create` callback: instantiates the underlying HardSwish operator
// object for this node.
//
// The channel count is the innermost input dimension (1 for a rank-0 input)
// and doubles as the input/output strides. On success, caches the batch size
// (product of all non-channel dims) for the reshape callback.
static enum xnn_status create_hardswish_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  assert(node->num_outputs == 1);
  const size_t num_input_dims = values[input_id].shape.num_dims;
  // Rank-0 (scalar) input is treated as a single channel.
  const size_t channel_dim = num_input_dims == 0 ? 1 : values[input_id].shape.dim[num_input_dims - 1];
  enum xnn_status status;
  switch (node->compute_type) {
    case xnn_compute_type_fp32:
      status = xnn_create_hardswish_nc_f32(
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    case xnn_compute_type_fp16:
      status = xnn_create_hardswish_nc_f16(
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    default:
      XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    opdata->batch_size = xnn_shape_multiply_non_channel_dims(&values[input_id].shape);
  }
  return status;
}
// Reshapes a previously created HardSwish operator, dispatching on the
// concrete operator type and forwarding the cached batch size.
// `values`/`num_values` are unused but required by the node-callback
// signature.
static enum xnn_status reshape_hardswish_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  xnn_operator_t op = opdata->operator_objects[0];
  if (op->type == xnn_operator_type_hardswish_nc_f32) {
    return xnn_reshape_hardswish_nc_f32(op, opdata->batch_size, threadpool);
  }
  if (op->type == xnn_operator_type_hardswish_nc_f16) {
    return xnn_reshape_hardswish_nc_f16(op, opdata->batch_size, threadpool);
  }
  XNN_UNREACHABLE;
}
// Binds input/output tensor data pointers to a previously created HardSwish
// operator, after asserting both backing buffers were allocated.
static enum xnn_status setup_hardswish_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  const void* input_data = values[input_id].data;
  assert(input_data != NULL);
  void* output_data = values[output_id].data;
  assert(output_data != NULL);
  xnn_operator_t op = opdata->operator_objects[0];
  if (op->type == xnn_operator_type_hardswish_nc_f32) {
    return xnn_setup_hardswish_nc_f32(op, input_data, output_data);
  }
  if (op->type == xnn_operator_type_hardswish_nc_f16) {
    return xnn_setup_hardswish_nc_f16(op, input_data, output_data);
  }
  XNN_UNREACHABLE;
}
// Public API: define a HardSwish node in the subgraph.
//
// Validates the input/output value IDs, datatypes (fp32 only at define time)
// and shape agreement, then appends a node wired to the create/reshape/setup
// callbacks above. The compute type is hard-coded to fp32 here; the fp16
// operator path exists in the callbacks but is not selected by this function.
//
// Returns xnn_status_success on success, the first failing validation status,
// or xnn_status_out_of_memory if the node cannot be allocated.
enum xnn_status xnn_define_hardswish(
  xnn_subgraph_t subgraph,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_hardswish)) != xnn_status_success) {
    return status;
  }
  if ((status = xnn_subgraph_check_input_node_id(xnn_node_type_hardswish, input_id, subgraph->num_values)) !=
      xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_hardswish, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  switch (input_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_hardswish), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(xnn_node_type_hardswish, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_hardswish, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  // HardSwish is elementwise: output shape must match the input shape.
  status = xnn_subgraph_check_all_dims_match(xnn_node_type_hardswish, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_hardswish), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->type = xnn_node_type_hardswish;
  node->compute_type = xnn_compute_type_fp32;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_hardswish_operator;
  node->reshape = reshape_hardswish_operator;
  node->setup = setup_hardswish_operator;
  return xnn_status_success;
}
| 6,094
| 29.628141
| 118
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/max-pooling-2d.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/requantization.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Node `create` callback: instantiates the underlying 2D max-pooling operator
// object for this node.
//
// The channel count is taken from the input's dim[3] (NHWC) and doubles as
// the input/output strides. For the quantized paths, the float activation
// bounds stored on the node are converted to quantized min/max using the
// OUTPUT tensor's scale and zero point. On success, caches the batch size and
// input height/width for the reshape callback.
static enum xnn_status create_max_pooling_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  assert(node->num_outputs == 1);
  const uint32_t output_id = node->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  // Max pooling never changes the channel count.
  const size_t channel_dim = values[input_id].shape.dim[3];
  assert(channel_dim == values[output_id].shape.dim[3]);
  enum xnn_status status;
  switch (node->compute_type) {
    case xnn_compute_type_fp16:
      status = xnn_create_max_pooling2d_nhwc_f16(
        node->params.pooling_2d.padding_top,
        node->params.pooling_2d.padding_right,
        node->params.pooling_2d.padding_bottom,
        node->params.pooling_2d.padding_left,
        node->params.pooling_2d.pooling_height,
        node->params.pooling_2d.pooling_width,
        node->params.pooling_2d.stride_height,
        node->params.pooling_2d.stride_width,
        node->params.pooling_2d.dilation_height,
        node->params.pooling_2d.dilation_width,
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->activation.output_min,
        node->activation.output_max,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    case xnn_compute_type_fp32:
      status = xnn_create_max_pooling2d_nhwc_f32(
        node->params.pooling_2d.padding_top,
        node->params.pooling_2d.padding_right,
        node->params.pooling_2d.padding_bottom,
        node->params.pooling_2d.padding_left,
        node->params.pooling_2d.pooling_height,
        node->params.pooling_2d.pooling_width,
        node->params.pooling_2d.stride_height,
        node->params.pooling_2d.stride_width,
        node->params.pooling_2d.dilation_height,
        node->params.pooling_2d.dilation_width,
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->activation.output_min,
        node->activation.output_max,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    case xnn_compute_type_qs8:
    {
      // Quantize the float clamp bounds into int8 using the output's
      // quantization parameters.
      const float output_scale = values[output_id].quantization.scale;
      const int32_t output_zero_point = values[output_id].quantization.zero_point;
      const int8_t output_min = xnn_qs8_quantize(node->activation.output_min, output_scale, output_zero_point);
      const int8_t output_max = xnn_qs8_quantize(node->activation.output_max, output_scale, output_zero_point);
      status = xnn_create_max_pooling2d_nhwc_s8(
        node->params.pooling_2d.padding_top,
        node->params.pooling_2d.padding_right,
        node->params.pooling_2d.padding_bottom,
        node->params.pooling_2d.padding_left,
        node->params.pooling_2d.pooling_height,
        node->params.pooling_2d.pooling_width,
        node->params.pooling_2d.stride_height,
        node->params.pooling_2d.stride_width,
        node->params.pooling_2d.dilation_height,
        node->params.pooling_2d.dilation_width,
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        output_min,
        output_max,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    }
    case xnn_compute_type_qu8:
    {
      // Quantize the float clamp bounds into uint8 using the output's
      // quantization parameters.
      const float output_scale = values[output_id].quantization.scale;
      const int32_t output_zero_point = values[output_id].quantization.zero_point;
      const uint8_t output_min = xnn_qu8_quantize(node->activation.output_min, output_scale, output_zero_point);
      const uint8_t output_max = xnn_qu8_quantize(node->activation.output_max, output_scale, output_zero_point);
      status = xnn_create_max_pooling2d_nhwc_u8(
        node->params.pooling_2d.padding_top,
        node->params.pooling_2d.padding_right,
        node->params.pooling_2d.padding_bottom,
        node->params.pooling_2d.padding_left,
        node->params.pooling_2d.pooling_height,
        node->params.pooling_2d.pooling_width,
        node->params.pooling_2d.stride_height,
        node->params.pooling_2d.stride_width,
        node->params.pooling_2d.dilation_height,
        node->params.pooling_2d.dilation_width,
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        output_min,
        output_max,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    }
    default:
      XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    // NHWC layout: dim[0] = batch, dim[1] = height, dim[2] = width.
    opdata->batch_size = values[input_id].shape.dim[0];
    opdata->input_height = values[input_id].shape.dim[1];
    opdata->input_width = values[input_id].shape.dim[2];
  }
  return status;
}
// Reshapes a previously created 2D max-pooling operator, dispatching on the
// concrete operator type and forwarding the cached batch size and input
// height/width. Output dimensions are not queried back (NULL out-params).
static enum xnn_status reshape_max_pooling_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  xnn_operator_t op = opdata->operator_objects[0];
  const size_t batch = opdata->batch_size;
  const size_t height = opdata->input_height;
  const size_t width = opdata->input_width;
  switch (op->type) {
    case xnn_operator_type_max_pooling_nhwc_f16:
      return xnn_reshape_max_pooling2d_nhwc_f16(
        op, batch, height, width,
        /*output_height_out=*/NULL, /*output_width_out=*/NULL, threadpool);
    case xnn_operator_type_max_pooling_nhwc_f32:
      return xnn_reshape_max_pooling2d_nhwc_f32(
        op, batch, height, width,
        /*output_height_out=*/NULL, /*output_width_out=*/NULL, threadpool);
    case xnn_operator_type_max_pooling_nhwc_s8:
      return xnn_reshape_max_pooling2d_nhwc_s8(
        op, batch, height, width,
        /*output_height_out=*/NULL, /*output_width_out=*/NULL, threadpool);
    case xnn_operator_type_max_pooling_nhwc_u8:
      return xnn_reshape_max_pooling2d_nhwc_u8(
        op, batch, height, width,
        /*output_height_out=*/NULL, /*output_width_out=*/NULL, threadpool);
    default:
      XNN_UNREACHABLE;
  }
}
// Binds input/output tensor data pointers to a previously created 2D
// max-pooling operator, after asserting both backing buffers were allocated.
static enum xnn_status setup_max_pooling_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  const void* input_data = values[input_id].data;
  assert(input_data != NULL);
  void* output_data = values[output_id].data;
  assert(output_data != NULL);
  xnn_operator_t op = opdata->operator_objects[0];
  switch (op->type) {
    case xnn_operator_type_max_pooling_nhwc_f16:
      return xnn_setup_max_pooling2d_nhwc_f16(op, input_data, output_data);
    case xnn_operator_type_max_pooling_nhwc_f32:
      return xnn_setup_max_pooling2d_nhwc_f32(op, input_data, output_data);
    case xnn_operator_type_max_pooling_nhwc_s8:
      return xnn_setup_max_pooling2d_nhwc_s8(op, input_data, output_data);
    case xnn_operator_type_max_pooling_nhwc_u8:
      return xnn_setup_max_pooling2d_nhwc_u8(op, input_data, output_data);
    default:
      XNN_UNREACHABLE;
  }
}
// Public API: define a 2D max-pooling node in the subgraph.
//
// Validates the pooling geometry (non-zero, non-1x1 pooling window; non-zero
// strides and dilations; stride not exceeding the pooling window), the clamp
// range, the padding/flags combination, and the input/output value IDs,
// datatypes, and quantization parameters, then appends a node wired to the
// create/reshape/setup callbacks above.
//
// Returns xnn_status_success on success, xnn_status_invalid_parameter for a
// rejected configuration, the first failing validation status, or
// xnn_status_out_of_memory if the node cannot be allocated.
enum xnn_status xnn_define_max_pooling_2d(
  xnn_subgraph_t subgraph,
  uint32_t input_padding_top,
  uint32_t input_padding_right,
  uint32_t input_padding_bottom,
  uint32_t input_padding_left,
  uint32_t pooling_height,
  uint32_t pooling_width,
  uint32_t stride_height,
  uint32_t stride_width,
  uint32_t dilation_height,
  uint32_t dilation_width,
  float output_min,
  float output_max,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_max_pooling_2d)) != xnn_status_success) {
    return status;
  }
  const uint32_t pooling_size = pooling_height * pooling_width;
  if (pooling_size == 0) {
    xnn_log_error(
      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " pooling size: "
      "pooling size dimensions must be non-zero",
      xnn_node_type_to_string(xnn_node_type_max_pooling_2d), pooling_width, pooling_height);
    return xnn_status_invalid_parameter;
  }
  // A 1x1 max-pooling window would be an identity operation.
  if (pooling_size == 1) {
    xnn_log_error(
      "failed to define %s operator with 1 pooling element: 1x1 pooling is meaningless",
      xnn_node_type_to_string(xnn_node_type_max_pooling_2d));
    return xnn_status_invalid_parameter;
  }
  if (stride_height == 0 || stride_width == 0) {
    xnn_log_error(
      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " stride: stride dimensions must be non-zero",
      xnn_node_type_to_string(xnn_node_type_max_pooling_2d), stride_width, stride_height);
    return xnn_status_invalid_parameter;
  }
  if (dilation_height == 0 || dilation_width == 0) {
    xnn_log_error(
      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " dilation: dilation dimensions must be non-zero",
      xnn_node_type_to_string(xnn_node_type_max_pooling_2d), dilation_width, dilation_height);
    return xnn_status_invalid_parameter;
  }
  // NOTE(review): the check permits stride == pooling size, but the error
  // message says "must be less than" — message wording vs. condition to be
  // confirmed upstream.
  if (stride_height > pooling_height) {
    xnn_log_error(
      "failed to define %s operator with %" PRIu32 " stride height: must be less than pooling height %" PRIu32,
      xnn_node_type_to_string(xnn_node_type_max_pooling_2d), stride_height, pooling_height);
    return xnn_status_invalid_parameter;
  }
  if (stride_width > pooling_width) {
    xnn_log_error(
      "failed to define %s operator with %" PRIu32 " stride width: must be less than pooling width %" PRIu32,
      xnn_node_type_to_string(xnn_node_type_max_pooling_2d), stride_width, pooling_width);
    return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_min_max(xnn_node_type_max_pooling_2d, output_min, output_max);
  if (status != xnn_status_success) {
    return status;
  }
  // TF-SAME padding is computed implicitly and is mutually exclusive with
  // explicit padding values.
  const bool any_padding = (input_padding_left | input_padding_top | input_padding_right | input_padding_bottom) != 0;
  if ((flags & XNN_FLAG_TENSORFLOW_SAME_PADDING) != 0) {
    if (any_padding) {
      xnn_log_error(
        "failed to define %s operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
        "TensorFlow SAME padding can't be combined with explicit padding specification",
        xnn_node_type_to_string(xnn_node_type_max_pooling_2d),
        input_padding_top, input_padding_left, input_padding_bottom, input_padding_right);
      return xnn_status_invalid_parameter;
    }
  }
  if ((status = xnn_subgraph_check_input_node_id(xnn_node_type_max_pooling_2d, input_id, subgraph->num_values)) !=
      xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_max_pooling_2d, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  switch (input_value->datatype) {
    case xnn_datatype_fp32:
    case xnn_datatype_qint8:
    case xnn_datatype_quint8:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_max_pooling_2d), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(xnn_node_type_max_pooling_2d, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_max_pooling_2d, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  // The node's compute type is derived from the OUTPUT datatype; the
  // datatype-match check below guarantees it agrees with the input.
  enum xnn_compute_type compute_type = xnn_compute_type_invalid;
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      compute_type = xnn_compute_type_fp32;
      break;
    case xnn_datatype_qint8:
      compute_type = xnn_compute_type_qs8;
      break;
    case xnn_datatype_quint8:
      compute_type = xnn_compute_type_qu8;
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_max_pooling_2d), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_datatype_matches(
    xnn_node_type_max_pooling_2d, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  // Max pooling passes values through unchanged, so quantization parameters
  // must match between input and output.
  status = xnn_subgraph_check_quantization_parameter_matches(
    xnn_node_type_max_pooling_2d, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  // Populate the node; actual operator creation is deferred to node->create.
  node->type = xnn_node_type_max_pooling_2d;
  node->compute_type = compute_type;
  node->params.pooling_2d.padding_top = input_padding_top;
  node->params.pooling_2d.padding_right = input_padding_right;
  node->params.pooling_2d.padding_bottom = input_padding_bottom;
  node->params.pooling_2d.padding_left = input_padding_left;
  node->params.pooling_2d.pooling_height = pooling_height;
  node->params.pooling_2d.pooling_width = pooling_width;
  node->params.pooling_2d.stride_height = stride_height;
  node->params.pooling_2d.stride_width = stride_width;
  node->params.pooling_2d.dilation_height = dilation_height;
  node->params.pooling_2d.dilation_width = dilation_width;
  node->activation.output_min = output_min;
  node->activation.output_max = output_max;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_max_pooling_operator;
  node->reshape = reshape_max_pooling_operator;
  node->setup = setup_max_pooling_operator;
  return xnn_status_success;
}
| 15,217
| 36.026764
| 118
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/maximum2.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Creates the datatype-specific maximum operator for a maximum2 node and
// records the broadcast shapes of both inputs in opdata.
//
// For NCHW-laid-out outputs the stored dims are permuted: the trailing
// channel dimension of the Value shape is moved to position 1 so the
// operator sees an NCHW shape.
//
// Returns the status of the underlying xnn_create_maximum_nd_* call.
static enum xnn_status create_maximum_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 2);
  const uint32_t input1_id = node->inputs[0];
  assert(input1_id != XNN_INVALID_VALUE_ID);
  assert(input1_id < num_values);
  const uint32_t input2_id = node->inputs[1];
  assert(input2_id != XNN_INVALID_VALUE_ID);
  assert(input2_id < num_values);
  assert(node->num_outputs == 1);
  const uint32_t output_id = node->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  enum xnn_status status;
  switch (node->compute_type) {
    case xnn_compute_type_fp16:
      status = xnn_create_maximum_nd_f16(
        node->flags,
        &opdata->operator_objects[0]);
      break;
    case xnn_compute_type_fp32:
      status = xnn_create_maximum_nd_f32(
        node->flags,
        &opdata->operator_objects[0]);
      break;
    default:
      XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    opdata->shape1.num_dims = values[input1_id].shape.num_dims;
    opdata->shape2.num_dims = values[input2_id].shape.num_dims;
    if (values[output_id].layout == xnn_layout_type_nchw) {
      assert(values[input1_id].layout == xnn_layout_type_nchw);
      assert(values[input2_id].layout == xnn_layout_type_nchw);
      // Move the trailing channel dim to position 1 (NHWC shape -> NCHW shape).
      opdata->shape1.dim[0] = values[input1_id].shape.dim[0];
      opdata->shape1.dim[1] = values[input1_id].shape.dim[values[input1_id].shape.num_dims - 1];
      if (values[input1_id].shape.num_dims > 2) {
        memcpy(&opdata->shape1.dim[2], &values[input1_id].shape.dim[1], (values[input1_id].shape.num_dims - 2) * sizeof(size_t));
      }
      opdata->shape2.dim[0] = values[input2_id].shape.dim[0];
      opdata->shape2.dim[1] = values[input2_id].shape.dim[values[input2_id].shape.num_dims - 1];
      // Bug fix: guard on input2's rank here. The original tested input1's
      // rank, which skipped (or over-read) input2's inner dims whenever the
      // two inputs have different ranks (broadcasting).
      if (values[input2_id].shape.num_dims > 2) {
        memcpy(&opdata->shape2.dim[2], &values[input2_id].shape.dim[1], (values[input2_id].shape.num_dims - 2) * sizeof(size_t));
      }
    } else {
      assert(values[output_id].layout == xnn_layout_type_nhwc);
      assert(values[input1_id].layout == xnn_layout_type_nhwc);
      assert(values[input2_id].layout == xnn_layout_type_nhwc);
      memcpy(opdata->shape1.dim, values[input1_id].shape.dim, values[input1_id].shape.num_dims * sizeof(size_t));
      memcpy(opdata->shape2.dim, values[input2_id].shape.dim, values[input2_id].shape.num_dims * sizeof(size_t));
    }
  }
  return status;
}
// Propagates the input shapes recorded at create time into the underlying
// maximum operator. Dispatches on the operator object's concrete type.
static enum xnn_status reshape_maximum_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const enum xnn_operator_type op_type = opdata->operator_objects[0]->type;
  if (op_type == xnn_operator_type_maximum_nd_f16) {
    return xnn_reshape_maximum_nd_f16(
      opdata->operator_objects[0],
      opdata->shape1.num_dims, opdata->shape1.dim,
      opdata->shape2.num_dims, opdata->shape2.dim,
      threadpool);
  }
  if (op_type == xnn_operator_type_maximum_nd_f32) {
    return xnn_reshape_maximum_nd_f32(
      opdata->operator_objects[0],
      opdata->shape1.num_dims, opdata->shape1.dim,
      opdata->shape2.num_dims, opdata->shape2.dim,
      threadpool);
  }
  XNN_UNREACHABLE;
}
// Binds the input and output buffers to the maximum operator prior to
// execution. All three Values must have backing data at this point.
static enum xnn_status setup_maximum_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input1_id = opdata->inputs[0];
  const uint32_t input2_id = opdata->inputs[1];
  const uint32_t output_id = opdata->outputs[0];
  assert(input1_id != XNN_INVALID_VALUE_ID);
  assert(input1_id < num_values);
  assert(input2_id != XNN_INVALID_VALUE_ID);
  assert(input2_id < num_values);
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);

  const void* a_data = values[input1_id].data;
  assert(a_data != NULL);
  const void* b_data = values[input2_id].data;
  assert(b_data != NULL);
  void* out_data = values[output_id].data;
  assert(out_data != NULL);

  const enum xnn_operator_type op_type = opdata->operator_objects[0]->type;
  if (op_type == xnn_operator_type_maximum_nd_f16) {
    return xnn_setup_maximum_nd_f16(
      opdata->operator_objects[0], a_data, b_data, out_data);
  }
  if (op_type == xnn_operator_type_maximum_nd_f32) {
    return xnn_setup_maximum_nd_f32(
      opdata->operator_objects[0], a_data, b_data, out_data);
  }
  XNN_UNREACHABLE;
}
// Appends a maximum2 (elementwise max of two tensors) node to the subgraph.
// Both inputs and the output must be dense FP32 Values already defined in
// the subgraph; the node is wired to the create/reshape/setup callbacks in
// this file. Returns xnn_status_success, or a validation/allocation error.
enum xnn_status xnn_define_maximum2(
xnn_subgraph_t subgraph,
uint32_t input1_id,
uint32_t input2_id,
uint32_t output_id,
uint32_t flags)
{
enum xnn_status status;
// XNNPACK must be initialized before nodes can be defined.
if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_maximum2)) != xnn_status_success) {
return status;
}
// Validate the first input: ID range, dense type, supported datatype.
if ((status = xnn_subgraph_check_nth_input_node_id(
xnn_node_type_maximum2, input1_id, subgraph->num_values, 1)) != xnn_status_success) {
return status;
}
const struct xnn_value* input1_value = &subgraph->values[input1_id];
status = xnn_subgraph_check_nth_input_type_dense(xnn_node_type_maximum2, input1_id, input1_value, 1);
if (status != xnn_status_success) {
return status;
}
switch (input1_value->datatype) {
case xnn_datatype_fp32:
break;
default:
xnn_log_error(
"failed to define %s operator with the first input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
xnn_node_type_to_string(xnn_node_type_maximum2), input1_id,
xnn_datatype_to_string(input1_value->datatype), input1_value->datatype);
return xnn_status_invalid_parameter;
}
// Validate the second input the same way.
if ((status = xnn_subgraph_check_nth_input_node_id(
xnn_node_type_maximum2, input2_id, subgraph->num_values, 2)) != xnn_status_success) {
return status;
}
const struct xnn_value* input2_value = &subgraph->values[input2_id];
status = xnn_subgraph_check_nth_input_type_dense(xnn_node_type_maximum2, input2_id, input2_value, 2);
if (status != xnn_status_success) {
return status;
}
switch (input2_value->datatype) {
case xnn_datatype_fp32:
break;
default:
xnn_log_error(
"failed to define %s operator with the second input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
xnn_node_type_to_string(xnn_node_type_maximum2), input2_id,
xnn_datatype_to_string(input2_value->datatype), input2_value->datatype);
return xnn_status_invalid_parameter;
}
// Validate the output: ID range, dense type, supported datatype.
status = xnn_subgraph_check_output_node_id(xnn_node_type_maximum2, output_id, subgraph->num_values);
if (status != xnn_status_success) {
return status;
}
const struct xnn_value* output_value = &subgraph->values[output_id];
status = xnn_subgraph_check_output_type_dense(xnn_node_type_maximum2, output_id, output_value);
if (status != xnn_status_success) {
return status;
}
switch (output_value->datatype) {
case xnn_datatype_fp32:
break;
default:
xnn_log_error(
"failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
xnn_node_type_to_string(xnn_node_type_maximum2), output_id,
xnn_datatype_to_string(output_value->datatype), output_value->datatype);
return xnn_status_invalid_parameter;
}
// All checks passed: allocate the node and populate its fields.
struct xnn_node* node = xnn_subgraph_new_node(subgraph);
if (node == NULL) {
return xnn_status_out_of_memory;
}
node->type = xnn_node_type_maximum2;
node->compute_type = xnn_compute_type_fp32;
node->num_inputs = 2;
node->inputs[0] = input1_id;
node->inputs[1] = input2_id;
node->num_outputs = 1;
node->outputs[0] = output_id;
node->flags = flags;
node->create = create_maximum_operator;
node->reshape = reshape_maximum_operator;
node->setup = setup_maximum_operator;
return xnn_status_success;
}
| 8,536
| 32.743083
| 129
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/minimum2.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Creates the datatype-specific minimum operator for a minimum2 node and
// records the broadcast shapes of both inputs in opdata.
//
// For NCHW-laid-out outputs the stored dims are permuted: the trailing
// channel dimension of the Value shape is moved to position 1 so the
// operator sees an NCHW shape.
//
// Returns the status of the underlying xnn_create_minimum_nd_* call.
static enum xnn_status create_minimum_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 2);
  const uint32_t input1_id = node->inputs[0];
  assert(input1_id != XNN_INVALID_VALUE_ID);
  assert(input1_id < num_values);
  const uint32_t input2_id = node->inputs[1];
  assert(input2_id != XNN_INVALID_VALUE_ID);
  assert(input2_id < num_values);
  assert(node->num_outputs == 1);
  const uint32_t output_id = node->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  enum xnn_status status;
  switch (node->compute_type) {
    case xnn_compute_type_fp16:
      status = xnn_create_minimum_nd_f16(
        node->flags,
        &opdata->operator_objects[0]);
      break;
    case xnn_compute_type_fp32:
      status = xnn_create_minimum_nd_f32(
        node->flags,
        &opdata->operator_objects[0]);
      break;
    default:
      XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    opdata->shape1.num_dims = values[input1_id].shape.num_dims;
    opdata->shape2.num_dims = values[input2_id].shape.num_dims;
    if (values[output_id].layout == xnn_layout_type_nchw) {
      assert(values[input1_id].layout == xnn_layout_type_nchw);
      assert(values[input2_id].layout == xnn_layout_type_nchw);
      // Move the trailing channel dim to position 1 (NHWC shape -> NCHW shape).
      opdata->shape1.dim[0] = values[input1_id].shape.dim[0];
      opdata->shape1.dim[1] = values[input1_id].shape.dim[values[input1_id].shape.num_dims - 1];
      if (values[input1_id].shape.num_dims > 2) {
        memcpy(&opdata->shape1.dim[2], &values[input1_id].shape.dim[1], (values[input1_id].shape.num_dims - 2) * sizeof(size_t));
      }
      opdata->shape2.dim[0] = values[input2_id].shape.dim[0];
      opdata->shape2.dim[1] = values[input2_id].shape.dim[values[input2_id].shape.num_dims - 1];
      // Bug fix: guard on input2's rank here. The original tested input1's
      // rank, which skipped (or over-read) input2's inner dims whenever the
      // two inputs have different ranks (broadcasting).
      if (values[input2_id].shape.num_dims > 2) {
        memcpy(&opdata->shape2.dim[2], &values[input2_id].shape.dim[1], (values[input2_id].shape.num_dims - 2) * sizeof(size_t));
      }
    } else {
      assert(values[output_id].layout == xnn_layout_type_nhwc);
      assert(values[input1_id].layout == xnn_layout_type_nhwc);
      assert(values[input2_id].layout == xnn_layout_type_nhwc);
      memcpy(opdata->shape1.dim, values[input1_id].shape.dim, values[input1_id].shape.num_dims * sizeof(size_t));
      memcpy(opdata->shape2.dim, values[input2_id].shape.dim, values[input2_id].shape.num_dims * sizeof(size_t));
    }
  }
  return status;
}
// Propagates the input shapes recorded at create time into the underlying
// minimum operator. Dispatches on the operator object's concrete type.
static enum xnn_status reshape_minimum_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const enum xnn_operator_type op_type = opdata->operator_objects[0]->type;
  if (op_type == xnn_operator_type_minimum_nd_f16) {
    return xnn_reshape_minimum_nd_f16(
      opdata->operator_objects[0],
      opdata->shape1.num_dims, opdata->shape1.dim,
      opdata->shape2.num_dims, opdata->shape2.dim,
      threadpool);
  }
  if (op_type == xnn_operator_type_minimum_nd_f32) {
    return xnn_reshape_minimum_nd_f32(
      opdata->operator_objects[0],
      opdata->shape1.num_dims, opdata->shape1.dim,
      opdata->shape2.num_dims, opdata->shape2.dim,
      threadpool);
  }
  XNN_UNREACHABLE;
}
// Binds the input and output buffers to the minimum operator prior to
// execution. All three Values must have backing data at this point.
static enum xnn_status setup_minimum_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input1_id = opdata->inputs[0];
  const uint32_t input2_id = opdata->inputs[1];
  const uint32_t output_id = opdata->outputs[0];
  assert(input1_id != XNN_INVALID_VALUE_ID);
  assert(input1_id < num_values);
  assert(input2_id != XNN_INVALID_VALUE_ID);
  assert(input2_id < num_values);
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);

  const void* a_data = values[input1_id].data;
  assert(a_data != NULL);
  const void* b_data = values[input2_id].data;
  assert(b_data != NULL);
  void* out_data = values[output_id].data;
  assert(out_data != NULL);

  const enum xnn_operator_type op_type = opdata->operator_objects[0]->type;
  if (op_type == xnn_operator_type_minimum_nd_f16) {
    return xnn_setup_minimum_nd_f16(
      opdata->operator_objects[0], a_data, b_data, out_data);
  }
  if (op_type == xnn_operator_type_minimum_nd_f32) {
    return xnn_setup_minimum_nd_f32(
      opdata->operator_objects[0], a_data, b_data, out_data);
  }
  XNN_UNREACHABLE;
}
// Appends a minimum2 (elementwise min of two tensors) node to the subgraph.
// Both inputs and the output must be dense FP32 Values already defined in
// the subgraph; the node is wired to the create/reshape/setup callbacks in
// this file. Returns xnn_status_success, or a validation/allocation error.
enum xnn_status xnn_define_minimum2(
xnn_subgraph_t subgraph,
uint32_t input1_id,
uint32_t input2_id,
uint32_t output_id,
uint32_t flags)
{
enum xnn_status status;
// XNNPACK must be initialized before nodes can be defined.
if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_minimum2)) != xnn_status_success) {
return status;
}
// Validate the first input: ID range, dense type, supported datatype.
if ((status = xnn_subgraph_check_nth_input_node_id(
xnn_node_type_minimum2, input1_id, subgraph->num_values, 1)) != xnn_status_success) {
return status;
}
const struct xnn_value* input1_value = &subgraph->values[input1_id];
status = xnn_subgraph_check_nth_input_type_dense(xnn_node_type_minimum2, input1_id, input1_value, 1);
if (status != xnn_status_success) {
return status;
}
switch (input1_value->datatype) {
case xnn_datatype_fp32:
break;
default:
xnn_log_error(
"failed to define %s operator with the first input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
xnn_node_type_to_string(xnn_node_type_minimum2), input1_id,
xnn_datatype_to_string(input1_value->datatype), input1_value->datatype);
return xnn_status_invalid_parameter;
}
// Validate the second input the same way.
if ((status = xnn_subgraph_check_nth_input_node_id(
xnn_node_type_minimum2, input2_id, subgraph->num_values, 2)) != xnn_status_success) {
return status;
}
const struct xnn_value* input2_value = &subgraph->values[input2_id];
status = xnn_subgraph_check_nth_input_type_dense(xnn_node_type_minimum2, input2_id, input2_value, 2);
if (status != xnn_status_success) {
return status;
}
switch (input2_value->datatype) {
case xnn_datatype_fp32:
break;
default:
xnn_log_error(
"failed to define %s operator with the second input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
xnn_node_type_to_string(xnn_node_type_minimum2), input2_id,
xnn_datatype_to_string(input2_value->datatype), input2_value->datatype);
return xnn_status_invalid_parameter;
}
// Validate the output: ID range, dense type, supported datatype.
status = xnn_subgraph_check_output_node_id(xnn_node_type_minimum2, output_id, subgraph->num_values);
if (status != xnn_status_success) {
return status;
}
const struct xnn_value* output_value = &subgraph->values[output_id];
status = xnn_subgraph_check_output_type_dense(xnn_node_type_minimum2, output_id, output_value);
if (status != xnn_status_success) {
return status;
}
switch (output_value->datatype) {
case xnn_datatype_fp32:
break;
default:
xnn_log_error(
"failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
xnn_node_type_to_string(xnn_node_type_minimum2), output_id,
xnn_datatype_to_string(output_value->datatype), output_value->datatype);
return xnn_status_invalid_parameter;
}
// All checks passed: allocate the node and populate its fields.
struct xnn_node* node = xnn_subgraph_new_node(subgraph);
if (node == NULL) {
return xnn_status_out_of_memory;
}
node->type = xnn_node_type_minimum2;
node->compute_type = xnn_compute_type_fp32;
node->num_inputs = 2;
node->inputs[0] = input1_id;
node->inputs[1] = input2_id;
node->num_outputs = 1;
node->outputs[0] = output_id;
node->flags = flags;
node->create = create_minimum_operator;
node->reshape = reshape_minimum_operator;
node->setup = setup_minimum_operator;
return xnn_status_success;
}
| 8,536
| 32.743083
| 129
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/multiply2.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/requantization.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
static enum xnn_status create_multiply_operator(
const struct xnn_node* node,
const struct xnn_value* values,
size_t num_values,
struct xnn_operator_data* opdata,
struct xnn_code_cache* code_cache,
struct xnn_weights_cache* weights_cache)
{
assert(node->num_inputs == 2);
const uint32_t input1_id = node->inputs[0];
assert(input1_id != XNN_INVALID_VALUE_ID);
assert(input1_id < num_values);
const uint32_t input2_id = node->inputs[1];
assert(input2_id != XNN_INVALID_VALUE_ID);
assert(input2_id < num_values);
assert(node->num_outputs == 1);
const uint32_t output_id = node->outputs[0];
assert(output_id != XNN_INVALID_VALUE_ID);
assert(output_id < num_values);
enum xnn_status status;
switch (node->compute_type) {
case xnn_compute_type_fp16:
status = xnn_create_multiply_nd_f16(
node->activation.output_min,
node->activation.output_max,
node->flags,
&opdata->operator_objects[0]);
break;
case xnn_compute_type_fp32:
status = xnn_create_multiply_nd_f32(
node->activation.output_min,
node->activation.output_max,
node->flags,
&opdata->operator_objects[0]);
break;
case xnn_compute_type_qs8:
{
const float output_scale = values[output_id].quantization.scale;
const int32_t output_zero_point = values[output_id].quantization.zero_point;
const int8_t output_min = xnn_qs8_quantize(node->activation.output_min, output_scale, output_zero_point);
const int8_t output_max = xnn_qs8_quantize(node->activation.output_max, output_scale, output_zero_point);
status = xnn_create_multiply_nd_qs8(
(int8_t) values[input1_id].quantization.zero_point,
values[input1_id].quantization.scale,
(int8_t) values[input2_id].quantization.zero_point,
values[input2_id].quantization.scale,
(int8_t) output_zero_point,
output_scale, output_min, output_max, node->flags,
&opdata->operator_objects[0]);
break;
}
case xnn_compute_type_qu8:
{
const float output_scale = values[output_id].quantization.scale;
const int32_t output_zero_point = values[output_id].quantization.zero_point;
const uint8_t output_min = xnn_qu8_quantize(node->activation.output_min, output_scale, output_zero_point);
const uint8_t output_max = xnn_qu8_quantize(node->activation.output_max, output_scale, output_zero_point);
status = xnn_create_multiply_nd_qu8(
(uint8_t) values[input1_id].quantization.zero_point,
values[input1_id].quantization.scale,
(uint8_t) values[input2_id].quantization.zero_point,
values[input2_id].quantization.scale,
(uint8_t) output_zero_point,
output_scale, output_min, output_max, node->flags,
&opdata->operator_objects[0]);
break;
}
default:
XNN_UNREACHABLE;
}
if (status == xnn_status_success) {
opdata->shape1.num_dims = values[input1_id].shape.num_dims;
opdata->shape2.num_dims = values[input2_id].shape.num_dims;
if (values[output_id].layout == xnn_layout_type_nchw) {
assert(values[input1_id].layout == xnn_layout_type_nchw);
assert(values[input2_id].layout == xnn_layout_type_nchw);
opdata->shape1.dim[0] = values[input1_id].shape.dim[0];
opdata->shape1.dim[1] = values[input1_id].shape.dim[values[input1_id].shape.num_dims - 1];
if (values[input1_id].shape.num_dims > 2) {
memcpy(&opdata->shape1.dim[2], &values[input1_id].shape.dim[1], (values[input1_id].shape.num_dims - 2) * sizeof(size_t));
}
opdata->shape2.dim[0] = values[input2_id].shape.dim[0];
opdata->shape2.dim[1] = values[input2_id].shape.dim[values[input2_id].shape.num_dims - 1];
if (values[input1_id].shape.num_dims > 2) {
memcpy(&opdata->shape2.dim[2], &values[input2_id].shape.dim[1], (values[input2_id].shape.num_dims - 2) * sizeof(size_t));
}
} else {
assert(values[output_id].layout == xnn_layout_type_nhwc);
assert(values[input1_id].layout == xnn_layout_type_nhwc);
assert(values[input2_id].layout == xnn_layout_type_nhwc);
memcpy(opdata->shape1.dim, values[input1_id].shape.dim, values[input1_id].shape.num_dims * sizeof(size_t));
memcpy(opdata->shape2.dim, values[input2_id].shape.dim, values[input2_id].shape.num_dims * sizeof(size_t));
}
}
return status;
}
// Propagates the input shapes recorded at create time into the underlying
// multiply operator. Dispatches on the operator object's concrete type.
//
// Cleanup: removed the unreachable `break;` after each `return` (dead code),
// matching the style of the sibling reshape functions in this module.
static enum xnn_status reshape_multiply_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_multiply_nd_f16:
      return xnn_reshape_multiply_nd_f16(
        opdata->operator_objects[0],
        opdata->shape1.num_dims,
        opdata->shape1.dim,
        opdata->shape2.num_dims,
        opdata->shape2.dim,
        threadpool);
    case xnn_operator_type_multiply_nd_f32:
      return xnn_reshape_multiply_nd_f32(
        opdata->operator_objects[0],
        opdata->shape1.num_dims,
        opdata->shape1.dim,
        opdata->shape2.num_dims,
        opdata->shape2.dim,
        threadpool);
    case xnn_operator_type_multiply_nd_qs8:
      return xnn_reshape_multiply_nd_qs8(
        opdata->operator_objects[0],
        opdata->shape1.num_dims,
        opdata->shape1.dim,
        opdata->shape2.num_dims,
        opdata->shape2.dim,
        threadpool);
    case xnn_operator_type_multiply_nd_qu8:
      return xnn_reshape_multiply_nd_qu8(
        opdata->operator_objects[0],
        opdata->shape1.num_dims,
        opdata->shape1.dim,
        opdata->shape2.num_dims,
        opdata->shape2.dim,
        threadpool);
    default:
      XNN_UNREACHABLE;
  }
}
// Binds the input and output buffers to the multiply operator prior to
// execution. All three Values must have backing data at this point.
//
// Cleanup: removed the unreachable `break;` after each `return` (dead code),
// matching the style of the sibling setup functions in this module.
static enum xnn_status setup_multiply_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input1_id = opdata->inputs[0];
  assert(input1_id != XNN_INVALID_VALUE_ID);
  assert(input1_id < num_values);
  const uint32_t input2_id = opdata->inputs[1];
  assert(input2_id != XNN_INVALID_VALUE_ID);
  assert(input2_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  const struct xnn_value* input1_value = values + input1_id;
  const void* input1_data = input1_value->data;
  assert(input1_data != NULL);
  const struct xnn_value* input2_value = values + input2_id;
  const void* input2_data = input2_value->data;
  assert(input2_data != NULL);
  const struct xnn_value* output_value = values + output_id;
  void* output_data = output_value->data;
  assert(output_data != NULL);
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_multiply_nd_f16:
      return xnn_setup_multiply_nd_f16(
        opdata->operator_objects[0],
        input1_data, input2_data, output_data);
    case xnn_operator_type_multiply_nd_f32:
      return xnn_setup_multiply_nd_f32(
        opdata->operator_objects[0],
        input1_data, input2_data, output_data);
    case xnn_operator_type_multiply_nd_qs8:
      return xnn_setup_multiply_nd_qs8(
        opdata->operator_objects[0],
        input1_data, input2_data, output_data);
    case xnn_operator_type_multiply_nd_qu8:
      return xnn_setup_multiply_nd_qu8(
        opdata->operator_objects[0],
        input1_data, input2_data, output_data);
    default:
      XNN_UNREACHABLE;
  }
}
// Appends a multiply2 (elementwise product with output clamping) node to the
// subgraph. Inputs and output must be dense Values of fp32, qint8, or quint8
// datatype, and all three datatypes must match; the compute type is derived
// from the output datatype. Returns xnn_status_success, or a
// validation/allocation error.
enum xnn_status xnn_define_multiply2(
xnn_subgraph_t subgraph,
float output_min,
float output_max,
uint32_t input1_id,
uint32_t input2_id,
uint32_t output_id,
uint32_t flags)
{
enum xnn_status status;
// XNNPACK must be initialized before nodes can be defined.
if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_multiply2)) != xnn_status_success) {
return status;
}
// The clamping range must be well-formed (min <= max, no NaN).
status = xnn_subgraph_check_output_min_max(xnn_node_type_multiply2, output_min, output_max);
if (status != xnn_status_success) {
return status;
}
// Validate the first input: ID range, dense type, supported datatype.
if ((status = xnn_subgraph_check_nth_input_node_id(
xnn_node_type_multiply2, input1_id, subgraph->num_values, 1)) != xnn_status_success) {
return status;
}
const struct xnn_value* input1_value = &subgraph->values[input1_id];
status = xnn_subgraph_check_nth_input_type_dense(xnn_node_type_multiply2, input1_id, input1_value, 1);
if (status != xnn_status_success) {
return status;
}
switch (input1_value->datatype) {
case xnn_datatype_fp32:
case xnn_datatype_qint8:
case xnn_datatype_quint8:
break;
default:
xnn_log_error(
"failed to define %s operator with the first input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
xnn_node_type_to_string(xnn_node_type_multiply2), input1_id,
xnn_datatype_to_string(input1_value->datatype), input1_value->datatype);
return xnn_status_invalid_parameter;
}
// Validate the second input the same way.
if ((status = xnn_subgraph_check_nth_input_node_id(
xnn_node_type_multiply2, input2_id, subgraph->num_values, 2)) != xnn_status_success) {
return status;
}
const struct xnn_value* input2_value = &subgraph->values[input2_id];
status = xnn_subgraph_check_nth_input_type_dense(xnn_node_type_multiply2, input2_id, input2_value, 2);
if (status != xnn_status_success) {
return status;
}
switch (input2_value->datatype) {
case xnn_datatype_fp32:
case xnn_datatype_qint8:
case xnn_datatype_quint8:
break;
default:
xnn_log_error(
"failed to define %s operator with the second input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
xnn_node_type_to_string(xnn_node_type_multiply2), input2_id,
xnn_datatype_to_string(input2_value->datatype), input2_value->datatype);
return xnn_status_invalid_parameter;
}
// Validate the output: ID range, dense type.
status = xnn_subgraph_check_output_node_id(xnn_node_type_multiply2, output_id, subgraph->num_values);
if (status != xnn_status_success) {
return status;
}
const struct xnn_value* output_value = &subgraph->values[output_id];
status = xnn_subgraph_check_output_type_dense(xnn_node_type_multiply2, output_id, output_value);
if (status != xnn_status_success) {
return status;
}
// The output datatype selects the compute type used at create time.
enum xnn_compute_type compute_type = xnn_compute_type_invalid;
switch (output_value->datatype) {
case xnn_datatype_fp32:
compute_type = xnn_compute_type_fp32;
break;
case xnn_datatype_qint8:
compute_type = xnn_compute_type_qs8;
break;
case xnn_datatype_quint8:
compute_type = xnn_compute_type_qu8;
break;
default:
xnn_log_error(
"failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
xnn_node_type_to_string(xnn_node_type_multiply2), output_id,
xnn_datatype_to_string(output_value->datatype), output_value->datatype);
return xnn_status_invalid_parameter;
}
// Both inputs and the output must share a single datatype.
status = xnn_subgraph_check_datatype_matches_two_inputs(
xnn_node_type_multiply2, input1_id, input1_value, input2_id, input2_value, output_id, output_value);
if (status != xnn_status_success) {
return status;
}
// All checks passed: allocate the node and populate its fields.
struct xnn_node* node = xnn_subgraph_new_node(subgraph);
if (node == NULL) {
return xnn_status_out_of_memory;
}
node->type = xnn_node_type_multiply2;
node->compute_type = compute_type;
node->activation.output_min = output_min;
node->activation.output_max = output_max;
node->num_inputs = 2;
node->inputs[0] = input1_id;
node->inputs[1] = input2_id;
node->num_outputs = 1;
node->outputs[0] = output_id;
node->flags = flags;
node->create = create_multiply_operator;
node->reshape = reshape_multiply_operator;
node->setup = setup_multiply_operator;
return xnn_status_success;
}
| 12,275
| 34.174785
| 129
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/negate.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Creates the f16/f32 negate operator for a negate node. The last dimension
// of the input Value is used as the channel count; remaining dimensions are
// folded into the batch size.
static enum xnn_status create_negate_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  assert(node->num_outputs == 1);

  // A zero-rank input is treated as a single channel.
  const size_t input_rank = values[input_id].shape.num_dims;
  size_t num_channels = 1;
  if (input_rank != 0) {
    num_channels = values[input_id].shape.dim[input_rank - 1];
  }

  enum xnn_status status;
  switch (node->compute_type) {
    case xnn_compute_type_fp16:
      status = xnn_create_negate_nc_f16(
        num_channels /* channels */, num_channels /* input stride */, num_channels /* output stride */,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    case xnn_compute_type_fp32:
      status = xnn_create_negate_nc_f32(
        num_channels /* channels */, num_channels /* input stride */, num_channels /* output stride */,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    default:
      XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    opdata->batch_size = xnn_shape_multiply_non_channel_dims(&values[input_id].shape);
  }
  return status;
}
// Propagates the batch size recorded at create time into the underlying
// negate operator. Dispatches on the operator object's concrete type.
static enum xnn_status reshape_negate_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const enum xnn_operator_type op_type = opdata->operator_objects[0]->type;
  if (op_type == xnn_operator_type_negate_nc_f32) {
    return xnn_reshape_negate_nc_f32(
      opdata->operator_objects[0], opdata->batch_size, threadpool);
  }
  if (op_type == xnn_operator_type_negate_nc_f16) {
    return xnn_reshape_negate_nc_f16(
      opdata->operator_objects[0], opdata->batch_size, threadpool);
  }
  XNN_UNREACHABLE;
}
// Binds the input and output buffers to the negate operator prior to
// execution. Both Values must have backing data at this point.
static enum xnn_status setup_negate_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  const uint32_t output_id = opdata->outputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);

  const void* in_data = values[input_id].data;
  assert(in_data != NULL);
  void* out_data = values[output_id].data;
  assert(out_data != NULL);

  const enum xnn_operator_type op_type = opdata->operator_objects[0]->type;
  if (op_type == xnn_operator_type_negate_nc_f32) {
    return xnn_setup_negate_nc_f32(
      opdata->operator_objects[0], in_data, out_data);
  }
  if (op_type == xnn_operator_type_negate_nc_f16) {
    return xnn_setup_negate_nc_f16(
      opdata->operator_objects[0], in_data, out_data);
  }
  XNN_UNREACHABLE;
}
// Appends a negate (elementwise sign flip) node to the subgraph. The input
// and output must be dense FP32 Values with identical shapes. Returns
// xnn_status_success, or a validation/allocation error.
enum xnn_status xnn_define_negate(
xnn_subgraph_t subgraph,
uint32_t input_id,
uint32_t output_id,
uint32_t flags)
{
enum xnn_status status;
// XNNPACK must be initialized before nodes can be defined.
if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_negate)) != xnn_status_success) {
return status;
}
// Validate the input: ID range, dense type, supported datatype.
if ((status = xnn_subgraph_check_input_node_id(xnn_node_type_negate, input_id, subgraph->num_values)) !=
xnn_status_success) {
return status;
}
const struct xnn_value* input_value = &subgraph->values[input_id];
status = xnn_subgraph_check_input_type_dense(xnn_node_type_negate, input_id, input_value);
if (status != xnn_status_success) {
return status;
}
switch (input_value->datatype) {
case xnn_datatype_fp32:
break;
default:
xnn_log_error(
"failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
xnn_node_type_to_string(xnn_node_type_negate), input_id,
xnn_datatype_to_string(input_value->datatype), input_value->datatype);
return xnn_status_invalid_parameter;
}
// Validate the output: ID range, dense type, shape match, supported datatype.
status = xnn_subgraph_check_output_node_id(xnn_node_type_negate, output_id, subgraph->num_values);
if (status != xnn_status_success) {
return status;
}
const struct xnn_value* output_value = &subgraph->values[output_id];
status = xnn_subgraph_check_output_type_dense(xnn_node_type_negate, output_id, output_value);
if (status != xnn_status_success) {
return status;
}
// Negate is elementwise: input and output shapes must be identical.
status = xnn_subgraph_check_all_dims_match(xnn_node_type_negate, input_id, input_value, output_id, output_value);
if (status != xnn_status_success) {
return status;
}
switch (output_value->datatype) {
case xnn_datatype_fp32:
break;
default:
xnn_log_error(
"failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
xnn_node_type_to_string(xnn_node_type_negate), output_id,
xnn_datatype_to_string(output_value->datatype), output_value->datatype);
return xnn_status_invalid_parameter;
}
// All checks passed: allocate the node and populate its fields.
struct xnn_node* node = xnn_subgraph_new_node(subgraph);
if (node == NULL) {
return xnn_status_out_of_memory;
}
node->type = xnn_node_type_negate;
node->compute_type = xnn_compute_type_fp32;
node->num_inputs = 1;
node->inputs[0] = input_id;
node->num_outputs = 1;
node->outputs[0] = output_id;
node->flags = flags;
node->create = create_negate_operator;
node->reshape = reshape_negate_operator;
node->setup = setup_negate_operator;
return xnn_status_success;
}
| 6,016
| 29.236181
| 115
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/prelu.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Creates the f16/f32 PReLU operator for a prelu node. The second input
// Value supplies the static per-channel negative slopes; the last dimension
// of the first input is used as the channel count.
static enum xnn_status create_prelu_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 2);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t slope_id = node->inputs[1];
  assert(slope_id != XNN_INVALID_VALUE_ID);
  assert(slope_id < num_values);

  // Prefer the fp32 copy of the slopes when one is present.
  const void* negative_slope = values[slope_id].data;
  if (values[slope_id].fp32_data != NULL) {
    negative_slope = values[slope_id].fp32_data;
  }
  assert(negative_slope != NULL);
  assert(node->num_outputs == 1);

  // A zero-rank input is treated as a single channel.
  const size_t input_rank = values[input_id].shape.num_dims;
  const size_t num_channels = input_rank == 0 ? 1 : values[input_id].shape.dim[input_rank - 1];

  enum xnn_status status;
  switch (node->compute_type) {
    case xnn_compute_type_fp16:
      // Slopes are supplied as fp32 static weights (XNN_FLAG_FP32_STATIC_WEIGHTS).
      status = xnn_create_prelu_nc_f16(
        num_channels /* channels */, num_channels /* input stride */, num_channels /* output stride */,
        negative_slope /* negative slope */,
        node->flags | XNN_FLAG_FP32_STATIC_WEIGHTS,
        code_cache,
        weights_cache,
        &opdata->operator_objects[0]);
      break;
    case xnn_compute_type_fp32:
      status = xnn_create_prelu_nc_f32(
        num_channels /* channels */, num_channels /* input stride */, num_channels /* output stride */,
        negative_slope /* negative slope */,
        node->flags,
        code_cache,
        weights_cache,
        &opdata->operator_objects[0]);
      break;
    default:
      XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    opdata->batch_size = xnn_shape_multiply_non_channel_dims(&values[input_id].shape);
  }
  return status;
}
// Re-partitions the PReLU operator's work for the batch size cached at
// creation time. Dispatches on the concrete operator type (f16 or f32).
static enum xnn_status reshape_prelu_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  xnn_operator_t op = opdata->operator_objects[0];
  if (op->type == xnn_operator_type_prelu_nc_f16) {
    return xnn_reshape_prelu_nc_f16(op, opdata->batch_size, threadpool);
  }
  if (op->type == xnn_operator_type_prelu_nc_f32) {
    return xnn_reshape_prelu_nc_f32(op, opdata->batch_size, threadpool);
  }
  XNN_UNREACHABLE;
}
// Binds the runtime input/output data pointers to the PReLU operator.
static enum xnn_status setup_prelu_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t in_id = opdata->inputs[0];
  assert(in_id != XNN_INVALID_VALUE_ID);
  assert(in_id < num_values);
  const uint32_t out_id = opdata->outputs[0];
  assert(out_id != XNN_INVALID_VALUE_ID);
  assert(out_id < num_values);
  const void* src = values[in_id].data;
  assert(src != NULL);
  void* dst = values[out_id].data;
  assert(dst != NULL);
  xnn_operator_t op = opdata->operator_objects[0];
  if (op->type == xnn_operator_type_prelu_nc_f16) {
    return xnn_setup_prelu_nc_f16(op, src, dst);
  }
  if (op->type == xnn_operator_type_prelu_nc_f32) {
    return xnn_setup_prelu_nc_f32(op, src, dst);
  }
  XNN_UNREACHABLE;
}
// Defines a PReLU node in the subgraph.
//
// Validates that:
//   - XNNPACK is initialized;
//   - input is a dense FP32 tensor;
//   - slope is a *static* dense FP32 tensor (its data must be non-NULL);
//   - output is a dense FP32 tensor.
// On success, appends a node that records the create/reshape/setup callbacks.
//
// Returns xnn_status_success, or an error status describing the first
// validation failure (logged via xnn_log_error).
enum xnn_status xnn_define_prelu(
  xnn_subgraph_t subgraph,
  uint32_t input_id,
  uint32_t slope_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_prelu)) != xnn_status_success) {
    return status;
  }
  if ((status = xnn_subgraph_check_input_node_id(xnn_node_type_prelu, input_id, subgraph->num_values)) !=
      xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_prelu, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  switch (input_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_prelu), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }
  // The slope is validated by hand (rather than via the shared input checks)
  // because it must additionally be static.
  if (slope_id >= subgraph->num_values) {
    xnn_log_error(
      "failed to define %s operator with slope ID #%" PRIu32 ": invalid Value ID",
      xnn_node_type_to_string(xnn_node_type_prelu), slope_id);
    return xnn_status_invalid_parameter;
  }
  const struct xnn_value* slope_value = &subgraph->values[slope_id];
  if (slope_value->type != xnn_value_type_dense_tensor) {
    xnn_log_error(
      "failed to define %s operator with slope ID #%" PRIu32 ": unsupported Value type %d (expected dense tensor)",
      xnn_node_type_to_string(xnn_node_type_prelu), slope_id, slope_value->type);
    return xnn_status_invalid_parameter;
  }
  if (slope_value->data == NULL) {
    // Non-static slope: data would only become available at runtime, but
    // create_prelu_operator captures it at creation time.
    xnn_log_error(
      "failed to define %s operator with slope ID #%" PRIu32 ": non-static Value",
      xnn_node_type_to_string(xnn_node_type_prelu), slope_id);
    return xnn_status_invalid_parameter;
  }
  switch (slope_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with slope ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_prelu), slope_id,
        xnn_datatype_to_string(slope_value->datatype), slope_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(xnn_node_type_prelu, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_prelu, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_prelu), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->type = xnn_node_type_prelu;
  node->compute_type = xnn_compute_type_fp32;
  node->num_inputs = 2;
  node->inputs[0] = input_id;
  node->inputs[1] = slope_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_prelu_operator;
  node->reshape = reshape_prelu_operator;
  node->setup = setup_prelu_operator;
  return xnn_status_success;
}
| 7,625
| 30.643154
| 115
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/rope.c
|
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Creates the RoPE (rotary position embedding) operator for a subgraph node.
//
// The weights tensor is captured at creation time (preferring the FP32 shadow
// copy when present). On success, the leading batch size, sequence size, and
// head count are cached in opdata from the input shape, which is interpreted
// as [..., sequence, heads, channels].
//
// NOTE(review): node->flags is not forwarded to the operator (/*flags=*/0 is
// passed) — confirm this is intentional.
static enum xnn_status create_rope_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 2);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t weights_id = node->inputs[1];
  assert(weights_id != XNN_INVALID_VALUE_ID);
  assert(weights_id < num_values);
  assert(node->num_outputs == 1);
  const uint32_t output_id = node->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  const size_t num_input_dims = values[input_id].shape.num_dims;
  // Prefer the FP32 shadow copy of the static weights when present.
  const void* weights_data = values[weights_id].fp32_data != NULL ? values[weights_id].fp32_data : values[weights_id].data;
  assert(node->compute_type == xnn_compute_type_fp32);
  const enum xnn_status status = xnn_create_rope_nthc_f32(
    node->params.rope.max_sequence_size,
    values[input_id].shape.dim[num_input_dims - 1],
    weights_data,
    /*flags=*/0,
    &opdata->operator_objects[0]);
  if (status == xnn_status_success) {
    // Batch size = product of all dimensions before the last three
    // (sequence, heads, channels).
    opdata->batch_size = xnn_shape_multiply_batch_dims(&values[input_id].shape, 3);
    opdata->sequence_size = values[input_id].shape.dim[num_input_dims - 3];
    opdata->heads = values[input_id].shape.dim[num_input_dims - 2];
    opdata->inputs[0] = input_id;
    opdata->inputs[1] = weights_id;
    opdata->outputs[0] = output_id;
  }
  return status;
}
// Re-partitions the RoPE operator's work for the cached batch, sequence, and
// head dimensions. Only the f32 NTHC variant exists.
static enum xnn_status reshape_rope_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  xnn_operator_t op = opdata->operator_objects[0];
  assert(op->type == xnn_operator_type_rope_nthc_f32);
  return xnn_reshape_rope_nthc_f32(
    op, opdata->batch_size, opdata->sequence_size, opdata->heads, threadpool);
}
// Binds the runtime input/output data pointers to the RoPE operator.
static enum xnn_status setup_rope_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t in_id = opdata->inputs[0];
  assert(in_id != XNN_INVALID_VALUE_ID);
  assert(in_id < num_values);
  const uint32_t out_id = opdata->outputs[0];
  assert(out_id != XNN_INVALID_VALUE_ID);
  assert(out_id < num_values);
  const void* src = values[in_id].data;
  assert(src != NULL);
  void* dst = values[out_id].data;
  assert(dst != NULL);
  xnn_operator_t op = opdata->operator_objects[0];
  assert(op->type == xnn_operator_type_rope_nthc_f32);
  return xnn_setup_rope_nthc_f32(op, src, dst);
}
// Defines a RoPE (rotary position embedding) node in the subgraph.
//
// Validates that max_sequence_size is non-zero, that input, weights, and
// output are dense FP32 tensors, that input and output shapes match, and
// that input and output datatypes match. On success, appends a node carrying
// max_sequence_size and the create/reshape/setup callbacks.
//
// Returns xnn_status_success, or an error status describing the first
// validation failure (logged via xnn_log_error).
enum xnn_status xnn_define_rope(
  xnn_subgraph_t subgraph,
  size_t max_sequence_size,
  uint32_t input_id,
  uint32_t weights_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_rope)) != xnn_status_success) {
    return status;
  }
  if (max_sequence_size == 0) {
    xnn_log_error(
      "failed to define %s operator with %zu max sequence size parameter: max sequence size must be non-zero",
      xnn_node_type_to_string(xnn_node_type_rope), max_sequence_size);
    return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_input_node_id(xnn_node_type_rope, input_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  status = xnn_subgraph_check_input_node_id(xnn_node_type_rope, weights_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_rope, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  if (input_value->datatype != xnn_datatype_fp32) {
    xnn_log_error(
      "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
      xnn_node_type_to_string(xnn_node_type_rope), input_id,
      xnn_datatype_to_string(input_value->datatype), input_value->datatype);
    return xnn_status_invalid_parameter;
  }
  const struct xnn_value* weights_value = &subgraph->values[weights_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_rope, weights_id, weights_value);
  if (status != xnn_status_success) {
    return status;
  }
  if (weights_value->datatype != xnn_datatype_fp32) {
    xnn_log_error(
      "failed to define %s operator with weights ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
      xnn_node_type_to_string(xnn_node_type_rope), weights_id,
      xnn_datatype_to_string(weights_value->datatype), weights_value->datatype);
    return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(xnn_node_type_rope, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_rope, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  status = xnn_subgraph_check_all_dims_match(xnn_node_type_rope, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  if (output_value->datatype != xnn_datatype_fp32) {
    xnn_log_error(
      "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
      xnn_node_type_to_string(xnn_node_type_rope), output_id,
      xnn_datatype_to_string(output_value->datatype), output_value->datatype);
    return xnn_status_invalid_parameter;
  }
  // Fix: this check previously passed xnn_node_type_subtract (copy-paste from
  // another operator), which produced misleading operator names in error logs.
  status = xnn_subgraph_check_datatype_matches(xnn_node_type_rope, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->type = xnn_node_type_rope;
  node->compute_type = xnn_compute_type_fp32;
  node->params.rope.max_sequence_size = max_sequence_size;
  node->num_inputs = 2;
  node->inputs[0] = input_id;
  node->inputs[1] = weights_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_rope_operator;
  node->reshape = reshape_rope_operator;
  node->setup = setup_rope_operator;
  return xnn_status_success;
}
| 7,050
| 31.795349
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/sigmoid.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Creates the sigmoid operator object for a validated subgraph node.
//
// Supports FP16, FP32, QS8, and QU8 compute types; the quantized variants pass
// the input/output zero points and scales recorded on the Values, with the
// full output range of the datatype. On success the flattened batch size
// (product of all non-channel input dimensions) is cached in opdata.
static enum xnn_status create_sigmoid_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  assert(node->num_outputs == 1);
  const uint32_t output_id = node->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  // Channel count is the trailing dimension; a rank-0 input counts as 1 channel.
  const size_t num_input_dims = values[input_id].shape.num_dims;
  const size_t channel_dim = num_input_dims == 0 ? 1 : values[input_id].shape.dim[num_input_dims - 1];
  enum xnn_status status;
  switch (node->compute_type) {
    case xnn_compute_type_fp16:
      status = xnn_create_sigmoid_nc_f16(
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    case xnn_compute_type_fp32:
      status = xnn_create_sigmoid_nc_f32(
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    case xnn_compute_type_qs8:
    {
      status = xnn_create_sigmoid_nc_qs8(
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        (int8_t) values[input_id].quantization.zero_point,
        values[input_id].quantization.scale,
        (int8_t) values[output_id].quantization.zero_point,
        values[output_id].quantization.scale,
        INT8_MIN, INT8_MAX,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    }
    case xnn_compute_type_qu8:
    {
      status = xnn_create_sigmoid_nc_qu8(
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        (uint8_t) values[input_id].quantization.zero_point,
        values[input_id].quantization.scale,
        (uint8_t) values[output_id].quantization.zero_point,
        values[output_id].quantization.scale,
        0, UINT8_MAX,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    }
    default:
      XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    opdata->batch_size = xnn_shape_multiply_non_channel_dims(&values[input_id].shape);
  }
  return status;
}
// Re-partitions the sigmoid operator's work for the cached batch size.
// Dispatches on the concrete operator type (f16/f32/qs8/qu8).
//
// Fix: removed an unreachable `break;` that followed the `return` in the
// qu8 case.
static enum xnn_status reshape_sigmoid_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_sigmoid_nc_f16:
      return xnn_reshape_sigmoid_nc_f16(
        opdata->operator_objects[0],
        opdata->batch_size,
        threadpool);
    case xnn_operator_type_sigmoid_nc_f32:
      return xnn_reshape_sigmoid_nc_f32(
        opdata->operator_objects[0],
        opdata->batch_size,
        threadpool);
    case xnn_operator_type_sigmoid_nc_qs8:
      return xnn_reshape_sigmoid_nc_qs8(
        opdata->operator_objects[0],
        opdata->batch_size,
        threadpool);
    case xnn_operator_type_sigmoid_nc_qu8:
      return xnn_reshape_sigmoid_nc_qu8(
        opdata->operator_objects[0],
        opdata->batch_size,
        threadpool);
    default:
      XNN_UNREACHABLE;
  }
}
// Binds the runtime input/output data pointers to the sigmoid operator.
// Dispatches on the concrete operator type (f16/f32/qs8/qu8).
//
// Fix: removed an unreachable `break;` that followed the `return` in the
// qu8 case.
static enum xnn_status setup_sigmoid_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  const struct xnn_value* input_value = values + input_id;
  const void* input_data = input_value->data;
  assert(input_data != NULL);
  const struct xnn_value* output_value = values + output_id;
  void* output_data = output_value->data;
  assert(output_data != NULL);
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_sigmoid_nc_f16:
      return xnn_setup_sigmoid_nc_f16(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_sigmoid_nc_f32:
      return xnn_setup_sigmoid_nc_f32(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_sigmoid_nc_qs8:
      return xnn_setup_sigmoid_nc_qs8(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_sigmoid_nc_qu8:
      return xnn_setup_sigmoid_nc_qu8(
        opdata->operator_objects[0],
        input_data,
        output_data);
    default:
      XNN_UNREACHABLE;
  }
}
// Defines a sigmoid node in the subgraph.
//
// Validates that input and output are dense tensors of matching shape and
// datatype, and that the datatype is one of FP32, QINT8, or QUINT8 (the
// compute type is derived from the output datatype; FP16 operators are
// produced later by graph-level FP16 conversion).
//
// Fix: the datatype-match check previously passed xnn_node_type_subtract
// (copy-paste from another operator), producing misleading operator names in
// error logs; it now passes xnn_node_type_sigmoid.
enum xnn_status xnn_define_sigmoid(
  xnn_subgraph_t subgraph,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_sigmoid)) != xnn_status_success) {
    return status;
  }
  if ((status = xnn_subgraph_check_input_node_id(xnn_node_type_sigmoid, input_id, subgraph->num_values)) !=
      xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_sigmoid, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  switch (input_value->datatype) {
    case xnn_datatype_fp32:
    case xnn_datatype_qint8:
    case xnn_datatype_quint8:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_sigmoid), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(xnn_node_type_sigmoid, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_sigmoid, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  status = xnn_subgraph_check_all_dims_match(xnn_node_type_sigmoid, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  enum xnn_compute_type compute_type = xnn_compute_type_invalid;
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      compute_type = xnn_compute_type_fp32;
      break;
    case xnn_datatype_qint8:
      compute_type = xnn_compute_type_qs8;
      break;
    case xnn_datatype_quint8:
      compute_type = xnn_compute_type_qu8;
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_sigmoid), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_datatype_matches(xnn_node_type_sigmoid, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->type = xnn_node_type_sigmoid;
  node->compute_type = compute_type;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_sigmoid_operator;
  node->reshape = reshape_sigmoid_operator;
  node->setup = setup_sigmoid_operator;
  return xnn_status_success;
}
| 8,358
| 30.543396
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/softmax.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
static enum xnn_status create_softmax_operator(
const struct xnn_node* node,
const struct xnn_value* values,
size_t num_values,
struct xnn_operator_data* opdata,
struct xnn_code_cache* code_cache,
struct xnn_weights_cache* weights_cache)
{
assert(node->num_inputs == 1);
const uint32_t input_id = node->inputs[0];
assert(input_id != XNN_INVALID_VALUE_ID);
assert(input_id < num_values);
assert(node->num_outputs == 1);
const size_t num_input_dims = values[input_id].shape.num_dims;
assert(num_input_dims > 0);
const size_t channel_dim = values[input_id].shape.dim[num_input_dims - 1];
enum xnn_status status;
switch (node->compute_type) {
case xnn_datatype_fp32:
status = xnn_create_softmax_nc_f32(
channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
node->flags,
&opdata->operator_objects[0]);
break;
case xnn_datatype_fp16:
status = xnn_create_softmax_nc_f16(
channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
node->flags,
&opdata->operator_objects[0]);
break;
default:
XNN_UNREACHABLE;
}
if (status == xnn_status_success) {
opdata->batch_size = xnn_shape_multiply_non_channel_dims(&values[input_id].shape);
}
return status;
}
// Re-partitions the softmax operator's work for the cached batch size.
// Dispatches on the concrete operator type (f32 or f16).
static enum xnn_status reshape_softmax_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  xnn_operator_t op = opdata->operator_objects[0];
  if (op->type == xnn_operator_type_softmax_nc_f32) {
    return xnn_reshape_softmax_nc_f32(op, opdata->batch_size, threadpool);
  }
  if (op->type == xnn_operator_type_softmax_nc_f16) {
    return xnn_reshape_softmax_nc_f16(op, opdata->batch_size, threadpool);
  }
  XNN_UNREACHABLE;
}
// Binds the runtime input/output data pointers to the softmax operator.
static enum xnn_status setup_softmax_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t in_id = opdata->inputs[0];
  assert(in_id != XNN_INVALID_VALUE_ID);
  assert(in_id < num_values);
  const uint32_t out_id = opdata->outputs[0];
  assert(out_id != XNN_INVALID_VALUE_ID);
  assert(out_id < num_values);
  const void* src = values[in_id].data;
  assert(src != NULL);
  void* dst = values[out_id].data;
  assert(dst != NULL);
  xnn_operator_t op = opdata->operator_objects[0];
  if (op->type == xnn_operator_type_softmax_nc_f32) {
    return xnn_setup_softmax_nc_f32(op, src, dst);
  }
  if (op->type == xnn_operator_type_softmax_nc_f16) {
    return xnn_setup_softmax_nc_f16(op, src, dst);
  }
  XNN_UNREACHABLE;
}
// Defines a softmax node in the subgraph.
//
// Validates that input and output are dense FP32 tensors of matching shape
// and that the input has at least one dimension (softmax is taken over the
// trailing dimension). On success, appends a node with compute type FP32 and
// the create/reshape/setup callbacks.
//
// Returns xnn_status_success, or an error status describing the first
// validation failure (logged via xnn_log_error).
enum xnn_status xnn_define_softmax(
  xnn_subgraph_t subgraph,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_softmax)) != xnn_status_success) {
    return status;
  }
  if ((status = xnn_subgraph_check_input_node_id(xnn_node_type_softmax, input_id, subgraph->num_values)) !=
      xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_softmax, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  // Rank 0 is rejected because softmax needs a channel dimension to
  // normalize over.
  if (input_value->shape.num_dims < 1) {
    xnn_log_error(
      "failed to define %s operator with input ID #%" PRIu32 ": number of dimensions must be at least 1",
      xnn_node_type_to_string(xnn_node_type_softmax), input_id);
    return xnn_status_invalid_parameter;
  }
  switch (input_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_softmax), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(xnn_node_type_softmax, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_softmax, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  status = xnn_subgraph_check_all_dims_match(xnn_node_type_softmax, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_softmax), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->type = xnn_node_type_softmax;
  node->compute_type = xnn_compute_type_fp32;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_softmax_operator;
  node->reshape = reshape_softmax_operator;
  node->setup = setup_softmax_operator;
  return xnn_status_success;
}
| 6,315
| 29.512077
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/space-to-depth-2d.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Creates the space-to-depth operator object for a validated subgraph node.
//
// Dispatches by element width: x16 for FP16, x32 for FP32, and x8 for both
// QS8 and QU8 (the transform is a pure data movement, so only element size
// matters). Both input and output must be NHWC. On success, the batch and
// spatial dimensions of input and output are cached in opdata for reshape.
//
// NOTE(review): the "/* output channels */" label on the first argument looks
// stale — the value passed is the *input* channel count (dim[3] of the input);
// confirm against the xnn_create_space_to_depth_nhwc_* API.
static enum xnn_status create_space_to_depth_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  assert(node->num_outputs == 1);
  const uint32_t output_id = node->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  const size_t input_channel_dim = values[input_id].shape.dim[3];
  const size_t output_channel_dim = values[output_id].shape.dim[3];
  enum xnn_status status;
  assert(values[input_id].layout == xnn_layout_type_nhwc);
  assert(values[output_id].layout == xnn_layout_type_nhwc);
  switch (node->compute_type) {
    case xnn_compute_type_fp16:
      status = xnn_create_space_to_depth_nhwc_x16(
        input_channel_dim /* output channels */,
        input_channel_dim /* input stride */,
        output_channel_dim /* output stride */,
        node->params.space_to_depth_2d.block_size,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    case xnn_compute_type_fp32:
      status = xnn_create_space_to_depth_nhwc_x32(
        input_channel_dim /* output channels */,
        input_channel_dim /* input stride */,
        output_channel_dim /* output stride */,
        node->params.space_to_depth_2d.block_size,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    case xnn_compute_type_qs8:
    case xnn_compute_type_qu8:
      // QS8 and QU8 share the byte-wide kernel: no arithmetic is performed.
      status = xnn_create_space_to_depth_nhwc_x8(
        input_channel_dim /* output channels */,
        input_channel_dim /* input stride */,
        output_channel_dim /* output stride */,
        node->params.space_to_depth_2d.block_size,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    default:
      XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    opdata->batch_size = values[input_id].shape.dim[0];
    opdata->input_height = values[input_id].shape.dim[1];
    opdata->input_width = values[input_id].shape.dim[2];
    opdata->output_height = values[output_id].shape.dim[1];
    opdata->output_width = values[output_id].shape.dim[2];
  }
  return status;
}
// Re-partitions the space-to-depth operator's work for the cached batch and
// input spatial dimensions. The derived output dimensions are not queried
// (the *_out parameters are NULL).
static enum xnn_status reshape_space_to_depth_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  xnn_operator_t op = opdata->operator_objects[0];
  switch (op->type) {
    case xnn_operator_type_space_to_depth_nhwc_x8:
      return xnn_reshape_space_to_depth_nhwc_x8(
        op, opdata->batch_size, opdata->input_height, opdata->input_width,
        /*output_height_out=*/NULL, /*output_width_out=*/NULL, /*output_channels_out=*/NULL,
        threadpool);
    case xnn_operator_type_space_to_depth_nhwc_x16:
      return xnn_reshape_space_to_depth_nhwc_x16(
        op, opdata->batch_size, opdata->input_height, opdata->input_width,
        /*output_height_out=*/NULL, /*output_width_out=*/NULL, /*output_channels_out=*/NULL,
        threadpool);
    case xnn_operator_type_space_to_depth_nhwc_x32:
      return xnn_reshape_space_to_depth_nhwc_x32(
        op, opdata->batch_size, opdata->input_height, opdata->input_width,
        /*output_height_out=*/NULL, /*output_width_out=*/NULL, /*output_channels_out=*/NULL,
        threadpool);
    default:
      XNN_UNREACHABLE;
  }
}
// Binds the runtime input/output data pointers to the space-to-depth operator.
static enum xnn_status setup_space_to_depth_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t in_id = opdata->inputs[0];
  assert(in_id != XNN_INVALID_VALUE_ID);
  assert(in_id < num_values);
  const uint32_t out_id = opdata->outputs[0];
  assert(out_id != XNN_INVALID_VALUE_ID);
  assert(out_id < num_values);
  const void* src = values[in_id].data;
  assert(src != NULL);
  void* dst = values[out_id].data;
  assert(dst != NULL);
  xnn_operator_t op = opdata->operator_objects[0];
  switch (op->type) {
    case xnn_operator_type_space_to_depth_nhwc_x8:
      return xnn_setup_space_to_depth_nhwc_x8(op, src, dst);
    case xnn_operator_type_space_to_depth_nhwc_x16:
      return xnn_setup_space_to_depth_nhwc_x16(op, src, dst);
    case xnn_operator_type_space_to_depth_nhwc_x32:
      return xnn_setup_space_to_depth_nhwc_x32(op, src, dst);
    default:
      XNN_UNREACHABLE;
  }
}
// Defines a 2D space-to-depth node in the subgraph.
// Validates the input/output values (FP32, QS8 or QU8; matching datatypes and
// quantization parameters) and the block size, then appends the node.
// Returns xnn_status_success, or an error status describing the failed check.
enum xnn_status xnn_define_space_to_depth_2d(
  xnn_subgraph_t subgraph,
  uint32_t block_size,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_space_to_depth_2d);
  if (status != xnn_status_success) {
    return status;
  }
  status = xnn_subgraph_check_input_node_id(xnn_node_type_space_to_depth_2d, input_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_space_to_depth_2d, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  switch (input_value->datatype) {
    case xnn_datatype_fp32:
    case xnn_datatype_qint8:
    case xnn_datatype_quint8:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_space_to_depth_2d), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(xnn_node_type_space_to_depth_2d, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_space_to_depth_2d, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  enum xnn_compute_type compute_type = xnn_compute_type_invalid;
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      compute_type = xnn_compute_type_fp32;
      break;
    case xnn_datatype_qint8:
      compute_type = xnn_compute_type_qs8;
      break;
    case xnn_datatype_quint8:
      compute_type = xnn_compute_type_qu8;
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_space_to_depth_2d), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  assert(compute_type != xnn_compute_type_invalid);
  status = xnn_subgraph_check_datatype_matches(
    xnn_node_type_space_to_depth_2d, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  // BUG FIX: this check previously passed xnn_node_type_clamp, so a
  // quantization-parameter mismatch was logged against the wrong operator.
  status = xnn_subgraph_check_quantization_parameter_matches(
    xnn_node_type_space_to_depth_2d, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  if (block_size < 2) {
    xnn_log_error(
      "failed to define %s operator with block size #%" PRIu32 ": block_size must be >= 2",
      xnn_node_type_to_string(xnn_node_type_space_to_depth_2d), block_size);
    return xnn_status_invalid_parameter;
  }
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->type = xnn_node_type_space_to_depth_2d;
  node->compute_type = compute_type;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->params.space_to_depth_2d.block_size = block_size;
  node->flags = flags;
  node->create = create_space_to_depth_operator;
  node->reshape = reshape_space_to_depth_operator;
  node->setup = setup_space_to_depth_operator;
  return xnn_status_success;
}
| 9,144
| 31.895683
| 111
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/square-root.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Creates the runtime square-root operator for a subgraph node.
// Square root is an elementwise unary op: the innermost dimension is treated
// as channels, and all outer dimensions are folded into the batch size.
static enum xnn_status create_square_root_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  assert(node->num_outputs == 1);

  const size_t input_rank = values[input_id].shape.num_dims;
  // A rank-0 (scalar) input is treated as a single channel.
  const size_t channels = input_rank == 0 ? 1 : values[input_id].shape.dim[input_rank - 1];

  enum xnn_status status;
  if (node->compute_type == xnn_compute_type_fp32) {
    // channels, input stride, and output stride all equal the innermost dim.
    status = xnn_create_square_root_nc_f32(
      channels, channels, channels,
      node->flags,
      &opdata->operator_objects[0]);
  } else if (node->compute_type == xnn_compute_type_fp16) {
    status = xnn_create_square_root_nc_f16(
      channels, channels, channels,
      node->flags,
      &opdata->operator_objects[0]);
  } else {
    XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    opdata->batch_size = xnn_shape_multiply_non_channel_dims(&values[input_id].shape);
  }
  return status;
}
// Reshapes the square-root operator for the batch size captured at create time.
// Dispatches on the concrete operator type chosen during creation.
static enum xnn_status reshape_square_root_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  if (opdata->operator_objects[0]->type == xnn_operator_type_square_root_nc_f32) {
    return xnn_reshape_square_root_nc_f32(
      opdata->operator_objects[0], opdata->batch_size, threadpool);
  }
  if (opdata->operator_objects[0]->type == xnn_operator_type_square_root_nc_f16) {
    return xnn_reshape_square_root_nc_f16(
      opdata->operator_objects[0], opdata->batch_size, threadpool);
  }
  XNN_UNREACHABLE;
}
// Binds the input and output tensor buffers to the square-root operator.
// Called after reshape, once the runtime has resolved the value data pointers.
static enum xnn_status setup_square_root_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  // Data pointers must already be resolved by the runtime at this point.
  const struct xnn_value* input_value = values + input_id;
  const void* input_data = input_value->data;
  assert(input_data != NULL);
  const struct xnn_value* output_value = values + output_id;
  void* output_data = output_value->data;
  assert(output_data != NULL);
  // Dispatch on the concrete operator type chosen at create time.
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_square_root_nc_f32:
      return xnn_setup_square_root_nc_f32(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_square_root_nc_f16:
      return xnn_setup_square_root_nc_f16(
        opdata->operator_objects[0],
        input_data,
        output_data);
    default:
      XNN_UNREACHABLE;
  }
}
// Defines a square-root node in the subgraph.
// Validates that input and output are dense FP32 tensors with matching
// dimensions, then appends the node. Returns xnn_status_success or an error
// status describing the failed check.
enum xnn_status xnn_define_square_root(
  xnn_subgraph_t subgraph,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_square_root)) != xnn_status_success) {
    return status;
  }
  // Consistency fix: use the shared input-node-ID validator (as the sibling
  // xnn_define_square does) instead of an open-coded bounds check; the helper
  // performs the same validation and error logging.
  status = xnn_subgraph_check_input_node_id(xnn_node_type_square_root, input_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_square_root, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  switch (input_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_square_root), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(xnn_node_type_square_root, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_square_root, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  // Elementwise op: input and output shapes must match exactly.
  status = xnn_subgraph_check_all_dims_match(xnn_node_type_square_root, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_square_root), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->type = xnn_node_type_square_root;
  node->compute_type = xnn_compute_type_fp32;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_square_root_operator;
  node->reshape = reshape_square_root_operator;
  node->setup = setup_square_root_operator;
  return xnn_status_success;
}
| 6,241
| 30.054726
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/square.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Creates the runtime square operator for a subgraph node.
// Square is an elementwise unary op: the innermost dimension is treated as
// channels, and all outer dimensions are folded into the batch size.
static enum xnn_status create_square_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  assert(node->num_outputs == 1);

  const size_t input_rank = values[input_id].shape.num_dims;
  // A rank-0 (scalar) input is treated as a single channel.
  const size_t channels = input_rank == 0 ? 1 : values[input_id].shape.dim[input_rank - 1];

  enum xnn_status status;
  if (node->compute_type == xnn_compute_type_fp32) {
    // channels, input stride, and output stride all equal the innermost dim.
    status = xnn_create_square_nc_f32(
      channels, channels, channels,
      node->flags,
      &opdata->operator_objects[0]);
  } else if (node->compute_type == xnn_compute_type_fp16) {
    status = xnn_create_square_nc_f16(
      channels, channels, channels,
      node->flags,
      &opdata->operator_objects[0]);
  } else {
    XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    opdata->batch_size = xnn_shape_multiply_non_channel_dims(&values[input_id].shape);
  }
  return status;
}
// Reshapes the square operator for the batch size captured at create time.
// Dispatches on the concrete operator type chosen during creation.
static enum xnn_status reshape_square_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  if (opdata->operator_objects[0]->type == xnn_operator_type_square_nc_f32) {
    return xnn_reshape_square_nc_f32(
      opdata->operator_objects[0], opdata->batch_size, threadpool);
  }
  if (opdata->operator_objects[0]->type == xnn_operator_type_square_nc_f16) {
    return xnn_reshape_square_nc_f16(
      opdata->operator_objects[0], opdata->batch_size, threadpool);
  }
  XNN_UNREACHABLE;
}
// Binds the input and output tensor buffers to the square operator.
// Called after reshape, once the runtime has resolved the value data pointers.
static enum xnn_status setup_square_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  // Data pointers must already be resolved by the runtime at this point.
  const struct xnn_value* input_value = values + input_id;
  const void* input_data = input_value->data;
  assert(input_data != NULL);
  const struct xnn_value* output_value = values + output_id;
  void* output_data = output_value->data;
  assert(output_data != NULL);
  // Dispatch on the concrete operator type chosen at create time.
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_square_nc_f32:
      return xnn_setup_square_nc_f32(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_square_nc_f16:
      return xnn_setup_square_nc_f16(
        opdata->operator_objects[0],
        input_data,
        output_data);
    default:
      XNN_UNREACHABLE;
  }
}
// Defines a square (x*x) node in the subgraph.
// Validates that input and output are dense FP32 tensors with matching
// dimensions, then appends the node. Returns xnn_status_success or an error
// status describing the failed check.
enum xnn_status xnn_define_square(
  xnn_subgraph_t subgraph,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_square)) != xnn_status_success) {
    return status;
  }
  if ((status = xnn_subgraph_check_input_node_id(xnn_node_type_square, input_id, subgraph->num_values)) !=
      xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_square, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  // Only FP32 inputs may be defined directly; other datatypes are rejected.
  switch (input_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_square), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(xnn_node_type_square, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_square, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  // Elementwise op: input and output shapes must match exactly.
  status = xnn_subgraph_check_all_dims_match(xnn_node_type_square, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_square), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->type = xnn_node_type_square;
  node->compute_type = xnn_compute_type_fp32;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_square_operator;
  node->reshape = reshape_square_operator;
  node->setup = setup_square_operator;
  return xnn_status_success;
}
| 6,016
| 29.236181
| 115
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/squared-difference.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Creates the runtime squared-difference operator for a subgraph node and
// captures both (possibly broadcast) input shapes for reshape time.
// For NCHW layout, dims are permuted so the channel dimension moves from last
// to position 1; for NHWC the shapes are copied verbatim.
static enum xnn_status create_squared_difference_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 2);
  const uint32_t input1_id = node->inputs[0];
  assert(input1_id != XNN_INVALID_VALUE_ID);
  assert(input1_id < num_values);
  const uint32_t input2_id = node->inputs[1];
  assert(input2_id != XNN_INVALID_VALUE_ID);
  assert(input2_id < num_values);
  assert(node->num_outputs == 1);
  const uint32_t output_id = node->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  enum xnn_status status;
  switch (node->compute_type) {
    case xnn_compute_type_fp16:
      status = xnn_create_squared_difference_nd_f16(
        node->flags,
        &opdata->operator_objects[0]);
      break;
    case xnn_compute_type_fp32:
      status = xnn_create_squared_difference_nd_f32(
        node->flags,
        &opdata->operator_objects[0]);
      break;
    default:
      XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    opdata->shape1.num_dims = values[input1_id].shape.num_dims;
    opdata->shape2.num_dims = values[input2_id].shape.num_dims;
    if (values[output_id].layout == xnn_layout_type_nchw) {
      assert(values[input1_id].layout == xnn_layout_type_nchw);
      assert(values[input2_id].layout == xnn_layout_type_nchw);
      // Move channels (last dim) to position 1; remaining spatial dims follow.
      opdata->shape1.dim[0] = values[input1_id].shape.dim[0];
      opdata->shape1.dim[1] = values[input1_id].shape.dim[values[input1_id].shape.num_dims - 1];
      if (values[input1_id].shape.num_dims > 2) {
        memcpy(&opdata->shape1.dim[2], &values[input1_id].shape.dim[1], (values[input1_id].shape.num_dims - 2) * sizeof(size_t));
      }
      opdata->shape2.dim[0] = values[input2_id].shape.dim[0];
      opdata->shape2.dim[1] = values[input2_id].shape.dim[values[input2_id].shape.num_dims - 1];
      // BUG FIX: this guard previously tested input1's rank while copying
      // input2's dimensions; with broadcasting the two ranks can differ,
      // causing a missing or out-of-bounds copy of shape2's spatial dims.
      if (values[input2_id].shape.num_dims > 2) {
        memcpy(&opdata->shape2.dim[2], &values[input2_id].shape.dim[1], (values[input2_id].shape.num_dims - 2) * sizeof(size_t));
      }
    } else {
      assert(values[output_id].layout == xnn_layout_type_nhwc);
      assert(values[input1_id].layout == xnn_layout_type_nhwc);
      assert(values[input2_id].layout == xnn_layout_type_nhwc);
      memcpy(opdata->shape1.dim, values[input1_id].shape.dim, values[input1_id].shape.num_dims * sizeof(size_t));
      memcpy(opdata->shape2.dim, values[input2_id].shape.dim, values[input2_id].shape.num_dims * sizeof(size_t));
    }
  }
  return status;
}
// Reshapes the squared-difference operator using the two input shapes
// captured at operator-creation time.
static enum xnn_status reshape_squared_difference_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  if (opdata->operator_objects[0]->type == xnn_operator_type_squared_difference_nd_f16) {
    return xnn_reshape_squared_difference_nd_f16(
      opdata->operator_objects[0],
      opdata->shape1.num_dims, opdata->shape1.dim,
      opdata->shape2.num_dims, opdata->shape2.dim,
      threadpool);
  }
  if (opdata->operator_objects[0]->type == xnn_operator_type_squared_difference_nd_f32) {
    return xnn_reshape_squared_difference_nd_f32(
      opdata->operator_objects[0],
      opdata->shape1.num_dims, opdata->shape1.dim,
      opdata->shape2.num_dims, opdata->shape2.dim,
      threadpool);
  }
  XNN_UNREACHABLE;
}
// Binds both input buffers and the output buffer to the squared-difference
// operator. Called after reshape, once data pointers are resolved.
static enum xnn_status setup_squared_difference_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input1_id = opdata->inputs[0];
  assert(input1_id != XNN_INVALID_VALUE_ID);
  assert(input1_id < num_values);
  const uint32_t input2_id = opdata->inputs[1];
  assert(input2_id != XNN_INVALID_VALUE_ID);
  assert(input2_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  // Data pointers must already be resolved by the runtime at this point.
  const struct xnn_value* input1_value = values + input1_id;
  const void* input1_data = input1_value->data;
  assert(input1_data != NULL);
  const struct xnn_value* input2_value = values + input2_id;
  const void* input2_data = input2_value->data;
  assert(input2_data != NULL);
  const struct xnn_value* output_value = values + output_id;
  void* output_data = output_value->data;
  assert(output_data != NULL);
  // Dispatch on the concrete operator type chosen at create time.
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_squared_difference_nd_f16:
      return xnn_setup_squared_difference_nd_f16(
        opdata->operator_objects[0],
        input1_data, input2_data, output_data);
    case xnn_operator_type_squared_difference_nd_f32:
      return xnn_setup_squared_difference_nd_f32(
        opdata->operator_objects[0],
        input1_data, input2_data, output_data);
    default:
      XNN_UNREACHABLE;
  }
}
// Defines a squared-difference ((a-b)^2) node in the subgraph.
// Validates that both inputs and the output are dense FP32 tensors, then
// appends the node. Broadcasting between the two input shapes is handled at
// operator-creation/reshape time, so no shape check is performed here.
// Returns xnn_status_success or an error status describing the failed check.
enum xnn_status xnn_define_squared_difference(
  xnn_subgraph_t subgraph,
  uint32_t input1_id,
  uint32_t input2_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_squared_difference)) != xnn_status_success) {
    return status;
  }
  if ((status = xnn_subgraph_check_nth_input_node_id(
        xnn_node_type_squared_difference, input1_id, subgraph->num_values, 1)) != xnn_status_success) {
    return status;
  }
  const struct xnn_value* input1_value = &subgraph->values[input1_id];
  status = xnn_subgraph_check_nth_input_type_dense(xnn_node_type_squared_difference, input1_id, input1_value, 1);
  if (status != xnn_status_success) {
    return status;
  }
  // Only FP32 inputs may be defined directly; other datatypes are rejected.
  switch (input1_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with first input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_squared_difference), input1_id,
        xnn_datatype_to_string(input1_value->datatype), input1_value->datatype);
      return xnn_status_invalid_parameter;
  }
  if ((status = xnn_subgraph_check_nth_input_node_id(
        xnn_node_type_squared_difference, input2_id, subgraph->num_values, 2)) != xnn_status_success) {
    return status;
  }
  const struct xnn_value* input2_value = &subgraph->values[input2_id];
  status = xnn_subgraph_check_nth_input_type_dense(xnn_node_type_squared_difference, input2_id, input2_value, 2);
  if (status != xnn_status_success) {
    return status;
  }
  switch (input2_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with second input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_squared_difference), input2_id,
        xnn_datatype_to_string(input2_value->datatype), input2_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(xnn_node_type_squared_difference, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_squared_difference, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_squared_difference), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->type = xnn_node_type_squared_difference;
  node->compute_type = xnn_compute_type_fp32;
  node->num_inputs = 2;
  node->inputs[0] = input1_id;
  node->inputs[1] = input2_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_squared_difference_operator;
  node->reshape = reshape_squared_difference_operator;
  node->setup = setup_squared_difference_operator;
  return xnn_status_success;
}
| 8,824
| 33.881423
| 129
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/static-constant-pad.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <xnnpack/operator.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <fp16/fp16.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/requantization.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
static enum xnn_status create_constant_pad_operator(
const struct xnn_node* node,
const struct xnn_value* values,
size_t num_values,
struct xnn_operator_data* opdata,
struct xnn_code_cache* code_cache,
struct xnn_weights_cache* weights_cache)
{
assert(node->num_inputs == 1);
const uint32_t input_id = node->inputs[0];
assert(input_id != XNN_INVALID_VALUE_ID);
assert(input_id < num_values);
assert(node->num_outputs == 1);
enum xnn_status status;
switch (node->compute_type) {
case xnn_compute_type_fp16:
status = xnn_create_constant_pad_nd_x16(
&node->params.static_pad.padding_value,
node->flags,
&opdata->operator_objects[0]);
break;
case xnn_compute_type_fp32:
status = xnn_create_constant_pad_nd_x32(
&node->params.static_pad.padding_value,
node->flags,
&opdata->operator_objects[0]);
break;
case xnn_compute_type_qs8:
case xnn_compute_type_qu8:
status = xnn_create_constant_pad_nd_x8(
&node->params.static_pad.padding_value,
node->flags,
&opdata->operator_objects[0]);
break;
default:
XNN_UNREACHABLE;
}
if (status == xnn_status_success) {
opdata->shape1 = values[input_id].shape;
memcpy(opdata->pre_paddings, node->params.static_pad.pre_paddings, sizeof(size_t) * XNN_MAX_TENSOR_DIMS);
memcpy(opdata->post_paddings, node->params.static_pad.post_paddings, sizeof(size_t) * XNN_MAX_TENSOR_DIMS);
}
return status;
}
// Reshapes the constant-pad operator using the input shape and paddings
// captured at create time. Dispatches on element width (x8/x16/x32).
// Cleanup: removed the unreachable `break;` statements that followed each
// `return` in the switch (dead code).
static enum xnn_status reshape_constant_pad_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_constant_pad_nd_x8:
      return xnn_reshape_constant_pad_nd_x8(
        opdata->operator_objects[0],
        opdata->shape1.num_dims,
        opdata->shape1.dim,
        opdata->pre_paddings,
        opdata->post_paddings,
        threadpool);
    case xnn_operator_type_constant_pad_nd_x16:
      return xnn_reshape_constant_pad_nd_x16(
        opdata->operator_objects[0],
        opdata->shape1.num_dims,
        opdata->shape1.dim,
        opdata->pre_paddings,
        opdata->post_paddings,
        threadpool);
    case xnn_operator_type_constant_pad_nd_x32:
      return xnn_reshape_constant_pad_nd_x32(
        opdata->operator_objects[0],
        opdata->shape1.num_dims,
        opdata->shape1.dim,
        opdata->pre_paddings,
        opdata->post_paddings,
        threadpool);
    default:
      XNN_UNREACHABLE;
  }
}
// Binds the input and output tensor buffers to the constant-pad operator.
// Called after reshape, once the runtime has resolved the value data pointers.
// Cleanup: removed the unreachable `break;` statements that followed each
// `return` in the switch (dead code).
static enum xnn_status setup_constant_pad_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  // Data pointers must already be resolved by the runtime at this point.
  const struct xnn_value* input_value = values + input_id;
  const void* input_data = input_value->data;
  assert(input_data != NULL);
  const struct xnn_value* output_value = values + output_id;
  void* output_data = output_value->data;
  assert(output_data != NULL);
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_constant_pad_nd_x8:
      return xnn_setup_constant_pad_nd_x8(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_constant_pad_nd_x16:
      return xnn_setup_constant_pad_nd_x16(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_constant_pad_nd_x32:
      return xnn_setup_constant_pad_nd_x32(
        opdata->operator_objects[0],
        input_data,
        output_data);
    default:
      XNN_UNREACHABLE;
  }
}
// Defines a static constant-pad node in the subgraph.
// pre_paddings/post_paddings give per-dimension padding counts (one entry per
// input dimension). padding_value is given as a float and is converted to the
// output datatype's representation (bit-cast for FP32, quantized for
// QS8/QU8). Returns xnn_status_success or an error status.
enum xnn_status xnn_define_static_constant_pad(
  xnn_subgraph_t subgraph,
  const size_t* pre_paddings,
  const size_t* post_paddings,
  float padding_value,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_static_constant_pad)) != xnn_status_success) {
    return status;
  }
  // Consistency fix: use the shared input-node-ID validator instead of an
  // open-coded bounds check; the helper performs the same validation and
  // error logging as the removed manual check.
  status = xnn_subgraph_check_input_node_id(xnn_node_type_static_constant_pad, input_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_static_constant_pad, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  switch (input_value->datatype) {
    case xnn_datatype_fp32:
    case xnn_datatype_qint8:
    case xnn_datatype_quint8:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_static_constant_pad), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(xnn_node_type_static_constant_pad, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_static_constant_pad, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  enum xnn_compute_type compute_type = xnn_compute_type_invalid;
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      compute_type = xnn_compute_type_fp32;
      break;
    case xnn_datatype_qint8:
      compute_type = xnn_compute_type_qs8;
      break;
    case xnn_datatype_quint8:
      compute_type = xnn_compute_type_qu8;
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_static_constant_pad), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_datatype_matches(
    xnn_node_type_static_constant_pad, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  status = xnn_subgraph_check_quantization_parameter_matches(
    xnn_node_type_static_constant_pad, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  // Copy one padding pair per input dimension.
  const size_t num_dims = subgraph->values[input_id].shape.num_dims;
  memcpy(&node->params.static_pad.pre_paddings, pre_paddings, num_dims * sizeof(size_t));
  memcpy(&node->params.static_pad.post_paddings, post_paddings, num_dims * sizeof(size_t));
  // Convert the float padding value into the output datatype's bit pattern.
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      node->params.static_pad.padding_value = float_as_uint32(padding_value);
      break;
    case xnn_datatype_qint8:
    {
      const float output_scale = output_value->quantization.scale;
      const int32_t output_zero_point = output_value->quantization.zero_point;
      node->params.static_pad.padding_value = xnn_qs8_quantize(padding_value, output_scale, output_zero_point);
      break;
    }
    case xnn_datatype_quint8:
    {
      const float output_scale = output_value->quantization.scale;
      const int32_t output_zero_point = output_value->quantization.zero_point;
      node->params.static_pad.padding_value = xnn_qu8_quantize(padding_value, output_scale, output_zero_point);
      break;
    }
    default:
      XNN_UNREACHABLE;
  }
  node->type = xnn_node_type_static_constant_pad;
  node->compute_type = compute_type;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_constant_pad_operator;
  node->reshape = reshape_constant_pad_operator;
  node->setup = setup_constant_pad_operator;
  return xnn_status_success;
}
| 8,975
| 30.94306
| 115
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/static-mean.c
|
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Creates the runtime mean-reduction operator for a subgraph node and stashes
// the reduction axes; the input shape itself is read at reshape time.
static enum xnn_status create_mean_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 1);
  assert(node->num_outputs == 1);

  enum xnn_status status;
  if (node->compute_type == xnn_compute_type_fp16) {
    status = xnn_create_mean_nd_f16(
      node->flags,
      &opdata->operator_objects[0]);
  } else if (node->compute_type == xnn_compute_type_fp32) {
    status = xnn_create_mean_nd_f32(
      node->flags,
      &opdata->operator_objects[0]);
  } else {
    XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    // Copy the reduction axes out of the node params for use at reshape time.
    const size_t axis_count = node->params.reduce.num_reduction_axes;
    opdata->num_reduction_axes = axis_count;
    memcpy(opdata->reduction_axes, node->params.reduce.reduction_axes, axis_count * sizeof(size_t));
  }
  return status;
}
// Reshapes the mean operator for the current input shape, using the
// reduction axes captured at create time.
static enum xnn_status reshape_mean_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  // The input shape is read fresh here (not captured at create time) so that
  // reshape reflects the runtime's current dimensions.
  const struct xnn_value* input_value = values + input_id;
  assert(input_value->type == xnn_value_type_dense_tensor);
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_mean_nd_f16:
      return xnn_reshape_mean_nd_f16(
        opdata->operator_objects[0],
        opdata->num_reduction_axes,
        opdata->reduction_axes,
        input_value->shape.num_dims,
        input_value->shape.dim,
        threadpool);
    case xnn_operator_type_mean_nd_f32:
      return xnn_reshape_mean_nd_f32(
        opdata->operator_objects[0],
        opdata->num_reduction_axes,
        opdata->reduction_axes,
        input_value->shape.num_dims,
        input_value->shape.dim,
        threadpool);
    default:
      XNN_UNREACHABLE;
  }
}
// Binds the input and output tensor buffers to the mean operator.
// Called after reshape, once the runtime has resolved the value data pointers.
static enum xnn_status setup_mean_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);

  // Resolve the actual tensor buffers for this invocation.
  const struct xnn_value* input = &values[input_id];
  assert(input->type == xnn_value_type_dense_tensor);
  const void* input_ptr = input->data;
  assert(input_ptr != NULL);
  const struct xnn_value* output = &values[output_id];
  assert(output->type == xnn_value_type_dense_tensor);
  void* output_ptr = output->data;
  assert(output_ptr != NULL);

  if (opdata->operator_objects[0]->type == xnn_operator_type_mean_nd_f16) {
    return xnn_setup_mean_nd_f16(
      opdata->operator_objects[0], input_ptr, output_ptr);
  }
  if (opdata->operator_objects[0]->type == xnn_operator_type_mean_nd_f32) {
    return xnn_setup_mean_nd_f32(
      opdata->operator_objects[0], input_ptr, output_ptr);
  }
  XNN_UNREACHABLE;
}
// Defines a static mean (reduce-mean) node in the subgraph.
// Validates the input and output tensors and the reduction axes — which must
// be in-bounds and consecutive — before recording the node parameters.
enum xnn_status xnn_define_static_mean(
  xnn_subgraph_t subgraph,
  size_t num_reduction_axes,
  const size_t* reduction_axes,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_static_mean)) != xnn_status_success) {
    return status;
  }
  status = xnn_subgraph_check_nth_input_node_id(xnn_node_type_static_mean, input_id, subgraph->num_values, 1);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_nth_input_type_dense(xnn_node_type_static_mean, input_id, input_value, 1);
  if (status != xnn_status_success) {
    return status;
  }
  // Only FP32 inputs are accepted at definition time; the compute type below
  // is fixed to FP32 accordingly.
  switch (input_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with the first input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_static_mean), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(xnn_node_type_static_mean, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_static_mean, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_static_mean), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  if (num_reduction_axes > input_value->shape.num_dims) {
    xnn_log_error(
      "failed to define %s operator with %zu reduction axes: "
      "the number of reduction axes must not exceed the number of input dimensions %zu",
      xnn_node_type_to_string(xnn_node_type_static_mean), num_reduction_axes, input_value->shape.num_dims);
    return xnn_status_invalid_parameter;
  }
  if (num_reduction_axes == 0) {
    xnn_log_error(
      "failed to define %s operator with %zu reduction axes: the number of reduction axes must be non-zero",
      xnn_node_type_to_string(xnn_node_type_static_mean), num_reduction_axes);
    return xnn_status_invalid_parameter;
  }
  size_t last_axis = 0;
  for (size_t i = 0; i < num_reduction_axes; i++) {
    const size_t axis = reduction_axes[i];
    // Axes are 0-based indices into the input shape, so axis == num_dims is
    // already out of bounds. (Bug fix: the previous check used '>' and let
    // axis == num_dims slip through.)
    if (axis >= input_value->shape.num_dims) {
      xnn_log_error(
        "failed to define %s operator with #%zu reduction axis of %zu: the index is out of bounds for a %zuD input shape",
        xnn_node_type_to_string(xnn_node_type_static_mean), i, axis, input_value->shape.num_dims);
      return xnn_status_invalid_parameter;
    }
    // All reduction axes after the first must be consecutive.
    if (i != 0) {
      if (axis != last_axis + 1) {
        xnn_log_error(
          "failed to define %s operator with #%zu reduction axis of %zu: the axis is disjoint with #%zu reduction axis of %zu",
          xnn_node_type_to_string(xnn_node_type_static_mean), i, axis, i - 1, last_axis);
        return xnn_status_invalid_parameter;
      }
    }
    last_axis = axis;
  }
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->type = xnn_node_type_static_mean;
  node->compute_type = xnn_compute_type_fp32;
  node->params.reduce.num_reduction_axes = num_reduction_axes;
  memcpy(node->params.reduce.reduction_axes, reduction_axes, num_reduction_axes * sizeof(size_t));
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_mean_operator;
  node->reshape = reshape_mean_operator;
  node->setup = setup_mean_operator;
  return xnn_status_success;
}
| 7,835
| 31.786611
| 127
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/static-reshape.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
static enum xnn_status create_copy_operator(
const struct xnn_node* node,
const struct xnn_value* values,
size_t num_values,
struct xnn_operator_data* opdata,
struct xnn_code_cache* code_cache,
struct xnn_weights_cache* weights_cache)
{
assert(node->num_inputs == 1);
const uint32_t input_id = node->inputs[0];
assert(input_id != XNN_INVALID_VALUE_ID);
assert(input_id < num_values);
assert(node->num_outputs == 1);
enum xnn_status status;
switch (node->compute_type) {
case xnn_compute_type_fp16:
status = xnn_create_copy_nc_x16(
1 /* channels */, 1 /* input stride */, 1 /* output stride */,
node->flags,
&opdata->operator_objects[0]);
break;
case xnn_compute_type_fp32:
status = xnn_create_copy_nc_x32(
1 /* channels */, 1 /* input stride */, 1 /* output stride */,
node->flags,
&opdata->operator_objects[0]);
break;
case xnn_compute_type_qs8:
case xnn_compute_type_qu8:
status = xnn_create_copy_nc_x8(
1 /* channels */, 1 /* input stride */, 1 /* output stride */,
node->flags,
&opdata->operator_objects[0]);
break;
default:
XNN_UNREACHABLE;
}
if (status == xnn_status_success) {
opdata->batch_size = xnn_shape_multiply_all_dims(&values[input_id].shape);
}
return status;
}
// Reshapes the copy operator backing a static reshape node for the batch size
// captured at operator-creation time.
// Note: removed the unreachable `break;` statements that followed each
// `return` in the original switch — dead code.
static enum xnn_status reshape_copy_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_copy_nc_x8:
      return xnn_reshape_copy_nc_x8(
        opdata->operator_objects[0],
        opdata->batch_size,
        threadpool);
    case xnn_operator_type_copy_nc_x16:
      return xnn_reshape_copy_nc_x16(
        opdata->operator_objects[0],
        opdata->batch_size,
        threadpool);
    case xnn_operator_type_copy_nc_x32:
      return xnn_reshape_copy_nc_x32(
        opdata->operator_objects[0],
        opdata->batch_size,
        threadpool);
    default:
      XNN_UNREACHABLE;
  }
}
// Binds the runtime input and output tensor buffers to the copy operator
// backing a static reshape node.
// Note: removed the unreachable `break;` statements that followed each
// `return` in the original switch — dead code.
static enum xnn_status setup_copy_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  const struct xnn_value* input_value = values + input_id;
  const void* input_data = input_value->data;
  assert(input_data != NULL);
  const struct xnn_value* output_value = values + output_id;
  void* output_data = output_value->data;
  assert(output_data != NULL);
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_copy_nc_x8:
      return xnn_setup_copy_nc_x8(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_copy_nc_x16:
      return xnn_setup_copy_nc_x16(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_copy_nc_x32:
      return xnn_setup_copy_nc_x32(
        opdata->operator_objects[0],
        input_data,
        output_data);
    default:
      XNN_UNREACHABLE;
  }
}
// Defines a static reshape node in the subgraph.
// Validates the input/output tensors (datatype, dense type, matching element
// counts and quantization parameters) and records the target shape; the node
// is later lowered to a plain element copy (see create_copy_operator).
enum xnn_status xnn_define_static_reshape(
  xnn_subgraph_t subgraph,
  size_t num_dims,
  const size_t* new_shape,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_static_reshape)) != xnn_status_success) {
    return status;
  }
  status = xnn_subgraph_check_input_node_id(xnn_node_type_static_reshape, input_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_static_reshape, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  // Supported input datatypes: FP32 and both 8-bit quantized flavors.
  switch (input_value->datatype) {
    case xnn_datatype_fp32:
    case xnn_datatype_qint8:
    case xnn_datatype_quint8:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_static_reshape), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(xnn_node_type_static_reshape, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_static_reshape, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  // A reshape cannot change the number of elements, only their grouping.
  const size_t num_input_elements = xnn_shape_multiply_all_dims(&input_value->shape);
  const size_t num_output_elements = xnn_shape_multiply_all_dims(&output_value->shape);
  if (num_input_elements != num_output_elements) {
    xnn_log_error(
      "failed to define %s operator with input ID #%" PRIu32 " and output ID #%" PRIu32
      ": number of input elements, %zu, does not match number of output elements %zu",
      xnn_node_type_to_string(xnn_node_type_static_reshape), input_id, output_id, num_input_elements,
      num_output_elements);
    return xnn_status_invalid_parameter;
  }
  // The compute type is derived from the output datatype; input and output
  // datatypes are verified to match below.
  enum xnn_compute_type compute_type = xnn_compute_type_invalid;
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      compute_type = xnn_compute_type_fp32;
      break;
    case xnn_datatype_qint8:
      compute_type = xnn_compute_type_qs8;
      break;
    case xnn_datatype_quint8:
      compute_type = xnn_compute_type_qu8;
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_static_reshape), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_datatype_matches(xnn_node_type_static_reshape, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  // Quantized reshape is only valid when input and output share scale/zero-point.
  status = xnn_subgraph_check_quantization_parameter_matches(
      xnn_node_type_static_reshape, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  if (num_dims > XNN_MAX_TENSOR_DIMS) {
    xnn_log_error(
      "failed to define %s operator with %zu-dimensional output shape: at most %zu dimensions are supported",
      xnn_node_type_to_string(xnn_node_type_static_reshape), num_dims, (size_t) XNN_MAX_TENSOR_DIMS);
    return xnn_status_unsupported_parameter;
  }
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->params.static_reshape.new_shape.num_dims = num_dims;
  memcpy(&node->params.static_reshape.new_shape.dim, new_shape, num_dims * sizeof(size_t));
  node->type = xnn_node_type_static_reshape;
  node->compute_type = compute_type;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_copy_operator;
  node->reshape = reshape_copy_operator;
  node->setup = setup_copy_operator;
  return xnn_status_success;
}
| 8,154
| 30.365385
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/static-resize-bilinear-2d.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Creates the backing resize-bilinear-2d operator for a static resize node.
// Dispatches to an NCHW or NHWC operator variant based on the input value's
// layout, and caches the batch/spatial dimensions for the later reshape step.
static enum xnn_status create_resize_bilinear_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  assert(node->num_outputs == 1);
  const uint32_t output_id = node->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  // Resize preserves the channel dimension (dim 3 of the 4D shape here);
  // channels are also used as the per-pixel stride for both input and output.
  const size_t channel_dim = values[input_id].shape.dim[3];
  assert(channel_dim == values[node->outputs[0]].shape.dim[3]);
  enum xnn_status status;
  if (values[input_id].layout == xnn_layout_type_nchw) {
    // NCHW variants exist only for floating-point compute types.
    switch (node->compute_type) {
      case xnn_compute_type_fp16:
        status = xnn_create_resize_bilinear2d_nchw_f16(
          channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
          node->flags,
          &opdata->operator_objects[0]);
        break;
      case xnn_compute_type_fp32:
        status = xnn_create_resize_bilinear2d_nchw_f32(
          channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
          node->flags,
          &opdata->operator_objects[0]);
        break;
      default:
        XNN_UNREACHABLE;
    }
  } else {
    assert(values[input_id].layout == xnn_layout_type_nhwc);
    assert(values[output_id].layout == xnn_layout_type_nhwc);
    switch (node->compute_type) {
      case xnn_compute_type_fp16:
        status = xnn_create_resize_bilinear2d_nhwc_f16(
          channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
          node->flags,
          &opdata->operator_objects[0]);
        break;
      case xnn_compute_type_fp32:
        status = xnn_create_resize_bilinear2d_nhwc_f32(
          channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
          node->flags,
          &opdata->operator_objects[0]);
        break;
      case xnn_compute_type_qs8:
        status = xnn_create_resize_bilinear2d_nhwc_s8(
          channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
          node->flags,
          &opdata->operator_objects[0]);
        break;
      case xnn_compute_type_qu8:
        status = xnn_create_resize_bilinear2d_nhwc_u8(
          channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
          node->flags,
          &opdata->operator_objects[0]);
        break;
      default:
        XNN_UNREACHABLE;
    }
  }
  if (status == xnn_status_success) {
    // Shapes are NHWC-ordered here (N, H, W, C); cached for reshape_* below.
    opdata->batch_size = values[input_id].shape.dim[0];
    opdata->input_height = values[input_id].shape.dim[1];
    opdata->input_width = values[input_id].shape.dim[2];
    opdata->output_height = values[output_id].shape.dim[1];
    opdata->output_width = values[output_id].shape.dim[2];
  }
  return status;
}
// Reshapes the resize-bilinear operator for the batch and spatial dimensions
// captured at operator-creation time.
// Note: removed the unreachable `break;` statements that followed each
// `return` in the original switch — dead code.
static enum xnn_status reshape_resize_bilinear_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_resize_bilinear_nchw_f16:
      return xnn_reshape_resize_bilinear2d_nchw_f16(
        opdata->operator_objects[0],
        opdata->batch_size,
        opdata->input_height,
        opdata->input_width,
        opdata->output_height,
        opdata->output_width,
        threadpool);
    case xnn_operator_type_resize_bilinear_nchw_f32:
      return xnn_reshape_resize_bilinear2d_nchw_f32(
        opdata->operator_objects[0],
        opdata->batch_size,
        opdata->input_height,
        opdata->input_width,
        opdata->output_height,
        opdata->output_width,
        threadpool);
    case xnn_operator_type_resize_bilinear_nhwc_f16:
      return xnn_reshape_resize_bilinear2d_nhwc_f16(
        opdata->operator_objects[0],
        opdata->batch_size,
        opdata->input_height,
        opdata->input_width,
        opdata->output_height,
        opdata->output_width,
        threadpool);
    case xnn_operator_type_resize_bilinear_nhwc_f32:
      return xnn_reshape_resize_bilinear2d_nhwc_f32(
        opdata->operator_objects[0],
        opdata->batch_size,
        opdata->input_height,
        opdata->input_width,
        opdata->output_height,
        opdata->output_width,
        threadpool);
    case xnn_operator_type_resize_bilinear_nhwc_s8:
      return xnn_reshape_resize_bilinear2d_nhwc_s8(
        opdata->operator_objects[0],
        opdata->batch_size,
        opdata->input_height,
        opdata->input_width,
        opdata->output_height,
        opdata->output_width,
        threadpool);
    case xnn_operator_type_resize_bilinear_nhwc_u8:
      return xnn_reshape_resize_bilinear2d_nhwc_u8(
        opdata->operator_objects[0],
        opdata->batch_size,
        opdata->input_height,
        opdata->input_width,
        opdata->output_height,
        opdata->output_width,
        threadpool);
    default:
      XNN_UNREACHABLE;
  }
}
// Binds the runtime input and output tensor buffers to the resize-bilinear
// operator.
// Note: removed the unreachable `break;` statements that followed each
// `return` in the original switch — dead code.
static enum xnn_status setup_resize_bilinear_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  const struct xnn_value* input_value = values + input_id;
  const void* input_data = input_value->data;
  assert(input_data != NULL);
  const struct xnn_value* output_value = values + output_id;
  void* output_data = output_value->data;
  assert(output_data != NULL);
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_resize_bilinear_nchw_f16:
      return xnn_setup_resize_bilinear2d_nchw_f16(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_resize_bilinear_nchw_f32:
      return xnn_setup_resize_bilinear2d_nchw_f32(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_resize_bilinear_nhwc_f16:
      return xnn_setup_resize_bilinear2d_nhwc_f16(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_resize_bilinear_nhwc_f32:
      return xnn_setup_resize_bilinear2d_nhwc_f32(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_resize_bilinear_nhwc_s8:
      return xnn_setup_resize_bilinear2d_nhwc_s8(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_resize_bilinear_nhwc_u8:
      return xnn_setup_resize_bilinear2d_nhwc_u8(
        opdata->operator_objects[0],
        input_data,
        output_data);
    default:
      XNN_UNREACHABLE;
  }
}
// Defines a static resize-bilinear-2d node in the subgraph.
// Validates the target size, flag combinations, and input/output tensors
// before recording the node parameters.
enum xnn_status xnn_define_static_resize_bilinear_2d(
  xnn_subgraph_t subgraph,
  size_t new_height,
  size_t new_width,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_static_resize_bilinear_2d)) != xnn_status_success) {
    return status;
  }
  if (new_width == 0 || new_height == 0) {
    xnn_log_error(
      "failed to define %s operator with %zux%zu output: output dimensions must be non-zero",
      xnn_node_type_to_string(xnn_node_type_static_resize_bilinear_2d), new_width, new_height);
    return xnn_status_invalid_parameter;
  }
  // Output dimensions must stay below 2**24 (= 16777216) per the error
  // message contract below.
  if (max(new_width, new_height) >= 16777216) {
    xnn_log_error(
      "failed to define %s operator with %zux%zu output: output dimensions must be below 2**24",
      xnn_node_type_to_string(xnn_node_type_static_resize_bilinear_2d), new_width, new_height);
    return xnn_status_unsupported_parameter;
  }
  // Only these two flags are meaningful for this operator, and they are
  // mutually exclusive (checked next).
  const uint32_t supported_flags = XNN_FLAG_TENSORFLOW_LEGACY_MODE | XNN_FLAG_ALIGN_CORNERS;
  const uint32_t invalid_flags = flags & ~supported_flags;
  if (invalid_flags != 0) {
    xnn_log_error(
      "failed to define %s operator with 0x%08" PRIx32 " flags: invalid flags 0x%08" PRIx32,
      xnn_node_type_to_string(xnn_node_type_static_resize_bilinear_2d), flags, invalid_flags);
    return xnn_status_invalid_parameter;
  }
  const uint32_t exclusive_flags = XNN_FLAG_TENSORFLOW_LEGACY_MODE | XNN_FLAG_ALIGN_CORNERS;
  if ((flags & exclusive_flags) == exclusive_flags) {
    xnn_log_error(
      "failed to define %s operator with both XNN_FLAG_TENSORFLOW_LEGACY_MODE and XNN_FLAG_ALIGN_CORNERS flags: "
      "the two flags are mutually exclusive",
      xnn_node_type_to_string(xnn_node_type_static_resize_bilinear_2d));
    return xnn_status_invalid_parameter;
  }
  if ((status = xnn_subgraph_check_input_node_id(xnn_node_type_static_resize_bilinear_2d, input_id, subgraph->num_values)) !=
      xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_static_resize_bilinear_2d, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  // Supported input datatypes: FP32 and both 8-bit quantized flavors.
  switch (input_value->datatype) {
    case xnn_datatype_fp32:
    case xnn_datatype_qint8:
    case xnn_datatype_quint8:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_static_resize_bilinear_2d), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(xnn_node_type_static_resize_bilinear_2d, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_static_resize_bilinear_2d, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  // The compute type is derived from the output datatype.
  enum xnn_compute_type compute_type = xnn_compute_type_invalid;
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      compute_type = xnn_compute_type_fp32;
      break;
    case xnn_datatype_qint8:
      compute_type = xnn_compute_type_qs8;
      break;
    case xnn_datatype_quint8:
      compute_type = xnn_compute_type_qu8;
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_static_resize_bilinear_2d), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  // Quantized resize requires matching scale/zero-point on input and output.
  status = xnn_subgraph_check_quantization_parameter_matches(
      xnn_node_type_static_resize_bilinear_2d, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->params.static_resize.new_height = new_height;
  node->params.static_resize.new_width = new_width;
  node->type = xnn_node_type_static_resize_bilinear_2d;
  node->compute_type = compute_type;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_resize_bilinear_operator;
  node->reshape = reshape_resize_bilinear_operator;
  node->setup = setup_resize_bilinear_operator;
  return xnn_status_success;
}
| 12,413
| 33.198347
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/static-slice.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
static enum xnn_status create_slice_operator(
const struct xnn_node* node,
const struct xnn_value* values,
size_t num_values,
struct xnn_operator_data* opdata,
struct xnn_code_cache* code_cache,
struct xnn_weights_cache* weights_cache)
{
assert(node->num_inputs == 1);
const uint32_t input_id = node->inputs[0];
assert(input_id != XNN_INVALID_VALUE_ID);
assert(input_id < num_values);
assert(node->num_outputs == 1);
enum xnn_status status;
switch (node->compute_type) {
case xnn_compute_type_fp16:
status = xnn_create_slice_nd_x16(/*flags=*/0, &opdata->operator_objects[0]);
break;
case xnn_compute_type_fp32:
status = xnn_create_slice_nd_x32(/*flags=*/0, &opdata->operator_objects[0]);
break;
case xnn_compute_type_qs8:
case xnn_compute_type_qu8:
status = xnn_create_slice_nd_x8(/*flags=*/0, &opdata->operator_objects[0]);
break;
default:
XNN_UNREACHABLE;
}
if (status == xnn_status_success) {
memcpy(opdata->offsets, node->params.slice.offsets, sizeof(opdata->offsets));
memcpy(opdata->sizes, node->params.slice.sizes, sizeof(opdata->sizes));
opdata->shape1 = values[input_id].shape;
}
return status;
}
// Reshapes the slice-ND operator using the input shape and slice parameters
// cached at operator-creation time.
// Note: removed the unreachable `break;` statements that followed each
// `return` in the original switch — dead code.
static enum xnn_status reshape_slice_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const size_t num_dims = opdata->shape1.num_dims;
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_slice_nd_x8:
      return xnn_reshape_slice_nd_x8(
        opdata->operator_objects[0], num_dims,
        opdata->shape1.dim, opdata->offsets, opdata->sizes,
        threadpool);
    case xnn_operator_type_slice_nd_x16:
      return xnn_reshape_slice_nd_x16(
        opdata->operator_objects[0], num_dims,
        opdata->shape1.dim, opdata->offsets, opdata->sizes,
        threadpool);
    case xnn_operator_type_slice_nd_x32:
      return xnn_reshape_slice_nd_x32(
        opdata->operator_objects[0], num_dims,
        opdata->shape1.dim, opdata->offsets, opdata->sizes,
        threadpool);
    default:
      XNN_UNREACHABLE;
  }
}
// Binds the runtime input and output tensor buffers to the slice-ND operator.
// Note: removed the unreachable `break;` statements that followed each
// `return` in the original switch — dead code.
static enum xnn_status setup_slice_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);
  const struct xnn_value* input_value = values + input_id;
  const void* input_data = input_value->data;
  assert(input_data != NULL);
  const struct xnn_value* output_value = values + output_id;
  void* output_data = output_value->data;
  assert(output_data != NULL);
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_slice_nd_x8:
      return xnn_setup_slice_nd_x8(
        opdata->operator_objects[0],
        input_data, output_data);
    case xnn_operator_type_slice_nd_x16:
      return xnn_setup_slice_nd_x16(
        opdata->operator_objects[0],
        input_data, output_data);
    case xnn_operator_type_slice_nd_x32:
      return xnn_setup_slice_nd_x32(
        opdata->operator_objects[0],
        input_data, output_data);
    default:
      XNN_UNREACHABLE;
  }
}
// Defines a static slice node in the subgraph.
// Validates input/output IDs, datatypes, dimension counts, and that each
// requested window (offsets[i] .. offsets[i]+sizes[i]) fits within the input
// shape and matches the output shape, then records the node parameters.
//
// Bug fixes versus the previous revision:
//  * a num_dims/input-rank mismatch was logged but not rejected (fell through);
//  * two diagnostic messages printed their %zu arguments in the wrong order /
//    from the wrong tensor.
enum xnn_status xnn_define_static_slice(
  xnn_subgraph_t subgraph,
  size_t num_dims,
  const size_t* offsets,
  const size_t* sizes,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_static_slice);
  if (status != xnn_status_success) {
    return status;
  }
  status = xnn_subgraph_check_input_node_id(xnn_node_type_static_slice, input_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_static_slice, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  if (num_dims == 0) {
    xnn_log_error(
      "failed to create %s operator with %zu dimensions: number of dimensions must be non-zero",
      xnn_node_type_to_string(xnn_node_type_static_slice), num_dims);
    return xnn_status_invalid_parameter;
  }
  if (num_dims > XNN_MAX_TENSOR_DIMS) {
    xnn_log_error(
      "failed to create %s operator with %zu dimensions: number of dimensions must not exceed %d",
      xnn_node_type_to_string(xnn_node_type_static_slice), num_dims, XNN_MAX_TENSOR_DIMS);
    return xnn_status_invalid_parameter;
  }
  if (num_dims != input_value->shape.num_dims) {
    xnn_log_error(
      "failed to define %s operator with input ID #%" PRIu32
      ": number of dimensions %zu must match number of dimensions of input value %zu",
      xnn_node_type_to_string(xnn_node_type_static_slice), input_id, num_dims, input_value->shape.num_dims);
    // Fix: previously fell through after logging; a rank mismatch must fail.
    return xnn_status_invalid_parameter;
  }
  // Supported input datatypes: FP32 and both 8-bit quantized flavors.
  switch (input_value->datatype) {
    case xnn_datatype_fp32:
    case xnn_datatype_qint8:
    case xnn_datatype_quint8:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_static_slice), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_output_node_id(xnn_node_type_static_slice, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_static_slice, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  if (input_value->shape.num_dims != output_value->shape.num_dims) {
    xnn_log_error(
      "failed to define %s operator with input ID #%" PRIu32 " and output ID #%" PRIu32
      ": number of dimensions of input, %zu, does not match number of dimensions of output %zu",
      xnn_node_type_to_string(xnn_node_type_static_slice), input_id, output_id, input_value->shape.num_dims,
      output_value->shape.num_dims);
    return xnn_status_invalid_parameter;
  }
  // Per-dimension validation: offset in bounds, size matches output, and the
  // whole window offset + size fits in the input.
  for (size_t i = 0; i < num_dims; i++) {
    if (offsets[i] >= input_value->shape.dim[i]) {
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 " and output ID #%" PRIu32
        ": offset %zu exceeds input size %zu in dimension %zu",
        xnn_node_type_to_string(xnn_node_type_static_slice), input_id, output_id, offsets[i], input_value->shape.dim[i], i);
      return xnn_status_invalid_parameter;
    }
    if (sizes[i] != output_value->shape.dim[i]) {
      // Fix: arguments were previously swapped relative to the message text.
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 " and output ID #%" PRIu32
        ": size %zu does not match output size %zu in dimension %zu",
        xnn_node_type_to_string(xnn_node_type_static_slice), input_id, output_id, sizes[i], output_value->shape.dim[i], i);
      return xnn_status_invalid_parameter;
    }
    if (offsets[i] + sizes[i] > input_value->shape.dim[i]) {
      // Fix: the final argument previously printed the output dimension while
      // the message refers to the input dimension size.
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 " and output ID #%" PRIu32
        ": size of dimension slice %zu, %zu + %zu = %zu, exceeds input dimension size %zu",
        xnn_node_type_to_string(xnn_node_type_static_slice), input_id, output_id, i, offsets[i], sizes[i],
        offsets[i] + sizes[i], input_value->shape.dim[i]);
      return xnn_status_invalid_parameter;
    }
  }
  // The compute type is derived from the output datatype; input/output
  // datatypes are verified to match below.
  enum xnn_compute_type compute_type = xnn_compute_type_invalid;
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      compute_type = xnn_compute_type_fp32;
      break;
    case xnn_datatype_qint8:
      compute_type = xnn_compute_type_qs8;
      break;
    case xnn_datatype_quint8:
      compute_type = xnn_compute_type_qu8;
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_static_slice), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status =
    xnn_subgraph_check_datatype_matches(xnn_node_type_static_slice, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  // Quantized slice requires matching scale/zero-point on input and output.
  status = xnn_subgraph_check_quantization_parameter_matches(
      xnn_node_type_static_slice, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->type = xnn_node_type_static_slice;
  node->compute_type = compute_type;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->params.slice.num_dims = num_dims;
  memcpy(node->params.slice.offsets, offsets, num_dims * sizeof(size_t));
  memcpy(node->params.slice.sizes, sizes, num_dims * sizeof(size_t));
  node->create = create_slice_operator;
  node->reshape = reshape_slice_operator;
  node->setup = setup_slice_operator;
  return xnn_status_success;
}
| 9,955
| 33.811189
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/static-transpose.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
static enum xnn_status create_transpose_operator(
const struct xnn_node* node,
const struct xnn_value* values,
size_t num_values,
struct xnn_operator_data* opdata,
struct xnn_code_cache* code_cache,
struct xnn_weights_cache* weights_cache)
{
assert(node->num_inputs == 1);
const uint32_t input_id = node->inputs[0];
assert(input_id != XNN_INVALID_VALUE_ID);
assert(input_id < num_values);
assert(node->num_outputs == 1);
enum xnn_status status;
switch (node->compute_type) {
case xnn_compute_type_fp32:
status = xnn_create_transpose_nd_x32(node->flags, &opdata->operator_objects[0]);
break;
case xnn_compute_type_fp16:
status = xnn_create_transpose_nd_x16(node->flags, &opdata->operator_objects[0]);
break;
case xnn_compute_type_qs8:
case xnn_compute_type_qu8:
status = xnn_create_transpose_nd_x8(node->flags, &opdata->operator_objects[0]);
break;
default:
XNN_UNREACHABLE;
}
if (status == xnn_status_success) {
opdata->shape1.num_dims = node->params.transpose.num_dims;
opdata->shape2.num_dims = node->params.transpose.num_dims;
memcpy(opdata->shape1.dim, values[input_id].shape.dim, opdata->shape1.num_dims * sizeof(size_t));
memcpy(opdata->shape2.dim, node->params.transpose.perm, opdata->shape2.num_dims * sizeof(size_t));
}
return status;
}
static enum xnn_status reshape_transpose_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  // shape1 carries the input shape and shape2 carries the permutation,
  // both recorded by create_transpose_operator.
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_transpose_nd_x8:
      return xnn_reshape_transpose_nd_x8(
        opdata->operator_objects[0], opdata->shape1.num_dims,
        opdata->shape1.dim, opdata->shape2.dim, threadpool);
    case xnn_operator_type_transpose_nd_x16:
      return xnn_reshape_transpose_nd_x16(
        opdata->operator_objects[0], opdata->shape1.num_dims,
        opdata->shape1.dim, opdata->shape2.dim, threadpool);
    case xnn_operator_type_transpose_nd_x32:
      return xnn_reshape_transpose_nd_x32(
        opdata->operator_objects[0], opdata->shape1.num_dims,
        opdata->shape1.dim, opdata->shape2.dim, threadpool);
    default:
      XNN_UNREACHABLE;
  }
}
static enum xnn_status setup_transpose_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);

  const void* source = values[input_id].data;
  assert(source != NULL);
  void* destination = values[output_id].data;
  assert(destination != NULL);

  // Bind the data pointers to whichever element-width kernel was created.
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_transpose_nd_x8:
      return xnn_setup_transpose_nd_x8(opdata->operator_objects[0], source, destination);
    case xnn_operator_type_transpose_nd_x16:
      return xnn_setup_transpose_nd_x16(opdata->operator_objects[0], source, destination);
    case xnn_operator_type_transpose_nd_x32:
      return xnn_setup_transpose_nd_x32(opdata->operator_objects[0], source, destination);
    default:
      XNN_UNREACHABLE;
  }
}
enum xnn_status xnn_define_static_transpose(
  xnn_subgraph_t subgraph,
  size_t num_dims,
  const size_t* perm,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_static_transpose);
  if (status != xnn_status_success) {
    return status;
  }

  // perm must be a permutation of [0, num_dims): non-empty, bounded, no duplicates.
  if (num_dims == 0) {
    xnn_log_error(
      "failed to create %s operator with %zu num_dims: num_dims must be non-zero",
      xnn_node_type_to_string(xnn_node_type_static_transpose), num_dims);
    return xnn_status_invalid_parameter;
  }
  if (num_dims > XNN_MAX_TENSOR_DIMS) {
    xnn_log_error(
      "failed to create %s operator with %zu num_dims: num_dims must be <= %d",
      xnn_node_type_to_string(xnn_node_type_static_transpose), num_dims, XNN_MAX_TENSOR_DIMS);
    return xnn_status_invalid_parameter;
  }
  for (size_t i = 0; i < num_dims; ++i) {
    if (perm[i] >= num_dims) {
      xnn_log_error(
        "failed to create %s operator with %zu perm and %zu num_dims: 0 <= perm < num_dims",
        xnn_node_type_to_string(xnn_node_type_static_transpose), perm[i], num_dims);
      return xnn_status_invalid_parameter;
    }
  }
  for (size_t i = 0; i + 1 < num_dims; ++i) {
    for (size_t j = i + 1; j < num_dims; ++j) {
      if (perm[i] == perm[j]) {
        xnn_log_error(
          "failed to create %s operator with duplicate entries in perm",
          xnn_node_type_to_string(xnn_node_type_static_transpose));
        return xnn_status_invalid_parameter;
      }
    }
  }

  status = xnn_subgraph_check_input_node_id(xnn_node_type_static_transpose, input_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_static_transpose, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }

  status = xnn_subgraph_check_output_node_id(xnn_node_type_static_transpose, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_static_transpose, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }

  // Derive the compute type from the output datatype, then confirm the input
  // datatype is supported and matches the output.
  enum xnn_compute_type node_compute_type = xnn_compute_type_invalid;
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      node_compute_type = xnn_compute_type_fp32;
      break;
    case xnn_datatype_qint8:
      node_compute_type = xnn_compute_type_qs8;
      break;
    case xnn_datatype_quint8:
      node_compute_type = xnn_compute_type_qu8;
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_static_transpose), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  switch (input_value->datatype) {
    case xnn_datatype_fp32:
    case xnn_datatype_qint8:
    case xnn_datatype_quint8:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_static_transpose), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_datatype_matches(
    xnn_node_type_static_transpose, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }

  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->type = xnn_node_type_static_transpose;
  node->compute_type = node_compute_type;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->params.transpose.num_dims = num_dims;
  memcpy(node->params.transpose.perm, perm, num_dims * sizeof(size_t));
  node->create = create_transpose_operator;
  node->reshape = reshape_transpose_operator;
  node->setup = setup_transpose_operator;
  return xnn_status_success;
}
| 8,623
| 29.58156
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/subtract.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/requantization.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
static enum xnn_status create_subtract_operator(
const struct xnn_node* node,
const struct xnn_value* values,
size_t num_values,
struct xnn_operator_data* opdata,
struct xnn_code_cache* code_cache,
struct xnn_weights_cache* weights_cache)
{
assert(node->num_inputs == 2);
const uint32_t input1_id = node->inputs[0];
assert(input1_id != XNN_INVALID_VALUE_ID);
assert(input1_id < num_values);
const uint32_t input2_id = node->inputs[1];
assert(input2_id != XNN_INVALID_VALUE_ID);
assert(input2_id < num_values);
assert(node->num_outputs == 1);
const uint32_t output_id = node->outputs[0];
assert(output_id != XNN_INVALID_VALUE_ID);
assert(output_id < num_values);
enum xnn_status status;
switch (node->compute_type) {
case xnn_compute_type_fp16:
status = xnn_create_subtract_nd_f16(
node->activation.output_min,
node->activation.output_max,
node->flags,
&opdata->operator_objects[0]);
break;
case xnn_compute_type_fp32:
status = xnn_create_subtract_nd_f32(
node->activation.output_min,
node->activation.output_max,
node->flags,
&opdata->operator_objects[0]);
break;
case xnn_compute_type_qs8:
{
const float output_scale = values[output_id].quantization.scale;
const int32_t output_zero_point = values[output_id].quantization.zero_point;
const int8_t output_min = xnn_qs8_quantize(node->activation.output_min, output_scale, output_zero_point);
const int8_t output_max = xnn_qs8_quantize(node->activation.output_max, output_scale, output_zero_point);
status = xnn_create_subtract_nd_qs8(
(int8_t) values[input1_id].quantization.zero_point,
values[input1_id].quantization.scale,
(int8_t) values[input2_id].quantization.zero_point,
values[input2_id].quantization.scale,
(int8_t) output_zero_point,
output_scale, output_min, output_max, node->flags,
&opdata->operator_objects[0]);
break;
}
case xnn_compute_type_qu8:
{
const float output_scale = values[output_id].quantization.scale;
const int32_t output_zero_point = values[output_id].quantization.zero_point;
const uint8_t output_min = xnn_qu8_quantize(node->activation.output_min, output_scale, output_zero_point);
const uint8_t output_max = xnn_qu8_quantize(node->activation.output_max, output_scale, output_zero_point);
status = xnn_create_subtract_nd_qu8(
(uint8_t) values[input1_id].quantization.zero_point,
values[input1_id].quantization.scale,
(uint8_t) values[input2_id].quantization.zero_point,
values[input2_id].quantization.scale,
(uint8_t) output_zero_point,
output_scale, output_min, output_max, node->flags,
&opdata->operator_objects[0]);
break;
}
default:
XNN_UNREACHABLE;
}
if (status == xnn_status_success) {
opdata->shape1.num_dims = values[input1_id].shape.num_dims;
opdata->shape2.num_dims = values[input2_id].shape.num_dims;
if (values[output_id].layout == xnn_layout_type_nchw) {
assert(values[input1_id].layout == xnn_layout_type_nchw);
assert(values[input2_id].layout == xnn_layout_type_nchw);
opdata->shape1.dim[0] = values[input1_id].shape.dim[0];
opdata->shape1.dim[1] = values[input1_id].shape.dim[values[input1_id].shape.num_dims - 1];
if (values[input1_id].shape.num_dims > 2) {
memcpy(&opdata->shape1.dim[2], &values[input1_id].shape.dim[1], (values[input1_id].shape.num_dims - 2) * sizeof(size_t));
}
opdata->shape2.dim[0] = values[input2_id].shape.dim[0];
opdata->shape2.dim[1] = values[input2_id].shape.dim[values[input2_id].shape.num_dims - 1];
if (values[input1_id].shape.num_dims > 2) {
memcpy(&opdata->shape2.dim[2], &values[input2_id].shape.dim[1], (values[input2_id].shape.num_dims - 2) * sizeof(size_t));
}
} else {
assert(values[output_id].layout == xnn_layout_type_nhwc);
assert(values[input1_id].layout == xnn_layout_type_nhwc);
assert(values[input2_id].layout == xnn_layout_type_nhwc);
memcpy(opdata->shape1.dim, values[input1_id].shape.dim, values[input1_id].shape.num_dims * sizeof(size_t));
memcpy(opdata->shape2.dim, values[input2_id].shape.dim, values[input2_id].shape.num_dims * sizeof(size_t));
}
opdata->outputs[0] = output_id;
}
return status;
}
static enum xnn_status reshape_subtract_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  // Both broadcastable input shapes were captured at creation time.
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_subtract_nd_f16:
      return xnn_reshape_subtract_nd_f16(
        opdata->operator_objects[0],
        opdata->shape1.num_dims, opdata->shape1.dim,
        opdata->shape2.num_dims, opdata->shape2.dim,
        threadpool);
    case xnn_operator_type_subtract_nd_f32:
      return xnn_reshape_subtract_nd_f32(
        opdata->operator_objects[0],
        opdata->shape1.num_dims, opdata->shape1.dim,
        opdata->shape2.num_dims, opdata->shape2.dim,
        threadpool);
    case xnn_operator_type_subtract_nd_qs8:
      return xnn_reshape_subtract_nd_qs8(
        opdata->operator_objects[0],
        opdata->shape1.num_dims, opdata->shape1.dim,
        opdata->shape2.num_dims, opdata->shape2.dim,
        threadpool);
    case xnn_operator_type_subtract_nd_qu8:
      return xnn_reshape_subtract_nd_qu8(
        opdata->operator_objects[0],
        opdata->shape1.num_dims, opdata->shape1.dim,
        opdata->shape2.num_dims, opdata->shape2.dim,
        threadpool);
    default:
      XNN_UNREACHABLE;
  }
}
static enum xnn_status setup_subtract_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input1_id = opdata->inputs[0];
  assert(input1_id != XNN_INVALID_VALUE_ID);
  assert(input1_id < num_values);
  const uint32_t input2_id = opdata->inputs[1];
  assert(input2_id != XNN_INVALID_VALUE_ID);
  assert(input2_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);

  const void* minuend_data = values[input1_id].data;
  assert(minuend_data != NULL);
  const void* subtrahend_data = values[input2_id].data;
  assert(subtrahend_data != NULL);
  void* difference_data = values[output_id].data;
  assert(difference_data != NULL);

  // Attach the tensor data to the previously created subtract operator.
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_subtract_nd_f16:
      return xnn_setup_subtract_nd_f16(
        opdata->operator_objects[0], minuend_data, subtrahend_data, difference_data);
    case xnn_operator_type_subtract_nd_f32:
      return xnn_setup_subtract_nd_f32(
        opdata->operator_objects[0], minuend_data, subtrahend_data, difference_data);
    case xnn_operator_type_subtract_nd_qs8:
      return xnn_setup_subtract_nd_qs8(
        opdata->operator_objects[0], minuend_data, subtrahend_data, difference_data);
    case xnn_operator_type_subtract_nd_qu8:
      return xnn_setup_subtract_nd_qu8(
        opdata->operator_objects[0], minuend_data, subtrahend_data, difference_data);
    default:
      XNN_UNREACHABLE;
  }
}
enum xnn_status xnn_define_subtract(
  xnn_subgraph_t subgraph,
  float output_min,
  float output_max,
  uint32_t input1_id,
  uint32_t input2_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_subtract)) != xnn_status_success) {
    return status;
  }

  status = xnn_subgraph_check_output_min_max(xnn_node_type_subtract, output_min, output_max);
  if (status != xnn_status_success) {
    return status;
  }

  // Fix: the ordinal argument ("nth") passed to check_nth_input_node_id was
  // swapped between the two inputs (input1 passed 2, input2 passed 1), so any
  // error message would name the wrong input. Now consistent with the
  // check_nth_input_type_dense calls below (1 for input1, 2 for input2).
  if ((status = xnn_subgraph_check_nth_input_node_id(
        xnn_node_type_subtract, input1_id, subgraph->num_values, 1)) != xnn_status_success) {
    return status;
  }
  const struct xnn_value* input1_value = &subgraph->values[input1_id];
  status = xnn_subgraph_check_nth_input_type_dense(xnn_node_type_subtract, input1_id, input1_value, 1);
  if (status != xnn_status_success) {
    return status;
  }
  switch (input1_value->datatype) {
    case xnn_datatype_fp32:
    case xnn_datatype_qint8:
    case xnn_datatype_quint8:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with the first input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_subtract), input1_id,
        xnn_datatype_to_string(input1_value->datatype), input1_value->datatype);
      return xnn_status_invalid_parameter;
  }

  if ((status = xnn_subgraph_check_nth_input_node_id(
        xnn_node_type_subtract, input2_id, subgraph->num_values, 2)) != xnn_status_success) {
    return status;
  }
  const struct xnn_value* input2_value = &subgraph->values[input2_id];
  status = xnn_subgraph_check_nth_input_type_dense(xnn_node_type_subtract, input2_id, input2_value, 2);
  if (status != xnn_status_success) {
    return status;
  }
  switch (input2_value->datatype) {
    case xnn_datatype_fp32:
    case xnn_datatype_qint8:
    case xnn_datatype_quint8:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with the second input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_subtract), input2_id,
        xnn_datatype_to_string(input2_value->datatype), input2_value->datatype);
      return xnn_status_invalid_parameter;
  }

  status = xnn_subgraph_check_output_node_id(xnn_node_type_subtract, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_subtract, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }

  // The compute type follows the output datatype; both inputs must match it
  // (checked by xnn_subgraph_check_datatype_matches_two_inputs below).
  enum xnn_compute_type compute_type = xnn_compute_type_invalid;
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      compute_type = xnn_compute_type_fp32;
      break;
    case xnn_datatype_qint8:
      compute_type = xnn_compute_type_qs8;
      break;
    case xnn_datatype_quint8:
      compute_type = xnn_compute_type_qu8;
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_subtract), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  status = xnn_subgraph_check_datatype_matches_two_inputs(
    xnn_node_type_subtract, input1_id, input1_value, input2_id, input2_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }

  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->type = xnn_node_type_subtract;
  node->compute_type = compute_type;
  node->activation.output_min = output_min;
  node->activation.output_max = output_max;
  node->num_inputs = 2;
  node->inputs[0] = input1_id;
  node->inputs[1] = input2_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_subtract_operator;
  node->reshape = reshape_subtract_operator;
  node->setup = setup_subtract_operator;
  return xnn_status_success;
}
| 12,193
| 34.654971
| 129
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/tanh.c
|
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
static enum xnn_status create_tanh_operator(
const struct xnn_node* node,
const struct xnn_value* values,
size_t num_values,
struct xnn_operator_data* opdata,
struct xnn_code_cache* code_cache,
struct xnn_weights_cache* weights_cache)
{
assert(node->num_inputs == 1);
const uint32_t input_id = node->inputs[0];
assert(input_id != XNN_INVALID_VALUE_ID);
assert(input_id < num_values);
assert(node->num_outputs == 1);
const uint32_t output_id = node->outputs[0];
assert(output_id != XNN_INVALID_VALUE_ID);
assert(output_id < num_values);
const size_t num_input_dims = values[input_id].shape.num_dims;
const size_t channel_dim = num_input_dims == 0 ? 1 : values[input_id].shape.dim[num_input_dims - 1];
enum xnn_status status;
switch (node->compute_type) {
case xnn_compute_type_fp16:
status = xnn_create_tanh_nc_f16(
channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
node->flags,
&opdata->operator_objects[0]);
break;
case xnn_compute_type_fp32:
status = xnn_create_tanh_nc_f32(
channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
node->flags,
&opdata->operator_objects[0]);
break;
case xnn_compute_type_qs8:
{
status = xnn_create_tanh_nc_qs8(
channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
(int8_t) values[input_id].quantization.zero_point,
values[input_id].quantization.scale,
(int8_t) values[output_id].quantization.zero_point,
values[output_id].quantization.scale,
INT8_MIN, INT8_MAX,
node->flags,
&opdata->operator_objects[0]);
break;
}
case xnn_compute_type_qu8:
{
status = xnn_create_tanh_nc_qu8(
channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
(uint8_t) values[input_id].quantization.zero_point,
values[input_id].quantization.scale,
(uint8_t) values[output_id].quantization.zero_point,
values[output_id].quantization.scale,
0, UINT8_MAX,
node->flags,
&opdata->operator_objects[0]);
break;
}
default:
XNN_UNREACHABLE;
}
if (status == xnn_status_success) {
opdata->batch_size = xnn_shape_multiply_non_channel_dims(&values[input_id].shape);
}
return status;
}
static enum xnn_status reshape_tanh_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  // Dispatch on the concrete operator type created by create_tanh_operator.
  // (Removed an unreachable `break;` that followed the qu8 `return`.)
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_tanh_nc_f16:
      return xnn_reshape_tanh_nc_f16(
        opdata->operator_objects[0],
        opdata->batch_size,
        threadpool);
    case xnn_operator_type_tanh_nc_f32:
      return xnn_reshape_tanh_nc_f32(
        opdata->operator_objects[0],
        opdata->batch_size,
        threadpool);
    case xnn_operator_type_tanh_nc_qs8:
      return xnn_reshape_tanh_nc_qs8(
        opdata->operator_objects[0],
        opdata->batch_size,
        threadpool);
    case xnn_operator_type_tanh_nc_qu8:
      return xnn_reshape_tanh_nc_qu8(
        opdata->operator_objects[0],
        opdata->batch_size,
        threadpool);
    default:
      XNN_UNREACHABLE;
  }
}
static enum xnn_status setup_tanh_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);

  const struct xnn_value* input_value = values + input_id;
  const void* input_data = input_value->data;
  assert(input_data != NULL);
  const struct xnn_value* output_value = values + output_id;
  void* output_data = output_value->data;
  assert(output_data != NULL);

  // Bind tensor data pointers to the operator created earlier.
  // (Removed an unreachable `break;` that followed the qu8 `return`.)
  switch (opdata->operator_objects[0]->type) {
    case xnn_operator_type_tanh_nc_f16:
      return xnn_setup_tanh_nc_f16(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_tanh_nc_f32:
      return xnn_setup_tanh_nc_f32(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_tanh_nc_qs8:
      return xnn_setup_tanh_nc_qs8(
        opdata->operator_objects[0],
        input_data,
        output_data);
    case xnn_operator_type_tanh_nc_qu8:
      return xnn_setup_tanh_nc_qu8(
        opdata->operator_objects[0],
        input_data,
        output_data);
    default:
      XNN_UNREACHABLE;
  }
}
enum xnn_status xnn_define_tanh(
  xnn_subgraph_t subgraph,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_tanh)) != xnn_status_success) {
    return status;
  }

  if ((status = xnn_subgraph_check_input_node_id(xnn_node_type_tanh, input_id, subgraph->num_values)) !=
      xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_tanh, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }
  switch (input_value->datatype) {
    case xnn_datatype_fp32:
    case xnn_datatype_qint8:
    case xnn_datatype_quint8:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_tanh), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }

  status = xnn_subgraph_check_output_node_id(xnn_node_type_tanh, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_tanh, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  // tanh is elementwise, so input and output shapes must match exactly.
  status = xnn_subgraph_check_all_dims_match(xnn_node_type_tanh, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }

  enum xnn_compute_type compute_type = xnn_compute_type_invalid;
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      compute_type = xnn_compute_type_fp32;
      break;
    case xnn_datatype_qint8:
      compute_type = xnn_compute_type_qs8;
      break;
    case xnn_datatype_quint8:
      compute_type = xnn_compute_type_qu8;
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_tanh), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  // Fix: this check previously passed xnn_node_type_subtract (copy-paste from
  // subtract.c), which would attribute any datatype-mismatch error to the
  // wrong operator in the log.
  status = xnn_subgraph_check_datatype_matches(xnn_node_type_tanh, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }

  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->type = xnn_node_type_tanh;
  node->compute_type = compute_type;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_tanh_operator;
  node->reshape = reshape_tanh_operator;
  node->setup = setup_tanh_operator;
  return xnn_status_success;
}
| 8,250
| 30.135849
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/unpooling-2d.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
static enum xnn_status create_unpooling_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  struct xnn_code_cache* code_cache,
  struct xnn_weights_cache* weights_cache)
{
  assert(node->compute_type == xnn_compute_type_fp32);
  assert(node->num_inputs == 2);
  const uint32_t input_value_id = node->inputs[0];
  assert(input_value_id != XNN_INVALID_VALUE_ID);
  assert(input_value_id < num_values);
  assert(node->num_outputs == 1);

  // NHWC layout: dim[3] is the channel count; the value input, index input,
  // and output must all agree on it. Densely packed, so strides == channels.
  const size_t channels = values[input_value_id].shape.dim[3];
  assert(channels == values[node->inputs[1]].shape.dim[3]);
  assert(channels == values[node->outputs[0]].shape.dim[3]);

  const enum xnn_status status = xnn_create_unpooling2d_nhwc_x32(
    node->params.pooling_2d.padding_top,
    node->params.pooling_2d.padding_right,
    node->params.pooling_2d.padding_bottom,
    node->params.pooling_2d.padding_left,
    node->params.pooling_2d.pooling_height,
    node->params.pooling_2d.pooling_width,
    channels /* channels */, channels /* input stride */, channels /* output stride */,
    node->flags,
    &opdata->operator_objects[0]);
  if (status == xnn_status_success) {
    opdata->batch_size = values[input_value_id].shape.dim[0];
    opdata->input_height = values[input_value_id].shape.dim[1];
    opdata->input_width = values[input_value_id].shape.dim[2];
  }
  return status;
}
static enum xnn_status reshape_unpooling_operator(
  struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  // Only the x32 variant exists; output dimensions are not needed here.
  return xnn_reshape_unpooling2d_nhwc_x32(
    opdata->operator_objects[0],
    opdata->batch_size, opdata->input_height, opdata->input_width,
    /*output_height_out=*/NULL, /*output_width_out=*/NULL,
    threadpool);
}
static enum xnn_status setup_unpooling_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_value* values,
  size_t num_values,
  pthreadpool_t threadpool)
{
  const uint32_t input_value_id = opdata->inputs[0];
  assert(input_value_id != XNN_INVALID_VALUE_ID);
  assert(input_value_id < num_values);
  const uint32_t input_index_id = opdata->inputs[1];
  assert(input_index_id != XNN_INVALID_VALUE_ID);
  assert(input_index_id < num_values);
  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);

  // First input carries the pooled values, second the argmax indices.
  const void* pooled_values = values[input_value_id].data;
  assert(pooled_values != NULL);
  const void* pooling_indices = values[input_index_id].data;
  assert(pooling_indices != NULL);
  void* unpooled_output = values[output_id].data;
  assert(unpooled_output != NULL);

  return xnn_setup_unpooling2d_nhwc_x32(
    opdata->operator_objects[0],
    pooled_values,
    pooling_indices,
    unpooled_output);
}
enum xnn_status xnn_define_unpooling_2d(
  xnn_subgraph_t subgraph,
  uint32_t padding_top,
  uint32_t padding_right,
  uint32_t padding_bottom,
  uint32_t padding_left,
  uint32_t pooling_height,
  uint32_t pooling_width,
  uint32_t input_value_id,
  uint32_t input_index_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_unpooling_2d);
  if (status != xnn_status_success) {
    return status;
  }

  // The pooling window must contain more than one element: zero-sized windows
  // are invalid, and a 1x1 window would make unpooling an identity operation.
  const uint32_t kernel_elements = pooling_height * pooling_width;
  if (kernel_elements == 0) {
    xnn_log_error(
      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " pooling size: "
      "pooling size dimensions must be non-zero",
      xnn_node_type_to_string(xnn_node_type_unpooling_2d), pooling_width, pooling_height);
    return xnn_status_invalid_parameter;
  }
  if (kernel_elements == 1) {
    xnn_log_error(
      "failed to define %s operator with 1 pooling element: 1x1 pooling is meaningless",
      xnn_node_type_to_string(xnn_node_type_unpooling_2d));
    return xnn_status_invalid_parameter;
  }

  status = xnn_subgraph_check_input_node_id(xnn_node_type_unpooling_2d, input_value_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* input_value_value = &subgraph->values[input_value_id];
  if (input_value_value->type != xnn_value_type_dense_tensor) {
    xnn_log_error(
      "failed to define %s operator with input value ID #%" PRIu32 ": unsupported Value type %d (expected dense tensor)",
      xnn_node_type_to_string(xnn_node_type_unpooling_2d), input_value_id, input_value_value->type);
    return xnn_status_invalid_parameter;
  }
  // Only fp32 pooled values are supported.
  switch (input_value_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input value ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_unpooling_2d), input_value_id,
        xnn_datatype_to_string(input_value_value->datatype), input_value_value->datatype);
      return xnn_status_invalid_parameter;
  }

  // The index tensor is validated by hand because it is a second input with
  // its own error wording.
  if (input_index_id >= subgraph->num_values) {
    xnn_log_error(
      "failed to define %s operator with input index ID #%" PRIu32 ": invalid Value ID",
      xnn_node_type_to_string(xnn_node_type_unpooling_2d), input_index_id);
    return xnn_status_invalid_parameter;
  }
  const struct xnn_value* input_index_value = &subgraph->values[input_index_id];
  if (input_index_value->type != xnn_value_type_dense_tensor) {
    xnn_log_error(
      "failed to define %s operator with input index ID #%" PRIu32 ": unsupported Value type %d (expected dense tensor)",
      xnn_node_type_to_string(xnn_node_type_unpooling_2d), input_index_id, input_index_value->type);
    return xnn_status_invalid_parameter;
  }

  status = xnn_subgraph_check_output_node_id(xnn_node_type_unpooling_2d, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }
  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_unpooling_2d, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_unpooling_2d), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }

  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }
  node->type = xnn_node_type_unpooling_2d;
  node->compute_type = xnn_compute_type_fp32;
  node->params.pooling_2d.padding_top = padding_top;
  node->params.pooling_2d.padding_right = padding_right;
  node->params.pooling_2d.padding_bottom = padding_bottom;
  node->params.pooling_2d.padding_left = padding_left;
  node->params.pooling_2d.pooling_height = pooling_height;
  node->params.pooling_2d.pooling_width = pooling_width;
  node->num_inputs = 2;
  node->inputs[0] = input_value_id;
  node->inputs[1] = input_index_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;
  node->create = create_unpooling_operator;
  node->reshape = reshape_unpooling_operator;
  node->setup = setup_unpooling_operator;
  return xnn_status_success;
}
| 7,996
| 33.619048
| 121
|
c
|
XNNPACK
|
XNNPACK-master/src/subgraph/validation.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
// Verify that XNNPACK has been initialized before any node is defined.
// Logs an error and returns xnn_status_uninitialized otherwise.
enum xnn_status xnn_subgraph_check_xnnpack_initialized(enum xnn_node_type node_type)
{
  if (xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) {
    return xnn_status_success;
  }
  xnn_log_error("failed to define %s operator: XNNPACK is not initialized", xnn_node_type_to_string(node_type));
  return xnn_status_uninitialized;
}
// Validate that input_id indexes an existing Value in the subgraph.
enum xnn_status xnn_subgraph_check_input_node_id(enum xnn_node_type node_type, uint32_t input_id, size_t num_values)
{
  if (input_id < num_values) {
    return xnn_status_success;
  }
  xnn_log_error(
    "failed to define %s operator with input ID #%" PRIu32 ": invalid Value ID",
    xnn_node_type_to_string(node_type), input_id);
  return xnn_status_invalid_parameter;
}
// Validate that the nth input's Value ID indexes an existing Value.
// nth is used only to make the log message identify which input failed.
enum xnn_status xnn_subgraph_check_nth_input_node_id(
  enum xnn_node_type node_type,
  uint32_t input_id,
  size_t num_values,
  size_t nth)
{
  if (input_id < num_values) {
    return xnn_status_success;
  }
  xnn_log_error(
    "failed to define %s operator with the input %zu ID #%" PRIu32 ": invalid Value ID",
    xnn_node_type_to_string(node_type), nth, input_id);
  return xnn_status_invalid_parameter;
}
// Reject an input Value whose type is not a dense tensor.
enum xnn_status xnn_subgraph_check_input_type_dense(
  enum xnn_node_type node_type,
  uint32_t input_id,
  const struct xnn_value* input_value)
{
  if (input_value->type == xnn_value_type_dense_tensor) {
    return xnn_status_success;
  }
  xnn_log_error(
    "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value type %d (expected dense tensor)",
    xnn_node_type_to_string(node_type), input_id, input_value->type);
  return xnn_status_invalid_parameter;
}
// Reject the nth input Value when its type is not a dense tensor.
// nth only affects the wording of the log message.
enum xnn_status xnn_subgraph_check_nth_input_type_dense(
  enum xnn_node_type node_type,
  uint32_t input_id,
  const struct xnn_value* input_value,
  size_t nth)
{
  if (input_value->type == xnn_value_type_dense_tensor) {
    return xnn_status_success;
  }
  xnn_log_error(
    "failed to define %s operator with %zu input ID #%" PRIu32 ": unsupported Value type %d (expected dense tensor)",
    xnn_node_type_to_string(node_type), nth, input_id, input_value->type);
  return xnn_status_invalid_parameter;
}
// Validate that output_id indexes an existing Value in the subgraph.
enum xnn_status xnn_subgraph_check_output_node_id(enum xnn_node_type node_type, uint32_t output_id, size_t num_values)
{
  if (output_id < num_values) {
    return xnn_status_success;
  }
  xnn_log_error(
    "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
    xnn_node_type_to_string(node_type), output_id);
  return xnn_status_invalid_parameter;
}
// Reject an output Value whose type is not a dense tensor.
enum xnn_status xnn_subgraph_check_output_type_dense(
  enum xnn_node_type node_type,
  uint32_t output_id,
  const struct xnn_value* output_value)
{
  if (output_value->type == xnn_value_type_dense_tensor) {
    return xnn_status_success;
  }
  xnn_log_error(
    "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value type %d (expected dense tensor)",
    xnn_node_type_to_string(node_type), output_id, output_value->type);
  return xnn_status_invalid_parameter;
}
// Verify that the input and output Values carry the same datatype.
// Both datatypes must already be valid; invalid datatypes trip the asserts.
enum xnn_status xnn_subgraph_check_datatype_matches(
  enum xnn_node_type node_type,
  uint32_t input_id,
  const struct xnn_value* input_value,
  uint32_t output_id,
  const struct xnn_value* output_value)
{
  assert(input_value->datatype != xnn_datatype_invalid);
  assert(output_value->datatype != xnn_datatype_invalid);
  if (input_value->datatype != output_value->datatype) {
    xnn_log_error(
      "failed to define %s operator with input ID #%" PRIu32 " and output ID #%" PRIu32
      ": mismatching datatypes across the input (%s) and output (%s)",
      xnn_node_type_to_string(node_type), input_id, output_id,
      xnn_datatype_to_string(input_value->datatype),
      xnn_datatype_to_string(output_value->datatype));
    return xnn_status_invalid_parameter;
  }
  return xnn_status_success;
}
// Verify that both inputs and the output all carry the same datatype.
// All three datatypes must already be valid; invalid ones trip the asserts.
enum xnn_status xnn_subgraph_check_datatype_matches_two_inputs(
  enum xnn_node_type node_type,
  uint32_t input1_id,
  const struct xnn_value* input1_value,
  uint32_t input2_id,
  const struct xnn_value* input2_value,
  uint32_t output_id,
  const struct xnn_value* output_value)
{
  assert(input1_value->datatype != xnn_datatype_invalid);
  assert(input2_value->datatype != xnn_datatype_invalid);
  assert(output_value->datatype != xnn_datatype_invalid);
  // Any disagreement among the three datatypes is an error; comparing both
  // inputs against the output transitively covers input1 vs input2 as well.
  if (input1_value->datatype != input2_value->datatype ||
      input1_value->datatype != output_value->datatype)
  {
    xnn_log_error(
      "failed to define %s operator with input IDs #%" PRIu32 " and #%" PRIu32 " and output ID #%" PRIu32
      ": mismatching datatypes across the first input (%s), the second input (%s), and output (%s)",
      xnn_node_type_to_string(node_type), input1_id, input2_id, output_id,
      xnn_datatype_to_string(input1_value->datatype),
      xnn_datatype_to_string(input2_value->datatype),
      xnn_datatype_to_string(output_value->datatype));
    return xnn_status_invalid_parameter;
  }
  return xnn_status_success;
}
// Validate an operator's [output_min, output_max] clamping range:
// both bounds must be non-NaN and the lower bound strictly below the upper.
// Checks run in a fixed order so the log message names the first violation.
enum xnn_status xnn_subgraph_check_output_min_max(enum xnn_node_type node_type, float output_min, float output_max)
{
  if (isnan(output_min)) {
    xnn_log_error(
      "failed to define %s operator with NaN output lower bound: lower bound must be non-NaN",
      xnn_node_type_to_string(node_type));
    return xnn_status_invalid_parameter;
  }
  if (isnan(output_max)) {
    xnn_log_error(
      "failed to define %s operator with NaN output upper bound: upper bound must be non-NaN",
      xnn_node_type_to_string(node_type));
    return xnn_status_invalid_parameter;
  }
  // Equal bounds are rejected too: the range must be non-degenerate.
  if (output_min >= output_max) {
    xnn_log_error(
      "failed to define %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
      xnn_node_type_to_string(node_type), output_min, output_max);
    return xnn_status_invalid_parameter;
  }
  return xnn_status_success;
}
// For quantized (qint8/quint8) outputs, verify that input and output share
// identical quantization parameters (zero point and scale). Non-quantized
// outputs pass through unchecked.
enum xnn_status xnn_subgraph_check_quantization_parameter_matches(
  enum xnn_node_type node_type,
  uint32_t input_id,
  const struct xnn_value* input_value,
  uint32_t output_id,
  const struct xnn_value* output_value)
{
  if (output_value->datatype == xnn_datatype_qint8 || output_value->datatype == xnn_datatype_quint8) {
    if (input_value->quantization.zero_point != output_value->quantization.zero_point) {
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 " and output ID #%" PRIu32
        ": mismatching zero point quantization parameter across input (%"PRId32") and output (%"PRId32")",
        xnn_node_type_to_string(node_type), input_id, output_id,
        input_value->quantization.zero_point, output_value->quantization.zero_point);
      return xnn_status_invalid_parameter;
    }
    // Exact float equality is intentional: the scales must be bit-identical.
    if (input_value->quantization.scale != output_value->quantization.scale) {
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 " and output ID #%" PRIu32
        ": mismatching scale quantization parameter across input (%.7g) and output (%.7g)",
        xnn_node_type_to_string(node_type), input_id, output_id,
        input_value->quantization.scale, output_value->quantization.scale);
      return xnn_status_invalid_parameter;
    }
  }
  return xnn_status_success;
}
// Verify that two tensors have identical shapes: same rank and the same
// size along every dimension. tensor1 is treated as the reference shape.
enum xnn_status xnn_subgraph_check_all_dims_match(
  enum xnn_node_type node_type,
  uint32_t tensor1_id,
  const struct xnn_value* tensor1_value,
  uint32_t tensor2_id,
  const struct xnn_value* tensor2_value)
{
  // Rank must match before dimensions can be compared element-wise.
  const size_t expected_num_dims = tensor1_value->shape.num_dims;
  if (expected_num_dims != tensor2_value->shape.num_dims) {
    xnn_log_error(
      "failed to define %s operator input ID #%" PRIu32 " and output ID #%" PRIu32
      ": mismatch number of dimensions across input (%zu) and output (%zu)",
      xnn_node_type_to_string(node_type), tensor1_id, tensor2_id, expected_num_dims, tensor2_value->shape.num_dims);
    return xnn_status_invalid_parameter;
  }
  // Compare each dimension; report the first mismatch.
  for (size_t i = 0; i < expected_num_dims; i++) {
    if (tensor1_value->shape.dim[i] != tensor2_value->shape.dim[i]) {
      xnn_log_error(
        "failed to define %s operator input ID #%" PRIu32 " and output ID #%" PRIu32
        ": mismatch size of dimension %zu across input (%zu) and output (%zu)",
        xnn_node_type_to_string(node_type), tensor1_id, tensor2_id, i, tensor1_value->shape.dim[i],
        tensor2_value->shape.dim[i]);
      return xnn_status_invalid_parameter;
    }
  }
  return xnn_status_success;
}
| 8,780
| 36.365957
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/tables/exp2-k-over-64.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <xnnpack/common.h>
// Table of exp2(k / 64) values, k = 0..63
// Entries are exact binary32 constants in hex-float notation, spanning
// [1.0, 2.0); used by table-based exp/sigmoid kernels to reconstruct
// exp2(x) from the low 6 bits of the fixed-point exponent.
XNN_INTERNAL const float xnn_table_exp2_k_over_64[64] = {
  0x1.000000p+0f, 0x1.02C9A4p+0f, 0x1.059B0Ep+0f, 0x1.087452p+0f,
  0x1.0B5586p+0f, 0x1.0E3EC4p+0f, 0x1.11301Ep+0f, 0x1.1429AAp+0f,
  0x1.172B84p+0f, 0x1.1A35BEp+0f, 0x1.1D4874p+0f, 0x1.2063B8p+0f,
  0x1.2387A6p+0f, 0x1.26B456p+0f, 0x1.29E9E0p+0f, 0x1.2D285Ap+0f,
  0x1.306FE0p+0f, 0x1.33C08Cp+0f, 0x1.371A74p+0f, 0x1.3A7DB4p+0f,
  0x1.3DEA64p+0f, 0x1.4160A2p+0f, 0x1.44E086p+0f, 0x1.486A2Cp+0f,
  0x1.4BFDAEp+0f, 0x1.4F9B28p+0f, 0x1.5342B6p+0f, 0x1.56F474p+0f,
  0x1.5AB07Ep+0f, 0x1.5E76F2p+0f, 0x1.6247ECp+0f, 0x1.662388p+0f,
  0x1.6A09E6p+0f, 0x1.6DFB24p+0f, 0x1.71F75Ep+0f, 0x1.75FEB6p+0f,
  0x1.7A1148p+0f, 0x1.7E2F34p+0f, 0x1.82589Ap+0f, 0x1.868D9Ap+0f,
  0x1.8ACE54p+0f, 0x1.8F1AEAp+0f, 0x1.93737Cp+0f, 0x1.97D82Ap+0f,
  0x1.9C4918p+0f, 0x1.A0C668p+0f, 0x1.A5503Cp+0f, 0x1.A9E6B6p+0f,
  0x1.AE89FAp+0f, 0x1.B33A2Cp+0f, 0x1.B7F770p+0f, 0x1.BCC1EAp+0f,
  0x1.C199BEp+0f, 0x1.C67F12p+0f, 0x1.CB720Ep+0f, 0x1.D072D4p+0f,
  0x1.D5818Ep+0f, 0x1.DA9E60p+0f, 0x1.DFC974p+0f, 0x1.E502EEp+0f,
  0x1.EA4AFAp+0f, 0x1.EFA1BEp+0f, 0x1.F50766p+0f, 0x1.FA7C18p+0f,
};
| 1,355
| 47.428571
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/tables/exp2minus-k-over-16.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <stdint.h>
#include <xnnpack/common.h>
// Table of exp2(k / 16) values decremented (as integer) by (k << 19), k = 0..15
// The integer decrement (k << 19, with 19 = 23 mantissa bits - log2(16))
// cancels the exponent contribution of k/16 in the binary32 bit pattern, so
// exp kernels can recombine an entry with a shifted integer exponent by
// plain integer addition.
XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16] = {
  0x3F800000, 0x3F7DAAC3, 0x3F7B95C2, 0x3F79C3D3, 0x3F7837F0, 0x3F76F532, 0x3F75FED7, 0x3F75583F,
  0x3F7504F3, 0x3F7508A4, 0x3F75672A, 0x3F76248C, 0x3F7744FD, 0x3F78CCDF, 0x3F7AC0C7, 0x3F7D257D,
};
| 562
| 34.1875
| 97
|
c
|
XNNPACK
|
XNNPACK-master/src/tables/exp2minus-k-over-32.c
|
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <stdint.h>
#include <xnnpack/common.h>
// Table of exp2(k / 32) values decremented (as integer) by (k << 18), k = 0..31
// The integer decrement (k << 18, with 18 = 23 mantissa bits - log2(32))
// cancels the exponent contribution of k/32 in the binary32 bit pattern, so
// exp kernels can recombine an entry with a shifted integer exponent by
// plain integer addition.
XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_32[32] = {
  0x3F800000, 0x3F7ECD87, 0x3F7DAAC3, 0x3F7C980F, 0x3F7B95C2, 0x3F7AA43A, 0x3F79C3D3, 0x3F78F4F0,
  0x3F7837F0, 0x3F778D3A, 0x3F76F532, 0x3F767043, 0x3F75FED7, 0x3F75A15B, 0x3F75583F, 0x3F7523F6,
  0x3F7504F3, 0x3F74FBAF, 0x3F7508A4, 0x3F752C4D, 0x3F75672A, 0x3F75B9BE, 0x3F76248C, 0x3F76A81E,
  0x3F7744FD, 0x3F77FBB8, 0x3F78CCDF, 0x3F79B907, 0x3F7AC0C7, 0x3F7BE4BA, 0x3F7D257D, 0x3F7E83B3,
};
| 758
| 41.166667
| 97
|
c
|
XNNPACK
|
XNNPACK-master/src/tables/exp2minus-k-over-64.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <stdint.h>
#include <xnnpack/common.h>
// Table of exp2(k / 64) values decremented (as integer) by (k << 17), k = 0..63
// The integer decrement (k << 17, with 17 = 23 mantissa bits - log2(64))
// cancels the exponent contribution of k/64 in the binary32 bit pattern, so
// exp kernels can recombine an entry with a shifted integer exponent by
// plain integer addition.
XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_64[64] = {
  0x3F800000, 0x3F7F64D2, 0x3F7ECD87, 0x3F7E3A29, 0x3F7DAAC3, 0x3F7D1F62, 0x3F7C980F, 0x3F7C14D5,
  0x3F7B95C2, 0x3F7B1ADF, 0x3F7AA43A, 0x3F7A31DC, 0x3F79C3D3, 0x3F795A2B, 0x3F78F4F0, 0x3F78942D,
  0x3F7837F0, 0x3F77E046, 0x3F778D3A, 0x3F773EDA, 0x3F76F532, 0x3F76B051, 0x3F767043, 0x3F763516,
  0x3F75FED7, 0x3F75CD94, 0x3F75A15B, 0x3F757A3A, 0x3F75583F, 0x3F753B79, 0x3F7523F6, 0x3F7511C4,
  0x3F7504F3, 0x3F74FD92, 0x3F74FBAF, 0x3F74FF5B, 0x3F7508A4, 0x3F75179A, 0x3F752C4D, 0x3F7546CD,
  0x3F75672A, 0x3F758D75, 0x3F75B9BE, 0x3F75EC15, 0x3F76248C, 0x3F766334, 0x3F76A81E, 0x3F76F35B,
  0x3F7744FD, 0x3F779D16, 0x3F77FBB8, 0x3F7860F5, 0x3F78CCDF, 0x3F793F89, 0x3F79B907, 0x3F7A396A,
  0x3F7AC0C7, 0x3F7B4F30, 0x3F7BE4BA, 0x3F7C8177, 0x3F7D257D, 0x3F7DD0DF, 0x3F7E83B3, 0x3F7F3E0C,
};
| 1,150
| 51.318182
| 97
|
c
|
XNNPACK
|
XNNPACK-master/src/tables/vlog.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <stdint.h>
#include <xnnpack/common.h>
// Log2 table of 128 fractional segments.
// 129 entries so that segment i can read both endpoints (entries i and i+1)
// for piecewise-linear interpolation in xnn_u32_log32; the first and last
// entries are 0 because the correction vanishes at the segment extremes.
XNN_INTERNAL const uint16_t xnn_table_vlog[129] = {
  0, 224, 442, 654, 861, 1063, 1259, 1450, 1636, 1817, 1992, 2163, 2329, 2490, 2646, 2797,
  2944, 3087, 3224, 3358, 3487, 3611, 3732, 3848, 3960, 4068, 4172, 4272, 4368, 4460, 4549, 4633,
  4714, 4791, 4864, 4934, 5001, 5063, 5123, 5178, 5231, 5280, 5326, 5368, 5408, 5444, 5477, 5507,
  5533, 5557, 5578, 5595, 5610, 5622, 5631, 5637, 5640, 5641, 5638, 5633, 5626, 5615, 5602, 5586,
  5568, 5547, 5524, 5498, 5470, 5439, 5406, 5370, 5332, 5291, 5249, 5203, 5156, 5106, 5054, 5000,
  4944, 4885, 4825, 4762, 4697, 4630, 4561, 4490, 4416, 4341, 4264, 4184, 4103, 4020, 3935, 3848,
  3759, 3668, 3575, 3481, 3384, 3286, 3186, 3084, 2981, 2875, 2768, 2659, 2549, 2437, 2323, 2207,
  2090, 1971, 1851, 1729, 1605, 1480, 1353, 1224, 1094, 963, 830, 695, 559, 421, 282, 142,
  0
};
| 1,100
| 46.869565
| 97
|
c
|
XNNPACK
|
XNNPACK-master/src/u32-filterbank-accumulate/gen/u32-filterbank-accumulate-neon-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/u32-filterbank-accumulate/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/filterbank.h>
// u32 filterbank accumulation, NEON, one input sample per loop iteration.
// Each input sample carries a (weight, unweight) 16-bit pair; the two 64-bit
// lanes of weight_accumulator hold the weighted and unweighted running sums
// simultaneously. At each row boundary the unweighted sum (high lane) is
// promoted to seed the next row's weighted sum — matching the scalar kernel.
void xnn_u32_filterbank_accumulate_ukernel__neon_x1(
    size_t rows,
    const uint32_t* input,
    const uint8_t* weight_widths,
    const uint16_t* weights,
    uint64_t* output) {

  assert(rows != 0);
  assert(input != NULL);
  assert(weight_widths != NULL);
  assert(weights != NULL);
  assert(output != NULL);

  // Compute unweight as initial weight
  size_t n = (size_t) *weight_widths++;
  assert(n != 0);

  uint64x2_t weight_accumulator = vdupq_n_u64(0);
  do {
    // Broadcast one sample; load its packed (weight, unweight) pair and widen
    // to two 32-bit lanes.
    const uint32x2_t vi = vld1_dup_u32(input); input += 1;
    const uint16x4_t vw = vreinterpret_u16_u32(vld1_dup_u32((const void*) weights)); weights += 2;
    const uint32x2_t vw32 = vget_low_u32(vmovl_u16(vw));
    // Widening multiply-accumulate into both 64-bit lanes at once.
    weight_accumulator = vmlal_u32(weight_accumulator, vw32, vi);
  } while (--n != 0);

  do {
    size_t n = (size_t) *weight_widths++;
    assert(n != 0);
    // Move the previous row's unweighted sum into the weighted lane and clear
    // the unweighted lane for the new row.
    weight_accumulator = vcombine_u64(vget_high_u64(weight_accumulator), vdup_n_u64(0));
    do {
      const uint32x2_t vi = vld1_dup_u32(input); input += 1;
      const uint16x4_t vw = vreinterpret_u16_u32(vld1_dup_u32((const void*) weights)); weights += 2;
      const uint32x2_t vw32 = vget_low_u32(vmovl_u16(vw));
      weight_accumulator = vmlal_u32(weight_accumulator, vw32, vi);
    } while (--n != 0);
    // Emit this row's weighted sum (low lane).
    vst1_u64(output, vget_low_u64(weight_accumulator)); output += 1;
  } while (--rows != 0);
}
| 1,792
| 27.460317
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/u32-filterbank-accumulate/gen/u32-filterbank-accumulate-neon-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/u32-filterbank-accumulate/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/filterbank.h>
// u32 filterbank accumulation, NEON, two input samples per main-loop
// iteration. Same lane layout as the x1 kernel: the low 64-bit lane of
// weight_accumulator is the weighted sum, the high lane the unweighted sum,
// and the unweighted sum of one row seeds the weighted sum of the next.
void xnn_u32_filterbank_accumulate_ukernel__neon_x2(
    size_t rows,
    const uint32_t* input,
    const uint8_t* weight_widths,
    const uint16_t* weights,
    uint64_t* output) {

  assert(rows != 0);
  assert(input != NULL);
  assert(weight_widths != NULL);
  assert(weights != NULL);
  assert(output != NULL);

  // Compute unweight as initial weight
  size_t n = (size_t) *weight_widths++;
  assert(n != 0);

  uint64x2_t weight_accumulator = vdupq_n_u64(0);
  do {
    // Broadcast one sample; load its packed (weight, unweight) pair and widen.
    const uint32x2_t vi = vld1_dup_u32(input); input += 1;
    const uint16x4_t vw = vreinterpret_u16_u32(vld1_dup_u32((const void*) weights)); weights += 2;
    const uint32x2_t vw32 = vget_low_u32(vmovl_u16(vw));
    weight_accumulator = vmlal_u32(weight_accumulator, vw32, vi);
  } while (--n != 0);

  do {
    size_t n = (size_t) *weight_widths++;
    assert(n != 0);
    // Promote the previous row's unweighted sum (high lane) to the weighted
    // lane and clear the unweighted lane.
    weight_accumulator = vcombine_u64(vget_high_u64(weight_accumulator), vdup_n_u64(0));
    // Main loop: two samples and their two (weight, unweight) pairs per pass,
    // accumulated via lane-selected widening multiply-adds.
    for (; n >= 2; n -= 2) {
      const uint32x2_t vi = vld1_u32(input); input += 2;
      const uint16x4_t vw = vld1_u16(weights); weights += 4;
      const uint32x4_t vw32 = vmovl_u16(vw);
      weight_accumulator = vmlal_lane_u32(weight_accumulator, vget_low_u32(vw32), vi, 0);
      weight_accumulator = vmlal_lane_u32(weight_accumulator, vget_high_u32(vw32), vi, 1);
    }
    // Remainder: at most one trailing sample in this row.
    if XNN_UNPREDICTABLE(n != 0) {
      const uint32x2_t vi = vld1_dup_u32(input); input += 1;
      const uint16x4_t vw = vreinterpret_u16_u32(vld1_dup_u32((const void*) weights)); weights += 2;
      const uint32x2_t vw32 = vget_low_u32(vmovl_u16(vw));
      weight_accumulator = vmlal_u32(weight_accumulator, vw32, vi);
    }
    // Emit this row's weighted sum (low lane).
    vst1_u64(output, vget_low_u64(weight_accumulator)); output += 1;
  } while (--rows != 0);
}
| 2,181
| 29.305556
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/u32-filterbank-accumulate/gen/u32-filterbank-accumulate-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/u32-filterbank-accumulate/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/filterbank.h>
#include <xnnpack/math.h>
// u32 filterbank accumulation, scalar reference kernel.
// Each input sample carries a packed (weight, unweight) pair. For every
// output row the kernel emits sum(sample * weight); the parallel
// sum(sample * unweight) of one row becomes the starting weighted sum of
// the next row. The first weight_widths entry is a prologue accumulated
// with unweights only, seeding the first row.
void xnn_u32_filterbank_accumulate_ukernel__scalar_x1(
    size_t rows,
    const uint32_t* input,
    const uint8_t* weight_widths,
    const uint16_t* weights,
    uint64_t* output) {

  assert(rows != 0);
  assert(input != NULL);
  assert(weight_widths != NULL);
  assert(weights != NULL);
  assert(output != NULL);

  uint64_t weighted_sum = 0;
  uint64_t unweighted_sum = 0;

  // Prologue: seed the weighted sum of the first row from unweights only.
  size_t count = (size_t) *weight_widths++;
  assert(count != 0);
  do {
    const uint32_t sample = *input++;
    const uint32_t unweight = (uint32_t) weights[1];  // unweight
    weights += 2;
    weighted_sum += math_mulext_u32(sample, unweight);
  } while (--count != 0);

  do {
    size_t count = (size_t) *weight_widths++;
    assert(count != 0);
    do {
      const uint32_t sample = *input++;
      const uint32_t weight = (uint32_t) weights[0];    // weight
      const uint32_t unweight = (uint32_t) weights[1];  // unweight
      weights += 2;
      weighted_sum += math_mulext_u32(sample, weight);
      unweighted_sum += math_mulext_u32(sample, unweight);
    } while (--count != 0);
    *output++ = weighted_sum;
    // The unweighted sum of this row seeds the next row's weighted sum.
    weighted_sum = unweighted_sum;
    unweighted_sum = 0;
  } while (--rows != 0);
}
| 1,737
| 23.478873
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/u32-filterbank-subtract/u32-filterbank-subtract-scalar-x2.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/filterbank.h>
// Spectral subtraction for noise suppression, scalar kernel, two channels
// per iteration. For each pair of bins the kernel updates an exponentially
// smoothed noise estimate and subtracts it from the (scaled-up) signal,
// never going below min_signal_remaining of the original signal. Even and
// odd bins use the primary and alternate smoothing coefficient pairs
// respectively; all coefficients are fixed-point fractions with
// spectral_subtraction_bits fractional bits.
void xnn_u32_filterbank_subtract_ukernel__scalar_x2(
  size_t batch_size,
  const uint32_t* input,
  uint32_t smoothing,
  uint32_t alternate_smoothing,
  uint32_t one_minus_smoothing,
  uint32_t alternate_one_minus_smoothing,
  uint32_t min_signal_remaining,
  uint32_t smoothing_bits, /* 0 in FE */
  uint32_t spectral_subtraction_bits, /* 14 in FE */
  uint32_t* noise_estimate,
  uint32_t* output) {

  assert(batch_size != 0);
  assert(batch_size % 2 == 0);
  assert(input != NULL);
  assert(output != NULL);
  assert(noise_estimate != NULL);

  batch_size >>= 1; /* 48 in FE */  // pairs of channels per iteration
  do {
    const uint32_t vinput0 = input[0];
    const uint32_t vinput1 = input[1];
    input += 2;

    uint32_t vnoise_estimate0 = noise_estimate[0];
    uint32_t vnoise_estimate1 = noise_estimate[1];

    // Scale up signal for smoothing filter computation.
    const uint32_t vsignal_scaled_up0 = vinput0 << smoothing_bits;
    const uint32_t vsignal_scaled_up1 = vinput1 << smoothing_bits;

    // IIR update: estimate = s*signal + (1-s)*estimate, in 64-bit
    // intermediate precision, then renormalized.
    vnoise_estimate0 = (uint32_t) ((math_mulext_u32(vsignal_scaled_up0, smoothing) +
        math_mulext_u32(vnoise_estimate0, one_minus_smoothing)) >> spectral_subtraction_bits);
    vnoise_estimate1 = (uint32_t) ((math_mulext_u32(vsignal_scaled_up1, alternate_smoothing) +
        math_mulext_u32(vnoise_estimate1, alternate_one_minus_smoothing)) >> spectral_subtraction_bits);

    noise_estimate[0] = vnoise_estimate0;
    noise_estimate[1] = vnoise_estimate1;
    noise_estimate += 2;

    // Lower bound for the output: a fixed fraction of the input signal.
    const uint32_t vfloor0 = (uint32_t) (math_mulext_u32(vinput0, min_signal_remaining) >> spectral_subtraction_bits);
    const uint32_t vfloor1 = (uint32_t) (math_mulext_u32(vinput1, min_signal_remaining) >> spectral_subtraction_bits);

    // Saturating (difference-or-zero) subtraction of the noise estimate,
    // scaled back down.
    const uint32_t vsubtracted0 = math_doz_u32(vsignal_scaled_up0, vnoise_estimate0) >> smoothing_bits;
    const uint32_t vsubtracted1 = math_doz_u32(vsignal_scaled_up1, vnoise_estimate1) >> smoothing_bits;

    const uint32_t vout0 = math_max_u32(vsubtracted0, vfloor0);
    const uint32_t vout1 = math_max_u32(vsubtracted1, vfloor1);

    output[0] = vout0;
    output[1] = vout1;
    output += 2;
  } while (--batch_size != 0);
}
| 2,516
| 35.478261
| 134
|
c
|
XNNPACK
|
XNNPACK-master/src/u32-vlog/gen/u32-vlog-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/u32-vlog/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/vlog.h>
extern XNN_INTERNAL const uint16_t xnn_table_vlog[129];
#define LOG_SEGMENTS_LOG2 7
#define LOG_SCALE 65536
#define LOG_SCALE_LOG2 16
#define LOG_COEFF 45426
// Fixed-point natural logarithm of a nonzero 32-bit integer:
// returns approximately round(ln(x) * out_scale / 2**16), computed entirely
// in integer arithmetic. log2(x) is approximated by piecewise-linear
// interpolation over 128 fractional segments (xnn_table_vlog), then
// converted to base e with LOG_COEFF (45426 ~= ln(2) * 65536, Q16).
// x must be nonzero (math_clz_nonzero_u32 requires it).
static uint32_t xnn_u32_log32(uint32_t x, uint32_t out_scale) {
  // Integer part: floor(log2(x)).
  const uint32_t log2x = math_clz_nonzero_u32(x) ^ 31;
  // Bits of x below the leading one, normalized to Q16: math_doz_u32
  // (difference-or-zero) selects whichever shift direction is nonnegative.
  int32_t frac = x - (UINT32_C(1) << log2x);
  frac <<= math_doz_u32(LOG_SCALE_LOG2, log2x);
  frac >>= math_doz_u32(log2x, LOG_SCALE_LOG2);
  // Interpolation segment index and geometry.
  const uint32_t base_seg = frac >> (LOG_SCALE_LOG2 - LOG_SEGMENTS_LOG2);
  const uint32_t seg_unit = (UINT32_C(1) << LOG_SCALE_LOG2) >> LOG_SEGMENTS_LOG2;
  // Linear interpolation between adjacent table corrections c0 and c1.
  const int32_t c0 = xnn_table_vlog[base_seg];
  const int32_t c1 = xnn_table_vlog[base_seg + 1];
  const int32_t seg_base = seg_unit * base_seg;
  const int32_t rel_pos = math_asr_s32((c1 - c0) * (frac - seg_base), LOG_SCALE_LOG2);
  const uint32_t fraction = frac + c0 + rel_pos;
  // Assemble Q16 log2, convert to natural log, apply the caller's scale;
  // each stage rounds to nearest via the Q16 half-ulp constant.
  const uint32_t log2 = (log2x << LOG_SCALE_LOG2) + fraction;
  const uint32_t round = LOG_SCALE >> 1;
  const uint32_t loge = (math_mulext_u32(log2, LOG_COEFF) + round) >> LOG_SCALE_LOG2;
  const uint32_t loge_scaled = (out_scale * loge + round) >> LOG_SCALE_LOG2;
  return loge_scaled;
}
// u32 -> u16 vectorized log kernel, scalar, one element per iteration.
// Each input is left-shifted by input_lshift, passed through the fixed-point
// log (zero maps to zero), and saturated to INT16_MAX before narrowing.
void xnn_u32_vlog_ukernel__scalar_x1(
    size_t batch,
    const uint32_t* input,
    uint32_t input_lshift,
    uint32_t output_scale,
    uint16_t* output) {

  assert(batch != 0);
  assert(input != NULL);
  assert(input_lshift < 32);
  assert(output != NULL);

  if XNN_UNLIKELY(batch != 0) {
    do {
      const uint32_t shifted = *input++ << input_lshift;
      // xnn_u32_log32 requires a nonzero argument; log(0) is defined as 0.
      uint32_t result = 0;
      if XNN_LIKELY(shifted != 0) {
        result = xnn_u32_log32(shifted, output_scale);
      }
      // Saturate to the signed 16-bit maximum before narrowing.
      *output++ = (uint16_t) math_min_u32(result, (uint32_t) INT16_MAX);
    } while (--batch != 0);
  }
}
| 2,178
| 29.690141
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/u32-vlog/gen/u32-vlog-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/u32-vlog/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/vlog.h>
extern XNN_INTERNAL const uint16_t xnn_table_vlog[129];
#define LOG_SEGMENTS_LOG2 7
#define LOG_SCALE 65536
#define LOG_SCALE_LOG2 16
#define LOG_COEFF 45426
// Fixed-point natural logarithm of a nonzero 32-bit integer:
// returns approximately round(ln(x) * out_scale / 2**16), computed entirely
// in integer arithmetic. log2(x) is approximated by piecewise-linear
// interpolation over 128 fractional segments (xnn_table_vlog), then
// converted to base e with LOG_COEFF (45426 ~= ln(2) * 65536, Q16).
// x must be nonzero (math_clz_nonzero_u32 requires it).
static uint32_t xnn_u32_log32(uint32_t x, uint32_t out_scale) {
  // Integer part: floor(log2(x)).
  const uint32_t log2x = math_clz_nonzero_u32(x) ^ 31;
  // Bits of x below the leading one, normalized to Q16: math_doz_u32
  // (difference-or-zero) selects whichever shift direction is nonnegative.
  int32_t frac = x - (UINT32_C(1) << log2x);
  frac <<= math_doz_u32(LOG_SCALE_LOG2, log2x);
  frac >>= math_doz_u32(log2x, LOG_SCALE_LOG2);
  // Interpolation segment index and geometry.
  const uint32_t base_seg = frac >> (LOG_SCALE_LOG2 - LOG_SEGMENTS_LOG2);
  const uint32_t seg_unit = (UINT32_C(1) << LOG_SCALE_LOG2) >> LOG_SEGMENTS_LOG2;
  // Linear interpolation between adjacent table corrections c0 and c1.
  const int32_t c0 = xnn_table_vlog[base_seg];
  const int32_t c1 = xnn_table_vlog[base_seg + 1];
  const int32_t seg_base = seg_unit * base_seg;
  const int32_t rel_pos = math_asr_s32((c1 - c0) * (frac - seg_base), LOG_SCALE_LOG2);
  const uint32_t fraction = frac + c0 + rel_pos;
  // Assemble Q16 log2, convert to natural log, apply the caller's scale;
  // each stage rounds to nearest via the Q16 half-ulp constant.
  const uint32_t log2 = (log2x << LOG_SCALE_LOG2) + fraction;
  const uint32_t round = LOG_SCALE >> 1;
  const uint32_t loge = (math_mulext_u32(log2, LOG_COEFF) + round) >> LOG_SCALE_LOG2;
  const uint32_t loge_scaled = (out_scale * loge + round) >> LOG_SCALE_LOG2;
  return loge_scaled;
}
// u32 -> u16 vectorized log kernel, scalar, unrolled two elements per
// main-loop iteration. Each input is left-shifted by input_lshift, passed
// through the fixed-point log (zero maps to zero), and saturated to
// INT16_MAX before narrowing to uint16_t.
void xnn_u32_vlog_ukernel__scalar_x2(
    size_t batch,
    const uint32_t* input,
    uint32_t input_lshift,
    uint32_t output_scale,
    uint16_t* output) {

  assert(batch != 0);
  assert(input != NULL);
  assert(input_lshift < 32);
  assert(output != NULL);

  // Main loop: two elements per pass.
  for (; batch >= 2; batch -= 2) {
    const uint32_t vi0 = input[0];
    const uint32_t vi1 = input[1];
    input += 2;

    const uint32_t scaled0 = vi0 << input_lshift;
    const uint32_t scaled1 = vi1 << input_lshift;

    // xnn_u32_log32 requires nonzero input; log(0) is defined as 0.
    const uint32_t log_value0 = XNN_LIKELY(scaled0 != 0) ? xnn_u32_log32(scaled0, output_scale) : 0;
    const uint32_t vout0 = math_min_u32(log_value0, (uint32_t) INT16_MAX);  // signed max value
    output[0] = (uint16_t) vout0;

    const uint32_t log_value1 = XNN_LIKELY(scaled1 != 0) ? xnn_u32_log32(scaled1, output_scale) : 0;
    const uint32_t vout1 = math_min_u32(log_value1, (uint32_t) INT16_MAX);  // signed max value
    output[1] = (uint16_t) vout1;

    output += 2;
  }
  // Remainder: at most one trailing element.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const uint32_t vi = *input++;
      const uint32_t scaled = vi << input_lshift;
      const uint32_t log_value = XNN_LIKELY(scaled != 0) ? xnn_u32_log32(scaled, output_scale) : 0;
      const uint32_t vout = math_min_u32(log_value, (uint32_t) INT16_MAX);
      *output++ = (uint16_t) vout;
    } while (--batch != 0);
  }
}
| 2,887
| 31.088889
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/u32-vlog/gen/u32-vlog-scalar-x3.c
|
// Auto-generated file. Do not edit!
// Template: src/u32-vlog/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/vlog.h>
extern XNN_INTERNAL const uint16_t xnn_table_vlog[129];
#define LOG_SEGMENTS_LOG2 7
#define LOG_SCALE 65536
#define LOG_SCALE_LOG2 16
#define LOG_COEFF 45426
// Fixed-point natural logarithm of a nonzero 32-bit integer:
// returns approximately round(ln(x) * out_scale / 2**16), computed entirely
// in integer arithmetic. log2(x) is approximated by piecewise-linear
// interpolation over 128 fractional segments (xnn_table_vlog), then
// converted to base e with LOG_COEFF (45426 ~= ln(2) * 65536, Q16).
// x must be nonzero (math_clz_nonzero_u32 requires it).
static uint32_t xnn_u32_log32(uint32_t x, uint32_t out_scale) {
  // Integer part: floor(log2(x)).
  const uint32_t log2x = math_clz_nonzero_u32(x) ^ 31;
  // Bits of x below the leading one, normalized to Q16: math_doz_u32
  // (difference-or-zero) selects whichever shift direction is nonnegative.
  int32_t frac = x - (UINT32_C(1) << log2x);
  frac <<= math_doz_u32(LOG_SCALE_LOG2, log2x);
  frac >>= math_doz_u32(log2x, LOG_SCALE_LOG2);
  // Interpolation segment index and geometry.
  const uint32_t base_seg = frac >> (LOG_SCALE_LOG2 - LOG_SEGMENTS_LOG2);
  const uint32_t seg_unit = (UINT32_C(1) << LOG_SCALE_LOG2) >> LOG_SEGMENTS_LOG2;
  // Linear interpolation between adjacent table corrections c0 and c1.
  const int32_t c0 = xnn_table_vlog[base_seg];
  const int32_t c1 = xnn_table_vlog[base_seg + 1];
  const int32_t seg_base = seg_unit * base_seg;
  const int32_t rel_pos = math_asr_s32((c1 - c0) * (frac - seg_base), LOG_SCALE_LOG2);
  const uint32_t fraction = frac + c0 + rel_pos;
  // Assemble Q16 log2, convert to natural log, apply the caller's scale;
  // each stage rounds to nearest via the Q16 half-ulp constant.
  const uint32_t log2 = (log2x << LOG_SCALE_LOG2) + fraction;
  const uint32_t round = LOG_SCALE >> 1;
  const uint32_t loge = (math_mulext_u32(log2, LOG_COEFF) + round) >> LOG_SCALE_LOG2;
  const uint32_t loge_scaled = (out_scale * loge + round) >> LOG_SCALE_LOG2;
  return loge_scaled;
}
// u32 -> u16 vectorized log kernel, scalar, unrolled three elements per
// main-loop iteration. Each input is left-shifted by input_lshift, passed
// through the fixed-point log (zero maps to zero), and saturated to
// INT16_MAX before narrowing to uint16_t.
void xnn_u32_vlog_ukernel__scalar_x3(
    size_t batch,
    const uint32_t* input,
    uint32_t input_lshift,
    uint32_t output_scale,
    uint16_t* output) {

  assert(batch != 0);
  assert(input != NULL);
  assert(input_lshift < 32);
  assert(output != NULL);

  // Main loop: three elements per pass.
  for (; batch >= 3; batch -= 3) {
    const uint32_t vi0 = input[0];
    const uint32_t vi1 = input[1];
    const uint32_t vi2 = input[2];
    input += 3;

    const uint32_t scaled0 = vi0 << input_lshift;
    const uint32_t scaled1 = vi1 << input_lshift;
    const uint32_t scaled2 = vi2 << input_lshift;

    // xnn_u32_log32 requires nonzero input; log(0) is defined as 0.
    const uint32_t log_value0 = XNN_LIKELY(scaled0 != 0) ? xnn_u32_log32(scaled0, output_scale) : 0;
    const uint32_t vout0 = math_min_u32(log_value0, (uint32_t) INT16_MAX);  // signed max value
    output[0] = (uint16_t) vout0;

    const uint32_t log_value1 = XNN_LIKELY(scaled1 != 0) ? xnn_u32_log32(scaled1, output_scale) : 0;
    const uint32_t vout1 = math_min_u32(log_value1, (uint32_t) INT16_MAX);  // signed max value
    output[1] = (uint16_t) vout1;

    const uint32_t log_value2 = XNN_LIKELY(scaled2 != 0) ? xnn_u32_log32(scaled2, output_scale) : 0;
    const uint32_t vout2 = math_min_u32(log_value2, (uint32_t) INT16_MAX);  // signed max value
    output[2] = (uint16_t) vout2;

    output += 3;
  }
  // Remainder: up to two trailing elements, one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const uint32_t vi = *input++;
      const uint32_t scaled = vi << input_lshift;
      const uint32_t log_value = XNN_LIKELY(scaled != 0) ? xnn_u32_log32(scaled, output_scale) : 0;
      const uint32_t vout = math_min_u32(log_value, (uint32_t) INT16_MAX);
      *output++ = (uint16_t) vout;
    } while (--batch != 0);
  }
}
| 3,204
| 32.385417
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/u32-vlog/gen/u32-vlog-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/u32-vlog/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/vlog.h>
extern XNN_INTERNAL const uint16_t xnn_table_vlog[129];
#define LOG_SEGMENTS_LOG2 7
#define LOG_SCALE 65536
#define LOG_SCALE_LOG2 16
#define LOG_COEFF 45426
// Fixed-point natural logarithm of a nonzero 32-bit integer:
// returns approximately round(ln(x) * out_scale / 2**16), computed entirely
// in integer arithmetic. log2(x) is approximated by piecewise-linear
// interpolation over 128 fractional segments (xnn_table_vlog), then
// converted to base e with LOG_COEFF (45426 ~= ln(2) * 65536, Q16).
// x must be nonzero (math_clz_nonzero_u32 requires it).
static uint32_t xnn_u32_log32(uint32_t x, uint32_t out_scale) {
  // Integer part: floor(log2(x)).
  const uint32_t log2x = math_clz_nonzero_u32(x) ^ 31;
  // Bits of x below the leading one, normalized to Q16: math_doz_u32
  // (difference-or-zero) selects whichever shift direction is nonnegative.
  int32_t frac = x - (UINT32_C(1) << log2x);
  frac <<= math_doz_u32(LOG_SCALE_LOG2, log2x);
  frac >>= math_doz_u32(log2x, LOG_SCALE_LOG2);
  // Interpolation segment index and geometry.
  const uint32_t base_seg = frac >> (LOG_SCALE_LOG2 - LOG_SEGMENTS_LOG2);
  const uint32_t seg_unit = (UINT32_C(1) << LOG_SCALE_LOG2) >> LOG_SEGMENTS_LOG2;
  // Linear interpolation between adjacent table corrections c0 and c1.
  const int32_t c0 = xnn_table_vlog[base_seg];
  const int32_t c1 = xnn_table_vlog[base_seg + 1];
  const int32_t seg_base = seg_unit * base_seg;
  const int32_t rel_pos = math_asr_s32((c1 - c0) * (frac - seg_base), LOG_SCALE_LOG2);
  const uint32_t fraction = frac + c0 + rel_pos;
  // Assemble Q16 log2, convert to natural log, apply the caller's scale;
  // each stage rounds to nearest via the Q16 half-ulp constant.
  const uint32_t log2 = (log2x << LOG_SCALE_LOG2) + fraction;
  const uint32_t round = LOG_SCALE >> 1;
  const uint32_t loge = (math_mulext_u32(log2, LOG_COEFF) + round) >> LOG_SCALE_LOG2;
  const uint32_t loge_scaled = (out_scale * loge + round) >> LOG_SCALE_LOG2;
  return loge_scaled;
}
// Scalar vlog micro-kernel, unrolled by four.
// For each element: out[i] = min(log32(in[i] << input_lshift), INT16_MAX),
// where inputs that shift to zero produce zero (log of zero is clamped
// rather than undefined).
void xnn_u32_vlog_ukernel__scalar_x4(
    size_t batch,
    const uint32_t* input,
    uint32_t input_lshift,
    uint32_t output_scale,
    uint16_t* output) {
  assert(batch != 0);
  assert(input != NULL);
  assert(input_lshift < 32);
  assert(output != NULL);

  // Main loop: four independent elements per iteration.
  while (batch >= 4) {
    for (size_t k = 0; k < 4; k++) {
      const uint32_t vshifted = input[k] << input_lshift;
      const uint32_t vlog = (vshifted != 0) ? xnn_u32_log32(vshifted, output_scale) : 0;
      // Clamp to the signed 16-bit maximum before narrowing.
      output[k] = (uint16_t) math_min_u32(vlog, (uint32_t) INT16_MAX);
    }
    input += 4;
    output += 4;
    batch -= 4;
  }
  // Remainder: up to three leftover elements, one at a time.
  while (batch != 0) {
    const uint32_t vshifted = *input++ << input_lshift;
    const uint32_t vlog = (vshifted != 0) ? xnn_u32_log32(vshifted, output_scale) : 0;
    *output++ = (uint16_t) math_min_u32(vlog, (uint32_t) INT16_MAX);
    batch -= 1;
  }
}
| 3,521
| 33.529412
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/u64-u32-vsqrtshift/u64-u32-vsqrtshift-scalar-cvtu32-sqrt-cvtu32f64-x1.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Computes out[i] = (integer square root of in[i]) >> shift for 64-bit
// inputs, one element per iteration. The square root is taken in double
// precision and then corrected by +/-1; two deliberate quirks reproduce
// TFLite Micro's results exactly (see the "Match TFLM" comments below).
void xnn_u64_u32_vsqrtshift_ukernel__scalar_cvtu32_sqrt_cvtu32f64_x1(
    size_t batch,        // batch size in BYTES (multiple of sizeof(uint64_t))
    const uint64_t* input,
    uint32_t* output,
    uint32_t shift)      // right shift applied to the final 32-bit result
{
  assert(batch != 0);
  assert(input != NULL);
  assert(output != NULL);
  assert(shift < 32);
  do {
    const uint64_t vx = *input++;
    uint64_t vy = vx;    // sqrt(0) == 0; the branch below only handles vx != 0
    const uint32_t vx_hi = (uint32_t) (vx >> 32);
    const uint32_t vx_lo = (uint32_t) vx;
    if XNN_LIKELY(vx != 0) {
      // Assemble the 64-bit input as a double from its exact 32-bit halves,
      // take the square root in floating point, and convert back with
      // saturation to the uint32 range.
      const double vf_hi = (double) vx_hi;
      const double vf_lo = (double) vx_lo;
      double vf = vf_hi * 0x1.0p+32 + vf_lo;
      vf = sqrt(vf);
      vy = math_cvt_sat_u32_f64(vf);
      #if XNN_ARCH_ARM || XNN_ARCH_X86
      // 32-bit targets: use the widening 32x32->64 multiply helper.
      const uint64_t vsquared_y_less_x = math_mulext_u32((uint32_t) vy, (uint32_t) vy) - vx;
      #else
      const uint64_t vsquared_y_less_x = vy * vy - vx;
      #endif
      // Correct vy by +/-1 to compensate for double-precision rounding of
      // sqrt, comparing vy*vy - vx against +/-vy as the adjustment threshold.
      if XNN_UNPREDICTABLE((int64_t) (vsquared_y_less_x + vy) < 0) {
        vy += 1;
      } else if XNN_UNPREDICTABLE((int64_t) (vsquared_y_less_x - vy) >= 0) {
        vy -= 1;
      }
    }
    // Match TFLM is producing incorrect result for high 64-bit inputs
    // (when vy overflows 32 bits, -vy_hi forces the result to all-ones).
    const uint32_t vy_lo = (uint32_t) vy;
    const uint32_t vy_hi = (uint32_t) (vy >> 32);
    uint32_t vout = vy_lo | -vy_hi;
    // Match TFLM is producing incorrect result for high 32-bit inputs
    // (a result of exactly 0x10000 is nudged down to 0xFFFF).
    if XNN_LIKELY(vx_hi == 0) {
      if (vout == UINT32_C(0x00010000)) {
        vout -= 1;
      }
    }
    *output++ = vout >> shift;
    batch -= sizeof(uint64_t);
  } while (batch != 0);
}
| 1,807
| 26.393939
| 94
|
c
|
XNNPACK
|
XNNPACK-master/src/u8-ibilinear/gen/u8-ibilinear-scalar-c1.c
|
// Auto-generated file. Do not edit!
// Template: src/s8-ibilinear/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/ibilinear.h>
#include <xnnpack/math.h>
// Bilinear interpolation of uint8 pixels, one channel at a time.
// For each output pixel, input[] supplies pointers to the four corner rows
// (top-left, top-right, bottom-left, bottom-right) and weights[] supplies a
// horizontal/vertical weight pair; each interpolation stage adds 11
// fractional bits, and the final value is rounded back with >> 22.
void xnn_u8_ibilinear_ukernel__scalar_c1(
    size_t output_pixels,
    size_t channels,
    const uint8_t** restrict input,
    size_t input_offset,
    const int16_t* restrict weights,
    uint8_t* restrict output,
    size_t output_increment)
{
  assert(output_pixels != 0);
  assert(channels != 0);

  do {
    // Corner-row pointers for the current output pixel.
    const uint8_t* top_left = (const uint8_t*) ((uintptr_t) input[0] + input_offset);
    const uint8_t* top_right = (const uint8_t*) ((uintptr_t) input[1] + input_offset);
    const uint8_t* bottom_left = (const uint8_t*) ((uintptr_t) input[2] + input_offset);
    const uint8_t* bottom_right = (const uint8_t*) ((uintptr_t) input[3] + input_offset);
    input += 4;

    // Horizontal and vertical interpolation weights (zero-extended int16).
    const int32_t alpha_h = (int32_t) (uint32_t) (uint16_t) weights[0];
    const int32_t alpha_v = (int32_t) (uint32_t) (uint16_t) weights[1];
    weights += 2;

    const int32_t rounding = INT32_C(0x00200000);

    size_t c = channels;
    do {
      const int32_t tl = (int32_t) *top_left++;
      const int32_t tr = (int32_t) *top_right++;
      const int32_t bl = (int32_t) *bottom_left++;
      const int32_t br = (int32_t) *bottom_right++;

      // Interpolate horizontally along the top and bottom rows.
      const int32_t top = (int32_t) ((uint32_t) tl << 11) + (tr - tl) * alpha_h;
      const int32_t bottom = (int32_t) ((uint32_t) bl << 11) + (br - bl) * alpha_h;

      // Interpolate vertically between the two rows, then round.
      const int32_t acc = (int32_t) ((uint32_t) top << 11) + (bottom - top) * alpha_v;
      *output++ = (uint8_t) math_asr_s32(acc + rounding, 22);

      c -= sizeof(uint8_t);
    } while (c != 0);

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}
| 2,001
| 28.441176
| 79
|
c
|
XNNPACK
|
XNNPACK-master/src/u8-ibilinear/gen/u8-ibilinear-scalar-c2.c
|
// Auto-generated file. Do not edit!
// Template: src/s8-ibilinear/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/ibilinear.h>
#include <xnnpack/math.h>
// Bilinear interpolation of uint8 pixels, two channels per main-loop step.
// For each output pixel, input[] supplies pointers to the four corner rows
// and weights[] supplies a horizontal/vertical weight pair; each
// interpolation stage adds 11 fractional bits and the result is rounded
// back with >> 22.
void xnn_u8_ibilinear_ukernel__scalar_c2(
    size_t output_pixels,
    size_t channels,
    const uint8_t** restrict input,
    size_t input_offset,
    const int16_t* restrict weights,
    uint8_t* restrict output,
    size_t output_increment)
{
  assert(output_pixels != 0);
  assert(channels != 0);

  do {
    // Corner-row pointers (top-left, top-right, bottom-left, bottom-right).
    const uint8_t* itl = (const uint8_t*) ((uintptr_t) input[0] + input_offset);
    const uint8_t* itr = (const uint8_t*) ((uintptr_t) input[1] + input_offset);
    const uint8_t* ibl = (const uint8_t*) ((uintptr_t) input[2] + input_offset);
    const uint8_t* ibr = (const uint8_t*) ((uintptr_t) input[3] + input_offset);
    input += 4;

    // Horizontal and vertical interpolation weights (zero-extended int16).
    const int32_t alpha_h = (int32_t) (uint32_t) (uint16_t) weights[0];
    const int32_t alpha_v = (int32_t) (uint32_t) (uint16_t) weights[1];
    weights += 2;

    const int32_t rounding = INT32_C(0x00200000);

    size_t c = channels;
    // Main loop: two channels per iteration.
    for (; c >= 2 * sizeof(uint8_t); c -= 2 * sizeof(uint8_t)) {
      for (size_t k = 0; k < 2; k++) {
        const int32_t tl = (int32_t) itl[k];
        const int32_t tr = (int32_t) itr[k];
        const int32_t bl = (int32_t) ibl[k];
        const int32_t br = (int32_t) ibr[k];

        // Horizontal pass along the top and bottom rows.
        const int32_t top = (int32_t) ((uint32_t) tl << 11) + (tr - tl) * alpha_h;
        const int32_t bottom = (int32_t) ((uint32_t) bl << 11) + (br - bl) * alpha_h;

        // Vertical pass, then round the accumulated fixed-point value.
        const int32_t acc = (int32_t) ((uint32_t) top << 11) + (bottom - top) * alpha_v;
        output[k] = (uint8_t) math_asr_s32(acc + rounding, 22);
      }
      itl += 2;
      itr += 2;
      ibl += 2;
      ibr += 2;
      output += 2;
    }
    // Tail: at most one remaining channel.
    while (c >= sizeof(uint8_t)) {
      const int32_t tl = (int32_t) *itl++;
      const int32_t tr = (int32_t) *itr++;
      const int32_t bl = (int32_t) *ibl++;
      const int32_t br = (int32_t) *ibr++;

      const int32_t top = (int32_t) ((uint32_t) tl << 11) + (tr - tl) * alpha_h;
      const int32_t bottom = (int32_t) ((uint32_t) bl << 11) + (br - bl) * alpha_h;
      const int32_t acc = (int32_t) ((uint32_t) top << 11) + (bottom - top) * alpha_v;
      *output++ = (uint8_t) math_asr_s32(acc + rounding, 22);

      c -= sizeof(uint8_t);
    }

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}
| 3,406
| 32.07767
| 79
|
c
|
XNNPACK
|
XNNPACK-master/src/u8-ibilinear/gen/u8-ibilinear-scalar-c4.c
|
// Auto-generated file. Do not edit!
// Template: src/s8-ibilinear/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/ibilinear.h>
#include <xnnpack/math.h>
// Bilinear interpolation of uint8 pixels, four channels per main-loop step.
// For each output pixel, input[] supplies pointers to the four corner rows
// and weights[] supplies a horizontal/vertical weight pair; each
// interpolation stage adds 11 fractional bits and the result is rounded
// back with >> 22.
void xnn_u8_ibilinear_ukernel__scalar_c4(
    size_t output_pixels,
    size_t channels,
    const uint8_t** restrict input,
    size_t input_offset,
    const int16_t* restrict weights,
    uint8_t* restrict output,
    size_t output_increment)
{
  assert(output_pixels != 0);
  assert(channels != 0);

  do {
    // Corner-row pointers (top-left, top-right, bottom-left, bottom-right).
    const uint8_t* itl = (const uint8_t*) ((uintptr_t) input[0] + input_offset);
    const uint8_t* itr = (const uint8_t*) ((uintptr_t) input[1] + input_offset);
    const uint8_t* ibl = (const uint8_t*) ((uintptr_t) input[2] + input_offset);
    const uint8_t* ibr = (const uint8_t*) ((uintptr_t) input[3] + input_offset);
    input += 4;

    // Horizontal and vertical interpolation weights (zero-extended int16).
    const int32_t alpha_h = (int32_t) (uint32_t) (uint16_t) weights[0];
    const int32_t alpha_v = (int32_t) (uint32_t) (uint16_t) weights[1];
    weights += 2;

    const int32_t rounding = INT32_C(0x00200000);

    size_t c = channels;
    // Main loop: four channels per iteration.
    for (; c >= 4 * sizeof(uint8_t); c -= 4 * sizeof(uint8_t)) {
      for (size_t k = 0; k < 4; k++) {
        const int32_t tl = (int32_t) itl[k];
        const int32_t tr = (int32_t) itr[k];
        const int32_t bl = (int32_t) ibl[k];
        const int32_t br = (int32_t) ibr[k];

        // Horizontal pass along the top and bottom rows.
        const int32_t top = (int32_t) ((uint32_t) tl << 11) + (tr - tl) * alpha_h;
        const int32_t bottom = (int32_t) ((uint32_t) bl << 11) + (br - bl) * alpha_h;

        // Vertical pass, then round the accumulated fixed-point value.
        const int32_t acc = (int32_t) ((uint32_t) top << 11) + (bottom - top) * alpha_v;
        output[k] = (uint8_t) math_asr_s32(acc + rounding, 22);
      }
      itl += 4;
      itr += 4;
      ibl += 4;
      ibr += 4;
      output += 4;
    }
    // Tail: up to three remaining channels, one at a time.
    while (c >= sizeof(uint8_t)) {
      const int32_t tl = (int32_t) *itl++;
      const int32_t tr = (int32_t) *itr++;
      const int32_t bl = (int32_t) *ibl++;
      const int32_t br = (int32_t) *ibr++;

      const int32_t top = (int32_t) ((uint32_t) tl << 11) + (tr - tl) * alpha_h;
      const int32_t bottom = (int32_t) ((uint32_t) bl << 11) + (br - bl) * alpha_h;
      const int32_t acc = (int32_t) ((uint32_t) top << 11) + (bottom - top) * alpha_v;
      *output++ = (uint8_t) math_asr_s32(acc + rounding, 22);

      c -= sizeof(uint8_t);
    }

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}
| 4,652
| 35.637795
| 79
|
c
|
XNNPACK
|
XNNPACK-master/src/u8-ibilinear/gen/u8-ibilinear-sse2-c16.c
|
// Auto-generated file. Do not edit!
// Template: src/s8-ibilinear/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>
#include <xnnpack/unaligned.h>
// Bilinear interpolation of uint8 pixels with SSE2, 16 channels per
// main-loop iteration. For each output pixel, input[] supplies pointers to
// the four corner rows (top-left, top-right, bottom-left, bottom-right) and
// weights[] supplies an (alphah, alphav) int16 pair; each interpolation
// stage adds 11 fractional bits and the result is rounded back with >> 22.
// XNN_OOB_READS: the remainder path loads full 8-byte vectors, so reads may
// extend past the last channel.
void xnn_u8_ibilinear_ukernel__sse2_c16(
    size_t output_pixels,
    size_t channels,
    const uint8_t** restrict input,
    size_t input_offset,
    const int16_t* restrict weights,
    uint8_t* restrict output,
    size_t output_increment) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(channels != 0);
  do {
    // Corner-row pointers for the current output pixel.
    const uint8_t* i0 = (const uint8_t*) ((uintptr_t) input[0] + input_offset);
    const uint8_t* i1 = (const uint8_t*) ((uintptr_t) input[1] + input_offset);
    const uint8_t* i2 = (const uint8_t*) ((uintptr_t) input[2] + input_offset);
    const uint8_t* i3 = (const uint8_t*) ((uintptr_t) input[3] + input_offset);
    input += 4;
    // Load the (alphah, alphav) pair and broadcast each half to all lanes.
    const __m128i valpha = _mm_cvtsi32_si128(*((const int*) weights));
    weights += 2;
    __m128i valphah = _mm_shufflelo_epi16(valpha, _MM_SHUFFLE(0, 0, 0, 0));
    valphah = _mm_unpacklo_epi64(valphah, valphah);
    __m128i valphav = _mm_shufflelo_epi16(valpha, _MM_SHUFFLE(1, 1, 1, 1));
    valphav = _mm_unpacklo_epi64(valphav, valphav);
    // Turn each 32-bit lane of valphah from (alphah, alphah) into
    // (alphah, 0x0800 - alphah): the XOR makes the high half ~alphah and the
    // 16-bit add of 0x0801 completes its negation (the low half gets +0, so
    // no cross-lane carry is possible). PMADDWD against interleaved
    // (right, left) samples then computes (left << 11) + (right - left) *
    // alphah in one instruction.
    valphah = _mm_xor_si128(valphah, _mm_set1_epi32(0xFFFF0000));
    valphah = _mm_add_epi16(valphah, _mm_set1_epi32(0x08010000));
    // Rounding term for the final shift by 22.
    const __m128i vrounding = _mm_set1_epi32(0x00200000);
    size_t c = channels;
    for (; c >= 16 * sizeof(uint8_t); c -= 16 * sizeof(uint8_t)) {
      // Load 16 corner samples per row as two 8-byte halves.
      __m128i vtl01234567 = _mm_loadl_epi64((const __m128i*) i0);
      __m128i vtr01234567 = _mm_loadl_epi64((const __m128i*) i1);
      __m128i vbl01234567 = _mm_loadl_epi64((const __m128i*) i2);
      __m128i vbr01234567 = _mm_loadl_epi64((const __m128i*) i3);
      __m128i vtl89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
      __m128i vtr89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
      __m128i vbl89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
      __m128i vbr89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
      i0 += 16;
      i1 += 16;
      i2 += 16;
      i3 += 16;
      // Zero-extend the uint8 samples to 16 bits.
      __m128i vzero = _mm_setzero_si128();
      vtl01234567 = _mm_unpacklo_epi8(vtl01234567, vzero);
      vtr01234567 = _mm_unpacklo_epi8(vtr01234567, vzero);
      vbl01234567 = _mm_unpacklo_epi8(vbl01234567, vzero);
      vbr01234567 = _mm_unpacklo_epi8(vbr01234567, vzero);
      vtl89ABCDEF = _mm_unpacklo_epi8(vtl89ABCDEF, vzero);
      vtr89ABCDEF = _mm_unpacklo_epi8(vtr89ABCDEF, vzero);
      vbl89ABCDEF = _mm_unpacklo_epi8(vbl89ABCDEF, vzero);
      vbr89ABCDEF = _mm_unpacklo_epi8(vbr89ABCDEF, vzero);
      // Horizontal pass: vt* interpolates the top row, vd* interpolates the
      // per-channel (bottom - top) differences; both yield 11 fractional
      // bits via the PMADDWD weight-pair trick above.
      const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
      const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
      const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdr89ABCDEF = _mm_sub_epi16(vbr89ABCDEF, vtr89ABCDEF);
      const __m128i vt89AB = _mm_madd_epi16(_mm_unpacklo_epi16(vtr89ABCDEF, vtl89ABCDEF), valphah);
      const __m128i vdl89ABCDEF = _mm_sub_epi16(vbl89ABCDEF, vtl89ABCDEF);
      const __m128i vtCDEF = _mm_madd_epi16(_mm_unpackhi_epi16(vtr89ABCDEF, vtl89ABCDEF), valphah);
      const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd89AB = _mm_madd_epi16(_mm_unpacklo_epi16(vdr89ABCDEF, vdl89ABCDEF), valphah);
      const __m128i vdCDEF = _mm_madd_epi16(_mm_unpackhi_epi16(vdr89ABCDEF, vdl89ABCDEF), valphah);
      // Vertical pass: vacc = (vt << 11) + vd * alphav. The mulhi/mullo pair
      // emulates the low 32 bits of a 32x16-bit multiply (SSE2 has no
      // _mm_mullo_epi32); the 16-bit add is carry-free because the low half
      // of the shifted-high product is zero.
      __m128i vacc0123 = _mm_slli_epi32(_mm_mulhi_epu16(vd0123, valphav), 16);
      __m128i vacc4567 = _mm_slli_epi32(_mm_mulhi_epu16(vd4567, valphav), 16);
      __m128i vacc89AB = _mm_slli_epi32(_mm_mulhi_epu16(vd89AB, valphav), 16);
      __m128i vaccCDEF = _mm_slli_epi32(_mm_mulhi_epu16(vdCDEF, valphav), 16);
      vacc0123 = _mm_add_epi16(_mm_mullo_epi16(vd0123, valphav), vacc0123);
      vacc4567 = _mm_add_epi16(_mm_mullo_epi16(vd4567, valphav), vacc4567);
      vacc89AB = _mm_add_epi16(_mm_mullo_epi16(vd89AB, valphav), vacc89AB);
      vaccCDEF = _mm_add_epi16(_mm_mullo_epi16(vdCDEF, valphav), vaccCDEF);
      vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
      vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
      vacc89AB = _mm_add_epi32(_mm_slli_epi32(vt89AB, 11), vacc89AB);
      vaccCDEF = _mm_add_epi32(_mm_slli_epi32(vtCDEF, 11), vaccCDEF);
      // Round and shift the 22-fractional-bit accumulators down to 8 bits.
      // The 16-bit add of the rounding constant cannot lose a carry: its low
      // 16 bits are zero.
      vacc0123 = _mm_srli_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
      vacc4567 = _mm_srli_epi32(_mm_add_epi16(vacc4567, vrounding), 22);
      vacc89AB = _mm_srli_epi32(_mm_add_epi16(vacc89AB, vrounding), 22);
      vaccCDEF = _mm_srli_epi32(_mm_add_epi16(vaccCDEF, vrounding), 22);
      // Narrow 32 -> 16 -> 8 with saturation and store all 16 bytes.
      const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
      const __m128i vacc89ABCDEF = _mm_packs_epi32(vacc89AB, vaccCDEF);
      const __m128i vo0123456789ABCDEF = _mm_packus_epi16(vacc01234567, vacc89ABCDEF);
      _mm_storeu_si128((__m128i*) output, vo0123456789ABCDEF);
      output += 16;
    }
    // 8-channel loop: the same computation on a single 8-wide group.
    for (; c >= 8 * sizeof(uint8_t); c -= 8 * sizeof(uint8_t)) {
      __m128i vtl01234567 = _mm_loadl_epi64((const __m128i*) i0);
      i0 += 8;
      __m128i vtr01234567 = _mm_loadl_epi64((const __m128i*) i1);
      i1 += 8;
      __m128i vbl01234567 = _mm_loadl_epi64((const __m128i*) i2);
      i2 += 8;
      __m128i vbr01234567 = _mm_loadl_epi64((const __m128i*) i3);
      i3 += 8;
      __m128i vzero = _mm_setzero_si128();
      vtl01234567 = _mm_unpacklo_epi8(vtl01234567, vzero);
      vtr01234567 = _mm_unpacklo_epi8(vtr01234567, vzero);
      vbl01234567 = _mm_unpacklo_epi8(vbl01234567, vzero);
      vbr01234567 = _mm_unpacklo_epi8(vbr01234567, vzero);
      const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
      const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
      const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);
      __m128i vacc0123 = _mm_slli_epi32(_mm_mulhi_epu16(vd0123, valphav), 16);
      __m128i vacc4567 = _mm_slli_epi32(_mm_mulhi_epu16(vd4567, valphav), 16);
      vacc0123 = _mm_add_epi16(_mm_mullo_epi16(vd0123, valphav), vacc0123);
      vacc4567 = _mm_add_epi16(_mm_mullo_epi16(vd4567, valphav), vacc4567);
      vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
      vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
      vacc0123 = _mm_srli_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
      vacc4567 = _mm_srli_epi32(_mm_add_epi16(vacc4567, vrounding), 22);
      const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
      const __m128i vo01234567 = _mm_packus_epi16(vacc01234567, vacc01234567);
      _mm_storel_epi64((__m128i*) output, vo01234567);
      output += 8;
    }
    // Remainder of 1-7 channels: compute a full 8-wide result (may read past
    // the end of the rows, hence XNN_OOB_READS) and store 4/2/1 bytes.
    if XNN_UNLIKELY(c != 0) {
      __m128i vtl01234567 = _mm_loadl_epi64((const __m128i*) i0);
      __m128i vtr01234567 = _mm_loadl_epi64((const __m128i*) i1);
      __m128i vbl01234567 = _mm_loadl_epi64((const __m128i*) i2);
      __m128i vbr01234567 = _mm_loadl_epi64((const __m128i*) i3);
      __m128i vzero = _mm_setzero_si128();
      vtl01234567 = _mm_unpacklo_epi8(vtl01234567, vzero);
      vtr01234567 = _mm_unpacklo_epi8(vtr01234567, vzero);
      vbl01234567 = _mm_unpacklo_epi8(vbl01234567, vzero);
      vbr01234567 = _mm_unpacklo_epi8(vbr01234567, vzero);
      const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
      const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
      const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);
      __m128i vacc0123 = _mm_slli_epi32(_mm_mulhi_epu16(vd0123, valphav), 16);
      __m128i vacc4567 = _mm_slli_epi32(_mm_mulhi_epu16(vd4567, valphav), 16);
      vacc0123 = _mm_add_epi16(_mm_mullo_epi16(vd0123, valphav), vacc0123);
      vacc4567 = _mm_add_epi16(_mm_mullo_epi16(vd4567, valphav), vacc4567);
      vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
      vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
      vacc0123 = _mm_srli_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
      vacc4567 = _mm_srli_epi32(_mm_add_epi16(vacc4567, vrounding), 22);
      const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
      __m128i vo01234567 = _mm_packus_epi16(vacc01234567, vacc01234567);
      // Store the low 4, 2, and/or 1 bytes depending on the remaining count.
      if (c & (4 * sizeof(uint8_t))) {
        unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vo01234567));
        output += 4;
        vo01234567 = _mm_srli_epi64(vo01234567, 32);
      }
      uint32_t vo0123 = (uint32_t) _mm_cvtsi128_si32(vo01234567);
      if (c & (2 * sizeof(uint8_t))) {
        unaligned_store_u16(output, (uint16_t) vo0123);
        output += 2;
        vo0123 >>= 16;
      }
      if (c & (1 * sizeof(uint8_t))) {
        *output++ = (uint8_t) vo0123;
      }
    }
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}
| 9,990
| 45.469767
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/u8-ibilinear/gen/u8-ibilinear-sse2-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/s8-ibilinear/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>
#include <xnnpack/unaligned.h>
// Bilinear interpolation of uint8 pixels with SSE2, 8 channels per main-loop
// iteration. For each output pixel, input[] supplies pointers to the four
// corner rows (top-left, top-right, bottom-left, bottom-right) and weights[]
// supplies an (alphah, alphav) int16 pair; each interpolation stage adds 11
// fractional bits and the result is rounded back with >> 22.
// XNN_OOB_READS: the remainder path loads full 8-byte vectors, so reads may
// extend past the last channel.
void xnn_u8_ibilinear_ukernel__sse2_c8(
    size_t output_pixels,
    size_t channels,
    const uint8_t** restrict input,
    size_t input_offset,
    const int16_t* restrict weights,
    uint8_t* restrict output,
    size_t output_increment) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(channels != 0);
  do {
    // Corner-row pointers for the current output pixel.
    const uint8_t* i0 = (const uint8_t*) ((uintptr_t) input[0] + input_offset);
    const uint8_t* i1 = (const uint8_t*) ((uintptr_t) input[1] + input_offset);
    const uint8_t* i2 = (const uint8_t*) ((uintptr_t) input[2] + input_offset);
    const uint8_t* i3 = (const uint8_t*) ((uintptr_t) input[3] + input_offset);
    input += 4;
    // Load the (alphah, alphav) pair and broadcast each half to all lanes.
    const __m128i valpha = _mm_cvtsi32_si128(*((const int*) weights));
    weights += 2;
    __m128i valphah = _mm_shufflelo_epi16(valpha, _MM_SHUFFLE(0, 0, 0, 0));
    valphah = _mm_unpacklo_epi64(valphah, valphah);
    __m128i valphav = _mm_shufflelo_epi16(valpha, _MM_SHUFFLE(1, 1, 1, 1));
    valphav = _mm_unpacklo_epi64(valphav, valphav);
    // Turn each 32-bit lane of valphah from (alphah, alphah) into
    // (alphah, 0x0800 - alphah): the XOR makes the high half ~alphah and the
    // 16-bit add of 0x0801 completes its negation (the low half gets +0, so
    // no cross-lane carry is possible). PMADDWD against interleaved
    // (right, left) samples then computes (left << 11) + (right - left) *
    // alphah in one instruction.
    valphah = _mm_xor_si128(valphah, _mm_set1_epi32(0xFFFF0000));
    valphah = _mm_add_epi16(valphah, _mm_set1_epi32(0x08010000));
    // Rounding term for the final shift by 22.
    const __m128i vrounding = _mm_set1_epi32(0x00200000);
    size_t c = channels;
    for (; c >= 8 * sizeof(uint8_t); c -= 8 * sizeof(uint8_t)) {
      // Load 8 corner samples per row.
      __m128i vtl01234567 = _mm_loadl_epi64((const __m128i*) i0);
      i0 += 8;
      __m128i vtr01234567 = _mm_loadl_epi64((const __m128i*) i1);
      i1 += 8;
      __m128i vbl01234567 = _mm_loadl_epi64((const __m128i*) i2);
      i2 += 8;
      __m128i vbr01234567 = _mm_loadl_epi64((const __m128i*) i3);
      i3 += 8;
      // Zero-extend the uint8 samples to 16 bits.
      __m128i vzero = _mm_setzero_si128();
      vtl01234567 = _mm_unpacklo_epi8(vtl01234567, vzero);
      vtr01234567 = _mm_unpacklo_epi8(vtr01234567, vzero);
      vbl01234567 = _mm_unpacklo_epi8(vbl01234567, vzero);
      vbr01234567 = _mm_unpacklo_epi8(vbr01234567, vzero);
      // Horizontal pass: vt* interpolates the top row, vd* interpolates the
      // per-channel (bottom - top) differences; both yield 11 fractional
      // bits via the PMADDWD weight-pair trick above.
      const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
      const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
      const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);
      // Vertical pass: vacc = (vt << 11) + vd * alphav. The mulhi/mullo pair
      // emulates the low 32 bits of a 32x16-bit multiply (SSE2 has no
      // _mm_mullo_epi32); the 16-bit add is carry-free because the low half
      // of the shifted-high product is zero.
      __m128i vacc0123 = _mm_slli_epi32(_mm_mulhi_epu16(vd0123, valphav), 16);
      __m128i vacc4567 = _mm_slli_epi32(_mm_mulhi_epu16(vd4567, valphav), 16);
      vacc0123 = _mm_add_epi16(_mm_mullo_epi16(vd0123, valphav), vacc0123);
      vacc4567 = _mm_add_epi16(_mm_mullo_epi16(vd4567, valphav), vacc4567);
      vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
      vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
      // Round and shift the 22-fractional-bit accumulators down to 8 bits.
      // The 16-bit add of the rounding constant cannot lose a carry: its low
      // 16 bits are zero.
      vacc0123 = _mm_srli_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
      vacc4567 = _mm_srli_epi32(_mm_add_epi16(vacc4567, vrounding), 22);
      // Narrow 32 -> 16 -> 8 with saturation and store 8 bytes.
      const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
      const __m128i vo01234567 = _mm_packus_epi16(vacc01234567, vacc01234567);
      _mm_storel_epi64((__m128i*) output, vo01234567);
      output += 8;
    }
    // Remainder of 1-7 channels: compute a full 8-wide result (may read past
    // the end of the rows, hence XNN_OOB_READS) and store 4/2/1 bytes.
    if XNN_UNLIKELY(c != 0) {
      __m128i vtl01234567 = _mm_loadl_epi64((const __m128i*) i0);
      __m128i vtr01234567 = _mm_loadl_epi64((const __m128i*) i1);
      __m128i vbl01234567 = _mm_loadl_epi64((const __m128i*) i2);
      __m128i vbr01234567 = _mm_loadl_epi64((const __m128i*) i3);
      __m128i vzero = _mm_setzero_si128();
      vtl01234567 = _mm_unpacklo_epi8(vtl01234567, vzero);
      vtr01234567 = _mm_unpacklo_epi8(vtr01234567, vzero);
      vbl01234567 = _mm_unpacklo_epi8(vbl01234567, vzero);
      vbr01234567 = _mm_unpacklo_epi8(vbr01234567, vzero);
      const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
      const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
      const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);
      __m128i vacc0123 = _mm_slli_epi32(_mm_mulhi_epu16(vd0123, valphav), 16);
      __m128i vacc4567 = _mm_slli_epi32(_mm_mulhi_epu16(vd4567, valphav), 16);
      vacc0123 = _mm_add_epi16(_mm_mullo_epi16(vd0123, valphav), vacc0123);
      vacc4567 = _mm_add_epi16(_mm_mullo_epi16(vd4567, valphav), vacc4567);
      vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
      vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
      vacc0123 = _mm_srli_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
      vacc4567 = _mm_srli_epi32(_mm_add_epi16(vacc4567, vrounding), 22);
      const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
      __m128i vo01234567 = _mm_packus_epi16(vacc01234567, vacc01234567);
      // Store the low 4, 2, and/or 1 bytes depending on the remaining count.
      if (c & (4 * sizeof(uint8_t))) {
        unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vo01234567));
        output += 4;
        vo01234567 = _mm_srli_epi64(vo01234567, 32);
      }
      uint32_t vo0123 = (uint32_t) _mm_cvtsi128_si32(vo01234567);
      if (c & (2 * sizeof(uint8_t))) {
        unaligned_store_u16(output, (uint16_t) vo0123);
        output += 2;
        vo0123 >>= 16;
      }
      if (c & (1 * sizeof(uint8_t))) {
        *output++ = (uint8_t) vo0123;
      }
    }
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}
| 6,169
| 40.409396
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/u8-ibilinear/gen/u8-ibilinear-sse41-c16.c
|
// Auto-generated file. Do not edit!
// Template: src/s8-ibilinear/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>
#include <xnnpack/unaligned.h>
void xnn_u8_ibilinear_ukernel__sse41_c16(
size_t output_pixels,
size_t channels,
const uint8_t** restrict input,
size_t input_offset,
const int16_t* restrict weights,
uint8_t* restrict output,
size_t output_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(channels != 0);
do {
const uint8_t* i0 = (const uint8_t*) ((uintptr_t) input[0] + input_offset);
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) input[1] + input_offset);
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) input[2] + input_offset);
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) input[3] + input_offset);
input += 4;
const __m128i valpha = _mm_cvtsi32_si128(*((const int*) weights));
weights += 2;
__m128i valphah = _mm_shufflelo_epi16(valpha, _MM_SHUFFLE(0, 0, 0, 0));
valphah = _mm_unpacklo_epi64(valphah, valphah);
__m128i valphav = _mm_srli_epi32(valpha, 16);
valphav = _mm_shuffle_epi32(valphav, _MM_SHUFFLE(0, 0, 0, 0));
valphah = _mm_blend_epi16(valphah, _mm_sub_epi16(_mm_set1_epi32(0x08000000), valphah), 0xAA);
const __m128i vrounding = _mm_set1_epi32(0x00200000);
size_t c = channels;
for (; c >= 16 * sizeof(uint8_t); c -= 16 * sizeof(uint8_t)) {
const __m128i vtl01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
const __m128i vtr01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
const __m128i vbl01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
const __m128i vbr01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
const __m128i vtl89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
const __m128i vtr89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
const __m128i vbl89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
const __m128i vbr89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
i0 += 16;
i1 += 16;
i2 += 16;
i3 += 16;
const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
const __m128i vdr89ABCDEF = _mm_sub_epi16(vbr89ABCDEF, vtr89ABCDEF);
const __m128i vt89AB = _mm_madd_epi16(_mm_unpacklo_epi16(vtr89ABCDEF, vtl89ABCDEF), valphah);
const __m128i vdl89ABCDEF = _mm_sub_epi16(vbl89ABCDEF, vtl89ABCDEF);
const __m128i vtCDEF = _mm_madd_epi16(_mm_unpackhi_epi16(vtr89ABCDEF, vtl89ABCDEF), valphah);
const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);
const __m128i vd89AB = _mm_madd_epi16(_mm_unpacklo_epi16(vdr89ABCDEF, vdl89ABCDEF), valphah);
const __m128i vdCDEF = _mm_madd_epi16(_mm_unpackhi_epi16(vdr89ABCDEF, vdl89ABCDEF), valphah);
__m128i vacc0123 = _mm_mullo_epi32(vd0123, valphav);
__m128i vacc4567 = _mm_mullo_epi32(vd4567, valphav);
__m128i vacc89AB = _mm_mullo_epi32(vd89AB, valphav);
__m128i vaccCDEF = _mm_mullo_epi32(vdCDEF, valphav);
vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
vacc89AB = _mm_add_epi32(_mm_slli_epi32(vt89AB, 11), vacc89AB);
vaccCDEF = _mm_add_epi32(_mm_slli_epi32(vtCDEF, 11), vaccCDEF);
vacc0123 = _mm_srli_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
vacc4567 = _mm_srli_epi32(_mm_add_epi16(vacc4567, vrounding), 22);
vacc89AB = _mm_srli_epi32(_mm_add_epi16(vacc89AB, vrounding), 22);
vaccCDEF = _mm_srli_epi32(_mm_add_epi16(vaccCDEF, vrounding), 22);
const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
const __m128i vacc89ABCDEF = _mm_packs_epi32(vacc89AB, vaccCDEF);
const __m128i vo0123456789ABCDEF = _mm_packus_epi16(vacc01234567, vacc89ABCDEF);
_mm_storeu_si128((__m128i*) output, vo0123456789ABCDEF);
output += 16;
}
for (; c >= 8 * sizeof(uint8_t); c -= 8 * sizeof(uint8_t)) {
const __m128i vtl01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
i0 += 8;
const __m128i vtr01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
i1 += 8;
const __m128i vbl01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
i2 += 8;
const __m128i vbr01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
i3 += 8;
const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);
__m128i vacc0123 = _mm_mullo_epi32(vd0123, valphav);
__m128i vacc4567 = _mm_mullo_epi32(vd4567, valphav);
vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
vacc0123 = _mm_srli_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
vacc4567 = _mm_srli_epi32(_mm_add_epi16(vacc4567, vrounding), 22);
const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
const __m128i vo01234567 = _mm_packus_epi16(vacc01234567, vacc01234567);
_mm_storel_epi64((__m128i*) output, vo01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
const __m128i vtl01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
const __m128i vtr01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
const __m128i vbl01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
const __m128i vbr01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);
__m128i vacc0123 = _mm_mullo_epi32(vd0123, valphav);
__m128i vacc4567 = _mm_mullo_epi32(vd4567, valphav);
vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
vacc0123 = _mm_srli_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
vacc4567 = _mm_srli_epi32(_mm_add_epi16(vacc4567, vrounding), 22);
const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
__m128i vo01234567 = _mm_packus_epi16(vacc01234567, vacc01234567);
if (c & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vo01234567));
output += 4;
vo01234567 = _mm_srli_epi64(vo01234567, 32);
}
if (c & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vo01234567, 0));
output += 2;
vo01234567 = _mm_srli_epi32(vo01234567, 16);
}
if (c & (1 * sizeof(uint8_t))) {
*output++ = (uint8_t) _mm_extract_epi8(vo01234567, 0);
}
}
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 8,517
| 45.546448
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/u8-ibilinear/gen/u8-ibilinear-sse41-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/s8-ibilinear/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>
#include <xnnpack/unaligned.h>
// uint8 bilinear-interpolation (image resize) micro-kernel, SSE4.1,
// 8 channels per vector iteration.
//
// Per output pixel, input[] supplies four corner-row pointers: top-left (i0),
// top-right (i1), bottom-left (i2), bottom-right (i3), and weights[] supplies
// two Q11 fixed-point int16 coefficients: alpha_h (horizontal) then alpha_v
// (vertical). Each channel is computed in Q22 fixed point as:
//   top    = tr*alpha_h + tl*(2048 - alpha_h)
//   bottom = br*alpha_h + bl*(2048 - alpha_h)
//   acc    = (top << 11) + (bottom - top)*alpha_v
//   out    = (acc + 2^21) >> 22   // round to nearest
void xnn_u8_ibilinear_ukernel__sse41_c8(
    size_t output_pixels,             // number of output pixels; must be != 0
    size_t channels,                  // channels (bytes) per pixel; must be != 0
    const uint8_t** restrict input,   // 4 corner pointers per output pixel
    size_t input_offset,              // byte offset added to each input pointer
    const int16_t* restrict weights,  // (alpha_h, alpha_v) per pixel, Q11
    uint8_t* restrict output,
    size_t output_increment) XNN_OOB_READS  // bytes skipped after each pixel
{
  assert(output_pixels != 0);
  assert(channels != 0);
  do {
    const uint8_t* i0 = (const uint8_t*) ((uintptr_t) input[0] + input_offset);
    const uint8_t* i1 = (const uint8_t*) ((uintptr_t) input[1] + input_offset);
    const uint8_t* i2 = (const uint8_t*) ((uintptr_t) input[2] + input_offset);
    const uint8_t* i3 = (const uint8_t*) ((uintptr_t) input[3] + input_offset);
    input += 4;
    // Load the (alpha_h, alpha_v) int16 pair into lane 0.
    const __m128i valpha = _mm_cvtsi32_si128(*((const int*) weights));
    weights += 2;
    // Broadcast alpha_h into every 16-bit lane.
    __m128i valphah = _mm_shufflelo_epi16(valpha, _MM_SHUFFLE(0, 0, 0, 0));
    valphah = _mm_unpacklo_epi64(valphah, valphah);
    // alpha_v zero-extended into every 32-bit lane.
    __m128i valphav = _mm_srli_epi32(valpha, 16);
    valphav = _mm_shuffle_epi32(valphav, _MM_SHUFFLE(0, 0, 0, 0));
    // Replace the odd 16-bit lanes with (0x0800 - alpha_h), so every 32-bit
    // lane holds the (alpha_h, 2048 - alpha_h) pair that _mm_madd_epi16
    // expects against interleaved (right, left) samples.
    valphah = _mm_blend_epi16(valphah, _mm_sub_epi16(_mm_set1_epi32(0x08000000), valphah), 0xAA);
    // Round-to-nearest bias: 2^21, half of the final 2^22 divisor.
    const __m128i vrounding = _mm_set1_epi32(0x00200000);
    size_t c = channels;
    for (; c >= 8 * sizeof(uint8_t); c -= 8 * sizeof(uint8_t)) {
      // Load 8 bytes from each corner row and widen to int16.
      const __m128i vtl01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
      i0 += 8;
      const __m128i vtr01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
      i1 += 8;
      const __m128i vbl01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
      i2 += 8;
      const __m128i vbr01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
      i3 += 8;
      // Vertical deltas (bottom - top) for the right and left columns.
      const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
      // Horizontal blend of the top row: tr*ah + tl*(2048-ah), Q11 weights.
      const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
      const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
      // Horizontal blend of the vertical delta.
      const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);
      // Vertical blend in Q22: (top << 11) + delta * alpha_v.
      __m128i vacc0123 = _mm_mullo_epi32(vd0123, valphav);
      __m128i vacc4567 = _mm_mullo_epi32(vd4567, valphav);
      vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
      vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
      // Round and rescale to the u8 range. The 16-bit add is equivalent to a
      // 32-bit add here because the low 16 bits of vrounding are zero, so no
      // carry needs to cross a 16-bit lane boundary.
      vacc0123 = _mm_srli_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
      vacc4567 = _mm_srli_epi32(_mm_add_epi16(vacc4567, vrounding), 22);
      // Narrow 32 -> 16 -> 8 bits and store 8 output bytes.
      const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
      const __m128i vo01234567 = _mm_packus_epi16(vacc01234567, vacc01234567);
      _mm_storel_epi64((__m128i*) output, vo01234567);
      output += 8;
    }
    if XNN_UNLIKELY(c != 0) {
      // Remainder of 1-7 channels: compute a full 8-wide result (reading past
      // the row end is permitted per XNN_OOB_READS) and store it piecewise.
      const __m128i vtl01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
      const __m128i vtr01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
      const __m128i vbl01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
      const __m128i vbr01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
      const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
      const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
      const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);
      __m128i vacc0123 = _mm_mullo_epi32(vd0123, valphav);
      __m128i vacc4567 = _mm_mullo_epi32(vd4567, valphav);
      vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
      vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
      vacc0123 = _mm_srli_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
      vacc4567 = _mm_srli_epi32(_mm_add_epi16(vacc4567, vrounding), 22);
      const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
      __m128i vo01234567 = _mm_packus_epi16(vacc01234567, vacc01234567);
      // Store 4, 2, then 1 byte(s) as the remainder bits dictate, shifting the
      // stored lanes out of the vector after each partial store.
      if (c & (4 * sizeof(uint8_t))) {
        unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vo01234567));
        output += 4;
        vo01234567 = _mm_srli_epi64(vo01234567, 32);
      }
      if (c & (2 * sizeof(uint8_t))) {
        unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vo01234567, 0));
        output += 2;
        vo01234567 = _mm_srli_epi32(vo01234567, 16);
      }
      if (c & (1 * sizeof(uint8_t))) {
        *output++ = (uint8_t) _mm_extract_epi8(vo01234567, 0);
      }
    }
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}
| 5,396
| 40.198473
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/u8-ibilinear/gen/u8-ibilinear-wasmsimd-dot16x2-c16.c
|
// Auto-generated file. Do not edit!
// Template: src/s8-ibilinear/wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>
// uint8 bilinear-interpolation (image resize) micro-kernel, WebAssembly SIMD128
// with the i32x4.dot_i16x8_s instruction, 16 channels per main-loop iteration
// plus an 8-channel loop and a sub-8 remainder path.
//
// Per output pixel, input[] supplies four corner-row pointers: top-left (i0),
// top-right (i1), bottom-left (i2), bottom-right (i3), and weights[] supplies
// two Q11 fixed-point int16 coefficients: alpha_h (horizontal) then alpha_v
// (vertical). Each channel is computed in Q22 fixed point as:
//   top    = tr*alpha_h + tl*(2048 - alpha_h)
//   bottom = br*alpha_h + bl*(2048 - alpha_h)
//   acc    = (top << 11) + (bottom - top)*alpha_v
//   out    = (acc + 2^21) >> 22   // round to nearest
void xnn_u8_ibilinear_ukernel__wasmsimd_dot16x2_c16(
    size_t output_pixels,             // number of output pixels; must be != 0
    size_t channels,                  // channels (bytes) per pixel; must be != 0
    const uint8_t** restrict input,   // 4 corner pointers per output pixel
    size_t input_offset,              // byte offset added to each input pointer
    const int16_t* restrict weights,  // (alpha_h, alpha_v) per pixel, Q11
    uint8_t* restrict output,
    size_t output_increment) XNN_OOB_READS  // bytes skipped after each pixel
{
  assert(output_pixels != 0);
  assert(channels != 0);
  do {
    const uint8_t* i0 = (const uint8_t*) ((uintptr_t) input[0] + input_offset);
    const uint8_t* i1 = (const uint8_t*) ((uintptr_t) input[1] + input_offset);
    const uint8_t* i2 = (const uint8_t*) ((uintptr_t) input[2] + input_offset);
    const uint8_t* i3 = (const uint8_t*) ((uintptr_t) input[3] + input_offset);
    input += 4;
    // Build (alpha_h, 2048 - alpha_h) pairs in every 32-bit lane: the splat
    // puts alpha_h in all 16-bit lanes, the XOR complements the odd lanes
    // (~ah), and the lane-wise 16-bit add turns them into 0x0800 - ah
    // (~ah + 0x0801 == 2048 - ah mod 2^16) while leaving even lanes as ah.
    const v128_t valphah =
      wasm_i16x8_add(
        wasm_v128_xor(
          wasm_v128_load16_splat(weights),
          wasm_i32x4_const_splat(0xFFFF0000)),
        wasm_i32x4_const_splat(0x08010000));
    // alpha_v sign-extended into every 32-bit lane.
    const v128_t valphav = wasm_i32x4_extend_low_i16x8(wasm_v128_load16_splat(weights + 1));
    weights += 2;
    // Round-to-nearest bias: 2^21, half of the final 2^22 divisor.
    const v128_t vrounding = wasm_i32x4_const_splat(0x00200000);
    size_t c = channels;
    for (; c >= 16 * sizeof(uint8_t); c -= 16 * sizeof(uint8_t)) {
      // Load 16 bytes from each corner row, widened to int16 in two halves.
      const v128_t vtl01234567 = wasm_u16x8_load8x8(i0);
      const v128_t vtr01234567 = wasm_u16x8_load8x8(i1);
      const v128_t vbl01234567 = wasm_u16x8_load8x8(i2);
      const v128_t vbr01234567 = wasm_u16x8_load8x8(i3);
      const v128_t vtl89ABCDEF = wasm_u16x8_load8x8(i0 + 8);
      const v128_t vtr89ABCDEF = wasm_u16x8_load8x8(i1 + 8);
      const v128_t vbl89ABCDEF = wasm_u16x8_load8x8(i2 + 8);
      const v128_t vbr89ABCDEF = wasm_u16x8_load8x8(i3 + 8);
      i0 += 16;
      i1 += 16;
      i2 += 16;
      i3 += 16;
      // Vertical deltas (bottom - top); horizontal blends via dot products of
      // interleaved (right, left) pairs against (ah, 2048-ah) pairs.
      const v128_t vdr01234567 = wasm_i16x8_sub(vbr01234567, vtr01234567)
      ;
      const v128_t vt0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vdl01234567 = wasm_i16x8_sub(vbl01234567, vtl01234567);
      const v128_t vt4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
      const v128_t vdr89ABCDEF = wasm_i16x8_sub(vbr89ABCDEF, vtr89ABCDEF);
      const v128_t vt89AB = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr89ABCDEF, vtl89ABCDEF, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vdl89ABCDEF = wasm_i16x8_sub(vbl89ABCDEF, vtl89ABCDEF);
      const v128_t vtCDEF = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr89ABCDEF, vtl89ABCDEF, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
      const v128_t vd0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vd4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
      const v128_t vd89AB = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr89ABCDEF, vdl89ABCDEF, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vdCDEF = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr89ABCDEF, vdl89ABCDEF, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
      // Vertical blend in Q22: (top << 11) + delta * alpha_v.
      v128_t vacc0123 = wasm_i32x4_mul(vd0123, valphav);
      v128_t vacc4567 = wasm_i32x4_mul(vd4567, valphav);
      v128_t vacc89AB = wasm_i32x4_mul(vd89AB, valphav);
      v128_t vaccCDEF = wasm_i32x4_mul(vdCDEF, valphav);
      vacc0123 = wasm_i32x4_add(wasm_i32x4_shl(vt0123, 11), vacc0123);
      vacc4567 = wasm_i32x4_add(wasm_i32x4_shl(vt4567, 11), vacc4567);
      vacc89AB = wasm_i32x4_add(wasm_i32x4_shl(vt89AB, 11), vacc89AB);
      vaccCDEF = wasm_i32x4_add(wasm_i32x4_shl(vtCDEF, 11), vaccCDEF);
      // Round and rescale to the u8 range. The 16-bit add matches a 32-bit
      // add here because the low 16 bits of vrounding are zero (no carry can
      // cross a 16-bit lane boundary).
      vacc0123 = wasm_u32x4_shr(wasm_i16x8_add(vacc0123, vrounding), 22);
      vacc4567 = wasm_u32x4_shr(wasm_i16x8_add(vacc4567, vrounding), 22);
      vacc89AB = wasm_u32x4_shr(wasm_i16x8_add(vacc89AB, vrounding), 22);
      vaccCDEF = wasm_u32x4_shr(wasm_i16x8_add(vaccCDEF, vrounding), 22);
      // Narrow 32 -> 16 -> 8 bits and store 16 output bytes.
      const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
      const v128_t vacc89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
      const v128_t vo0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vacc01234567, vacc89ABCDEF);
      wasm_v128_store(output, vo0123456789ABCDEF);
      output += 16;
    }
    for (; c >= 8 * sizeof(uint8_t); c -= 8 * sizeof(uint8_t)) {
      // Same pipeline as above, 8 channels at a time.
      const v128_t vtl01234567 = wasm_u16x8_load8x8(i0);
      i0 += 8;
      const v128_t vtr01234567 = wasm_u16x8_load8x8(i1);
      i1 += 8;
      const v128_t vbl01234567 = wasm_u16x8_load8x8(i2);
      i2 += 8;
      const v128_t vbr01234567 = wasm_u16x8_load8x8(i3);
      i3 += 8;
      const v128_t vdr01234567 = wasm_i16x8_sub(vbr01234567, vtr01234567);
      const v128_t vt0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vdl01234567 = wasm_i16x8_sub(vbl01234567, vtl01234567);
      const v128_t vt4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
      const v128_t vd0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vd4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
      v128_t vacc0123 = wasm_i32x4_mul(vd0123, valphav);
      v128_t vacc4567 = wasm_i32x4_mul(vd4567, valphav);
      vacc0123 = wasm_i32x4_add(wasm_i32x4_shl(vt0123, 11), vacc0123);
      vacc4567 = wasm_i32x4_add(wasm_i32x4_shl(vt4567, 11), vacc4567);
      vacc0123 = wasm_u32x4_shr(wasm_i16x8_add(vacc0123, vrounding), 22);
      vacc4567 = wasm_u32x4_shr(wasm_i16x8_add(vacc4567, vrounding), 22);
      const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
      const v128_t vo01234567 = wasm_u8x16_narrow_i16x8(vacc01234567, vacc01234567);
      wasm_v128_store64_lane(output, vo01234567, 0);
      output += 8;
    }
    if XNN_UNLIKELY(c != 0) {
      // Remainder of 1-7 channels: compute a full 8-wide result (reading past
      // the row end is permitted per XNN_OOB_READS) and store it piecewise.
      const v128_t vtl01234567 = wasm_u16x8_load8x8(i0);
      const v128_t vtr01234567 = wasm_u16x8_load8x8(i1);
      const v128_t vbl01234567 = wasm_u16x8_load8x8(i2);
      const v128_t vbr01234567 = wasm_u16x8_load8x8(i3);
      const v128_t vdr01234567 = wasm_i16x8_sub(vbr01234567, vtr01234567);
      const v128_t vt0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vdl01234567 = wasm_i16x8_sub(vbl01234567, vtl01234567);
      const v128_t vt4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
      const v128_t vd0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vd4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
      v128_t vacc0123 = wasm_i32x4_mul(vd0123, valphav);
      v128_t vacc4567 = wasm_i32x4_mul(vd4567, valphav);
      vacc0123 = wasm_i32x4_add(wasm_i32x4_shl(vt0123, 11), vacc0123);
      vacc4567 = wasm_i32x4_add(wasm_i32x4_shl(vt4567, 11), vacc4567);
      vacc0123 = wasm_u32x4_shr(wasm_i16x8_add(vacc0123, vrounding), 22);
      vacc4567 = wasm_u32x4_shr(wasm_i16x8_add(vacc4567, vrounding), 22);
      const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
      v128_t vo01234567 = wasm_u8x16_narrow_i16x8(vacc01234567, vacc01234567);
      // Store 4, 2, then 1 byte(s) as the remainder bits dictate, shifting the
      // stored lanes out of the vector after each partial store.
      if (c & (4 * sizeof(uint8_t))) {
        wasm_v128_store32_lane(output, vo01234567, 0);
        vo01234567 = wasm_u64x2_shr(vo01234567, 32);
        output += 4;
      }
      if (c & (2 * sizeof(uint8_t))) {
        wasm_v128_store16_lane(output, vo01234567, 0);
        vo01234567 = wasm_u32x4_shr(vo01234567, 16);
        output += 2;
      }
      if (c & (1 * sizeof(uint8_t))) {
        wasm_v128_store8_lane(output, vo01234567, 0);
        output += 1;
      }
    }
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}
| 8,355
| 45.422222
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/u8-ibilinear/gen/u8-ibilinear-wasmsimd-dot16x2-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/s8-ibilinear/wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>
// uint8 bilinear-interpolation (image resize) micro-kernel, WebAssembly SIMD128
// with the i32x4.dot_i16x8_s instruction, 8 channels per vector iteration.
//
// Per output pixel, input[] supplies four corner-row pointers: top-left (i0),
// top-right (i1), bottom-left (i2), bottom-right (i3), and weights[] supplies
// two Q11 fixed-point int16 coefficients: alpha_h (horizontal) then alpha_v
// (vertical). Each channel is computed in Q22 fixed point as:
//   top    = tr*alpha_h + tl*(2048 - alpha_h)
//   bottom = br*alpha_h + bl*(2048 - alpha_h)
//   acc    = (top << 11) + (bottom - top)*alpha_v
//   out    = (acc + 2^21) >> 22   // round to nearest
void xnn_u8_ibilinear_ukernel__wasmsimd_dot16x2_c8(
    size_t output_pixels,             // number of output pixels; must be != 0
    size_t channels,                  // channels (bytes) per pixel; must be != 0
    const uint8_t** restrict input,   // 4 corner pointers per output pixel
    size_t input_offset,              // byte offset added to each input pointer
    const int16_t* restrict weights,  // (alpha_h, alpha_v) per pixel, Q11
    uint8_t* restrict output,
    size_t output_increment) XNN_OOB_READS  // bytes skipped after each pixel
{
  assert(output_pixels != 0);
  assert(channels != 0);
  do {
    const uint8_t* i0 = (const uint8_t*) ((uintptr_t) input[0] + input_offset);
    const uint8_t* i1 = (const uint8_t*) ((uintptr_t) input[1] + input_offset);
    const uint8_t* i2 = (const uint8_t*) ((uintptr_t) input[2] + input_offset);
    const uint8_t* i3 = (const uint8_t*) ((uintptr_t) input[3] + input_offset);
    input += 4;
    // Build (alpha_h, 2048 - alpha_h) pairs in every 32-bit lane: the splat
    // puts alpha_h in all 16-bit lanes, the XOR complements the odd lanes
    // (~ah), and the lane-wise 16-bit add turns them into 0x0800 - ah
    // (~ah + 0x0801 == 2048 - ah mod 2^16) while leaving even lanes as ah.
    const v128_t valphah =
      wasm_i16x8_add(
        wasm_v128_xor(
          wasm_v128_load16_splat(weights),
          wasm_i32x4_const_splat(0xFFFF0000)),
        wasm_i32x4_const_splat(0x08010000));
    // alpha_v sign-extended into every 32-bit lane.
    const v128_t valphav = wasm_i32x4_extend_low_i16x8(wasm_v128_load16_splat(weights + 1));
    weights += 2;
    // Round-to-nearest bias: 2^21, half of the final 2^22 divisor.
    const v128_t vrounding = wasm_i32x4_const_splat(0x00200000);
    size_t c = channels;
    for (; c >= 8 * sizeof(uint8_t); c -= 8 * sizeof(uint8_t)) {
      // Load 8 bytes from each corner row and widen to int16.
      const v128_t vtl01234567 = wasm_u16x8_load8x8(i0);
      i0 += 8;
      const v128_t vtr01234567 = wasm_u16x8_load8x8(i1);
      i1 += 8;
      const v128_t vbl01234567 = wasm_u16x8_load8x8(i2);
      i2 += 8;
      const v128_t vbr01234567 = wasm_u16x8_load8x8(i3);
      i3 += 8;
      // Vertical deltas (bottom - top); horizontal blends via dot products of
      // interleaved (right, left) pairs against (ah, 2048-ah) pairs.
      const v128_t vdr01234567 = wasm_i16x8_sub(vbr01234567, vtr01234567);
      const v128_t vt0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vdl01234567 = wasm_i16x8_sub(vbl01234567, vtl01234567);
      const v128_t vt4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
      const v128_t vd0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vd4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
      // Vertical blend in Q22: (top << 11) + delta * alpha_v.
      v128_t vacc0123 = wasm_i32x4_mul(vd0123, valphav);
      v128_t vacc4567 = wasm_i32x4_mul(vd4567, valphav);
      vacc0123 = wasm_i32x4_add(wasm_i32x4_shl(vt0123, 11), vacc0123);
      vacc4567 = wasm_i32x4_add(wasm_i32x4_shl(vt4567, 11), vacc4567);
      // Round and rescale to the u8 range. The 16-bit add matches a 32-bit
      // add here because the low 16 bits of vrounding are zero (no carry can
      // cross a 16-bit lane boundary).
      vacc0123 = wasm_u32x4_shr(wasm_i16x8_add(vacc0123, vrounding), 22);
      vacc4567 = wasm_u32x4_shr(wasm_i16x8_add(vacc4567, vrounding), 22);
      // Narrow 32 -> 16 -> 8 bits and store 8 output bytes.
      const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
      const v128_t vo01234567 = wasm_u8x16_narrow_i16x8(vacc01234567, vacc01234567);
      wasm_v128_store64_lane(output, vo01234567, 0);
      output += 8;
    }
    if XNN_UNLIKELY(c != 0) {
      // Remainder of 1-7 channels: compute a full 8-wide result (reading past
      // the row end is permitted per XNN_OOB_READS) and store it piecewise.
      const v128_t vtl01234567 = wasm_u16x8_load8x8(i0);
      const v128_t vtr01234567 = wasm_u16x8_load8x8(i1);
      const v128_t vbl01234567 = wasm_u16x8_load8x8(i2);
      const v128_t vbr01234567 = wasm_u16x8_load8x8(i3);
      const v128_t vdr01234567 = wasm_i16x8_sub(vbr01234567, vtr01234567);
      const v128_t vt0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vdl01234567 = wasm_i16x8_sub(vbl01234567, vtl01234567);
      const v128_t vt4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
      const v128_t vd0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vd4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
      v128_t vacc0123 = wasm_i32x4_mul(vd0123, valphav);
      v128_t vacc4567 = wasm_i32x4_mul(vd4567, valphav);
      vacc0123 = wasm_i32x4_add(wasm_i32x4_shl(vt0123, 11), vacc0123);
      vacc4567 = wasm_i32x4_add(wasm_i32x4_shl(vt4567, 11), vacc4567);
      vacc0123 = wasm_u32x4_shr(wasm_i16x8_add(vacc0123, vrounding), 22);
      vacc4567 = wasm_u32x4_shr(wasm_i16x8_add(vacc4567, vrounding), 22);
      const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
      v128_t vo01234567 = wasm_u8x16_narrow_i16x8(vacc01234567, vacc01234567);
      // Store 4, 2, then 1 byte(s) as the remainder bits dictate, shifting the
      // stored lanes out of the vector after each partial store.
      if (c & (4 * sizeof(uint8_t))) {
        wasm_v128_store32_lane(output, vo01234567, 0);
        vo01234567 = wasm_u64x2_shr(vo01234567, 32);
        output += 4;
      }
      if (c & (2 * sizeof(uint8_t))) {
        wasm_v128_store16_lane(output, vo01234567, 0);
        vo01234567 = wasm_u32x4_shr(vo01234567, 16);
        output += 2;
      }
      if (c & (1 * sizeof(uint8_t))) {
        wasm_v128_store8_lane(output, vo01234567, 0);
        output += 1;
      }
    }
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}
| 5,251
| 39.713178
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/u8-lut32norm/u8-lut32norm-scalar.c
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <fxdiv.h>
#include <xnnpack/lut.h>
// Returns the sum of table lookups t[x[i]] over all i in [0, n).
// n must be non-zero; x indexes into the 256-entry table t.
static inline uint32_t compute_sum(
    size_t n,
    const uint8_t* x,
    const uint32_t* t)
{
  assert(n != 0);
  uint32_t total = 0;
  for (size_t i = 0; i < n; i++) {
    total += t[x[i]];
  }
  return total;
}
void xnn_u8_lut32norm_ukernel__scalar(
size_t n,
const uint8_t* x,
const uint32_t* t,
uint8_t* y)
{
assert(n != 0);
const uint32_t vsum = compute_sum(n, x, t);
assert(vsum != 0);
struct fxdiv_divisor_uint32_t vsum_divisor = fxdiv_init_uint32_t(vsum);
const uint32_t vrounding = (vsum >> 1);
do {
const size_t vx = *x++;
const uint32_t vt = t[vx];
const uint32_t vq = fxdiv_quotient_uint32_t((vt << 8) + vrounding, vsum_divisor);
const uint8_t vy = vq > 255 ? UINT8_C(255) : (uint8_t) vq;
*y++ = vy;
} while (--n != 0);
}
| 1,116
| 20.480769
| 85
|
c
|
XNNPACK
|
XNNPACK-master/src/u8-maxpool/u8-maxpool-9p8x-minmax-neon-c16.c
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/maxpool.h>
// uint8 max-pooling micro-kernel, NEON, 16 channels per vector iteration.
// The pooling window is reduced 9 elements at a time in the first pass, then
// 8 elements per subsequent pass, folding each pass's result into the
// partially-written output row. Results are clamped to [params->neon.min,
// params->neon.max].
//
// Fix: the address-of operator in the two vld1q_dup_u8() calls had been
// corrupted by HTML-entity mojibake ("&para;ms" instead of "&params"),
// which made the file fail to compile; restored to plain `&params`.
//
// output_pixels    pooled output pixels; must be != 0
// kernel_elements  pooling window size; must be != 0
// channels         channels (bytes) per pixel; must be != 0
// input            row pointers; advanced by input_increment bytes per pixel
// input_offset     byte offset applied to every input row pointer
// output           destination, `channels` bytes per pixel
// input_increment  byte adjustment of the `input` pointer array per pixel
// output_increment byte adjustment of `output` after each pixel
// params           scalar min/max clamping bounds (neon variant)
void xnn_u8_maxpool_minmax_ukernel_9p8x__neon_c16(
    size_t output_pixels,
    size_t kernel_elements,
    size_t channels,
    const uint8_t** input,
    size_t input_offset,
    uint8_t* output,
    size_t input_increment,
    size_t output_increment,
    const union xnn_u8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(kernel_elements != 0);
  assert(channels != 0);
  // Clamping bounds broadcast to all 16 lanes.
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->neon.max);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->neon.min);
  do {
    uint8_t* o = output;
    {
      // First pass: reduce the first (up to) 9 pooling elements.
      const uint8_t* i0 = *input++;
      const uint8_t* i1 = *input++;
      const uint8_t* i2 = *input++;
      const uint8_t* i3 = *input++;
      const uint8_t* i4 = *input++;
      const uint8_t* i5 = *input++;
      const uint8_t* i6 = *input++;
      const uint8_t* i7 = *input++;
      const uint8_t* i8 = *input++;
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
      // For windows smaller than 9 elements, alias the surplus pointers to i0
      // so they contribute nothing new to the maximum.
      if (kernel_elements < 2) {
        i1 = i0;
      }
      if (kernel_elements <= 2) {
        i2 = i0;
      }
      if (kernel_elements < 4) {
        i3 = i0;
      }
      if (kernel_elements <= 4) {
        i4 = i0;
      }
      if (kernel_elements < 6) {
        i5 = i0;
      }
      if (kernel_elements <= 6) {
        i6 = i0;
      }
      if (kernel_elements < 8) {
        i7 = i0;
      }
      if (kernel_elements <= 8) {
        i8 = i0;
      }
      size_t c = channels;
      for (; c >= 16; c -= 16) {
        const uint8x16_t vi0 = vld1q_u8(i0); i0 += 16;
        const uint8x16_t vi1 = vld1q_u8(i1); i1 += 16;
        const uint8x16_t vi2 = vld1q_u8(i2); i2 += 16;
        const uint8x16_t vi3 = vld1q_u8(i3); i3 += 16;
        const uint8x16_t vi4 = vld1q_u8(i4); i4 += 16;
        const uint8x16_t vi5 = vld1q_u8(i5); i5 += 16;
        const uint8x16_t vi6 = vld1q_u8(i6); i6 += 16;
        const uint8x16_t vi7 = vld1q_u8(i7); i7 += 16;
        const uint8x16_t vi8 = vld1q_u8(i8); i8 += 16;
        // Balanced reduction tree over the 9 loaded rows.
        const uint8x16_t vmax018 = vmaxq_u8(vmaxq_u8(vi0, vi1), vi8);
        const uint8x16_t vmax23 = vmaxq_u8(vi2, vi3);
        const uint8x16_t vmax45 = vmaxq_u8(vi4, vi5);
        const uint8x16_t vmax67 = vmaxq_u8(vi6, vi7);
        const uint8x16_t vmax2345 = vmaxq_u8(vmax23, vmax45);
        const uint8x16_t vmax01678 = vmaxq_u8(vmax018, vmax67);
        const uint8x16_t vmax = vmaxq_u8(vmax2345, vmax01678);
        // Clamp to [min, max] and store 16 output bytes.
        const uint8x16_t vout = vmaxq_u8(vminq_u8(vmax, voutput_max), voutput_min);
        vst1q_u8(o, vout); o += 16;
      }
      if (c != 0) {
        // Channel remainder: compute a full 16-lane result (out-of-bounds
        // reads are permitted per XNN_OOB_READS) and store it piecewise.
        const uint8x16_t vi0 = vld1q_u8(i0);
        const uint8x16_t vi1 = vld1q_u8(i1);
        const uint8x16_t vi2 = vld1q_u8(i2);
        const uint8x16_t vi3 = vld1q_u8(i3);
        const uint8x16_t vi4 = vld1q_u8(i4);
        const uint8x16_t vi5 = vld1q_u8(i5);
        const uint8x16_t vi6 = vld1q_u8(i6);
        const uint8x16_t vi7 = vld1q_u8(i7);
        const uint8x16_t vi8 = vld1q_u8(i8);
        const uint8x16_t vmax018 = vmaxq_u8(vmaxq_u8(vi0, vi1), vi8);
        const uint8x16_t vmax23 = vmaxq_u8(vi2, vi3);
        const uint8x16_t vmax45 = vmaxq_u8(vi4, vi5);
        const uint8x16_t vmax67 = vmaxq_u8(vi6, vi7);
        const uint8x16_t vmax2345 = vmaxq_u8(vmax23, vmax45);
        const uint8x16_t vmax01678 = vmaxq_u8(vmax018, vmax67);
        const uint8x16_t vmax = vmaxq_u8(vmax2345, vmax01678);
        const uint8x16_t vout = vmaxq_u8(vminq_u8(vmax, voutput_max), voutput_min);
        // Store 8/4/2/1 bytes as the remainder bits dictate, rotating the
        // stored lanes out after each partial store.
        uint8x8_t vout_lo = vget_low_u8(vout);
        if (c & 8) {
          vst1_u8(o, vout_lo); o += 8;
          vout_lo = vget_high_u8(vout);
        }
        if (c & 4) {
          vst1_lane_u32((void*) o, vreinterpret_u32_u8(vout_lo), 0); o += 4;
          vout_lo = vext_u8(vout_lo, vout_lo, 4);
        }
        if (c & 2) {
          vst1_lane_u16((void*) o, vreinterpret_u16_u8(vout_lo), 0); o += 2;
          vout_lo = vext_u8(vout_lo, vout_lo, 2);
        }
        if (c & 1) {
          vst1_lane_u8(o, vout_lo, 0); o += 1;
        }
      }
    }

    // Subsequent passes: fold up to 8 more pooling elements into the partial
    // maxima already written to the output row (re-read through `o`).
    for (ptrdiff_t k = (ptrdiff_t) kernel_elements - 9; k > 0; k -= 8) {
      const uint8_t* i0 = *input++;
      const uint8_t* i1 = *input++;
      const uint8_t* i2 = *input++;
      const uint8_t* i3 = *input++;
      const uint8_t* i4 = *input++;
      const uint8_t* i5 = *input++;
      const uint8_t* i6 = *input++;
      const uint8_t* i7 = *input++;
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      // Alias surplus pointers to i0 when fewer than 8 elements remain.
      if (k < 2) {
        i1 = i0;
      }
      if (k <= 2) {
        i2 = i0;
      }
      if (k < 4) {
        i3 = i0;
      }
      if (k <= 4) {
        i4 = i0;
      }
      if (k < 6) {
        i5 = i0;
      }
      if (k <= 6) {
        i6 = i0;
      }
      if (k < 8) {
        i7 = i0;
      }
      o = output;
      size_t c = channels;
      for (; c >= 16; c -= 16) {
        const uint8x16_t vi0 = vld1q_u8(i0); i0 += 16;
        const uint8x16_t vi1 = vld1q_u8(i1); i1 += 16;
        const uint8x16_t vi2 = vld1q_u8(i2); i2 += 16;
        const uint8x16_t vi3 = vld1q_u8(i3); i3 += 16;
        const uint8x16_t vi4 = vld1q_u8(i4); i4 += 16;
        const uint8x16_t vi5 = vld1q_u8(i5); i5 += 16;
        const uint8x16_t vi6 = vld1q_u8(i6); i6 += 16;
        const uint8x16_t vi7 = vld1q_u8(i7); i7 += 16;
        const uint8x16_t vo = vld1q_u8(o);  // previously written partial max
        const uint8x16_t vmax01 = vmaxq_u8(vmaxq_u8(vi0, vi1), vo);
        const uint8x16_t vmax23 = vmaxq_u8(vi2, vi3);
        const uint8x16_t vmax45 = vmaxq_u8(vi4, vi5);
        const uint8x16_t vmax67 = vmaxq_u8(vi6, vi7);
        const uint8x16_t vmax2345 = vmaxq_u8(vmax23, vmax45);
        const uint8x16_t vmax0167 = vmaxq_u8(vmax01, vmax67);
        const uint8x16_t vmax = vmaxq_u8(vmax2345, vmax0167);
        const uint8x16_t vout = vmaxq_u8(vminq_u8(vmax, voutput_max), voutput_min);
        vst1q_u8(o, vout); o += 16;
      }
      if (c != 0) {
        // Channel remainder, folding in the partial output as above.
        const uint8x16_t vi0 = vld1q_u8(i0);
        const uint8x16_t vi1 = vld1q_u8(i1);
        const uint8x16_t vi2 = vld1q_u8(i2);
        const uint8x16_t vi3 = vld1q_u8(i3);
        const uint8x16_t vi4 = vld1q_u8(i4);
        const uint8x16_t vi5 = vld1q_u8(i5);
        const uint8x16_t vi6 = vld1q_u8(i6);
        const uint8x16_t vi7 = vld1q_u8(i7);
        const uint8x16_t vo = vld1q_u8(o);
        const uint8x16_t vmax01 = vmaxq_u8(vmaxq_u8(vi0, vi1), vo);
        const uint8x16_t vmax23 = vmaxq_u8(vi2, vi3);
        const uint8x16_t vmax45 = vmaxq_u8(vi4, vi5);
        const uint8x16_t vmax67 = vmaxq_u8(vi6, vi7);
        const uint8x16_t vmax2345 = vmaxq_u8(vmax23, vmax45);
        const uint8x16_t vmax0167 = vmaxq_u8(vmax01, vmax67);
        const uint8x16_t vmax = vmaxq_u8(vmax2345, vmax0167);
        const uint8x16_t vout = vmaxq_u8(vminq_u8(vmax, voutput_max), voutput_min);
        uint8x8_t vout_lo = vget_low_u8(vout);
        if (c & 8) {
          vst1_u8(o, vout_lo); o += 8;
          vout_lo = vget_high_u8(vout);
        }
        if (c & 4) {
          vst1_lane_u32((void*) o, vreinterpret_u32_u8(vout_lo), 0); o += 4;
          vout_lo = vext_u8(vout_lo, vout_lo, 4);
        }
        if (c & 2) {
          vst1_lane_u16((void*) o, vreinterpret_u16_u8(vout_lo), 0); o += 2;
          vout_lo = vext_u8(vout_lo, vout_lo, 2);
        }
        if (c & 1) {
          vst1_lane_u8(o, vout_lo, 0); o += 1;
        }
      }
    }
    input = (const uint8_t**) ((uintptr_t) input + input_increment);
    output = (uint8_t*) ((uintptr_t) o + output_increment);
  } while (--output_pixels != 0);
}
| 8,797
| 34.192
| 88
|
c
|
XNNPACK
|
XNNPACK-master/src/u8-maxpool/u8-maxpool-9p8x-minmax-scalar-c1.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/maxpool.h>
// u8 max-pooling microkernel, scalar variant, 1 channel per inner iteration.
// The first pass reduces up to 9 pooling elements per output pixel; each
// additional pass folds in up to 8 more elements, re-reading the partial
// maxima from the output buffer. Results are clamped to
// [params->scalar.min, params->scalar.max].
//
// Fix: the two intermediate maxima below were declared uint8_t, narrowing the
// math_max_u32 results; harmless for u8 inputs but inconsistent with every
// sibling intermediate (and the second pass), so they are restored to uint32_t.
void xnn_u8_maxpool_minmax_ukernel_9p8x__scalar_c1(
    size_t output_pixels,
    size_t kernel_elements,
    size_t channels,
    const uint8_t** input,
    size_t input_offset,
    uint8_t* output,
    size_t input_increment,
    size_t output_increment,
    const union xnn_u8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(output_pixels != 0);
  assert(kernel_elements != 0);
  assert(channels != 0);
  const uint32_t voutput_min = params->scalar.min;
  const uint32_t voutput_max = params->scalar.max;
  do {
    uint8_t* o = output;
    {
      // First pass: up to 9 pooling elements.
      const uint8_t* i0 = *input++;
      const uint8_t* i1 = *input++;
      const uint8_t* i2 = *input++;
      const uint8_t* i3 = *input++;
      const uint8_t* i4 = *input++;
      const uint8_t* i5 = *input++;
      const uint8_t* i6 = *input++;
      const uint8_t* i7 = *input++;
      const uint8_t* i8 = *input++;
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
      // Alias unused pointers to i0 so the max over 9 lanes is a no-op for
      // the missing elements (max with a duplicate leaves the result unchanged).
      if (kernel_elements < 2) {
        i1 = i0;
      }
      if (kernel_elements <= 2) {
        i2 = i0;
      }
      if (kernel_elements < 4) {
        i3 = i0;
      }
      if (kernel_elements <= 4) {
        i4 = i0;
      }
      if (kernel_elements < 6) {
        i5 = i0;
      }
      if (kernel_elements <= 6) {
        i6 = i0;
      }
      if (kernel_elements < 8) {
        i7 = i0;
      }
      if (kernel_elements <= 8) {
        i8 = i0;
      }
      size_t c = channels;
      do {
        const uint32_t vi0 = (uint32_t) *i0++;
        const uint32_t vi1 = (uint32_t) *i1++;
        const uint32_t vi2 = (uint32_t) *i2++;
        const uint32_t vi3 = (uint32_t) *i3++;
        const uint32_t vi4 = (uint32_t) *i4++;
        const uint32_t vi5 = (uint32_t) *i5++;
        const uint32_t vi6 = (uint32_t) *i6++;
        const uint32_t vi7 = (uint32_t) *i7++;
        const uint32_t vi8 = (uint32_t) *i8++;
        // Balanced reduction tree over the 9 loaded values.
        const uint32_t vmax01 = math_max_u32(vi0, vi1);
        const uint32_t vmax23 = math_max_u32(vi2, vi3);
        const uint32_t vmax45 = math_max_u32(vi4, vi5);
        const uint32_t vmax67 = math_max_u32(vi6, vi7);
        const uint32_t vmax018 = math_max_u32(vmax01, vi8);
        const uint32_t vmax2345 = math_max_u32(vmax23, vmax45);
        const uint32_t vmax01678 = math_max_u32(vmax018, vmax67);
        uint32_t vout = math_max_u32(vmax2345, vmax01678);
        vout = math_max_u32(vout, voutput_min);
        vout = math_min_u32(vout, voutput_max);
        *o++ = vout;
      } while (--c != 0);
    }
    // Additional passes: fold 8 more pooling elements into the partial maxima
    // already stored in the output row.
    for (ptrdiff_t k = (ptrdiff_t) kernel_elements - 9; k > 0; k -= 8) {
      const uint8_t* i0 = *input++;
      const uint8_t* i1 = *input++;
      const uint8_t* i2 = *input++;
      const uint8_t* i3 = *input++;
      const uint8_t* i4 = *input++;
      const uint8_t* i5 = *input++;
      const uint8_t* i6 = *input++;
      const uint8_t* i7 = *input++;
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      if (k < 2) {
        i1 = i0;
      }
      if (k <= 2) {
        i2 = i0;
      }
      if (k < 4) {
        i3 = i0;
      }
      if (k <= 4) {
        i4 = i0;
      }
      if (k < 6) {
        i5 = i0;
      }
      if (k <= 6) {
        i6 = i0;
      }
      if (k < 8) {
        i7 = i0;
      }
      o = output;
      size_t c = channels;
      do {
        const uint32_t vi0 = (uint32_t) *i0++;
        const uint32_t vi1 = (uint32_t) *i1++;
        const uint32_t vi2 = (uint32_t) *i2++;
        const uint32_t vi3 = (uint32_t) *i3++;
        const uint32_t vi4 = (uint32_t) *i4++;
        const uint32_t vi5 = (uint32_t) *i5++;
        const uint32_t vi6 = (uint32_t) *i6++;
        const uint32_t vi7 = (uint32_t) *i7++;
        const uint32_t vi8 = (uint32_t) *o;  // partial maximum from earlier passes
        const uint32_t vmax01 = math_max_u32(vi0, vi1);
        const uint32_t vmax23 = math_max_u32(vi2, vi3);
        const uint32_t vmax45 = math_max_u32(vi4, vi5);
        const uint32_t vmax67 = math_max_u32(vi6, vi7);
        const uint32_t vmax018 = math_max_u32(vmax01, vi8);
        const uint32_t vmax2345 = math_max_u32(vmax23, vmax45);
        const uint32_t vmax01678 = math_max_u32(vmax018, vmax67);
        uint32_t vout = math_max_u32(vmax2345, vmax01678);
        vout = math_max_u32(vout, voutput_min);
        vout = math_min_u32(vout, voutput_max);
        *o++ = vout;
      } while (--c != 0);
    }
    input = (const uint8_t**) ((uintptr_t) input + input_increment);
    output = (uint8_t*) ((uintptr_t) o + output_increment);
  } while (--output_pixels != 0);
}
| 5,661
| 31.170455
| 74
|
c
|
XNNPACK
|
XNNPACK-master/src/u8-maxpool/u8-maxpool-9p8x-minmax-sse2-c16.c
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/maxpool.h>
#include <xnnpack/unaligned.h>
// u8 max-pooling microkernel (SSE2, 16 channels per SIMD iteration).
// First pass reduces up to 9 pooling elements per output pixel; each later
// pass folds in up to 8 more, re-reading partial maxima from the output row.
// Results are clamped to the [min, max] vectors in params->sse2.
// XNN_OOB_READS: the channel tail loads a full 16-byte vector and may read
// past the end of each input row; only the valid lanes are stored.
void xnn_u8_maxpool_minmax_ukernel_9p8x__sse2_c16(
    size_t output_pixels,
    size_t kernel_elements,
    size_t channels,
    const uint8_t** input,
    size_t input_offset,
    uint8_t* output,
    size_t input_increment,
    size_t output_increment,
    const union xnn_u8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(kernel_elements != 0);
  assert(channels != 0);
  const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.max);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.min);
  do {
    uint8_t* o = output;
    {
      // First pass: up to 9 pooling elements.
      const uint8_t* i0 = *input++;
      const uint8_t* i1 = *input++;
      const uint8_t* i2 = *input++;
      const uint8_t* i3 = *input++;
      const uint8_t* i4 = *input++;
      const uint8_t* i5 = *input++;
      const uint8_t* i6 = *input++;
      const uint8_t* i7 = *input++;
      const uint8_t* i8 = *input++;
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
      // Alias unused pointers to i0 so the 9-way max ignores missing elements.
      if (kernel_elements < 2) {
        i1 = i0;
      }
      if (kernel_elements <= 2) {
        i2 = i0;
      }
      if (kernel_elements < 4) {
        i3 = i0;
      }
      if (kernel_elements <= 4) {
        i4 = i0;
      }
      if (kernel_elements < 6) {
        i5 = i0;
      }
      if (kernel_elements <= 6) {
        i6 = i0;
      }
      if (kernel_elements < 8) {
        i7 = i0;
      }
      if (kernel_elements <= 8) {
        i8 = i0;
      }
      size_t c = channels;
      // Full 16-channel vectors.
      for (; c >= 16; c -= 16) {
        const __m128i vi0 = _mm_loadu_si128((const __m128i*) i0); i0 += 16;
        const __m128i vi1 = _mm_loadu_si128((const __m128i*) i1); i1 += 16;
        const __m128i vi2 = _mm_loadu_si128((const __m128i*) i2); i2 += 16;
        const __m128i vi3 = _mm_loadu_si128((const __m128i*) i3); i3 += 16;
        const __m128i vi4 = _mm_loadu_si128((const __m128i*) i4); i4 += 16;
        const __m128i vi5 = _mm_loadu_si128((const __m128i*) i5); i5 += 16;
        const __m128i vi6 = _mm_loadu_si128((const __m128i*) i6); i6 += 16;
        const __m128i vi7 = _mm_loadu_si128((const __m128i*) i7); i7 += 16;
        const __m128i vi8 = _mm_loadu_si128((const __m128i*) i8); i8 += 16;
        // Balanced reduction tree over the 9 vectors.
        const __m128i vmax018 = _mm_max_epu8(_mm_max_epu8(vi0, vi1), vi8);
        const __m128i vmax23 = _mm_max_epu8(vi2, vi3);
        const __m128i vmax45 = _mm_max_epu8(vi4, vi5);
        const __m128i vmax67 = _mm_max_epu8(vi6, vi7);
        const __m128i vmax2345 = _mm_max_epu8(vmax23, vmax45);
        const __m128i vmax01678 = _mm_max_epu8(vmax018, vmax67);
        __m128i vout = _mm_max_epu8(vmax2345, vmax01678);
        vout = _mm_max_epu8(vout, voutput_min);
        vout = _mm_min_epu8(vout, voutput_max);
        _mm_storeu_si128((__m128i*) o, vout); o += 16;
      }
      // Channel tail (1-15): compute a full vector, store only valid lanes.
      if (c != 0) {
        const __m128i vi0 = _mm_loadu_si128((const __m128i*) i0);
        const __m128i vi1 = _mm_loadu_si128((const __m128i*) i1);
        const __m128i vi2 = _mm_loadu_si128((const __m128i*) i2);
        const __m128i vi3 = _mm_loadu_si128((const __m128i*) i3);
        const __m128i vi4 = _mm_loadu_si128((const __m128i*) i4);
        const __m128i vi5 = _mm_loadu_si128((const __m128i*) i5);
        const __m128i vi6 = _mm_loadu_si128((const __m128i*) i6);
        const __m128i vi7 = _mm_loadu_si128((const __m128i*) i7);
        const __m128i vi8 = _mm_loadu_si128((const __m128i*) i8);
        const __m128i vmax018 = _mm_max_epu8(_mm_max_epu8(vi0, vi1), vi8);
        const __m128i vmax23 = _mm_max_epu8(vi2, vi3);
        const __m128i vmax45 = _mm_max_epu8(vi4, vi5);
        const __m128i vmax67 = _mm_max_epu8(vi6, vi7);
        const __m128i vmax2345 = _mm_max_epu8(vmax23, vmax45);
        const __m128i vmax01678 = _mm_max_epu8(vmax018, vmax67);
        __m128i vout = _mm_max_epu8(vmax2345, vmax01678);
        vout = _mm_max_epu8(vout, voutput_min);
        vout = _mm_min_epu8(vout, voutput_max);
        // Store 8/4/2/1 bytes, shifting consumed lanes out after each store.
        if (c & 8) {
          _mm_storel_epi64((__m128i*) o, vout);
          vout = _mm_unpackhi_epi64(vout, vout);
          o += 8;
        }
        if (c & 4) {
          unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vout));
          vout = _mm_srli_epi64(vout, 32);
          o += 4;
        }
        if (c & 2) {
          unaligned_store_u16(o, (uint16_t) _mm_extract_epi16(vout, 0));
          vout = _mm_srli_epi32(vout, 16);
          o += 2;
        }
        if (c & 1) {
          *o = (uint8_t) _mm_cvtsi128_si32(vout);
          o += 1;
        }
      }
    }
    // Additional passes: fold 8 more elements into the stored partial maxima.
    for (ptrdiff_t k = (ptrdiff_t) kernel_elements - 9; k > 0; k -= 8) {
      const uint8_t* i0 = *input++;
      const uint8_t* i1 = *input++;
      const uint8_t* i2 = *input++;
      const uint8_t* i3 = *input++;
      const uint8_t* i4 = *input++;
      const uint8_t* i5 = *input++;
      const uint8_t* i6 = *input++;
      const uint8_t* i7 = *input++;
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      if (k < 2) {
        i1 = i0;
      }
      if (k <= 2) {
        i2 = i0;
      }
      if (k < 4) {
        i3 = i0;
      }
      if (k <= 4) {
        i4 = i0;
      }
      if (k < 6) {
        i5 = i0;
      }
      if (k <= 6) {
        i6 = i0;
      }
      if (k < 8) {
        i7 = i0;
      }
      o = output;
      size_t c = channels;
      for (; c >= 16; c -= 16) {
        const __m128i vi0 = _mm_loadu_si128((const __m128i*) i0); i0 += 16;
        const __m128i vi1 = _mm_loadu_si128((const __m128i*) i1); i1 += 16;
        const __m128i vi2 = _mm_loadu_si128((const __m128i*) i2); i2 += 16;
        const __m128i vi3 = _mm_loadu_si128((const __m128i*) i3); i3 += 16;
        const __m128i vi4 = _mm_loadu_si128((const __m128i*) i4); i4 += 16;
        const __m128i vi5 = _mm_loadu_si128((const __m128i*) i5); i5 += 16;
        const __m128i vi6 = _mm_loadu_si128((const __m128i*) i6); i6 += 16;
        const __m128i vi7 = _mm_loadu_si128((const __m128i*) i7); i7 += 16;
        const __m128i vo = _mm_loadu_si128((const __m128i*) o);  // partial maxima
        const __m128i vmax01 = _mm_max_epu8(_mm_max_epu8(vi0, vi1), vo);
        const __m128i vmax23 = _mm_max_epu8(vi2, vi3);
        const __m128i vmax45 = _mm_max_epu8(vi4, vi5);
        const __m128i vmax67 = _mm_max_epu8(vi6, vi7);
        const __m128i vmax2345 = _mm_max_epu8(vmax23, vmax45);
        const __m128i vmax0167 = _mm_max_epu8(vmax01, vmax67);
        __m128i vout = _mm_max_epu8(vmax2345, vmax0167);
        vout = _mm_max_epu8(vout, voutput_min);
        vout = _mm_min_epu8(vout, voutput_max);
        _mm_storeu_si128((__m128i*) o, vout);
        o += 16;
      }
      if (c != 0) {
        const __m128i vi0 = _mm_loadu_si128((const __m128i*) i0);
        const __m128i vi1 = _mm_loadu_si128((const __m128i*) i1);
        const __m128i vi2 = _mm_loadu_si128((const __m128i*) i2);
        const __m128i vi3 = _mm_loadu_si128((const __m128i*) i3);
        const __m128i vi4 = _mm_loadu_si128((const __m128i*) i4);
        const __m128i vi5 = _mm_loadu_si128((const __m128i*) i5);
        const __m128i vi6 = _mm_loadu_si128((const __m128i*) i6);
        const __m128i vi7 = _mm_loadu_si128((const __m128i*) i7);
        const __m128i vo = _mm_loadu_si128((const __m128i*) o);
        const __m128i vmax01 = _mm_max_epu8(_mm_max_epu8(vi0, vi1), vo);
        const __m128i vmax23 = _mm_max_epu8(vi2, vi3);
        const __m128i vmax45 = _mm_max_epu8(vi4, vi5);
        const __m128i vmax67 = _mm_max_epu8(vi6, vi7);
        const __m128i vmax2345 = _mm_max_epu8(vmax23, vmax45);
        const __m128i vmax0167 = _mm_max_epu8(vmax01, vmax67);
        __m128i vout = _mm_max_epu8(vmax2345, vmax0167);
        vout = _mm_max_epu8(vout, voutput_min);
        vout = _mm_min_epu8(vout, voutput_max);
        if (c & 8) {
          _mm_storel_epi64((__m128i*) o, vout);
          vout = _mm_unpackhi_epi64(vout, vout);
          o += 8;
        }
        if (c & 4) {
          unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vout));
          vout = _mm_srli_epi64(vout, 32);
          o += 4;
        }
        if (c & 2) {
          unaligned_store_u16(o, (uint16_t) _mm_extract_epi16(vout, 0));
          vout = _mm_srli_epi32(vout, 16);
          o += 2;
        }
        if (c & 1) {
          *o = (uint8_t) _mm_cvtsi128_si32(vout);
          o += 1;
        }
      }
    }
    input = (const uint8_t**) ((uintptr_t) input + input_increment);
    output = (uint8_t*) ((uintptr_t) o + output_increment);
  } while (--output_pixels != 0);
}
| 9,772
| 36.159696
| 88
|
c
|
XNNPACK
|
XNNPACK-master/src/u8-maxpool/u8-maxpool-9p8x-minmax-wasmsimd-c16.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/maxpool.h>
// u8 max-pooling microkernel (WAsm SIMD128, 16 channels per SIMD iteration).
// First pass reduces up to 9 pooling elements; each later pass folds in up to
// 8 more, re-reading partial maxima from the output row. Results are clamped
// to the [min, max] vectors in params->wasmsimd.
// XNN_OOB_READS: the channel tail loads a full 16-byte vector and may read
// past the end of each input row; only the valid lanes are stored.
void xnn_u8_maxpool_minmax_ukernel_9p8x__wasmsimd_c16(
    size_t output_pixels,
    size_t kernel_elements,
    size_t channels,
    const uint8_t** input,
    size_t input_offset,
    uint8_t* output,
    size_t input_increment,
    size_t output_increment,
    const union xnn_u8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(kernel_elements != 0);
  assert(channels != 0);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
  do {
    uint8_t* o = output;
    {
      // First pass: up to 9 pooling elements.
      const uint8_t* i0 = *input++;
      const uint8_t* i1 = *input++;
      const uint8_t* i2 = *input++;
      const uint8_t* i3 = *input++;
      const uint8_t* i4 = *input++;
      const uint8_t* i5 = *input++;
      const uint8_t* i6 = *input++;
      const uint8_t* i7 = *input++;
      const uint8_t* i8 = *input++;
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
      // Alias unused pointers to i0 so the 9-way max ignores missing elements.
      if (kernel_elements < 2) {
        i1 = i0;
      }
      if (kernel_elements <= 2) {
        i2 = i0;
      }
      if (kernel_elements < 4) {
        i3 = i0;
      }
      if (kernel_elements <= 4) {
        i4 = i0;
      }
      if (kernel_elements < 6) {
        i5 = i0;
      }
      if (kernel_elements <= 6) {
        i6 = i0;
      }
      if (kernel_elements < 8) {
        i7 = i0;
      }
      if (kernel_elements <= 8) {
        i8 = i0;
      }
      size_t c = channels;
      // Full 16-channel vectors.
      for (; c >= 16; c -= 16) {
        const v128_t vi0 = wasm_v128_load(i0);
        i0 += 16;
        const v128_t vi1 = wasm_v128_load(i1);
        i1 += 16;
        const v128_t vi2 = wasm_v128_load(i2);
        i2 += 16;
        const v128_t vi3 = wasm_v128_load(i3);
        i3 += 16;
        const v128_t vi4 = wasm_v128_load(i4);
        i4 += 16;
        const v128_t vi5 = wasm_v128_load(i5);
        i5 += 16;
        const v128_t vi6 = wasm_v128_load(i6);
        i6 += 16;
        const v128_t vi7 = wasm_v128_load(i7);
        i7 += 16;
        const v128_t vi8 = wasm_v128_load(i8);
        i8 += 16;
        // Balanced reduction tree over the 9 vectors.
        const v128_t vmax018 = wasm_u8x16_max(wasm_u8x16_max(vi0, vi1), vi8);
        const v128_t vmax23 = wasm_u8x16_max(vi2, vi3);
        const v128_t vmax45 = wasm_u8x16_max(vi4, vi5);
        const v128_t vmax67 = wasm_u8x16_max(vi6, vi7);
        const v128_t vmax2345 = wasm_u8x16_max(vmax23, vmax45);
        const v128_t vmax01678 = wasm_u8x16_max(vmax018, vmax67);
        v128_t vout = wasm_u8x16_max(vmax2345, vmax01678);
        vout = wasm_u8x16_min(vout, voutput_max);
        vout = wasm_u8x16_max(vout, voutput_min);
        wasm_v128_store(o, vout); o += 16;
      }
      // Channel tail (1-15): compute a full vector, store only valid lanes.
      if (c != 0) {
        const v128_t vi0 = wasm_v128_load(i0);
        const v128_t vi1 = wasm_v128_load(i1);
        const v128_t vi2 = wasm_v128_load(i2);
        const v128_t vi3 = wasm_v128_load(i3);
        const v128_t vi4 = wasm_v128_load(i4);
        const v128_t vi5 = wasm_v128_load(i5);
        const v128_t vi6 = wasm_v128_load(i6);
        const v128_t vi7 = wasm_v128_load(i7);
        const v128_t vi8 = wasm_v128_load(i8);
        const v128_t vmax018 = wasm_u8x16_max(wasm_u8x16_max(vi0, vi1), vi8);
        const v128_t vmax23 = wasm_u8x16_max(vi2, vi3);
        const v128_t vmax45 = wasm_u8x16_max(vi4, vi5);
        const v128_t vmax67 = wasm_u8x16_max(vi6, vi7);
        const v128_t vmax2345 = wasm_u8x16_max(vmax23, vmax45);
        const v128_t vmax01678 = wasm_u8x16_max(vmax018, vmax67);
        v128_t vout = wasm_u8x16_max(vmax2345, vmax01678);
        vout = wasm_u8x16_min(vout, voutput_max);
        vout = wasm_u8x16_max(vout, voutput_min);
        // Store 8/4/2/1 bytes, shifting consumed lanes out after each store.
        if (c & 8) {
          wasm_v128_store64_lane(o, vout, 0);
          vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
          o += 8;
        }
        if (c & 4) {
          wasm_v128_store32_lane(o, vout, 0);
          vout = wasm_u64x2_shr(vout, 32);
          o += 4;
        }
        if (c & 2) {
          wasm_v128_store16_lane(o, vout, 0);
          vout = wasm_u32x4_shr(vout, 16);
          o += 2;
        }
        if (c & 1) {
          wasm_v128_store8_lane(o, vout, 0);
          o += 1;
        }
      }
    }
    // Additional passes: fold 8 more elements into the stored partial maxima.
    for (ptrdiff_t k = (ptrdiff_t) kernel_elements - 9; k > 0; k -= 8) {
      const uint8_t* i0 = *input++;
      const uint8_t* i1 = *input++;
      const uint8_t* i2 = *input++;
      const uint8_t* i3 = *input++;
      const uint8_t* i4 = *input++;
      const uint8_t* i5 = *input++;
      const uint8_t* i6 = *input++;
      const uint8_t* i7 = *input++;
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      if (k < 2) {
        i1 = i0;
      }
      if (k <= 2) {
        i2 = i0;
      }
      if (k < 4) {
        i3 = i0;
      }
      if (k <= 4) {
        i4 = i0;
      }
      if (k < 6) {
        i5 = i0;
      }
      if (k <= 6) {
        i6 = i0;
      }
      if (k < 8) {
        i7 = i0;
      }
      o = output;
      size_t c = channels;
      for (; c >= 16; c -= 16) {
        const v128_t vi0 = wasm_v128_load(i0);
        i0 += 16;
        const v128_t vi1 = wasm_v128_load(i1);
        i1 += 16;
        const v128_t vi2 = wasm_v128_load(i2);
        i2 += 16;
        const v128_t vi3 = wasm_v128_load(i3);
        i3 += 16;
        const v128_t vi4 = wasm_v128_load(i4);
        i4 += 16;
        const v128_t vi5 = wasm_v128_load(i5);
        i5 += 16;
        const v128_t vi6 = wasm_v128_load(i6);
        i6 += 16;
        const v128_t vi7 = wasm_v128_load(i7);
        i7 += 16;
        const v128_t vo = wasm_v128_load(o);  // partial maxima from earlier passes
        const v128_t vmax01 = wasm_u8x16_max(wasm_u8x16_max(vi0, vi1), vo);
        const v128_t vmax23 = wasm_u8x16_max(vi2, vi3);
        const v128_t vmax45 = wasm_u8x16_max(vi4, vi5);
        const v128_t vmax67 = wasm_u8x16_max(vi6, vi7);
        const v128_t vmax2345 = wasm_u8x16_max(vmax23, vmax45);
        const v128_t vmax0167 = wasm_u8x16_max(vmax01, vmax67);
        v128_t vout = wasm_u8x16_max(vmax2345, vmax0167);
        vout = wasm_u8x16_min(vout, voutput_max);
        vout = wasm_u8x16_max(vout, voutput_min);
        wasm_v128_store(o, vout);
        o += 16;
      }
      if (c != 0) {
        const v128_t vi0 = wasm_v128_load(i0);
        const v128_t vi1 = wasm_v128_load(i1);
        const v128_t vi2 = wasm_v128_load(i2);
        const v128_t vi3 = wasm_v128_load(i3);
        const v128_t vi4 = wasm_v128_load(i4);
        const v128_t vi5 = wasm_v128_load(i5);
        const v128_t vi6 = wasm_v128_load(i6);
        const v128_t vi7 = wasm_v128_load(i7);
        const v128_t vo = wasm_v128_load(o);
        const v128_t vmax01 = wasm_u8x16_max(wasm_u8x16_max(vi0, vi1), vo);
        const v128_t vmax23 = wasm_u8x16_max(vi2, vi3);
        const v128_t vmax45 = wasm_u8x16_max(vi4, vi5);
        const v128_t vmax67 = wasm_u8x16_max(vi6, vi7);
        const v128_t vmax2345 = wasm_u8x16_max(vmax23, vmax45);
        const v128_t vmax0167 = wasm_u8x16_max(vmax01, vmax67);
        v128_t vout = wasm_u8x16_max(vmax2345, vmax0167);
        vout = wasm_u8x16_min(vout, voutput_max);
        vout = wasm_u8x16_max(vout, voutput_min);
        if (c & 8) {
          wasm_v128_store64_lane(o, vout, 0);
          vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
          o += 8;
        }
        if (c & 4) {
          wasm_v128_store32_lane(o, vout, 0);
          vout = wasm_u64x2_shr(vout, 32);
          o += 4;
        }
        if (c & 2) {
          wasm_v128_store16_lane(o, vout, 0);
          vout = wasm_u32x4_shr(vout, 16);
          o += 2;
        }
        if (c & 1) {
          wasm_v128_store8_lane(o, vout, 0);
          o += 1;
        }
      }
    }
    input = (const uint8_t**) ((uintptr_t) input + input_increment);
    output = (uint8_t*) ((uintptr_t) o + output_increment);
  } while (--output_pixels != 0);
}
| 9,033
| 31.731884
| 88
|
c
|
XNNPACK
|
XNNPACK-master/src/u8-rmax/u8-rmax-neon.c
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/rmax.h>
// Reduction microkernel (NEON): writes the maximum of `batch` uint8_t
// elements to *output.
void xnn_u8_rmax_ukernel__neon(
    size_t batch,
    const uint8_t* input,
    uint8_t* output)
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  if XNN_LIKELY(batch >= 16) {
    // Vector path: accumulate a 16-lane running max (0 is the u8 max identity).
    uint8x16_t vmax = vmovq_n_u8(0);
    do {
      const uint8x16_t vx = vld1q_u8(input); input += 16;
      vmax = vmaxq_u8(vmax, vx);
      batch -= 16;
    } while (batch >= 16);
    if (batch != 0) {
      // Tail: step the pointer back so the final load covers the last 16
      // bytes exactly; re-processed elements are harmless for max.
      // (batch - 16 wraps as size_t; the uintptr_t addition moves backward.)
      const size_t x_increment = batch - 16;
      input = (const uint8_t*) ((uintptr_t) input + x_increment);
      const uint8x16_t vx = vld1q_u8(input);
      vmax = vmaxq_u8(vmax, vx);
    }
    // Horizontal reduction: 16 -> 8 -> 4 -> 2 -> 1 lanes via pairwise max.
    uint8x8_t vmax8 = vmax_u8(vget_low_u8(vmax), vget_high_u8(vmax));
    const uint8x8_t vmax4 = vpmax_u8(vmax8, vmax8);
    const uint8x8_t vmax2 = vpmax_u8(vmax4, vmax4);
    const uint8x8_t vmax1 = vpmax_u8(vmax2, vmax2);
    vst1_lane_u8(output, vmax1, 0);
  } else {
    // Scalar-ish path for batch < 16: one element per iteration.
    uint8x8_t vmax = vmov_n_u8(0);
    do {
      const uint8x8_t vx = vld1_dup_u8(input); input += 1;
      vmax = vmax_u8(vmax, vx);
    } while (--batch != 0);
    vst1_lane_u8(output, vmax, 0);
  }
}
| 1,437
| 26.132075
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/u8-rmax/u8-rmax-scalar.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/rmax.h>
// Reduction microkernel (scalar): writes the maximum of `batch` uint8_t
// elements to *output. Two independent accumulators are carried through the
// main loop so the per-iteration comparisons do not form one serial chain.
void xnn_u8_rmax_ukernel__scalar(
    size_t batch,
    const uint8_t* input,
    uint8_t* output)
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // 0 is the identity element for unsigned max.
  uint8_t racc_even = 0;
  uint8_t racc_odd = 0;
  while (batch >= 2 * sizeof(uint8_t)) {
    const uint8_t velem0 = input[0];
    const uint8_t velem1 = input[1];
    input += 2;
    if (velem0 > racc_even) {
      racc_even = velem0;
    }
    if (velem1 > racc_odd) {
      racc_odd = velem1;
    }
    batch -= 2 * sizeof(uint8_t);
  }
  // Combine the two running maxima.
  uint8_t result = racc_even;
  if (racc_odd > result) {
    result = racc_odd;
  }
  // At most one trailing element remains.
  if (batch != 0) {
    const uint8_t vlast = *input++;
    if (vlast > result) {
      result = vlast;
    }
  }
  *output = result;
}
| 868
| 21.868421
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/u8-rmax/u8-rmax-sse2.c
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/rmax.h>
// Reduction microkernel (SSE2): writes the maximum of `batch` uint8_t
// elements to *output.
void xnn_u8_rmax_ukernel__sse2(
    size_t batch,
    const uint8_t* input,
    uint8_t* output)
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  if XNN_LIKELY(batch >= 16) {
    // Vector path: accumulate a 16-lane running max (0 is the u8 max identity).
    __m128i vmax = _mm_setzero_si128();
    do {
      const __m128i vx = _mm_loadu_si128((const __m128i*) input);
      input += 16;
      vmax = _mm_max_epu8(vmax, vx);
      batch -= 16;
    } while (batch >= 16);
    if (batch != 0) {
      // Tail: step the pointer back so the final load covers the last 16
      // bytes exactly; re-processed elements are harmless for max.
      // (batch - 16 wraps as size_t; the uintptr_t addition moves backward.)
      const size_t x_increment = batch - 16;
      input = (const uint8_t*) ((uintptr_t) input + x_increment);
      const __m128i vx = _mm_loadu_si128((const __m128i*) input);
      vmax = _mm_max_epu8(vmax, vx);
    }
    // Horizontal reduction: fold halves by 64/32/16/8-bit shifts until the
    // maximum sits in the low byte.
    vmax = _mm_max_epu8(vmax, _mm_unpackhi_epi64(vmax, vmax));
    vmax = _mm_max_epu8(vmax, _mm_srli_epi64(vmax, 32));
    vmax = _mm_max_epu8(vmax, _mm_srli_epi32(vmax, 16));
    vmax = _mm_max_epu8(vmax, _mm_srli_epi16(vmax, 8));
    *output = (uint8_t) _mm_cvtsi128_si32(vmax);
  } else {
    // Scalar path for batch < 16.
    uint8_t vmax = 0;
    do {
      const uint8_t vx = *input++;
      vmax = vx > vmax ? vx : vmax;
    } while (--batch != 0);
    *output = vmax;
  }
}
| 1,469
| 26.222222
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/u8-vclamp/u8-vclamp-neon-x64.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
// Clamp microkernel (NEON, 64 elements per main-loop iteration): clamps each
// of `batch` uint8_t elements to [params->neon.min, params->neon.max].
// XNN_OOB_READS: the tail loads a full 8-byte vector and may read past the
// end of the input; only the valid lanes are stored.
void xnn_u8_vclamp_ukernel__neon_x64(
    size_t batch,
    const uint8_t* input,
    uint8_t* output,
    const union xnn_u8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->neon.max);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->neon.min);
  // Main loop: 4 x 16 = 64 elements per iteration.
  for (; batch >= 64; batch -= 64) {
    uint8x16_t vacc0 = vld1q_u8(input); input += 16;
    uint8x16_t vacc1 = vld1q_u8(input); input += 16;
    uint8x16_t vacc2 = vld1q_u8(input); input += 16;
    uint8x16_t vacc3 = vld1q_u8(input); input += 16;
    vacc0 = vmaxq_u8(vacc0, voutput_min);
    vacc1 = vmaxq_u8(vacc1, voutput_min);
    vacc2 = vmaxq_u8(vacc2, voutput_min);
    vacc3 = vmaxq_u8(vacc3, voutput_min);
    vacc0 = vminq_u8(vacc0, voutput_max);
    vacc1 = vminq_u8(vacc1, voutput_max);
    vacc2 = vminq_u8(vacc2, voutput_max);
    vacc3 = vminq_u8(vacc3, voutput_max);
    vst1q_u8(output, vacc0); output += 16;
    vst1q_u8(output, vacc1); output += 16;
    vst1q_u8(output, vacc2); output += 16;
    vst1q_u8(output, vacc3); output += 16;
  }
  // 8-element vectors.
  for (; batch >= 8; batch -= 8) {
    uint8x8_t vacc = vld1_u8(input); input += 8;
    vacc = vmin_u8(vacc, vget_low_u8(voutput_max));
    vacc = vmax_u8(vacc, vget_low_u8(voutput_min));
    vst1_u8(output, vacc); output += 8;
  }
  // Tail (1-7 elements): load 8 bytes (OOB read), store 4/2/1 valid lanes.
  if XNN_UNLIKELY(batch != 0) {
    uint8x8_t vacc = vld1_u8(input); input += 8;
    vacc = vmin_u8(vacc, vget_low_u8(voutput_max));
    vacc = vmax_u8(vacc, vget_low_u8(voutput_min));
    if (batch & 4) {
      vst1_lane_u32((void*) output, vreinterpret_u32_u8(vacc), 0); output += 4;
      vacc = vext_u8(vacc, vacc, 4);
    }
    if (batch & 2) {
      vst1_lane_u16((void*) output, vreinterpret_u16_u8(vacc), 0); output += 2;
      vacc = vext_u8(vacc, vacc, 2);
    }
    if (batch & 1) {
      vst1_lane_u8(output, vacc, 0);
    }
  }
}
| 2,223
| 28.653333
| 88
|
c
|
XNNPACK
|
XNNPACK-master/src/u8-vclamp/u8-vclamp-scalar-x4.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Clamp microkernel (scalar, 4 elements per main-loop iteration): clamps each
// of `batch` uint8_t elements to [params->scalar.min, params->scalar.max].
void xnn_u8_vclamp_ukernel__scalar_x4(
    size_t batch,
    const uint8_t* input,
    uint8_t* output,
    const union xnn_u8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Clamp bounds, widened to 32 bits for the scalar math helpers.
  const uint32_t vhi = params->scalar.max;
  const uint32_t vlo = params->scalar.min;

  // Main loop: process 4 elements per iteration.
  while (batch >= 4 * sizeof(uint8_t)) {
    uint32_t vx0 = (uint32_t) input[0];
    uint32_t vx1 = (uint32_t) input[1];
    uint32_t vx2 = (uint32_t) input[2];
    uint32_t vx3 = (uint32_t) input[3];
    input += 4;

    vx0 = math_min_u32(math_max_u32(vx0, vlo), vhi);
    vx1 = math_min_u32(math_max_u32(vx1, vlo), vhi);
    vx2 = math_min_u32(math_max_u32(vx2, vlo), vhi);
    vx3 = math_min_u32(math_max_u32(vx3, vlo), vhi);

    output[0] = (uint8_t) vx0;
    output[1] = (uint8_t) vx1;
    output[2] = (uint8_t) vx2;
    output[3] = (uint8_t) vx3;
    output += 4;
    batch -= 4 * sizeof(uint8_t);
  }

  // Remainder loop: up to 3 trailing elements, one at a time.
  while (batch != 0) {
    uint32_t vx = (uint32_t) *input++;
    vx = math_min_u32(math_max_u32(vx, vlo), vhi);
    *output++ = (uint8_t) vx;
    batch -= sizeof(uint8_t);
  }
}
| 1,635
| 25.819672
| 74
|
c
|
XNNPACK
|
XNNPACK-master/src/u8-vclamp/u8-vclamp-sse2-x64.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vunary.h>
// Clamp microkernel (SSE2, 64 elements per main-loop iteration): clamps each
// of `batch` uint8_t elements to the [min, max] vectors in params->sse2.
// XNN_OOB_READS: the tail loads a full 16-byte vector and may read past the
// end of the input; only the valid lanes are stored.
void xnn_u8_vclamp_ukernel__sse2_x64(
    size_t batch,
    const uint8_t* input,
    uint8_t* output,
    const union xnn_u8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.max);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.min);
  // Main loop: 4 x 16 = 64 elements per iteration.
  for (; batch >= 64; batch -= 64) {
    __m128i vacc0 = _mm_loadu_si128((const __m128i*) input);
    __m128i vacc1 = _mm_loadu_si128((const __m128i*) input + 1);
    __m128i vacc2 = _mm_loadu_si128((const __m128i*) input + 2);
    __m128i vacc3 = _mm_loadu_si128((const __m128i*) input + 3);
    input += 64;
    vacc0 = _mm_max_epu8(vacc0, voutput_min);
    vacc1 = _mm_max_epu8(vacc1, voutput_min);
    vacc2 = _mm_max_epu8(vacc2, voutput_min);
    vacc3 = _mm_max_epu8(vacc3, voutput_min);
    vacc0 = _mm_min_epu8(vacc0, voutput_max);
    vacc1 = _mm_min_epu8(vacc1, voutput_max);
    vacc2 = _mm_min_epu8(vacc2, voutput_max);
    vacc3 = _mm_min_epu8(vacc3, voutput_max);
    _mm_storeu_si128((__m128i*) output, vacc0);
    _mm_storeu_si128((__m128i*) output + 1, vacc1);
    _mm_storeu_si128((__m128i*) output + 2, vacc2);
    _mm_storeu_si128((__m128i*) output + 3, vacc3);
    output += 64;
  }
  // 16-element vectors.
  for (; batch >= 16; batch -= 16) {
    __m128i vacc = _mm_loadu_si128((const __m128i*) input);
    input += 16;
    vacc = _mm_min_epu8(vacc, voutput_max);
    vacc = _mm_max_epu8(vacc, voutput_min);
    _mm_storeu_si128((__m128i*) output, vacc);
    output += 16;
  }
  // Tail (1-15 elements): load 16 bytes (OOB read), store 8/4/2/1 valid
  // lanes, shifting consumed lanes out after each store.
  if XNN_UNLIKELY(batch != 0) {
    __m128i vacc = _mm_loadu_si128((const __m128i*) input);
    vacc = _mm_min_epu8(vacc, voutput_max);
    vacc = _mm_max_epu8(vacc, voutput_min);
    if (batch & 8) {
      _mm_storel_epi64((__m128i*) output, vacc);
      output += 8;
      vacc = _mm_unpackhi_epi64(vacc, vacc);
    }
    if (batch & 4) {
      unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vacc));
      output += 4;
      vacc = _mm_srli_epi64(vacc, 32);
    }
    if (batch & 2) {
      unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vacc));
      output += 2;
      vacc = _mm_srli_epi32(vacc, 16);
    }
    if (batch & 1) {
      *output = (uint8_t) _mm_cvtsi128_si32(vacc);
    }
  }
}
| 2,639
| 29.697674
| 88
|
c
|
XNNPACK
|
XNNPACK-master/src/u8-vclamp/u8-vclamp-wasmsimd-x64.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
// Clamp microkernel (WAsm SIMD128, 64 elements per main-loop iteration):
// clamps each of `batch` uint8_t elements to the [min, max] vectors in
// params->wasmsimd.
// XNN_OOB_READS: the tail loads a full 16-byte vector and may read past the
// end of the input; only the valid lanes are stored.
void xnn_u8_vclamp_ukernel__wasmsimd_x64(
    size_t batch,
    const uint8_t* input,
    uint8_t* output,
    const union xnn_u8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
  // Main loop: 4 x 16 = 64 elements per iteration.
  for (; batch >= 64; batch -= 64) {
    v128_t vacc0 = wasm_v128_load(input);
    v128_t vacc1 = wasm_v128_load(input + 16);
    v128_t vacc2 = wasm_v128_load(input + 32);
    v128_t vacc3 = wasm_v128_load(input + 48);
    input += 64;
    vacc0 = wasm_u8x16_max(vacc0, voutput_min);
    vacc1 = wasm_u8x16_max(vacc1, voutput_min);
    vacc2 = wasm_u8x16_max(vacc2, voutput_min);
    vacc3 = wasm_u8x16_max(vacc3, voutput_min);
    vacc0 = wasm_u8x16_min(vacc0, voutput_max);
    vacc1 = wasm_u8x16_min(vacc1, voutput_max);
    vacc2 = wasm_u8x16_min(vacc2, voutput_max);
    vacc3 = wasm_u8x16_min(vacc3, voutput_max);
    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 16, vacc1);
    wasm_v128_store(output + 32, vacc2);
    wasm_v128_store(output + 48, vacc3);
    output += 64;
  }
  // 16-element vectors.
  for (; batch >= 16; batch -= 16) {
    v128_t vacc = wasm_v128_load(input);
    input += 16;
    vacc = wasm_u8x16_min(vacc, voutput_max);
    vacc = wasm_u8x16_max(vacc, voutput_min);
    wasm_v128_store(output, vacc);
    output += 16;
  }
  // Tail (1-15 elements): load 16 bytes (OOB read), store 8/4/2/1 valid
  // lanes, shifting consumed lanes out after each store.
  if XNN_UNLIKELY(batch != 0) {
    v128_t vacc = wasm_v128_load(input);
    vacc = wasm_u8x16_min(vacc, voutput_max);
    vacc = wasm_u8x16_max(vacc, voutput_min);
    if (batch & 8) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 8;
    }
    if (batch & 4) {
      wasm_v128_store32_lane(output, vacc, 0);
      vacc = wasm_u64x2_shr(vacc, 32);
      output += 4;
    }
    if (batch & 2) {
      wasm_v128_store16_lane(output, vacc, 0);
      vacc = wasm_u32x4_shr(vacc, 16);
      output += 2;
    }
    if (batch & 1) {
      wasm_v128_store8_lane(output, vacc, 0);
    }
  }
}
| 2,410
| 27.364706
| 88
|
c
|
XNNPACK
|
XNNPACK-master/src/x16-packw/gen/x16-packw-x16-gemm-goi-neon-ld4lane-x4-prfm.c
|
// Auto-generated file. Do not edit!
// Template: src/x16-packw/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/packw.h>
#include <xnnpack/prefetch.h>
// Packs 16-bit GOI-layout weights (groups x output channels x input channels)
// into the interleaved layout consumed by x16 GEMM microkernels with nr=16,
// kr=1, sr=1. For each group, rows are processed 16 at a time: a 16-entry
// bias vector (zeros if bias == NULL) is emitted first, then kc weight
// columns are transposed so that each packed 16-element row holds one input
// channel across 16 output channels. This "_prfm" variant additionally
// issues software prefetches ahead of the weight reads.
//
// Arguments:
//   g              - number of groups (must be non-zero)
//   nc             - number of output channels (rows) per group (non-zero)
//   kc             - number of input channels (columns) per row (non-zero)
//   nr/kr/sr       - packing parameters; this kernel requires 16/1/1
//   weights        - source weights, [g][nc][kc] contiguous
//   bias           - optional per-output-channel bias, [g][nc]; may be NULL
//   packed_weights - destination buffer for the packed stream
//   extra_bytes    - bytes to skip in the destination after each 16-row tile
//                    (presumably space reserved for per-tile metadata —
//                    caller-defined; this kernel only skips over it)
//   params         - unused by this kernel
void xnn_x16_packw_gemm_goi_ukernel_x16__neon_ld4lane_x4_prfm(
  size_t g,
  size_t nc,
  size_t kc,
  size_t nr,
  size_t kr,
  size_t sr,
  const uint16_t* weights,
  const uint16_t* bias,
  uint16_t* packed_weights,
  size_t extra_bytes,
  const void* params)
{
  assert(g != 0);
  assert(nc != 0);
  assert(kc != 0);
  assert(nr == 16);  // kernel is specialized for nr=16
  assert(kr == 1);
  assert(sr == 1);
  assert(weights != NULL);
  assert(packed_weights != NULL);

  // 4x8 lane-transpose staging registers: vtmp0123x01234567 gathers columns
  // k..k+3 of rows 0..7, vtmp0123x89ABCDEF of rows 8..15. Lanes not written
  // in remainder paths may carry garbage from a prior iteration; the
  // corresponding packed slots are padding and are never read back as data.
  uint16x8x4_t vtmp0123x01234567;
  uint16x8x4_t vtmp0123x89ABCDEF;

  do {
    // NC main loop multiple of 16
    const uint16_t* w0 = weights;
    size_t n = nc;
    for (; n >= 16; n -= 16) {
      // Emit 16 bias values (or zeros when bias is absent).
      if XNN_LIKELY(bias != NULL) {
        uint16x8_t vb0 = vld1q_u16(bias); bias += 8;
        uint16x8_t vb8 = vld1q_u16(bias); bias += 8;
        vst1q_u16(packed_weights, vb0); packed_weights += 8;
        vst1q_u16(packed_weights, vb8); packed_weights += 8;
      } else {
        const uint16x8_t vzero = vmovq_n_u16(0);
        vst1q_u16(packed_weights, vzero); packed_weights += 8;
        vst1q_u16(packed_weights, vzero); packed_weights += 8;
      }
      // One read cursor per output-channel row; rows are kc apart in GOI.
      const uint16_t* w1 = w0 + kc;
      const uint16_t* w2 = w1 + kc;
      const uint16_t* w3 = w2 + kc;
      const uint16_t* w4 = w3 + kc;
      const uint16_t* w5 = w4 + kc;
      const uint16_t* w6 = w5 + kc;
      const uint16_t* w7 = w6 + kc;
      const uint16_t* w8 = w7 + kc;
      const uint16_t* w9 = w8 + kc;
      const uint16_t* w10 = w9 + kc;
      const uint16_t* w11 = w10 + kc;
      const uint16_t* w12 = w11 + kc;
      const uint16_t* w13 = w12 + kc;
      const uint16_t* w14 = w13 + kc;
      const uint16_t* w15 = w14 + kc;
      // Warm the cache for the start of every row before the K loop.
      xnn_prefetch_to_l1((const int8_t*) w0);
      xnn_prefetch_to_l1((const int8_t*) w0 + 64);
      xnn_prefetch_to_l1((const int8_t*) w1);
      xnn_prefetch_to_l1((const int8_t*) w1 + 64);
      xnn_prefetch_to_l1((const int8_t*) w2);
      xnn_prefetch_to_l1((const int8_t*) w2 + 64);
      xnn_prefetch_to_l1((const int8_t*) w3);
      xnn_prefetch_to_l1((const int8_t*) w3 + 64);
      xnn_prefetch_to_l1((const int8_t*) w4);
      xnn_prefetch_to_l1((const int8_t*) w4 + 64);
      xnn_prefetch_to_l1((const int8_t*) w5);
      xnn_prefetch_to_l1((const int8_t*) w5 + 64);
      xnn_prefetch_to_l1((const int8_t*) w6);
      xnn_prefetch_to_l1((const int8_t*) w6 + 64);
      xnn_prefetch_to_l1((const int8_t*) w7);
      xnn_prefetch_to_l1((const int8_t*) w7 + 64);
      xnn_prefetch_to_l1((const int8_t*) w8);
      xnn_prefetch_to_l1((const int8_t*) w8 + 64);
      xnn_prefetch_to_l1((const int8_t*) w9);
      xnn_prefetch_to_l1((const int8_t*) w9 + 64);
      xnn_prefetch_to_l1((const int8_t*) w10);
      xnn_prefetch_to_l1((const int8_t*) w10 + 64);
      xnn_prefetch_to_l1((const int8_t*) w11);
      xnn_prefetch_to_l1((const int8_t*) w11 + 64);
      xnn_prefetch_to_l1((const int8_t*) w12);
      xnn_prefetch_to_l1((const int8_t*) w12 + 64);
      xnn_prefetch_to_l1((const int8_t*) w13);
      xnn_prefetch_to_l1((const int8_t*) w13 + 64);
      xnn_prefetch_to_l1((const int8_t*) w14);
      xnn_prefetch_to_l1((const int8_t*) w14 + 64);
      xnn_prefetch_to_l1((const int8_t*) w15);
      xnn_prefetch_to_l1((const int8_t*) w15 + 64);
      // KC main loop multiple of 4
      size_t k = kc;
      for (; k >= 4; k -= 4) {
        // vld4q_lane de-interleaves 4 consecutive u16 from each row into
        // lane <r> of the 4 staging vectors, building a 16x4 transpose.
        vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w7, vtmp0123x01234567, 7); w7 += 4;
        vtmp0123x89ABCDEF = vld4q_lane_u16(w8, vtmp0123x89ABCDEF, 0); w8 += 4;
        vtmp0123x89ABCDEF = vld4q_lane_u16(w9, vtmp0123x89ABCDEF, 1); w9 += 4;
        vtmp0123x89ABCDEF = vld4q_lane_u16(w10, vtmp0123x89ABCDEF, 2); w10 += 4;
        vtmp0123x89ABCDEF = vld4q_lane_u16(w11, vtmp0123x89ABCDEF, 3); w11 += 4;
        vtmp0123x89ABCDEF = vld4q_lane_u16(w12, vtmp0123x89ABCDEF, 4); w12 += 4;
        vtmp0123x89ABCDEF = vld4q_lane_u16(w13, vtmp0123x89ABCDEF, 5); w13 += 4;
        vtmp0123x89ABCDEF = vld4q_lane_u16(w14, vtmp0123x89ABCDEF, 6); w14 += 4;
        vtmp0123x89ABCDEF = vld4q_lane_u16(w15, vtmp0123x89ABCDEF, 7); w15 += 4;
        // Prefetch two cache lines ahead of each row's read cursor.
        xnn_prefetch_to_l1((const int8_t*) w0 + 128);
        xnn_prefetch_to_l1((const int8_t*) w1 + 128);
        xnn_prefetch_to_l1((const int8_t*) w2 + 128);
        xnn_prefetch_to_l1((const int8_t*) w3 + 128);
        xnn_prefetch_to_l1((const int8_t*) w4 + 128);
        xnn_prefetch_to_l1((const int8_t*) w5 + 128);
        xnn_prefetch_to_l1((const int8_t*) w6 + 128);
        xnn_prefetch_to_l1((const int8_t*) w7 + 128);
        xnn_prefetch_to_l1((const int8_t*) w8 + 128);
        xnn_prefetch_to_l1((const int8_t*) w9 + 128);
        xnn_prefetch_to_l1((const int8_t*) w10 + 128);
        xnn_prefetch_to_l1((const int8_t*) w11 + 128);
        xnn_prefetch_to_l1((const int8_t*) w12 + 128);
        xnn_prefetch_to_l1((const int8_t*) w13 + 128);
        xnn_prefetch_to_l1((const int8_t*) w14 + 128);
        xnn_prefetch_to_l1((const int8_t*) w15 + 128);
        // Store 4 packed rows of 16 values: column k of rows 0..15,
        // then column k+1, k+2, k+3.
        vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x89ABCDEF.val[0]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x89ABCDEF.val[1]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x89ABCDEF.val[2]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x89ABCDEF.val[3]); packed_weights += 8;
      }
      // KC remainder of 1..3
      // Same as main loop but ld1, ld2 or ld3
      if XNN_UNLIKELY(k != 0) {
        assert(k >= 1);
        assert(k <= 3);
        switch (k) {
          // KC remainder of 16x1
          case 1:
          {
            uint16x8_t vtmp0x01234567 = vdupq_n_u16(0);
            uint16x8_t vtmp0x89ABCDEF = vdupq_n_u16(0);
            vtmp0x01234567 = vld1q_lane_u16(w0, vtmp0x01234567, 0); w0 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w1, vtmp0x01234567, 1); w1 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w2, vtmp0x01234567, 2); w2 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w3, vtmp0x01234567, 3); w3 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w4, vtmp0x01234567, 4); w4 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w5, vtmp0x01234567, 5); w5 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w6, vtmp0x01234567, 6); w6 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w7, vtmp0x01234567, 7); w7 += 1;
            vtmp0x89ABCDEF = vld1q_lane_u16(w8, vtmp0x89ABCDEF, 0); w8 += 1;
            vtmp0x89ABCDEF = vld1q_lane_u16(w9, vtmp0x89ABCDEF, 1); w9 += 1;
            vtmp0x89ABCDEF = vld1q_lane_u16(w10, vtmp0x89ABCDEF, 2); w10 += 1;
            vtmp0x89ABCDEF = vld1q_lane_u16(w11, vtmp0x89ABCDEF, 3); w11 += 1;
            vtmp0x89ABCDEF = vld1q_lane_u16(w12, vtmp0x89ABCDEF, 4); w12 += 1;
            vtmp0x89ABCDEF = vld1q_lane_u16(w13, vtmp0x89ABCDEF, 5); w13 += 1;
            vtmp0x89ABCDEF = vld1q_lane_u16(w14, vtmp0x89ABCDEF, 6); w14 += 1;
            vtmp0x89ABCDEF = vld1q_lane_u16(w15, vtmp0x89ABCDEF, 7); w15 += 1;
            vst1q_u16(packed_weights, vtmp0x01234567); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp0x89ABCDEF); packed_weights += 8;
            break;
          }
          // KC remainder of 16x2
          case 2:
          {
            uint16x8x2_t vtmp01x01234567;
            vtmp01x01234567.val[0] = vdupq_n_u16(0);
            vtmp01x01234567.val[1] = vdupq_n_u16(0);
            uint16x8x2_t vtmp01x89ABCDEF;
            vtmp01x89ABCDEF.val[0] = vdupq_n_u16(0);
            vtmp01x89ABCDEF.val[1] = vdupq_n_u16(0);
            vtmp01x01234567 = vld2q_lane_u16(w0, vtmp01x01234567, 0); w0 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w1, vtmp01x01234567, 1); w1 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w2, vtmp01x01234567, 2); w2 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w3, vtmp01x01234567, 3); w3 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w4, vtmp01x01234567, 4); w4 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w5, vtmp01x01234567, 5); w5 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w6, vtmp01x01234567, 6); w6 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w7, vtmp01x01234567, 7); w7 += 2;
            vtmp01x89ABCDEF = vld2q_lane_u16(w8, vtmp01x89ABCDEF, 0); w8 += 2;
            vtmp01x89ABCDEF = vld2q_lane_u16(w9, vtmp01x89ABCDEF, 1); w9 += 2;
            vtmp01x89ABCDEF = vld2q_lane_u16(w10, vtmp01x89ABCDEF, 2); w10 += 2;
            vtmp01x89ABCDEF = vld2q_lane_u16(w11, vtmp01x89ABCDEF, 3); w11 += 2;
            vtmp01x89ABCDEF = vld2q_lane_u16(w12, vtmp01x89ABCDEF, 4); w12 += 2;
            vtmp01x89ABCDEF = vld2q_lane_u16(w13, vtmp01x89ABCDEF, 5); w13 += 2;
            vtmp01x89ABCDEF = vld2q_lane_u16(w14, vtmp01x89ABCDEF, 6); w14 += 2;
            vtmp01x89ABCDEF = vld2q_lane_u16(w15, vtmp01x89ABCDEF, 7); w15 += 2;
            vst1q_u16(packed_weights, vtmp01x01234567.val[0]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp01x89ABCDEF.val[0]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp01x01234567.val[1]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp01x89ABCDEF.val[1]); packed_weights += 8;
            break;
          }
          // KC remainder of 16x3
          case 3:
          {
            uint16x8x3_t vtmp012x01234567;
            vtmp012x01234567.val[0] = vdupq_n_u16(0);
            vtmp012x01234567.val[1] = vdupq_n_u16(0);
            vtmp012x01234567.val[2] = vdupq_n_u16(0);
            uint16x8x3_t vtmp012x89ABCDEF;
            vtmp012x89ABCDEF.val[0] = vdupq_n_u16(0);
            vtmp012x89ABCDEF.val[1] = vdupq_n_u16(0);
            vtmp012x89ABCDEF.val[2] = vdupq_n_u16(0);
            vtmp012x01234567 = vld3q_lane_u16(w0, vtmp012x01234567, 0); w0 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w1, vtmp012x01234567, 1); w1 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w2, vtmp012x01234567, 2); w2 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w3, vtmp012x01234567, 3); w3 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w4, vtmp012x01234567, 4); w4 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w5, vtmp012x01234567, 5); w5 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w6, vtmp012x01234567, 6); w6 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w7, vtmp012x01234567, 7); w7 += 3;
            vtmp012x89ABCDEF = vld3q_lane_u16(w8, vtmp012x89ABCDEF, 0); w8 += 3;
            vtmp012x89ABCDEF = vld3q_lane_u16(w9, vtmp012x89ABCDEF, 1); w9 += 3;
            vtmp012x89ABCDEF = vld3q_lane_u16(w10, vtmp012x89ABCDEF, 2); w10 += 3;
            vtmp012x89ABCDEF = vld3q_lane_u16(w11, vtmp012x89ABCDEF, 3); w11 += 3;
            vtmp012x89ABCDEF = vld3q_lane_u16(w12, vtmp012x89ABCDEF, 4); w12 += 3;
            vtmp012x89ABCDEF = vld3q_lane_u16(w13, vtmp012x89ABCDEF, 5); w13 += 3;
            vtmp012x89ABCDEF = vld3q_lane_u16(w14, vtmp012x89ABCDEF, 6); w14 += 3;
            vtmp012x89ABCDEF = vld3q_lane_u16(w15, vtmp012x89ABCDEF, 7); w15 += 3;
            vst1q_u16(packed_weights, vtmp012x01234567.val[0]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp012x89ABCDEF.val[0]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp012x01234567.val[1]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp012x89ABCDEF.val[1]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp012x01234567.val[2]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp012x89ABCDEF.val[2]); packed_weights += 8;
            break;
          }
          default:
            XNN_UNREACHABLE;
        }
      }
      // Skip caller-reserved space, then advance 16 rows (w15 already points
      // one row past row 15's start after consuming its kc elements... NOTE:
      // w15 was advanced through its whole row, so it now points at row 16).
      packed_weights = (uint16_t*) ((uintptr_t) packed_weights + extra_bytes);
      w0 = w15;
    }

    // NC remainder (1..15)
    if XNN_UNLIKELY(n != 0) {
      assert(n >= 1);
      assert(n <= 15);
      // Emit n real bias values and zero-skip the rest of the 16-slot group
      // (skipped slots are padding; their contents are whatever was there).
      if XNN_LIKELY(bias != NULL) {
        size_t nb = n;
        do {
          *packed_weights++ = *bias++;
        } while (--nb != 0);
        packed_weights += (16 - n);
      } else {
        const uint16x8_t vzero = vmovq_n_u16(0);
        vst1q_u16(packed_weights, vzero); packed_weights += 8;
        vst1q_u16(packed_weights, vzero); packed_weights += 8;
      }
      // NR remainder has less than 16 rows so last row is not loaded
      // Rows beyond n alias the last valid row to keep the loads in-bounds;
      // the duplicated values land in padding slots of the packed output.
      const uint16_t* w1 = w0 + kc;
      if XNN_UNPREDICTABLE(n < 2) {
        w1 = w0;
      }
      const uint16_t* w2 = w1 + kc;
      if XNN_UNPREDICTABLE(n <= 2) {
        w2 = w1;
      }
      const uint16_t* w3 = w2 + kc;
      if XNN_UNPREDICTABLE(n < 4) {
        w3 = w2;
      }
      const uint16_t* w4 = w3 + kc;
      if XNN_UNPREDICTABLE(n <= 4) {
        w4 = w3;
      }
      const uint16_t* w5 = w4 + kc;
      if XNN_UNPREDICTABLE(n < 6) {
        w5 = w4;
      }
      const uint16_t* w6 = w5 + kc;
      if XNN_UNPREDICTABLE(n <= 6) {
        w6 = w5;
      }
      const uint16_t* w7 = w6 + kc;
      if XNN_UNPREDICTABLE(n < 8) {
        w7 = w6;
      }
      const uint16_t* w8 = w7 + kc;
      if XNN_UNPREDICTABLE(n <= 8) {
        w8 = w7;
      }
      const uint16_t* w9 = w8 + kc;
      if XNN_UNPREDICTABLE(n < 10) {
        w9 = w8;
      }
      const uint16_t* w10 = w9 + kc;
      if XNN_UNPREDICTABLE(n <= 10) {
        w10 = w9;
      }
      const uint16_t* w11 = w10 + kc;
      if XNN_UNPREDICTABLE(n < 12) {
        w11 = w10;
      }
      const uint16_t* w12 = w11 + kc;
      if XNN_UNPREDICTABLE(n <= 12) {
        w12 = w11;
      }
      const uint16_t* w13 = w12 + kc;
      if XNN_UNPREDICTABLE(n < 14) {
        w13 = w12;
      }
      const uint16_t* w14 = w13 + kc;
      if XNN_UNPREDICTABLE(n <= 14) {
        w14 = w13;
      }
      // KC main loop multiple of 4
      // Same transpose as the main tile, but only rows 0..14 are loaded;
      // lane 7 of vtmp0123x89ABCDEF is left untouched (stored as padding).
      size_t k = kc;
      for (; k >= 4; k -= 4) {
        vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w7, vtmp0123x01234567, 7); w7 += 4;
        vtmp0123x89ABCDEF = vld4q_lane_u16(w8, vtmp0123x89ABCDEF, 0); w8 += 4;
        vtmp0123x89ABCDEF = vld4q_lane_u16(w9, vtmp0123x89ABCDEF, 1); w9 += 4;
        vtmp0123x89ABCDEF = vld4q_lane_u16(w10, vtmp0123x89ABCDEF, 2); w10 += 4;
        vtmp0123x89ABCDEF = vld4q_lane_u16(w11, vtmp0123x89ABCDEF, 3); w11 += 4;
        vtmp0123x89ABCDEF = vld4q_lane_u16(w12, vtmp0123x89ABCDEF, 4); w12 += 4;
        vtmp0123x89ABCDEF = vld4q_lane_u16(w13, vtmp0123x89ABCDEF, 5); w13 += 4;
        vtmp0123x89ABCDEF = vld4q_lane_u16(w14, vtmp0123x89ABCDEF, 6); w14 += 4;
        xnn_prefetch_to_l1((const int8_t*) w0 + 128);
        xnn_prefetch_to_l1((const int8_t*) w1 + 128);
        xnn_prefetch_to_l1((const int8_t*) w2 + 128);
        xnn_prefetch_to_l1((const int8_t*) w3 + 128);
        xnn_prefetch_to_l1((const int8_t*) w4 + 128);
        xnn_prefetch_to_l1((const int8_t*) w5 + 128);
        xnn_prefetch_to_l1((const int8_t*) w6 + 128);
        xnn_prefetch_to_l1((const int8_t*) w7 + 128);
        xnn_prefetch_to_l1((const int8_t*) w8 + 128);
        xnn_prefetch_to_l1((const int8_t*) w9 + 128);
        xnn_prefetch_to_l1((const int8_t*) w10 + 128);
        xnn_prefetch_to_l1((const int8_t*) w11 + 128);
        xnn_prefetch_to_l1((const int8_t*) w12 + 128);
        xnn_prefetch_to_l1((const int8_t*) w13 + 128);
        xnn_prefetch_to_l1((const int8_t*) w14 + 128);
        vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x89ABCDEF.val[0]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x89ABCDEF.val[1]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x89ABCDEF.val[2]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x89ABCDEF.val[3]); packed_weights += 8;
      }
      // KC remainder of 1..3
      // Same as main loop but ld1, ld2 or ld3
      if XNN_UNLIKELY(k != 0) {
        assert(k >= 1);
        assert(k <= 3);
        switch (k) {
          // KC remainder of 16x1
          case 1:
          {
            uint16x8_t vtmp0x01234567 = vdupq_n_u16(0);
            uint16x8_t vtmp0x89ABCDEF = vdupq_n_u16(0);
            vtmp0x01234567 = vld1q_lane_u16(w0, vtmp0x01234567, 0); w0 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w1, vtmp0x01234567, 1); w1 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w2, vtmp0x01234567, 2); w2 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w3, vtmp0x01234567, 3); w3 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w4, vtmp0x01234567, 4); w4 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w5, vtmp0x01234567, 5); w5 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w6, vtmp0x01234567, 6); w6 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w7, vtmp0x01234567, 7); w7 += 1;
            vtmp0x89ABCDEF = vld1q_lane_u16(w8, vtmp0x89ABCDEF, 0); w8 += 1;
            vtmp0x89ABCDEF = vld1q_lane_u16(w9, vtmp0x89ABCDEF, 1); w9 += 1;
            vtmp0x89ABCDEF = vld1q_lane_u16(w10, vtmp0x89ABCDEF, 2); w10 += 1;
            vtmp0x89ABCDEF = vld1q_lane_u16(w11, vtmp0x89ABCDEF, 3); w11 += 1;
            vtmp0x89ABCDEF = vld1q_lane_u16(w12, vtmp0x89ABCDEF, 4); w12 += 1;
            vtmp0x89ABCDEF = vld1q_lane_u16(w13, vtmp0x89ABCDEF, 5); w13 += 1;
            vtmp0x89ABCDEF = vld1q_lane_u16(w14, vtmp0x89ABCDEF, 6); w14 += 1;
            vst1q_u16(packed_weights, vtmp0x01234567); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp0x89ABCDEF); packed_weights += 8;
            break;
          }
          // KC remainder of 16x2
          case 2:
          {
            uint16x8x2_t vtmp01x01234567;
            vtmp01x01234567.val[0] = vdupq_n_u16(0);
            vtmp01x01234567.val[1] = vdupq_n_u16(0);
            uint16x8x2_t vtmp01x89ABCDEF;
            vtmp01x89ABCDEF.val[0] = vdupq_n_u16(0);
            vtmp01x89ABCDEF.val[1] = vdupq_n_u16(0);
            vtmp01x01234567 = vld2q_lane_u16(w0, vtmp01x01234567, 0); w0 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w1, vtmp01x01234567, 1); w1 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w2, vtmp01x01234567, 2); w2 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w3, vtmp01x01234567, 3); w3 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w4, vtmp01x01234567, 4); w4 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w5, vtmp01x01234567, 5); w5 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w6, vtmp01x01234567, 6); w6 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w7, vtmp01x01234567, 7); w7 += 2;
            vtmp01x89ABCDEF = vld2q_lane_u16(w8, vtmp01x89ABCDEF, 0); w8 += 2;
            vtmp01x89ABCDEF = vld2q_lane_u16(w9, vtmp01x89ABCDEF, 1); w9 += 2;
            vtmp01x89ABCDEF = vld2q_lane_u16(w10, vtmp01x89ABCDEF, 2); w10 += 2;
            vtmp01x89ABCDEF = vld2q_lane_u16(w11, vtmp01x89ABCDEF, 3); w11 += 2;
            vtmp01x89ABCDEF = vld2q_lane_u16(w12, vtmp01x89ABCDEF, 4); w12 += 2;
            vtmp01x89ABCDEF = vld2q_lane_u16(w13, vtmp01x89ABCDEF, 5); w13 += 2;
            vtmp01x89ABCDEF = vld2q_lane_u16(w14, vtmp01x89ABCDEF, 6); w14 += 2;
            vst1q_u16(packed_weights, vtmp01x01234567.val[0]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp01x89ABCDEF.val[0]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp01x01234567.val[1]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp01x89ABCDEF.val[1]); packed_weights += 8;
            break;
          }
          // KC remainder of 16x3
          case 3:
          {
            uint16x8x3_t vtmp012x01234567;
            vtmp012x01234567.val[0] = vdupq_n_u16(0);
            vtmp012x01234567.val[1] = vdupq_n_u16(0);
            vtmp012x01234567.val[2] = vdupq_n_u16(0);
            uint16x8x3_t vtmp012x89ABCDEF;
            vtmp012x89ABCDEF.val[0] = vdupq_n_u16(0);
            vtmp012x89ABCDEF.val[1] = vdupq_n_u16(0);
            vtmp012x89ABCDEF.val[2] = vdupq_n_u16(0);
            vtmp012x01234567 = vld3q_lane_u16(w0, vtmp012x01234567, 0); w0 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w1, vtmp012x01234567, 1); w1 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w2, vtmp012x01234567, 2); w2 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w3, vtmp012x01234567, 3); w3 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w4, vtmp012x01234567, 4); w4 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w5, vtmp012x01234567, 5); w5 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w6, vtmp012x01234567, 6); w6 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w7, vtmp012x01234567, 7); w7 += 3;
            vtmp012x89ABCDEF = vld3q_lane_u16(w8, vtmp012x89ABCDEF, 0); w8 += 3;
            vtmp012x89ABCDEF = vld3q_lane_u16(w9, vtmp012x89ABCDEF, 1); w9 += 3;
            vtmp012x89ABCDEF = vld3q_lane_u16(w10, vtmp012x89ABCDEF, 2); w10 += 3;
            vtmp012x89ABCDEF = vld3q_lane_u16(w11, vtmp012x89ABCDEF, 3); w11 += 3;
            vtmp012x89ABCDEF = vld3q_lane_u16(w12, vtmp012x89ABCDEF, 4); w12 += 3;
            vtmp012x89ABCDEF = vld3q_lane_u16(w13, vtmp012x89ABCDEF, 5); w13 += 3;
            vtmp012x89ABCDEF = vld3q_lane_u16(w14, vtmp012x89ABCDEF, 6); w14 += 3;
            vst1q_u16(packed_weights, vtmp012x01234567.val[0]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp012x89ABCDEF.val[0]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp012x01234567.val[1]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp012x89ABCDEF.val[1]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp012x01234567.val[2]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp012x89ABCDEF.val[2]); packed_weights += 8;
            break;
          }
          default:
            XNN_UNREACHABLE;
        }
      }
      packed_weights = (uint16_t*) ((uintptr_t) packed_weights + extra_bytes);
    }
    // Advance to the next group's weights.
    weights += nc * kc;
  } while (--g != 0);
}
| 23,585
| 46.648485
| 84
|
c
|
XNNPACK
|
XNNPACK-master/src/x16-packw/gen/x16-packw-x16-gemm-goi-neon-ld4lane-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/x16-packw/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/packw.h>
void xnn_x16_packw_gemm_goi_ukernel_x16__neon_ld4lane_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint16_t* weights,
const uint16_t* bias,
uint16_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 16);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
uint16x8x4_t vtmp0123x01234567;
uint16x8x4_t vtmp0123x89ABCDEF;
do {
// NC main loop multiple of 16
const uint16_t* w0 = weights;
size_t n = nc;
for (; n >= 16; n -= 16) {
if XNN_LIKELY(bias != NULL) {
uint16x8_t vb0 = vld1q_u16(bias); bias += 8;
uint16x8_t vb8 = vld1q_u16(bias); bias += 8;
vst1q_u16(packed_weights, vb0); packed_weights += 8;
vst1q_u16(packed_weights, vb8); packed_weights += 8;
} else {
const uint16x8_t vzero = vmovq_n_u16(0);
vst1q_u16(packed_weights, vzero); packed_weights += 8;
vst1q_u16(packed_weights, vzero); packed_weights += 8;
}
const uint16_t* w1 = w0 + kc;
const uint16_t* w2 = w1 + kc;
const uint16_t* w3 = w2 + kc;
const uint16_t* w4 = w3 + kc;
const uint16_t* w5 = w4 + kc;
const uint16_t* w6 = w5 + kc;
const uint16_t* w7 = w6 + kc;
const uint16_t* w8 = w7 + kc;
const uint16_t* w9 = w8 + kc;
const uint16_t* w10 = w9 + kc;
const uint16_t* w11 = w10 + kc;
const uint16_t* w12 = w11 + kc;
const uint16_t* w13 = w12 + kc;
const uint16_t* w14 = w13 + kc;
const uint16_t* w15 = w14 + kc;
// KC main loop multiple of 4
size_t k = kc;
for (; k >= 4; k -= 4) {
vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w7, vtmp0123x01234567, 7); w7 += 4;
vtmp0123x89ABCDEF = vld4q_lane_u16(w8, vtmp0123x89ABCDEF, 0); w8 += 4;
vtmp0123x89ABCDEF = vld4q_lane_u16(w9, vtmp0123x89ABCDEF, 1); w9 += 4;
vtmp0123x89ABCDEF = vld4q_lane_u16(w10, vtmp0123x89ABCDEF, 2); w10 += 4;
vtmp0123x89ABCDEF = vld4q_lane_u16(w11, vtmp0123x89ABCDEF, 3); w11 += 4;
vtmp0123x89ABCDEF = vld4q_lane_u16(w12, vtmp0123x89ABCDEF, 4); w12 += 4;
vtmp0123x89ABCDEF = vld4q_lane_u16(w13, vtmp0123x89ABCDEF, 5); w13 += 4;
vtmp0123x89ABCDEF = vld4q_lane_u16(w14, vtmp0123x89ABCDEF, 6); w14 += 4;
vtmp0123x89ABCDEF = vld4q_lane_u16(w15, vtmp0123x89ABCDEF, 7); w15 += 4;
vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x89ABCDEF.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x89ABCDEF.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x89ABCDEF.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x89ABCDEF.val[3]); packed_weights += 8;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 16x1
case 1:
{
uint16x8_t vtmp0x01234567 = vdupq_n_u16(0);
uint16x8_t vtmp0x89ABCDEF = vdupq_n_u16(0);
vtmp0x01234567 = vld1q_lane_u16(w0, vtmp0x01234567, 0); w0 += 1;
vtmp0x01234567 = vld1q_lane_u16(w1, vtmp0x01234567, 1); w1 += 1;
vtmp0x01234567 = vld1q_lane_u16(w2, vtmp0x01234567, 2); w2 += 1;
vtmp0x01234567 = vld1q_lane_u16(w3, vtmp0x01234567, 3); w3 += 1;
vtmp0x01234567 = vld1q_lane_u16(w4, vtmp0x01234567, 4); w4 += 1;
vtmp0x01234567 = vld1q_lane_u16(w5, vtmp0x01234567, 5); w5 += 1;
vtmp0x01234567 = vld1q_lane_u16(w6, vtmp0x01234567, 6); w6 += 1;
vtmp0x01234567 = vld1q_lane_u16(w7, vtmp0x01234567, 7); w7 += 1;
vtmp0x89ABCDEF = vld1q_lane_u16(w8, vtmp0x89ABCDEF, 0); w8 += 1;
vtmp0x89ABCDEF = vld1q_lane_u16(w9, vtmp0x89ABCDEF, 1); w9 += 1;
vtmp0x89ABCDEF = vld1q_lane_u16(w10, vtmp0x89ABCDEF, 2); w10 += 1;
vtmp0x89ABCDEF = vld1q_lane_u16(w11, vtmp0x89ABCDEF, 3); w11 += 1;
vtmp0x89ABCDEF = vld1q_lane_u16(w12, vtmp0x89ABCDEF, 4); w12 += 1;
vtmp0x89ABCDEF = vld1q_lane_u16(w13, vtmp0x89ABCDEF, 5); w13 += 1;
vtmp0x89ABCDEF = vld1q_lane_u16(w14, vtmp0x89ABCDEF, 6); w14 += 1;
vtmp0x89ABCDEF = vld1q_lane_u16(w15, vtmp0x89ABCDEF, 7); w15 += 1;
vst1q_u16(packed_weights, vtmp0x01234567); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0x89ABCDEF); packed_weights += 8;
break;
}
// KC remainder of 16x2
case 2:
{
uint16x8x2_t vtmp01x01234567;
vtmp01x01234567.val[0] = vdupq_n_u16(0);
vtmp01x01234567.val[1] = vdupq_n_u16(0);
uint16x8x2_t vtmp01x89ABCDEF;
vtmp01x89ABCDEF.val[0] = vdupq_n_u16(0);
vtmp01x89ABCDEF.val[1] = vdupq_n_u16(0);
vtmp01x01234567 = vld2q_lane_u16(w0, vtmp01x01234567, 0); w0 += 2;
vtmp01x01234567 = vld2q_lane_u16(w1, vtmp01x01234567, 1); w1 += 2;
vtmp01x01234567 = vld2q_lane_u16(w2, vtmp01x01234567, 2); w2 += 2;
vtmp01x01234567 = vld2q_lane_u16(w3, vtmp01x01234567, 3); w3 += 2;
vtmp01x01234567 = vld2q_lane_u16(w4, vtmp01x01234567, 4); w4 += 2;
vtmp01x01234567 = vld2q_lane_u16(w5, vtmp01x01234567, 5); w5 += 2;
vtmp01x01234567 = vld2q_lane_u16(w6, vtmp01x01234567, 6); w6 += 2;
vtmp01x01234567 = vld2q_lane_u16(w7, vtmp01x01234567, 7); w7 += 2;
vtmp01x89ABCDEF = vld2q_lane_u16(w8, vtmp01x89ABCDEF, 0); w8 += 2;
vtmp01x89ABCDEF = vld2q_lane_u16(w9, vtmp01x89ABCDEF, 1); w9 += 2;
vtmp01x89ABCDEF = vld2q_lane_u16(w10, vtmp01x89ABCDEF, 2); w10 += 2;
vtmp01x89ABCDEF = vld2q_lane_u16(w11, vtmp01x89ABCDEF, 3); w11 += 2;
vtmp01x89ABCDEF = vld2q_lane_u16(w12, vtmp01x89ABCDEF, 4); w12 += 2;
vtmp01x89ABCDEF = vld2q_lane_u16(w13, vtmp01x89ABCDEF, 5); w13 += 2;
vtmp01x89ABCDEF = vld2q_lane_u16(w14, vtmp01x89ABCDEF, 6); w14 += 2;
vtmp01x89ABCDEF = vld2q_lane_u16(w15, vtmp01x89ABCDEF, 7); w15 += 2;
vst1q_u16(packed_weights, vtmp01x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp01x89ABCDEF.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp01x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp01x89ABCDEF.val[1]); packed_weights += 8;
break;
}
// KC remainder of 16x3
case 3:
{
uint16x8x3_t vtmp012x01234567;
vtmp012x01234567.val[0] = vdupq_n_u16(0);
vtmp012x01234567.val[1] = vdupq_n_u16(0);
vtmp012x01234567.val[2] = vdupq_n_u16(0);
uint16x8x3_t vtmp012x89ABCDEF;
vtmp012x89ABCDEF.val[0] = vdupq_n_u16(0);
vtmp012x89ABCDEF.val[1] = vdupq_n_u16(0);
vtmp012x89ABCDEF.val[2] = vdupq_n_u16(0);
vtmp012x01234567 = vld3q_lane_u16(w0, vtmp012x01234567, 0); w0 += 3;
vtmp012x01234567 = vld3q_lane_u16(w1, vtmp012x01234567, 1); w1 += 3;
vtmp012x01234567 = vld3q_lane_u16(w2, vtmp012x01234567, 2); w2 += 3;
vtmp012x01234567 = vld3q_lane_u16(w3, vtmp012x01234567, 3); w3 += 3;
vtmp012x01234567 = vld3q_lane_u16(w4, vtmp012x01234567, 4); w4 += 3;
vtmp012x01234567 = vld3q_lane_u16(w5, vtmp012x01234567, 5); w5 += 3;
vtmp012x01234567 = vld3q_lane_u16(w6, vtmp012x01234567, 6); w6 += 3;
vtmp012x01234567 = vld3q_lane_u16(w7, vtmp012x01234567, 7); w7 += 3;
vtmp012x89ABCDEF = vld3q_lane_u16(w8, vtmp012x89ABCDEF, 0); w8 += 3;
vtmp012x89ABCDEF = vld3q_lane_u16(w9, vtmp012x89ABCDEF, 1); w9 += 3;
vtmp012x89ABCDEF = vld3q_lane_u16(w10, vtmp012x89ABCDEF, 2); w10 += 3;
vtmp012x89ABCDEF = vld3q_lane_u16(w11, vtmp012x89ABCDEF, 3); w11 += 3;
vtmp012x89ABCDEF = vld3q_lane_u16(w12, vtmp012x89ABCDEF, 4); w12 += 3;
vtmp012x89ABCDEF = vld3q_lane_u16(w13, vtmp012x89ABCDEF, 5); w13 += 3;
vtmp012x89ABCDEF = vld3q_lane_u16(w14, vtmp012x89ABCDEF, 6); w14 += 3;
vtmp012x89ABCDEF = vld3q_lane_u16(w15, vtmp012x89ABCDEF, 7); w15 += 3;
vst1q_u16(packed_weights, vtmp012x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x89ABCDEF.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x89ABCDEF.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x89ABCDEF.val[2]); packed_weights += 8;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint16_t*) ((uintptr_t) packed_weights + extra_bytes);
w0 = w15;
}
// NC remainder (1..15)
if XNN_UNLIKELY(n != 0) {
assert(n >= 1);
assert(n <= 15);
if XNN_LIKELY(bias != NULL) {
size_t nb = n;
do {
*packed_weights++ = *bias++;
} while (--nb != 0);
packed_weights += (16 - n);
} else {
const uint16x8_t vzero = vmovq_n_u16(0);
vst1q_u16(packed_weights, vzero); packed_weights += 8;
vst1q_u16(packed_weights, vzero); packed_weights += 8;
}
// NR remainder has less than 16 rows so last row is not loaded
const uint16_t* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const uint16_t* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
const uint16_t* w3 = w2 + kc;
if XNN_UNPREDICTABLE(n < 4) {
w3 = w2;
}
const uint16_t* w4 = w3 + kc;
if XNN_UNPREDICTABLE(n <= 4) {
w4 = w3;
}
const uint16_t* w5 = w4 + kc;
if XNN_UNPREDICTABLE(n < 6) {
w5 = w4;
}
const uint16_t* w6 = w5 + kc;
if XNN_UNPREDICTABLE(n <= 6) {
w6 = w5;
}
const uint16_t* w7 = w6 + kc;
if XNN_UNPREDICTABLE(n < 8) {
w7 = w6;
}
const uint16_t* w8 = w7 + kc;
if XNN_UNPREDICTABLE(n <= 8) {
w8 = w7;
}
const uint16_t* w9 = w8 + kc;
if XNN_UNPREDICTABLE(n < 10) {
w9 = w8;
}
const uint16_t* w10 = w9 + kc;
if XNN_UNPREDICTABLE(n <= 10) {
w10 = w9;
}
const uint16_t* w11 = w10 + kc;
if XNN_UNPREDICTABLE(n < 12) {
w11 = w10;
}
const uint16_t* w12 = w11 + kc;
if XNN_UNPREDICTABLE(n <= 12) {
w12 = w11;
}
const uint16_t* w13 = w12 + kc;
if XNN_UNPREDICTABLE(n < 14) {
w13 = w12;
}
const uint16_t* w14 = w13 + kc;
if XNN_UNPREDICTABLE(n <= 14) {
w14 = w13;
}
// KC main loop multiple of 4
size_t k = kc;
for (; k >= 4; k -= 4) {
vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
vtmp0123x01234567 = vld4q_lane_u16(w7, vtmp0123x01234567, 7); w7 += 4;
vtmp0123x89ABCDEF = vld4q_lane_u16(w8, vtmp0123x89ABCDEF, 0); w8 += 4;
vtmp0123x89ABCDEF = vld4q_lane_u16(w9, vtmp0123x89ABCDEF, 1); w9 += 4;
vtmp0123x89ABCDEF = vld4q_lane_u16(w10, vtmp0123x89ABCDEF, 2); w10 += 4;
vtmp0123x89ABCDEF = vld4q_lane_u16(w11, vtmp0123x89ABCDEF, 3); w11 += 4;
vtmp0123x89ABCDEF = vld4q_lane_u16(w12, vtmp0123x89ABCDEF, 4); w12 += 4;
vtmp0123x89ABCDEF = vld4q_lane_u16(w13, vtmp0123x89ABCDEF, 5); w13 += 4;
vtmp0123x89ABCDEF = vld4q_lane_u16(w14, vtmp0123x89ABCDEF, 6); w14 += 4;
vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x89ABCDEF.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x89ABCDEF.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x89ABCDEF.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0123x89ABCDEF.val[3]); packed_weights += 8;
}
// KC remainder of 1..3
// Same as main loop but ld1, ld2 or ld3
if XNN_UNLIKELY(k != 0) {
assert(k >= 1);
assert(k <= 3);
switch (k) {
// KC remainder of 16x1
case 1:
{
uint16x8_t vtmp0x01234567 = vdupq_n_u16(0);
uint16x8_t vtmp0x89ABCDEF = vdupq_n_u16(0);
vtmp0x01234567 = vld1q_lane_u16(w0, vtmp0x01234567, 0); w0 += 1;
vtmp0x01234567 = vld1q_lane_u16(w1, vtmp0x01234567, 1); w1 += 1;
vtmp0x01234567 = vld1q_lane_u16(w2, vtmp0x01234567, 2); w2 += 1;
vtmp0x01234567 = vld1q_lane_u16(w3, vtmp0x01234567, 3); w3 += 1;
vtmp0x01234567 = vld1q_lane_u16(w4, vtmp0x01234567, 4); w4 += 1;
vtmp0x01234567 = vld1q_lane_u16(w5, vtmp0x01234567, 5); w5 += 1;
vtmp0x01234567 = vld1q_lane_u16(w6, vtmp0x01234567, 6); w6 += 1;
vtmp0x01234567 = vld1q_lane_u16(w7, vtmp0x01234567, 7); w7 += 1;
vtmp0x89ABCDEF = vld1q_lane_u16(w8, vtmp0x89ABCDEF, 0); w8 += 1;
vtmp0x89ABCDEF = vld1q_lane_u16(w9, vtmp0x89ABCDEF, 1); w9 += 1;
vtmp0x89ABCDEF = vld1q_lane_u16(w10, vtmp0x89ABCDEF, 2); w10 += 1;
vtmp0x89ABCDEF = vld1q_lane_u16(w11, vtmp0x89ABCDEF, 3); w11 += 1;
vtmp0x89ABCDEF = vld1q_lane_u16(w12, vtmp0x89ABCDEF, 4); w12 += 1;
vtmp0x89ABCDEF = vld1q_lane_u16(w13, vtmp0x89ABCDEF, 5); w13 += 1;
vtmp0x89ABCDEF = vld1q_lane_u16(w14, vtmp0x89ABCDEF, 6); w14 += 1;
vst1q_u16(packed_weights, vtmp0x01234567); packed_weights += 8;
vst1q_u16(packed_weights, vtmp0x89ABCDEF); packed_weights += 8;
break;
}
// KC remainder of 16x2
case 2:
{
uint16x8x2_t vtmp01x01234567;
vtmp01x01234567.val[0] = vdupq_n_u16(0);
vtmp01x01234567.val[1] = vdupq_n_u16(0);
uint16x8x2_t vtmp01x89ABCDEF;
vtmp01x89ABCDEF.val[0] = vdupq_n_u16(0);
vtmp01x89ABCDEF.val[1] = vdupq_n_u16(0);
vtmp01x01234567 = vld2q_lane_u16(w0, vtmp01x01234567, 0); w0 += 2;
vtmp01x01234567 = vld2q_lane_u16(w1, vtmp01x01234567, 1); w1 += 2;
vtmp01x01234567 = vld2q_lane_u16(w2, vtmp01x01234567, 2); w2 += 2;
vtmp01x01234567 = vld2q_lane_u16(w3, vtmp01x01234567, 3); w3 += 2;
vtmp01x01234567 = vld2q_lane_u16(w4, vtmp01x01234567, 4); w4 += 2;
vtmp01x01234567 = vld2q_lane_u16(w5, vtmp01x01234567, 5); w5 += 2;
vtmp01x01234567 = vld2q_lane_u16(w6, vtmp01x01234567, 6); w6 += 2;
vtmp01x01234567 = vld2q_lane_u16(w7, vtmp01x01234567, 7); w7 += 2;
vtmp01x89ABCDEF = vld2q_lane_u16(w8, vtmp01x89ABCDEF, 0); w8 += 2;
vtmp01x89ABCDEF = vld2q_lane_u16(w9, vtmp01x89ABCDEF, 1); w9 += 2;
vtmp01x89ABCDEF = vld2q_lane_u16(w10, vtmp01x89ABCDEF, 2); w10 += 2;
vtmp01x89ABCDEF = vld2q_lane_u16(w11, vtmp01x89ABCDEF, 3); w11 += 2;
vtmp01x89ABCDEF = vld2q_lane_u16(w12, vtmp01x89ABCDEF, 4); w12 += 2;
vtmp01x89ABCDEF = vld2q_lane_u16(w13, vtmp01x89ABCDEF, 5); w13 += 2;
vtmp01x89ABCDEF = vld2q_lane_u16(w14, vtmp01x89ABCDEF, 6); w14 += 2;
vst1q_u16(packed_weights, vtmp01x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp01x89ABCDEF.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp01x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp01x89ABCDEF.val[1]); packed_weights += 8;
break;
}
// KC remainder of 16x3
case 3:
{
uint16x8x3_t vtmp012x01234567;
vtmp012x01234567.val[0] = vdupq_n_u16(0);
vtmp012x01234567.val[1] = vdupq_n_u16(0);
vtmp012x01234567.val[2] = vdupq_n_u16(0);
uint16x8x3_t vtmp012x89ABCDEF;
vtmp012x89ABCDEF.val[0] = vdupq_n_u16(0);
vtmp012x89ABCDEF.val[1] = vdupq_n_u16(0);
vtmp012x89ABCDEF.val[2] = vdupq_n_u16(0);
vtmp012x01234567 = vld3q_lane_u16(w0, vtmp012x01234567, 0); w0 += 3;
vtmp012x01234567 = vld3q_lane_u16(w1, vtmp012x01234567, 1); w1 += 3;
vtmp012x01234567 = vld3q_lane_u16(w2, vtmp012x01234567, 2); w2 += 3;
vtmp012x01234567 = vld3q_lane_u16(w3, vtmp012x01234567, 3); w3 += 3;
vtmp012x01234567 = vld3q_lane_u16(w4, vtmp012x01234567, 4); w4 += 3;
vtmp012x01234567 = vld3q_lane_u16(w5, vtmp012x01234567, 5); w5 += 3;
vtmp012x01234567 = vld3q_lane_u16(w6, vtmp012x01234567, 6); w6 += 3;
vtmp012x01234567 = vld3q_lane_u16(w7, vtmp012x01234567, 7); w7 += 3;
vtmp012x89ABCDEF = vld3q_lane_u16(w8, vtmp012x89ABCDEF, 0); w8 += 3;
vtmp012x89ABCDEF = vld3q_lane_u16(w9, vtmp012x89ABCDEF, 1); w9 += 3;
vtmp012x89ABCDEF = vld3q_lane_u16(w10, vtmp012x89ABCDEF, 2); w10 += 3;
vtmp012x89ABCDEF = vld3q_lane_u16(w11, vtmp012x89ABCDEF, 3); w11 += 3;
vtmp012x89ABCDEF = vld3q_lane_u16(w12, vtmp012x89ABCDEF, 4); w12 += 3;
vtmp012x89ABCDEF = vld3q_lane_u16(w13, vtmp012x89ABCDEF, 5); w13 += 3;
vtmp012x89ABCDEF = vld3q_lane_u16(w14, vtmp012x89ABCDEF, 6); w14 += 3;
vst1q_u16(packed_weights, vtmp012x01234567.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x89ABCDEF.val[0]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x89ABCDEF.val[1]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x01234567.val[2]); packed_weights += 8;
vst1q_u16(packed_weights, vtmp012x89ABCDEF.val[2]); packed_weights += 8;
break;
}
default:
XNN_UNREACHABLE;
}
}
packed_weights = (uint16_t*) ((uintptr_t) packed_weights + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
| 20,301
| 46.104408
| 84
|
c
|
XNNPACK
|
XNNPACK-master/src/x16-packw/gen/x16-packw-x16-gemm-goi-scalar-int-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/x32-packw/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/packw.h>
// Packs 16-bit GEMM weights from group-output-input (GOI) order into the
// layout consumed by x16 GEMM microkernels with nr=16, kr=1, sr=1.
// Per tile of 16 output channels the packed stream holds 16 bias values
// (zeros when bias == NULL) followed by kc rounds of 16 interleaved
// weights, one per channel; `extra_bytes` of per-tile padding are skipped
// after each tile. Scalar implementation, processing 4 k-elements at a time.
void xnn_x16_packw_gemm_goi_ukernel_x16__scalar_int_x4(
  size_t g,
  size_t nc,
  size_t kc,
  size_t nr,
  size_t kr,
  size_t sr,
  const uint16_t* weights,
  const uint16_t* bias,
  uint16_t* packed_weights,
  size_t extra_bytes,
  const void* params)
{
  assert(g != 0);
  assert(nc != 0);
  assert(kc != 0);
  assert(nr == 16);  // kernel is specialized for a 16-wide output tile
  assert(kr == 1);
  assert(sr == 1);
  assert(weights != NULL);
  assert(packed_weights != NULL);

  uint16_t* out = (uint16_t*) packed_weights;
  const uint16_t* b = (const uint16_t*) bias;

  do {
    // NC main loop: full tiles of 16 output channels.
    const uint16_t* w0 = (const uint16_t*) weights;
    size_t n = nc;
    for (; n >= 16; n -= 16) {
      // Emit the 16 bias values for this tile, or zeros when no bias is given.
      if XNN_LIKELY(b != NULL) {
        for (size_t i = 0; i < 16; i++) {
          out[i] = b[i];
        }
        b += 16;
      } else {
        for (size_t i = 0; i < 16; i++) {
          out[i] = 0;
        }
      }
      out += 16;

      // One read cursor per row (output channel); consecutive rows are
      // kc elements apart in the GOI weight layout.
      const uint16_t* w[16];
      w[0] = w0;
      for (size_t i = 1; i < 16; i++) {
        w[i] = w[i - 1] + kc;
      }

      // KC main loop: transpose a 16x4 patch (4 k-elements from each of the
      // 16 rows) into 4 groups of 16 channel-interleaved values.
      size_t k = kc;
      for (; k >= 4; k -= 4) {
        for (size_t j = 0; j < 4; j++) {
          for (size_t i = 0; i < 16; i++) {
            out[j * 16 + i] = w[i][j];
          }
        }
        for (size_t i = 0; i < 16; i++) {
          w[i] += 4;
        }
        out += 64;
      }
      // KC remainder: one k-element from every row per iteration.
      for (; k != 0; --k) {
        for (size_t i = 0; i < 16; i++) {
          out[i] = *w[i]++;
        }
        out += 16;
      }
      out = (uint16_t*) ((uintptr_t) out + extra_bytes);
      // w[15] has been advanced through its whole row, so it now points at
      // the first row of the next 16-channel tile.
      w0 = w[15];
    }

    // NC remainder: a final partial tile of 1..15 output channels.
    if XNN_UNLIKELY(n != 0) {
      // Pack n bias values (or zeros); lanes n..15 of the bias group are
      // skipped and keep whatever the destination buffer already held —
      // GEMM consumers never use results from those padding lanes.
      if XNN_LIKELY(b != NULL) {
        for (size_t i = 0; i < n; i++) {
          out[i] = b[i];
        }
        b += n;
      } else {
        for (size_t i = 0; i < n; i++) {
          out[i] = 0;
        }
      }
      out += 16;

      // Row pointers for the partial tile. A row index at or beyond n
      // aliases the previous row so all loads stay in bounds; row 15 is
      // never materialized because n <= 15.
      const uint16_t* w[15];
      w[0] = w0;
      for (size_t i = 1; i < 15; i++) {
        w[i] = (n <= i) ? w[i - 1] : w[i - 1] + kc;
      }

      // KC main loop: same 4-wide interleave as the full tile, but only
      // rows 0..14 are written; lane 15 of each group is left untouched.
      size_t k = kc;
      for (; k >= 4; k -= 4) {
        for (size_t j = 0; j < 4; j++) {
          for (size_t i = 0; i < 15; i++) {
            out[j * 16 + i] = w[i][j];
          }
        }
        for (size_t i = 0; i < 15; i++) {
          w[i] += 4;
        }
        out += 64;
      }
      // KC remainder of 1..3 k-elements.
      for (; k != 0; --k) {
        for (size_t i = 0; i < 15; i++) {
          out[i] = *w[i]++;
        }
        out += 16;
      }
      out = (uint16_t*) ((uintptr_t) out + extra_bytes);
    }
    weights += nc * kc;
  } while (--g != 0);
}
| 14,523
| 25.552102
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/x16-packw/gen/x16-packw-x8-gemm-goi-neon-ld4lane-x12-prfm.c
|
// Auto-generated file. Do not edit!
// Template: src/x16-packw/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/packw.h>
#include <xnnpack/prefetch.h>
// Packs 16-bit GEMM weights from group-output-input (GOI) order into the
// layout consumed by x16 GEMM microkernels with nr=8, kr=1, sr=1.
// Per tile of 8 output channels the packed stream holds 8 bias values
// (zeros when bias == NULL) followed by kc rounds of 8 channel-interleaved
// weights; `extra_bytes` of per-tile padding are skipped after each tile.
// NEON implementation: vld4q_lane_u16 gathers 4 consecutive k-elements of
// one row into lane <row> of a uint16x8x4_t, so after loading all 8 rows
// each .val[j] register holds k-element j for channels 0..7 — a free
// 8x4 transpose. The main loop handles 12 k-elements (3 such transposes)
// per iteration and issues software prefetches (the "_prfm" variant).
void xnn_x16_packw_gemm_goi_ukernel_x8__neon_ld4lane_x12_prfm(
  size_t g,
  size_t nc,
  size_t kc,
  size_t nr,
  size_t kr,
  size_t sr,
  const uint16_t* weights,
  const uint16_t* bias,
  uint16_t* packed_weights,
  size_t extra_bytes,
  const void* params)
{
  assert(g != 0);
  assert(nc != 0);
  assert(kc != 0);
  assert(nr == 8);
  assert(kr == 1);
  assert(sr == 1);
  assert(weights != NULL);
  assert(packed_weights != NULL);
  // Lane-load accumulators; lanes are filled by vld4q_lane_u16 below, so no
  // initialization is needed on paths where all 8 lanes get written.
  uint16x8x4_t vtmp0123x01234567;
  uint16x8x4_t vtmp4567x01234567;
  uint16x8x4_t vtmp89ABx01234567;
  do {
    // NC main loop multiple of 8
    const uint16_t* w0 = weights;
    size_t n = nc;
    for (; n >= 8; n -= 8) {
      // Emit the 8 bias values for this tile (zeros when bias == NULL).
      if XNN_LIKELY(bias != NULL) {
        uint16x8_t vb0 = vld1q_u16(bias); bias += 8;
        vst1q_u16(packed_weights, vb0); packed_weights += 8;
      } else {
        const uint16x8_t vzero = vmovq_n_u16(0);
        vst1q_u16(packed_weights, vzero); packed_weights += 8;
      }
      // One read pointer per output channel; rows are kc elements apart.
      const uint16_t* w1 = w0 + kc;
      const uint16_t* w2 = w1 + kc;
      const uint16_t* w3 = w2 + kc;
      const uint16_t* w4 = w3 + kc;
      const uint16_t* w5 = w4 + kc;
      const uint16_t* w6 = w5 + kc;
      const uint16_t* w7 = w6 + kc;
      // Warm the cache with the head of every row before streaming it.
      xnn_prefetch_to_l1((const int8_t*) w0);
      xnn_prefetch_to_l1((const int8_t*) w0 + 64);
      xnn_prefetch_to_l1((const int8_t*) w1);
      xnn_prefetch_to_l1((const int8_t*) w1 + 64);
      xnn_prefetch_to_l1((const int8_t*) w2);
      xnn_prefetch_to_l1((const int8_t*) w2 + 64);
      xnn_prefetch_to_l1((const int8_t*) w3);
      xnn_prefetch_to_l1((const int8_t*) w3 + 64);
      xnn_prefetch_to_l1((const int8_t*) w4);
      xnn_prefetch_to_l1((const int8_t*) w4 + 64);
      xnn_prefetch_to_l1((const int8_t*) w5);
      xnn_prefetch_to_l1((const int8_t*) w5 + 64);
      xnn_prefetch_to_l1((const int8_t*) w6);
      xnn_prefetch_to_l1((const int8_t*) w6 + 64);
      xnn_prefetch_to_l1((const int8_t*) w7);
      xnn_prefetch_to_l1((const int8_t*) w7 + 64);
      // KC main loop multiple of 12
      size_t k = kc;
      for (; k >= 12; k -= 12) {
        // k-elements 0..3 of rows 0..7 -> lanes 0..7 of vtmp0123.
        vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w7, vtmp0123x01234567, 7); w7 += 4;
        // k-elements 4..7.
        vtmp4567x01234567 = vld4q_lane_u16(w0, vtmp4567x01234567, 0); w0 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w1, vtmp4567x01234567, 1); w1 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w2, vtmp4567x01234567, 2); w2 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w3, vtmp4567x01234567, 3); w3 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w4, vtmp4567x01234567, 4); w4 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w5, vtmp4567x01234567, 5); w5 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w6, vtmp4567x01234567, 6); w6 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w7, vtmp4567x01234567, 7); w7 += 4;
        // k-elements 8..11.
        vtmp89ABx01234567 = vld4q_lane_u16(w0, vtmp89ABx01234567, 0); w0 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w1, vtmp89ABx01234567, 1); w1 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w2, vtmp89ABx01234567, 2); w2 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w3, vtmp89ABx01234567, 3); w3 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w4, vtmp89ABx01234567, 4); w4 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w5, vtmp89ABx01234567, 5); w5 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w6, vtmp89ABx01234567, 6); w6 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w7, vtmp89ABx01234567, 7); w7 += 4;
        // Prefetch two cache lines ahead of the read cursors.
        xnn_prefetch_to_l1((const int8_t*) w0 + 128);
        xnn_prefetch_to_l1((const int8_t*) w1 + 128);
        xnn_prefetch_to_l1((const int8_t*) w2 + 128);
        xnn_prefetch_to_l1((const int8_t*) w3 + 128);
        xnn_prefetch_to_l1((const int8_t*) w4 + 128);
        xnn_prefetch_to_l1((const int8_t*) w5 + 128);
        xnn_prefetch_to_l1((const int8_t*) w6 + 128);
        xnn_prefetch_to_l1((const int8_t*) w7 + 128);
        // Store 12 groups of 8 channel-interleaved values (k-order).
        vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp4567x01234567.val[0]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp4567x01234567.val[1]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp4567x01234567.val[2]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp4567x01234567.val[3]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp89ABx01234567.val[0]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp89ABx01234567.val[1]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp89ABx01234567.val[2]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp89ABx01234567.val[3]); packed_weights += 8;
      }
      // KC remainder multiple of 4
      while (k >= 4) {
        vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w7, vtmp0123x01234567, 7); w7 += 4;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
        k -= 4;
      }
      // KC remainder of 1..3
      // Same as main loop but ld1, ld2 or ld3
      if XNN_UNLIKELY(k != 0) {
        assert(k >= 1);
        assert(k <= 3);
        switch (k) {
          // KC remainder of 8x1
          case 1:
          {
            uint16x8_t vtmp0x01234567 = vdupq_n_u16(0);
            vtmp0x01234567 = vld1q_lane_u16(w0, vtmp0x01234567, 0); w0 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w1, vtmp0x01234567, 1); w1 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w2, vtmp0x01234567, 2); w2 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w3, vtmp0x01234567, 3); w3 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w4, vtmp0x01234567, 4); w4 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w5, vtmp0x01234567, 5); w5 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w6, vtmp0x01234567, 6); w6 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w7, vtmp0x01234567, 7); w7 += 1;
            vst1q_u16(packed_weights, vtmp0x01234567); packed_weights += 8;
            break;
          }
          // KC remainder of 8x2
          case 2:
          {
            uint16x8x2_t vtmp01x01234567;
            vtmp01x01234567.val[0] = vdupq_n_u16(0);
            vtmp01x01234567.val[1] = vdupq_n_u16(0);
            vtmp01x01234567 = vld2q_lane_u16(w0, vtmp01x01234567, 0); w0 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w1, vtmp01x01234567, 1); w1 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w2, vtmp01x01234567, 2); w2 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w3, vtmp01x01234567, 3); w3 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w4, vtmp01x01234567, 4); w4 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w5, vtmp01x01234567, 5); w5 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w6, vtmp01x01234567, 6); w6 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w7, vtmp01x01234567, 7); w7 += 2;
            vst1q_u16(packed_weights, vtmp01x01234567.val[0]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp01x01234567.val[1]); packed_weights += 8;
            break;
          }
          // KC remainder of 8x3
          case 3:
          {
            uint16x8x3_t vtmp012x01234567;
            vtmp012x01234567.val[0] = vdupq_n_u16(0);
            vtmp012x01234567.val[1] = vdupq_n_u16(0);
            vtmp012x01234567.val[2] = vdupq_n_u16(0);
            vtmp012x01234567 = vld3q_lane_u16(w0, vtmp012x01234567, 0); w0 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w1, vtmp012x01234567, 1); w1 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w2, vtmp012x01234567, 2); w2 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w3, vtmp012x01234567, 3); w3 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w4, vtmp012x01234567, 4); w4 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w5, vtmp012x01234567, 5); w5 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w6, vtmp012x01234567, 6); w6 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w7, vtmp012x01234567, 7); w7 += 3;
            vst1q_u16(packed_weights, vtmp012x01234567.val[0]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp012x01234567.val[1]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp012x01234567.val[2]); packed_weights += 8;
            break;
          }
          default:
            XNN_UNREACHABLE;
        }
      }
      packed_weights = (uint16_t*) ((uintptr_t) packed_weights + extra_bytes);
      // w7 has been advanced past its row: it points at the next tile's row 0.
      w0 = w7;
    }
    // NC remainder (1..7)
    if XNN_UNLIKELY(n != 0) {
      assert(n >= 1);
      assert(n <= 7);
      // Copy n bias values; lanes n..7 of the bias group are skipped in the
      // bias != NULL path (their contents are padding that consumers ignore).
      if XNN_LIKELY(bias != NULL) {
        size_t nb = n;
        do {
          *packed_weights++ = *bias++;
        } while (--nb != 0);
        packed_weights += (8 - n);
      } else {
        const uint16x8_t vzero = vmovq_n_u16(0);
        vst1q_u16(packed_weights, vzero); packed_weights += 8;
      }
      // NR remainder has less than 8 rows so last row is not loaded
      // Rows at index >= n alias the previous row so loads stay in bounds.
      const uint16_t* w1 = w0 + kc;
      if XNN_UNPREDICTABLE(n < 2) {
        w1 = w0;
      }
      const uint16_t* w2 = w1 + kc;
      if XNN_UNPREDICTABLE(n <= 2) {
        w2 = w1;
      }
      const uint16_t* w3 = w2 + kc;
      if XNN_UNPREDICTABLE(n < 4) {
        w3 = w2;
      }
      const uint16_t* w4 = w3 + kc;
      if XNN_UNPREDICTABLE(n <= 4) {
        w4 = w3;
      }
      const uint16_t* w5 = w4 + kc;
      if XNN_UNPREDICTABLE(n < 6) {
        w5 = w4;
      }
      const uint16_t* w6 = w5 + kc;
      if XNN_UNPREDICTABLE(n <= 6) {
        w6 = w5;
      }
      // KC main loop multiple of 12
      // Only lanes 0..6 are loaded (row 7 does not exist); lane 7 of each
      // stored group carries stale register contents — padding that GEMM
      // consumers never use for real outputs.
      size_t k = kc;
      for (; k >= 12; k -= 12) {
        vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w0, vtmp4567x01234567, 0); w0 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w1, vtmp4567x01234567, 1); w1 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w2, vtmp4567x01234567, 2); w2 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w3, vtmp4567x01234567, 3); w3 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w4, vtmp4567x01234567, 4); w4 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w5, vtmp4567x01234567, 5); w5 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w6, vtmp4567x01234567, 6); w6 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w0, vtmp89ABx01234567, 0); w0 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w1, vtmp89ABx01234567, 1); w1 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w2, vtmp89ABx01234567, 2); w2 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w3, vtmp89ABx01234567, 3); w3 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w4, vtmp89ABx01234567, 4); w4 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w5, vtmp89ABx01234567, 5); w5 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w6, vtmp89ABx01234567, 6); w6 += 4;
        xnn_prefetch_to_l1((const int8_t*) w0 + 128);
        xnn_prefetch_to_l1((const int8_t*) w1 + 128);
        xnn_prefetch_to_l1((const int8_t*) w2 + 128);
        xnn_prefetch_to_l1((const int8_t*) w3 + 128);
        xnn_prefetch_to_l1((const int8_t*) w4 + 128);
        xnn_prefetch_to_l1((const int8_t*) w5 + 128);
        xnn_prefetch_to_l1((const int8_t*) w6 + 128);
        vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp4567x01234567.val[0]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp4567x01234567.val[1]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp4567x01234567.val[2]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp4567x01234567.val[3]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp89ABx01234567.val[0]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp89ABx01234567.val[1]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp89ABx01234567.val[2]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp89ABx01234567.val[3]); packed_weights += 8;
      }
      // KC remainder multiple of 4
      while (k >= 4) {
        vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
        k -= 4;
      }
      // KC remainder of 1..3
      // Same as main loop but ld1, ld2 or ld3
      if XNN_UNLIKELY(k != 0) {
        assert(k >= 1);
        assert(k <= 3);
        switch (k) {
          // KC remainder of 8x1
          case 1:
          {
            uint16x8_t vtmp0x01234567 = vdupq_n_u16(0);
            vtmp0x01234567 = vld1q_lane_u16(w0, vtmp0x01234567, 0); w0 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w1, vtmp0x01234567, 1); w1 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w2, vtmp0x01234567, 2); w2 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w3, vtmp0x01234567, 3); w3 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w4, vtmp0x01234567, 4); w4 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w5, vtmp0x01234567, 5); w5 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w6, vtmp0x01234567, 6); w6 += 1;
            vst1q_u16(packed_weights, vtmp0x01234567); packed_weights += 8;
            break;
          }
          // KC remainder of 8x2
          case 2:
          {
            uint16x8x2_t vtmp01x01234567;
            vtmp01x01234567.val[0] = vdupq_n_u16(0);
            vtmp01x01234567.val[1] = vdupq_n_u16(0);
            vtmp01x01234567 = vld2q_lane_u16(w0, vtmp01x01234567, 0); w0 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w1, vtmp01x01234567, 1); w1 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w2, vtmp01x01234567, 2); w2 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w3, vtmp01x01234567, 3); w3 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w4, vtmp01x01234567, 4); w4 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w5, vtmp01x01234567, 5); w5 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w6, vtmp01x01234567, 6); w6 += 2;
            vst1q_u16(packed_weights, vtmp01x01234567.val[0]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp01x01234567.val[1]); packed_weights += 8;
            break;
          }
          // KC remainder of 8x3
          case 3:
          {
            uint16x8x3_t vtmp012x01234567;
            vtmp012x01234567.val[0] = vdupq_n_u16(0);
            vtmp012x01234567.val[1] = vdupq_n_u16(0);
            vtmp012x01234567.val[2] = vdupq_n_u16(0);
            vtmp012x01234567 = vld3q_lane_u16(w0, vtmp012x01234567, 0); w0 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w1, vtmp012x01234567, 1); w1 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w2, vtmp012x01234567, 2); w2 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w3, vtmp012x01234567, 3); w3 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w4, vtmp012x01234567, 4); w4 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w5, vtmp012x01234567, 5); w5 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w6, vtmp012x01234567, 6); w6 += 3;
            vst1q_u16(packed_weights, vtmp012x01234567.val[0]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp012x01234567.val[1]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp012x01234567.val[2]); packed_weights += 8;
            break;
          }
          default:
            XNN_UNREACHABLE;
        }
      }
      packed_weights = (uint16_t*) ((uintptr_t) packed_weights + extra_bytes);
    }
    // Advance to the weights of the next group.
    weights += nc * kc;
  } while (--g != 0);
}
| 18,677
| 46.047859
| 84
|
c
|
XNNPACK
|
XNNPACK-master/src/x16-packw/gen/x16-packw-x8-gemm-goi-neon-ld4lane-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/x16-packw/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/packw.h>
// Packs 16-bit weights from GOI layout (g groups, nc output channels, kc input
// channels) into the blocked layout consumed by x16 GEMM microkernels with
// nr=8, kr=1, sr=1. For each tile of 8 output channels it writes 8 bias values
// (zeros when bias == NULL), then kc groups of 8 interleaved weights (one per
// output channel), then skips extra_bytes of caller-reserved space.
// The transpose from row-major weights to the interleaved layout is done with
// NEON vld4q_lane loads: each pass reads 4 consecutive k-elements from each of
// the 8 rows into one uint16x8x4_t, whose .val[j] then holds k-column j across
// all 8 rows. The main KC loop performs three such 4-column passes (12
// k-elements) per iteration.
void xnn_x16_packw_gemm_goi_ukernel_x8__neon_ld4lane_x12(
  size_t g,
  size_t nc,
  size_t kc,
  size_t nr,
  size_t kr,
  size_t sr,
  const uint16_t* weights,
  const uint16_t* bias,
  uint16_t* packed_weights,
  size_t extra_bytes,
  const void* params)
{
  assert(g != 0);
  assert(nc != 0);
  assert(kc != 0);
  assert(nr == 8);  // kernel is specialized for an 8-row tile
  assert(kr == 1);
  assert(sr == 1);
  assert(weights != NULL);
  assert(packed_weights != NULL);
  // Scratch registers for the 8x4 lane-load transposes; each is filled
  // lane-by-lane (one lane per source row) before its .val[] vectors are
  // stored. In the full-tile path all 8 lanes are written before any store.
  uint16x8x4_t vtmp0123x01234567;
  uint16x8x4_t vtmp4567x01234567;
  uint16x8x4_t vtmp89ABx01234567;
  do {
    // NC main loop: full tiles of 8 output channels
    const uint16_t* w0 = weights;
    size_t n = nc;
    for (; n >= 8; n -= 8) {
      // Emit the 8-entry bias prefix for this tile (zeros when no bias).
      if XNN_LIKELY(bias != NULL) {
        uint16x8_t vb0 = vld1q_u16(bias); bias += 8;
        vst1q_u16(packed_weights, vb0); packed_weights += 8;
      } else {
        const uint16x8_t vzero = vmovq_n_u16(0);
        vst1q_u16(packed_weights, vzero); packed_weights += 8;
      }
      // Row pointers for the 8 output channels of this tile, kc apart.
      const uint16_t* w1 = w0 + kc;
      const uint16_t* w2 = w1 + kc;
      const uint16_t* w3 = w2 + kc;
      const uint16_t* w4 = w3 + kc;
      const uint16_t* w5 = w4 + kc;
      const uint16_t* w6 = w5 + kc;
      const uint16_t* w7 = w6 + kc;
      // KC main loop: 12 k-elements per iteration (three 8x4 transposes)
      size_t k = kc;
      for (; k >= 12; k -= 12) {
        // k-columns 0..3: lane i of each .val[] vector comes from row i.
        vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w7, vtmp0123x01234567, 7); w7 += 4;
        // k-columns 4..7
        vtmp4567x01234567 = vld4q_lane_u16(w0, vtmp4567x01234567, 0); w0 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w1, vtmp4567x01234567, 1); w1 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w2, vtmp4567x01234567, 2); w2 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w3, vtmp4567x01234567, 3); w3 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w4, vtmp4567x01234567, 4); w4 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w5, vtmp4567x01234567, 5); w5 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w6, vtmp4567x01234567, 6); w6 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w7, vtmp4567x01234567, 7); w7 += 4;
        // k-columns 8..11
        vtmp89ABx01234567 = vld4q_lane_u16(w0, vtmp89ABx01234567, 0); w0 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w1, vtmp89ABx01234567, 1); w1 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w2, vtmp89ABx01234567, 2); w2 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w3, vtmp89ABx01234567, 3); w3 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w4, vtmp89ABx01234567, 4); w4 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w5, vtmp89ABx01234567, 5); w5 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w6, vtmp89ABx01234567, 6); w6 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w7, vtmp89ABx01234567, 7); w7 += 4;
        // Store 12 groups of 8 interleaved weights, in k order.
        vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp4567x01234567.val[0]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp4567x01234567.val[1]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp4567x01234567.val[2]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp4567x01234567.val[3]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp89ABx01234567.val[0]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp89ABx01234567.val[1]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp89ABx01234567.val[2]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp89ABx01234567.val[3]); packed_weights += 8;
      }
      // KC remainder: multiples of 4 (one 8x4 transpose per pass)
      while (k >= 4) {
        vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w7, vtmp0123x01234567, 7); w7 += 4;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
        k -= 4;
      }
      // KC remainder of 1..3 k-elements
      // Same as main loop but ld1, ld2 or ld3; scratch is zero-initialized
      // here, unlike the 4-wide paths.
      if XNN_UNLIKELY(k != 0) {
        assert(k >= 1);
        assert(k <= 3);
        switch (k) {
          // KC remainder of 8x1
          case 1:
          {
            uint16x8_t vtmp0x01234567 = vdupq_n_u16(0);
            vtmp0x01234567 = vld1q_lane_u16(w0, vtmp0x01234567, 0); w0 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w1, vtmp0x01234567, 1); w1 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w2, vtmp0x01234567, 2); w2 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w3, vtmp0x01234567, 3); w3 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w4, vtmp0x01234567, 4); w4 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w5, vtmp0x01234567, 5); w5 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w6, vtmp0x01234567, 6); w6 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w7, vtmp0x01234567, 7); w7 += 1;
            vst1q_u16(packed_weights, vtmp0x01234567); packed_weights += 8;
            break;
          }
          // KC remainder of 8x2
          case 2:
          {
            uint16x8x2_t vtmp01x01234567;
            vtmp01x01234567.val[0] = vdupq_n_u16(0);
            vtmp01x01234567.val[1] = vdupq_n_u16(0);
            vtmp01x01234567 = vld2q_lane_u16(w0, vtmp01x01234567, 0); w0 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w1, vtmp01x01234567, 1); w1 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w2, vtmp01x01234567, 2); w2 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w3, vtmp01x01234567, 3); w3 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w4, vtmp01x01234567, 4); w4 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w5, vtmp01x01234567, 5); w5 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w6, vtmp01x01234567, 6); w6 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w7, vtmp01x01234567, 7); w7 += 2;
            vst1q_u16(packed_weights, vtmp01x01234567.val[0]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp01x01234567.val[1]); packed_weights += 8;
            break;
          }
          // KC remainder of 8x3
          case 3:
          {
            uint16x8x3_t vtmp012x01234567;
            vtmp012x01234567.val[0] = vdupq_n_u16(0);
            vtmp012x01234567.val[1] = vdupq_n_u16(0);
            vtmp012x01234567.val[2] = vdupq_n_u16(0);
            vtmp012x01234567 = vld3q_lane_u16(w0, vtmp012x01234567, 0); w0 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w1, vtmp012x01234567, 1); w1 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w2, vtmp012x01234567, 2); w2 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w3, vtmp012x01234567, 3); w3 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w4, vtmp012x01234567, 4); w4 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w5, vtmp012x01234567, 5); w5 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w6, vtmp012x01234567, 6); w6 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w7, vtmp012x01234567, 7); w7 += 3;
            vst1q_u16(packed_weights, vtmp012x01234567.val[0]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp012x01234567.val[1]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp012x01234567.val[2]); packed_weights += 8;
            break;
          }
          default:
            XNN_UNREACHABLE;
        }
      }
      // Skip caller-reserved space between tiles.
      packed_weights = (uint16_t*) ((uintptr_t) packed_weights + extra_bytes);
      // Advance to the next tile of 8 rows (w7 already walked past row 7).
      w0 = w7;
    }
    // NC remainder (1..7 output channels): pads the tile to 8 columns
    if XNN_UNLIKELY(n != 0) {
      assert(n >= 1);
      assert(n <= 7);
      if XNN_LIKELY(bias != NULL) {
        // Copy the n real bias values; the remaining 8 - n slots of the bias
        // prefix are skipped (left as-is), not zeroed.
        size_t nb = n;
        do {
          *packed_weights++ = *bias++;
        } while (--nb != 0);
        packed_weights += (8 - n);
      } else {
        const uint16x8_t vzero = vmovq_n_u16(0);
        vst1q_u16(packed_weights, vzero); packed_weights += 8;
      }
      // NR remainder has less than 8 rows so last row is not loaded.
      // Rows beyond n alias the last valid row to keep the lane loads in
      // bounds (their values are tile padding).
      const uint16_t* w1 = w0 + kc;
      if XNN_UNPREDICTABLE(n < 2) {
        w1 = w0;
      }
      const uint16_t* w2 = w1 + kc;
      if XNN_UNPREDICTABLE(n <= 2) {
        w2 = w1;
      }
      const uint16_t* w3 = w2 + kc;
      if XNN_UNPREDICTABLE(n < 4) {
        w3 = w2;
      }
      const uint16_t* w4 = w3 + kc;
      if XNN_UNPREDICTABLE(n <= 4) {
        w4 = w3;
      }
      const uint16_t* w5 = w4 + kc;
      if XNN_UNPREDICTABLE(n < 6) {
        w5 = w4;
      }
      const uint16_t* w6 = w5 + kc;
      if XNN_UNPREDICTABLE(n <= 6) {
        w6 = w5;
      }
      // KC main loop multiple of 12 — only lanes 0..6 are loaded here.
      // NOTE(review): lane 7 of the vld4 scratch is never written in this
      // path, so the stored lane-7 values are indeterminate padding for the
      // absent row 7 — presumably never read by the consuming GEMM; confirm
      // against the microkernel contract.
      size_t k = kc;
      for (; k >= 12; k -= 12) {
        vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w0, vtmp4567x01234567, 0); w0 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w1, vtmp4567x01234567, 1); w1 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w2, vtmp4567x01234567, 2); w2 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w3, vtmp4567x01234567, 3); w3 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w4, vtmp4567x01234567, 4); w4 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w5, vtmp4567x01234567, 5); w5 += 4;
        vtmp4567x01234567 = vld4q_lane_u16(w6, vtmp4567x01234567, 6); w6 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w0, vtmp89ABx01234567, 0); w0 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w1, vtmp89ABx01234567, 1); w1 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w2, vtmp89ABx01234567, 2); w2 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w3, vtmp89ABx01234567, 3); w3 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w4, vtmp89ABx01234567, 4); w4 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w5, vtmp89ABx01234567, 5); w5 += 4;
        vtmp89ABx01234567 = vld4q_lane_u16(w6, vtmp89ABx01234567, 6); w6 += 4;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp4567x01234567.val[0]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp4567x01234567.val[1]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp4567x01234567.val[2]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp4567x01234567.val[3]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp89ABx01234567.val[0]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp89ABx01234567.val[1]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp89ABx01234567.val[2]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp89ABx01234567.val[3]); packed_weights += 8;
      }
      // KC remainder multiple of 4 (rows 0..6 only)
      while (k >= 4) {
        vtmp0123x01234567 = vld4q_lane_u16(w0, vtmp0123x01234567, 0); w0 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w1, vtmp0123x01234567, 1); w1 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w2, vtmp0123x01234567, 2); w2 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w3, vtmp0123x01234567, 3); w3 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w4, vtmp0123x01234567, 4); w4 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w5, vtmp0123x01234567, 5); w5 += 4;
        vtmp0123x01234567 = vld4q_lane_u16(w6, vtmp0123x01234567, 6); w6 += 4;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[0]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[1]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[2]); packed_weights += 8;
        vst1q_u16(packed_weights, vtmp0123x01234567.val[3]); packed_weights += 8;
        k -= 4;
      }
      // KC remainder of 1..3
      // Same as main loop but ld1, ld2 or ld3; lane 7 stays zero here since
      // the scratch is zero-initialized.
      if XNN_UNLIKELY(k != 0) {
        assert(k >= 1);
        assert(k <= 3);
        switch (k) {
          // KC remainder of 8x1
          case 1:
          {
            uint16x8_t vtmp0x01234567 = vdupq_n_u16(0);
            vtmp0x01234567 = vld1q_lane_u16(w0, vtmp0x01234567, 0); w0 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w1, vtmp0x01234567, 1); w1 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w2, vtmp0x01234567, 2); w2 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w3, vtmp0x01234567, 3); w3 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w4, vtmp0x01234567, 4); w4 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w5, vtmp0x01234567, 5); w5 += 1;
            vtmp0x01234567 = vld1q_lane_u16(w6, vtmp0x01234567, 6); w6 += 1;
            vst1q_u16(packed_weights, vtmp0x01234567); packed_weights += 8;
            break;
          }
          // KC remainder of 8x2
          case 2:
          {
            uint16x8x2_t vtmp01x01234567;
            vtmp01x01234567.val[0] = vdupq_n_u16(0);
            vtmp01x01234567.val[1] = vdupq_n_u16(0);
            vtmp01x01234567 = vld2q_lane_u16(w0, vtmp01x01234567, 0); w0 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w1, vtmp01x01234567, 1); w1 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w2, vtmp01x01234567, 2); w2 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w3, vtmp01x01234567, 3); w3 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w4, vtmp01x01234567, 4); w4 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w5, vtmp01x01234567, 5); w5 += 2;
            vtmp01x01234567 = vld2q_lane_u16(w6, vtmp01x01234567, 6); w6 += 2;
            vst1q_u16(packed_weights, vtmp01x01234567.val[0]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp01x01234567.val[1]); packed_weights += 8;
            break;
          }
          // KC remainder of 8x3
          case 3:
          {
            uint16x8x3_t vtmp012x01234567;
            vtmp012x01234567.val[0] = vdupq_n_u16(0);
            vtmp012x01234567.val[1] = vdupq_n_u16(0);
            vtmp012x01234567.val[2] = vdupq_n_u16(0);
            vtmp012x01234567 = vld3q_lane_u16(w0, vtmp012x01234567, 0); w0 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w1, vtmp012x01234567, 1); w1 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w2, vtmp012x01234567, 2); w2 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w3, vtmp012x01234567, 3); w3 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w4, vtmp012x01234567, 4); w4 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w5, vtmp012x01234567, 5); w5 += 3;
            vtmp012x01234567 = vld3q_lane_u16(w6, vtmp012x01234567, 6); w6 += 3;
            vst1q_u16(packed_weights, vtmp012x01234567.val[0]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp012x01234567.val[1]); packed_weights += 8;
            vst1q_u16(packed_weights, vtmp012x01234567.val[2]); packed_weights += 8;
            break;
          }
          default:
            XNN_UNREACHABLE;
        }
      }
      packed_weights = (uint16_t*) ((uintptr_t) packed_weights + extra_bytes);
    }
    // Advance to the next group's weights.
    weights += nc * kc;
  } while (--g != 0);
}
| 17,056
| 45.731507
| 84
|
c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.