// File: XNNPACK-master/src/operators/channel-shuffle-nc.c (repo: XNNPACK)
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <xnnpack.h>
#include <xnnpack/allocator.h>
#include <xnnpack/config.h>
#include <xnnpack/operator.h>
#include <xnnpack/operator-type.h>
#include <xnnpack/log.h>
#include <xnnpack/params.h>
static enum xnn_status create_channel_shuffle_nc(
size_t groups,
size_t group_channels,
size_t input_stride,
size_t output_stride,
uint32_t flags,
const struct xnn_zip_config* zip_config,
enum xnn_operator_type operator_type,
xnn_operator_t* channel_shuffle_op_out)
{
xnn_operator_t channel_shuffle_op = NULL;
enum xnn_status status = xnn_status_uninitialized;
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
xnn_operator_type_to_string(operator_type));
goto error;
}
status = xnn_status_invalid_parameter;
if (groups <= 1) {
xnn_log_error(
"failed to create %s operator with %zu groups: at least two groups required",
xnn_operator_type_to_string(operator_type), groups);
goto error;
}
if (group_channels == 0) {
xnn_log_error(
"failed to create %s operator with %zu group channels: number of group channels must be non-zero",
xnn_operator_type_to_string(operator_type), group_channels);
goto error;
}
const size_t channels = groups * group_channels;
if (input_stride < channels) {
xnn_log_error(
"failed to create %s operator with input element stride of %zu: "
"stride must be at least as large as the number of channels (%zux%zu)",
xnn_operator_type_to_string(operator_type), input_stride, groups, group_channels);
goto error;
}
if (output_stride < channels) {
xnn_log_error(
"failed to create %s operator with output element stride of %zu: "
"stride must be at least as large as the number of channels (%zux%zu)",
xnn_operator_type_to_string(operator_type), output_stride, groups, group_channels);
goto error;
}
status = xnn_status_out_of_memory;
channel_shuffle_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
if (channel_shuffle_op == NULL) {
xnn_log_error(
"failed to allocate %zu bytes for %s operator descriptor",
sizeof(struct xnn_operator), xnn_operator_type_to_string(operator_type));
goto error;
}
channel_shuffle_op->groups = groups;
channel_shuffle_op->group_channels = group_channels;
channel_shuffle_op->input_pixel_stride = input_stride;
channel_shuffle_op->output_pixel_stride = output_stride;
channel_shuffle_op->type = operator_type;
channel_shuffle_op->flags = flags;
channel_shuffle_op->zip_config = zip_config;
channel_shuffle_op->state = xnn_run_state_invalid;
*channel_shuffle_op_out = channel_shuffle_op;
return xnn_status_success;
error:
xnn_delete_operator(channel_shuffle_op);
return status;
}
enum xnn_status xnn_create_channel_shuffle_nc_x8(
size_t groups,
size_t group_channels,
size_t input_stride,
size_t output_stride,
uint32_t flags,
xnn_operator_t* channel_shuffle_op_out)
{
const struct xnn_zip_config* zip_config = xnn_init_x8_zip_config();
assert(zip_config != NULL);
return create_channel_shuffle_nc(
groups,
group_channels,
input_stride,
output_stride,
flags,
zip_config,
xnn_operator_type_channel_shuffle_nc_x8,
channel_shuffle_op_out);
}
enum xnn_status xnn_create_channel_shuffle_nc_x32(
size_t groups,
size_t group_channels,
size_t input_stride,
size_t output_stride,
uint32_t flags,
xnn_operator_t* channel_shuffle_op_out)
{
const struct xnn_zip_config* zip_config = xnn_init_x32_zip_config();
if (zip_config == NULL) {
xnn_log_error(
"failed to create %s operator: unsupported hardware configuration",
xnn_operator_type_to_string(xnn_operator_type_channel_shuffle_nc_x32));
return xnn_status_unsupported_hardware;
}
return create_channel_shuffle_nc(
groups,
group_channels,
input_stride,
output_stride,
flags,
zip_config,
xnn_operator_type_channel_shuffle_nc_x32,
channel_shuffle_op_out);
}
static enum xnn_status reshape_channel_shuffle_nc(
xnn_operator_t channel_shuffle_op,
size_t batch_size,
uint32_t log2_element_size,
const struct xnn_zip_config zip[restrict XNN_MIN_ELEMENTS(1)])
{
channel_shuffle_op->state = xnn_run_state_invalid;
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error("failed to reshape %s operator: XNNPACK is not initialized",
xnn_operator_type_to_string(channel_shuffle_op->type));
return xnn_status_uninitialized;
}
if (batch_size == 0) {
channel_shuffle_op->state = xnn_run_state_skip;
return xnn_status_success;
}
channel_shuffle_op->batch_size = batch_size;
const size_t groups = channel_shuffle_op->groups;
channel_shuffle_op->context.channel_shuffle = (struct channel_shuffle_context) {
.x_stride = channel_shuffle_op->input_pixel_stride << log2_element_size,
.y_stride = channel_shuffle_op->output_pixel_stride << log2_element_size,
.n = channel_shuffle_op->group_channels << log2_element_size,
.m = groups,
};
channel_shuffle_op->compute[0].type = xnn_parallelization_type_1d;
channel_shuffle_op->compute[0].range[0] = batch_size;
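  // Groups of 2, 3, or 4 map to specialized fixed-arity zip microkernels;
  // any larger group count falls back to the variable-arity (xm) microkernel.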
  switch (groups) {
    case 0:
    case 1:
      XNN_UNREACHABLE;
    case 2:
      channel_shuffle_op->compute[0].task_1d = (pthreadpool_task_1d_t) xnn_compute_channel_shuffle_fixed;
      channel_shuffle_op->context.channel_shuffle.fixed_ukernel = zip->x2;
      break;
    case 3:
      channel_shuffle_op->compute[0].task_1d = (pthreadpool_task_1d_t) xnn_compute_channel_shuffle_fixed;
      channel_shuffle_op->context.channel_shuffle.fixed_ukernel = zip->x3;
      break;
    case 4:
      channel_shuffle_op->compute[0].task_1d = (pthreadpool_task_1d_t) xnn_compute_channel_shuffle_fixed;
      channel_shuffle_op->context.channel_shuffle.fixed_ukernel = zip->x4;
      break;
    default:
      channel_shuffle_op->compute[0].task_1d = (pthreadpool_task_1d_t) xnn_compute_channel_shuffle_variable;
      channel_shuffle_op->context.channel_shuffle.variable_ukernel = zip->xm;
      break;
  }
channel_shuffle_op->state = xnn_run_state_needs_setup;
return xnn_status_success;
}
enum xnn_status xnn_reshape_channel_shuffle_nc_x8(
xnn_operator_t channel_shuffle_op,
size_t batch_size,
pthreadpool_t threadpool)
{
if (channel_shuffle_op->type != xnn_operator_type_channel_shuffle_nc_x8) {
xnn_log_error("failed to reshape operator: operator type mismatch (expected %s, got %s)",
xnn_operator_type_to_string(xnn_operator_type_channel_shuffle_nc_x8),
xnn_operator_type_to_string(channel_shuffle_op->type));
return xnn_status_invalid_parameter;
}
return reshape_channel_shuffle_nc(
channel_shuffle_op,
batch_size,
/*log2_element_size=*/XNN_LOG2_SIZEOF_UINT8_T,
channel_shuffle_op->zip_config);
}
enum xnn_status xnn_reshape_channel_shuffle_nc_x32(
xnn_operator_t channel_shuffle_op,
size_t batch_size,
pthreadpool_t threadpool)
{
if (channel_shuffle_op->type != xnn_operator_type_channel_shuffle_nc_x32) {
xnn_log_error("failed to reshape operator: operator type mismatch (expected %s, got %s)",
xnn_operator_type_to_string(xnn_operator_type_channel_shuffle_nc_x32),
xnn_operator_type_to_string(channel_shuffle_op->type));
return xnn_status_invalid_parameter;
}
return reshape_channel_shuffle_nc(
channel_shuffle_op,
batch_size,
/*log2_element_size=*/XNN_LOG2_SIZEOF_UINT32_T,
channel_shuffle_op->zip_config);
}
static enum xnn_status setup_channel_shuffle_nc(
xnn_operator_t channel_shuffle_op,
const void* input,
void* output)
{
switch (channel_shuffle_op->state) {
case xnn_run_state_skip:
return xnn_status_success;
case xnn_run_state_invalid:
xnn_log_error(
"failed to setup %s operator: operator has not been reshaped yet",
xnn_operator_type_to_string(channel_shuffle_op->type));
return xnn_status_invalid_state;
case xnn_run_state_needs_setup:
// Operator has been reshaped, but not setup, continue with setup.
case xnn_run_state_ready:
// Operator has been reshaped, and we are setting up with different pointers.
break;
}
channel_shuffle_op->context.channel_shuffle.x = input;
channel_shuffle_op->context.channel_shuffle.y = output;
channel_shuffle_op->state = xnn_run_state_ready;
return xnn_status_success;
}
enum xnn_status xnn_setup_channel_shuffle_nc_x8(
xnn_operator_t channel_shuffle_op,
const void* input,
void* output)
{
if (channel_shuffle_op->type != xnn_operator_type_channel_shuffle_nc_x8) {
xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
xnn_operator_type_to_string(xnn_operator_type_channel_shuffle_nc_x8),
xnn_operator_type_to_string(channel_shuffle_op->type));
return xnn_status_invalid_parameter;
}
return setup_channel_shuffle_nc(
channel_shuffle_op,
input,
output);
}
enum xnn_status xnn_setup_channel_shuffle_nc_x32(
xnn_operator_t channel_shuffle_op,
const void* input,
void* output)
{
if (channel_shuffle_op->type != xnn_operator_type_channel_shuffle_nc_x32) {
xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
xnn_operator_type_to_string(xnn_operator_type_channel_shuffle_nc_x32),
xnn_operator_type_to_string(channel_shuffle_op->type));
return xnn_status_invalid_parameter;
}
return setup_channel_shuffle_nc(
channel_shuffle_op,
input,
output);
}
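// Usage sketch (not part of the original file): one way to drive the x8
// channel-shuffle operator defined above, assuming the public XNNPACK entry
// points xnn_initialize, xnn_run_operator, and xnn_delete_operator. The
// group/channel sizes and buffers are illustrative.
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>

static enum xnn_status run_channel_shuffle_example(void) {
  enum xnn_status status = xnn_initialize(/*allocator=*/NULL);
  if (status != xnn_status_success) {
    return status;
  }
  // Shuffle 3 groups of 4 channels (12 channels total) across a batch of 2.
  xnn_operator_t op = NULL;
  status = xnn_create_channel_shuffle_nc_x8(
    /*groups=*/3, /*group_channels=*/4,
    /*input_stride=*/12, /*output_stride=*/12,
    /*flags=*/0, &op);
  if (status != xnn_status_success) {
    return status;
  }
  uint8_t input[2 * 12];
  uint8_t output[2 * 12];
  for (size_t i = 0; i < 2 * 12; i++) {
    input[i] = (uint8_t) i;
  }
  // Reshape binds the batch size; setup binds the pointers; run executes.
  status = xnn_reshape_channel_shuffle_nc_x8(op, /*batch_size=*/2, /*threadpool=*/NULL);
  if (status == xnn_status_success) {
    status = xnn_setup_channel_shuffle_nc_x8(op, input, output);
  }
  if (status == xnn_status_success) {
    status = xnn_run_operator(op, /*threadpool=*/NULL);
  }
  xnn_delete_operator(op);
  return status;
}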
// File: XNNPACK-master/src/operators/lut-elementwise-nc.c (repo: XNNPACK)
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <inttypes.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <xnnpack.h>
#include <xnnpack/allocator.h>
#include <xnnpack/config.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/operator-type.h>
#include <xnnpack/params.h>
static bool is_contiguous(xnn_operator_t lut_elementwise_op)
{
  const size_t channels = lut_elementwise_op->channels;
  const size_t input_stride = lut_elementwise_op->input_pixel_stride;
  const size_t output_stride = lut_elementwise_op->output_pixel_stride;
  const size_t batch_size = lut_elementwise_op->batch_size;
  // Contiguous if both strides equal the channel count (each XOR is zero),
  // or if there is a single batch element, so strides never come into play.
  return (((input_stride ^ channels) | (output_stride ^ channels)) == 0) || batch_size == 1;
}
typedef float (*xnn_lut_init_fn)(float, const void*);
static enum xnn_status create_lut_elementwise_nc(
size_t channels,
size_t input_stride,
size_t output_stride,
int32_t input_zero_point,
float input_scale,
int32_t input_min,
long output_zero_point,
float output_scale,
long output_min,
long output_max,
uint32_t flags,
xnn_lut_init_fn init_fn,
const void* init_params,
enum xnn_operator_type operator_type,
xnn_operator_t* lut_elementwise_op_out)
{
xnn_operator_t lut_elementwise_op = NULL;
enum xnn_status status = xnn_status_uninitialized;
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
xnn_operator_type_to_string(operator_type));
goto error;
}
status = xnn_status_invalid_parameter;
if (channels == 0) {
xnn_log_error(
"failed to create %s operator with %zu channels: number of channels must be non-zero",
xnn_operator_type_to_string(operator_type), channels);
goto error;
}
if (input_stride < channels) {
xnn_log_error(
"failed to create %s operator with input element stride of %zu: "
"stride must be at least as large as the number of channels (%zu)",
xnn_operator_type_to_string(operator_type), input_stride, channels);
goto error;
}
if (output_stride < channels) {
xnn_log_error(
"failed to create %s operator with output element stride of %zu: "
"stride must be at least as large as the number of channels (%zu)",
xnn_operator_type_to_string(operator_type), output_stride, channels);
goto error;
}
if (input_scale <= 0.0f || !isnormal(input_scale)) {
xnn_log_error(
"failed to create %s operator with %.7g input scale: scale must be finite, normalized, and positive",
xnn_operator_type_to_string(operator_type), input_scale);
goto error;
}
if (output_scale <= 0.0f || !isnormal(output_scale)) {
xnn_log_error(
"failed to create %s operator with %.7g output scale: scale must be finite, normalized, and positive",
xnn_operator_type_to_string(operator_type), output_scale);
goto error;
}
if (output_min >= output_max) {
xnn_log_error(
"failed to create %s operator with [%ld, %ld] output range: range min must be below range max",
xnn_operator_type_to_string(operator_type), output_min, output_max);
goto error;
}
const struct xnn_x8_lut_config* lut_config = xnn_init_x8_lut_config();
assert(lut_config != NULL);
status = xnn_status_out_of_memory;
lut_elementwise_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
if (lut_elementwise_op == NULL) {
xnn_log_error(
"failed to allocate %zu bytes for %s operator descriptor",
sizeof(struct xnn_operator), xnn_operator_type_to_string(operator_type));
goto error;
}
lut_elementwise_op->lookup_table = xnn_allocate_simd_memory(256 * sizeof(uint8_t));
if (lut_elementwise_op->lookup_table == NULL) {
xnn_log_error(
"failed to allocate 256 bytes for %s operator lookup table",
xnn_operator_type_to_string(operator_type));
goto error;
}
uint8_t* lookup_table = lut_elementwise_op->lookup_table;
const float inv_output_scale = 1.0f / output_scale;
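  // Build the 256-entry table: for each representable quantized input i,
  // dequantize it, apply the elementwise function, requantize against the
  // output scale, and clamp to the output range. Indexing with (uint8_t) i
  // lets the same table serve signed inputs, whose bit patterns alias the
  // unsigned indices.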
for (int32_t i = input_min; i < input_min + 256; i++) {
const float dequantized_input = (i - input_zero_point) * input_scale;
const float dequantized_output = init_fn(dequantized_input, init_params);
long quantized_output = lrintf(dequantized_output * inv_output_scale) + output_zero_point;
quantized_output = XNN_UNPREDICTABLE(quantized_output < output_min) ? output_min : quantized_output;
quantized_output = XNN_UNPREDICTABLE(quantized_output > output_max) ? output_max : quantized_output;
lookup_table[(uint8_t) i] = (uint8_t) quantized_output;
}
lut_elementwise_op->channels = channels;
lut_elementwise_op->input_pixel_stride = input_stride;
lut_elementwise_op->output_pixel_stride = output_stride;
lut_elementwise_op->type = operator_type;
lut_elementwise_op->flags = flags;
lut_elementwise_op->lut_config = lut_config;
lut_elementwise_op->state = xnn_run_state_invalid;
*lut_elementwise_op_out = lut_elementwise_op;
return xnn_status_success;
error:
xnn_delete_operator(lut_elementwise_op);
return status;
}
static float calculate_elu(float x, const float* alpha_ptr) {
const float alpha = *alpha_ptr;
return signbit(x) ? alpha * expm1f(x) : x;
}
enum xnn_status xnn_create_elu_nc_qs8(
size_t channels,
size_t input_stride,
size_t output_stride,
float alpha,
int8_t input_zero_point,
float input_scale,
int8_t output_zero_point,
float output_scale,
int8_t output_min,
int8_t output_max,
uint32_t flags,
xnn_operator_t* elu_op_out)
{
if (alpha <= 0.0f || !isnormal(alpha)) {
xnn_log_error(
"failed to create %s operator with %.7g alpha parameter: alpha must be finite, normalized, and positive",
xnn_operator_type_to_string(xnn_operator_type_elu_nc_qs8), alpha);
return xnn_status_invalid_parameter;
}
return create_lut_elementwise_nc(
channels, input_stride, output_stride,
(int32_t) input_zero_point, input_scale, INT8_MIN,
(long) output_zero_point, output_scale,
(long) output_min, (long) output_max,
flags,
(xnn_lut_init_fn) &calculate_elu, &alpha,
xnn_operator_type_elu_nc_qs8, elu_op_out);
}
static float calculate_sigmoid(float x, const void* params) {
return signbit(x) ? 1.0f / (1.0f + expf(-x)) : 1.0f - 1.0f / (1.0f + expf(x));
}
enum xnn_status xnn_create_sigmoid_nc_qs8(
size_t channels,
size_t input_stride,
size_t output_stride,
int8_t input_zero_point,
float input_scale,
int8_t output_zero_point,
float output_scale,
int8_t output_min,
int8_t output_max,
uint32_t flags,
xnn_operator_t* sigmoid_op_out)
{
if (output_scale != 0x1.0p-8f) {
xnn_log_error(
"failed to create %s operator with %.7g output scale: only output scale of 1/256 is supported",
xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_qs8), output_scale);
return xnn_status_unsupported_parameter;
}
if (output_zero_point != -128) {
xnn_log_error(
"failed to create %s operator with %" PRIu8 " output zero point: only output zero point of -128 is supported",
xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_qs8), output_zero_point);
return xnn_status_unsupported_parameter;
}
return create_lut_elementwise_nc(
channels, input_stride, output_stride,
(int32_t) input_zero_point, input_scale, INT8_MIN,
(long) output_zero_point, output_scale,
(long) output_min, (long) output_max,
flags,
(xnn_lut_init_fn) &calculate_sigmoid, NULL,
xnn_operator_type_sigmoid_nc_qs8, sigmoid_op_out);
}
enum xnn_status xnn_create_sigmoid_nc_qu8(
size_t channels,
size_t input_stride,
size_t output_stride,
uint8_t input_zero_point,
float input_scale,
uint8_t output_zero_point,
float output_scale,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
xnn_operator_t* sigmoid_op_out)
{
if (output_scale != 0x1.0p-8f) {
xnn_log_error(
"failed to create %s operator with %.7g output scale: only output scale of 1/256 is supported",
xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_qu8), output_scale);
return xnn_status_unsupported_parameter;
}
if (output_zero_point != 0) {
xnn_log_error(
"failed to create %s operator with %" PRIu8 " output zero point: only output zero point of 0 is supported",
xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_qu8), output_zero_point);
return xnn_status_unsupported_parameter;
}
return create_lut_elementwise_nc(
channels, input_stride, output_stride,
(int32_t) (uint32_t) input_zero_point, input_scale, 0 /* input min */,
(long) (unsigned long) output_zero_point, output_scale,
(long) (unsigned long) output_min, (long) (unsigned long) output_max,
flags,
(xnn_lut_init_fn) &calculate_sigmoid, NULL,
xnn_operator_type_sigmoid_nc_qu8, sigmoid_op_out);
}
static float calculate_tanh(float x, const void* params) {
return tanhf(x);
}
enum xnn_status xnn_create_tanh_nc_qs8(
size_t channels,
size_t input_stride,
size_t output_stride,
int8_t input_zero_point,
float input_scale,
int8_t output_zero_point,
float output_scale,
int8_t output_min,
int8_t output_max,
uint32_t flags,
xnn_operator_t* tanh_op_out)
{
if (output_scale != 0x1.0p-7f) {
xnn_log_error(
"failed to create %s operator with %.7g output scale: only output scale of 1/128 is supported",
xnn_operator_type_to_string(xnn_operator_type_tanh_nc_qs8), output_scale);
return xnn_status_unsupported_parameter;
}
if (output_zero_point != 0) {
xnn_log_error(
"failed to create %s operator with %" PRIu8 " output zero point: only output zero point of 0 is supported",
xnn_operator_type_to_string(xnn_operator_type_tanh_nc_qs8), output_zero_point);
return xnn_status_unsupported_parameter;
}
return create_lut_elementwise_nc(
channels, input_stride, output_stride,
(int32_t) input_zero_point, input_scale, INT8_MIN,
(long) output_zero_point, output_scale,
(long) output_min, (long) output_max,
flags,
(xnn_lut_init_fn) &calculate_tanh, NULL,
xnn_operator_type_tanh_nc_qs8, tanh_op_out);
}
enum xnn_status xnn_create_tanh_nc_qu8(
size_t channels,
size_t input_stride,
size_t output_stride,
uint8_t input_zero_point,
float input_scale,
uint8_t output_zero_point,
float output_scale,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
xnn_operator_t* tanh_op_out)
{
if (output_scale != 0x1.0p-7f) {
xnn_log_error(
"failed to create %s operator with %.7g output scale: only output scale of 1/128 is supported",
xnn_operator_type_to_string(xnn_operator_type_tanh_nc_qu8), output_scale);
return xnn_status_unsupported_parameter;
}
if (output_zero_point != 128) {
xnn_log_error(
"failed to create %s operator with %" PRIu8 " output zero point: only output zero point of 128 is supported",
xnn_operator_type_to_string(xnn_operator_type_tanh_nc_qu8), output_zero_point);
return xnn_status_unsupported_parameter;
}
return create_lut_elementwise_nc(
channels, input_stride, output_stride,
(int32_t) (uint32_t) input_zero_point, input_scale, 0 /* input min */,
(long) (unsigned long) output_zero_point, output_scale,
(long) (unsigned long) output_min, (long) (unsigned long) output_max,
flags,
(xnn_lut_init_fn) &calculate_tanh, NULL,
xnn_operator_type_tanh_nc_qu8, tanh_op_out);
}
static enum xnn_status reshape_lut_elementwise_nc(
xnn_operator_t lut_elementwise_op,
enum xnn_operator_type expected_operator_type,
size_t batch_size,
size_t num_threads)
{
if (lut_elementwise_op->type != expected_operator_type) {
xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
xnn_operator_type_to_string(expected_operator_type),
xnn_operator_type_to_string(lut_elementwise_op->type));
return xnn_status_invalid_parameter;
}
lut_elementwise_op->state = xnn_run_state_invalid;
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error(
"failed to setup %s operator: XNNPACK is not initialized",
xnn_operator_type_to_string(expected_operator_type));
return xnn_status_uninitialized;
}
if (batch_size == 0) {
lut_elementwise_op->state = xnn_run_state_skip;
return xnn_status_success;
}
lut_elementwise_op->batch_size = batch_size;
const struct xnn_x8_lut_config* lut_config = lut_elementwise_op->lut_config;
const size_t channels = lut_elementwise_op->channels;
const size_t input_stride = lut_elementwise_op->input_pixel_stride;
const size_t output_stride = lut_elementwise_op->output_pixel_stride;
if (is_contiguous(lut_elementwise_op)) {
const size_t block_size = 1024;
lut_elementwise_op->context.lut_contiguous = (struct lut_contiguous_context) {
.x_stride = input_stride * sizeof(uint8_t),
.t = lut_elementwise_op->lookup_table,
.y_stride = output_stride * sizeof(uint8_t),
.ukernel = lut_config->microkernel,
};
const size_t range = batch_size * channels * sizeof(uint8_t);
lut_elementwise_op->compute[0].type = xnn_parallelization_type_1d_tile_1d;
lut_elementwise_op->compute[0].task_1d_tile_1d = (pthreadpool_task_1d_tile_1d_t) xnn_compute_lut_contiguous;
lut_elementwise_op->compute[0].range[0] = range;
lut_elementwise_op->compute[0].tile[0] = (num_threads == 1) ? range : block_size * sizeof(uint8_t);
} else {
lut_elementwise_op->context.lut_strided = (struct lut_strided_context) {
.n = channels * sizeof(uint8_t),
.x_stride = input_stride * sizeof(uint8_t),
.t = lut_elementwise_op->lookup_table,
.y_stride = output_stride * sizeof(uint8_t),
.ukernel = lut_config->microkernel,
};
lut_elementwise_op->compute[0].type = xnn_parallelization_type_1d;
lut_elementwise_op->compute[0].task_1d = (pthreadpool_task_1d_t) xnn_compute_lut_strided;
lut_elementwise_op->compute[0].range[0] = batch_size;
}
lut_elementwise_op->state = xnn_run_state_needs_setup;
return xnn_status_success;
}
enum xnn_status xnn_reshape_elu_nc_qs8(
xnn_operator_t elu_op,
size_t batch_size,
pthreadpool_t threadpool)
{
return reshape_lut_elementwise_nc(
elu_op, xnn_operator_type_elu_nc_qs8,
batch_size,
pthreadpool_get_threads_count(threadpool));
}
enum xnn_status xnn_reshape_sigmoid_nc_qs8(
xnn_operator_t sigmoid_op,
size_t batch_size,
pthreadpool_t threadpool)
{
return reshape_lut_elementwise_nc(
sigmoid_op, xnn_operator_type_sigmoid_nc_qs8,
batch_size,
pthreadpool_get_threads_count(threadpool));
}
enum xnn_status xnn_reshape_sigmoid_nc_qu8(
xnn_operator_t sigmoid_op,
size_t batch_size,
pthreadpool_t threadpool)
{
return reshape_lut_elementwise_nc(
sigmoid_op, xnn_operator_type_sigmoid_nc_qu8,
batch_size,
pthreadpool_get_threads_count(threadpool));
}
enum xnn_status xnn_reshape_tanh_nc_qs8(
xnn_operator_t tanh_op,
size_t batch_size,
pthreadpool_t threadpool)
{
return reshape_lut_elementwise_nc(
tanh_op, xnn_operator_type_tanh_nc_qs8,
batch_size,
pthreadpool_get_threads_count(threadpool));
}
enum xnn_status xnn_reshape_tanh_nc_qu8(
xnn_operator_t tanh_op,
size_t batch_size,
pthreadpool_t threadpool)
{
return reshape_lut_elementwise_nc(
tanh_op, xnn_operator_type_tanh_nc_qu8,
batch_size,
pthreadpool_get_threads_count(threadpool));
}
static enum xnn_status setup_lut_elementwise_nc(
xnn_operator_t lut_elementwise_op,
enum xnn_operator_type expected_operator_type,
const void* input,
void* output)
{
if (lut_elementwise_op->type != expected_operator_type) {
xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
xnn_operator_type_to_string(expected_operator_type),
xnn_operator_type_to_string(lut_elementwise_op->type));
return xnn_status_invalid_parameter;
}
switch (lut_elementwise_op->state) {
case xnn_run_state_skip:
return xnn_status_success;
case xnn_run_state_invalid:
xnn_log_error(
"failed to setup %s operator: operator has not been reshaped yet",
xnn_operator_type_to_string(lut_elementwise_op->type));
return xnn_status_invalid_state;
case xnn_run_state_needs_setup:
// Operator has been reshaped, but not setup, continue with setup.
case xnn_run_state_ready:
// Operator has been reshaped, and we are setting up with different pointers.
break;
}
if (is_contiguous(lut_elementwise_op)) {
lut_elementwise_op->context.lut_contiguous.x = input;
lut_elementwise_op->context.lut_contiguous.y = output;
} else {
lut_elementwise_op->context.lut_strided.x = input;
lut_elementwise_op->context.lut_strided.y = output;
}
lut_elementwise_op->state = xnn_run_state_ready;
return xnn_status_success;
}
enum xnn_status xnn_setup_elu_nc_qs8(
xnn_operator_t elu_op,
const int8_t* input,
int8_t* output)
{
return setup_lut_elementwise_nc(
elu_op, xnn_operator_type_elu_nc_qs8,
input, output);
}
enum xnn_status xnn_setup_sigmoid_nc_qs8(
xnn_operator_t sigmoid_op,
const int8_t* input,
int8_t* output)
{
return setup_lut_elementwise_nc(
sigmoid_op, xnn_operator_type_sigmoid_nc_qs8,
input, output);
}
enum xnn_status xnn_setup_sigmoid_nc_qu8(
xnn_operator_t sigmoid_op,
const uint8_t* input,
uint8_t* output)
{
return setup_lut_elementwise_nc(
sigmoid_op, xnn_operator_type_sigmoid_nc_qu8,
input, output);
}
enum xnn_status xnn_setup_tanh_nc_qs8(
xnn_operator_t tanh_op,
const int8_t* input,
int8_t* output)
{
return setup_lut_elementwise_nc(
tanh_op, xnn_operator_type_tanh_nc_qs8,
input, output);
}
enum xnn_status xnn_setup_tanh_nc_qu8(
xnn_operator_t tanh_op,
const uint8_t* input,
uint8_t* output)
{
return setup_lut_elementwise_nc(
tanh_op, xnn_operator_type_tanh_nc_qu8,
input, output);
}
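// Usage sketch (not part of the original file): a QS8 sigmoid round trip
// through the LUT operator defined above. The quantization parameters follow
// the constraints the create function enforces (output scale 1/256, output
// zero point -128); the input quantization is an illustrative choice.
#include <stddef.h>
#include <stdint.h>
#include <xnnpack.h>

static enum xnn_status run_sigmoid_qs8_example(void) {
  enum xnn_status status = xnn_initialize(/*allocator=*/NULL);
  if (status != xnn_status_success) {
    return status;
  }
  xnn_operator_t op = NULL;
  status = xnn_create_sigmoid_nc_qs8(
    /*channels=*/8, /*input_stride=*/8, /*output_stride=*/8,
    /*input_zero_point=*/0, /*input_scale=*/0.125f,
    /*output_zero_point=*/-128, /*output_scale=*/0x1.0p-8f,
    /*output_min=*/INT8_MIN, /*output_max=*/INT8_MAX,
    /*flags=*/0, &op);
  if (status != xnn_status_success) {
    return status;
  }
  int8_t input[4 * 8];
  int8_t output[4 * 8];
  for (size_t i = 0; i < 4 * 8; i++) {
    input[i] = (int8_t) (i - 16);
  }
  status = xnn_reshape_sigmoid_nc_qs8(op, /*batch_size=*/4, /*threadpool=*/NULL);
  if (status == xnn_status_success) {
    status = xnn_setup_sigmoid_nc_qs8(op, input, output);
  }
  if (status == xnn_status_success) {
    status = xnn_run_operator(op, /*threadpool=*/NULL);
  }
  xnn_delete_operator(op);
  return status;
}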
// File: XNNPACK-master/src/operators/post-operation.c (repo: XNNPACK)
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <string.h>
#include <xnnpack/allocator.h>
#include <xnnpack/config.h>
#include <xnnpack/microparams.h>
#include <xnnpack/params.h>
#include <xnnpack/post-operation.h>
char* allocate_and_initialize_post_operation_params(
size_t num_post_operations,
const struct xnn_post_operation* post_operations) {
union {
union xnn_f32_hswish_params hswish_params;
} post_op_params; // Anonymous union to hold params of all valid post operations.
// Calculate how much space all post operation params will take.
size_t total_size = 0;
for (size_t i = 0; i < num_post_operations; i++) {
const struct xnn_post_operation post_op = post_operations[i];
switch (post_op.op_type) {
case xnn_post_operation_type_hardswish:
{
const struct xnn_unary_elementwise_config* f32_hswish_config = xnn_init_f32_hswish_config();
if (f32_hswish_config->init.f32_hswish != NULL) {
total_size += f32_hswish_config->init.f32_hswish(&post_op_params.hswish_params);
}
break;
}
default:
XNN_UNREACHABLE;
}
}
// Copy all params compactly into post_operation_params.
  char* post_operation_params = xnn_allocate_zero_memory(total_size);
  if (post_operation_params == NULL) {
    return NULL;
  }
  char* cur_params = post_operation_params;
for (size_t i = 0; i < num_post_operations; i++) {
const struct xnn_post_operation post_op = post_operations[i];
switch (post_op.op_type) {
case xnn_post_operation_type_hardswish:
{
const struct xnn_unary_elementwise_config* f32_hswish_config = xnn_init_f32_hswish_config();
        if (f32_hswish_config->init.f32_hswish != NULL) {
const size_t initialized_size = f32_hswish_config->init.f32_hswish(&post_op_params.hswish_params);
memcpy(cur_params, &post_op_params.hswish_params, initialized_size);
cur_params += initialized_size;
}
break;
}
default:
XNN_UNREACHABLE;
}
}
return post_operation_params;
}
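// Usage sketch (not part of the original file): building the compact parameter
// blob for a single hardswish post-operation. struct xnn_post_operation and
// xnn_post_operation_type_hardswish come from xnnpack/post-operation.h as used
// above; releasing the blob with xnn_release_memory is an assumption about the
// matching allocator call.
static char* make_hardswish_post_op_params(void) {
  const struct xnn_post_operation post_ops[1] = {
    { .op_type = xnn_post_operation_type_hardswish },
  };
  // The returned blob holds each post-op's microkernel params back to back.
  return allocate_and_initialize_post_operation_params(1, post_ops);
}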
// File: XNNPACK-master/src/operators/prelu-nc.c (repo: XNNPACK)
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <xnnpack.h>
#include <xnnpack/allocator.h>
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/cache.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/operator-type.h>
#include <xnnpack/operator-utils.h>
#include <xnnpack/pack.h>
#include <xnnpack/microparams-init.h>
#include <xnnpack/params.h>
static enum xnn_status create_prelu_nc(
size_t channels,
size_t input_stride,
size_t output_stride,
const void* negative_slope,
uint32_t flags,
uint32_t log2_weights_element_size,
xnn_pack_prelu_w_fn pack_prelu_w,
enum xnn_operator_type operator_type,
const struct xnn_prelu_config* prelu_config,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* prelu_op_out)
{
xnn_operator_t prelu_op = NULL;
enum xnn_status status = xnn_status_uninitialized;
  if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
      xnn_operator_type_to_string(operator_type));
    goto error;
  }
status = xnn_status_invalid_parameter;
if (channels == 0) {
xnn_log_error(
"failed to create %s operator with %zu channels: number of channels must be non-zero",
xnn_operator_type_to_string(operator_type), channels);
goto error;
}
if (input_stride < channels) {
xnn_log_error(
"failed to create %s operator with input element stride of %zu: "
"stride must be at least as large as the number of channels (%zu)",
xnn_operator_type_to_string(operator_type), input_stride, channels);
goto error;
}
if (output_stride < channels) {
xnn_log_error(
"failed to create %s operator with output element stride of %zu: "
"stride must be at least as large as the number of channels (%zu)",
xnn_operator_type_to_string(operator_type), output_stride, channels);
goto error;
}
status = xnn_status_out_of_memory;
prelu_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
if (prelu_op == NULL) {
xnn_log_error(
"failed to allocate %zu bytes for %s operator descriptor",
sizeof(struct xnn_operator), xnn_operator_type_to_string(operator_type));
goto error;
}
prelu_op->weights_cache = weights_cache;
const size_t packed_weights_size = (channels << log2_weights_element_size) + XNN_EXTRA_BYTES;
const size_t aligned_total_weights_size = round_up_po2(packed_weights_size, XNN_ALLOCATION_ALIGNMENT);
  void* weights_ptr = xnn_get_pointer_to_write_weights(prelu_op, aligned_total_weights_size, 0);
  if (weights_ptr == NULL) {
    xnn_log_error(
      "failed to reserve %zu bytes for packed weights in %s operator",
      aligned_total_weights_size, xnn_operator_type_to_string(operator_type));
    goto error;
  }
  xnn_log_debug("allocated %zu bytes for packed weights in %s operator",
    aligned_total_weights_size, xnn_operator_type_to_string(operator_type));
pack_prelu_w(channels, negative_slope, weights_ptr);
if (use_weights_cache(prelu_op)) {
prelu_op->packed_weights.offset = xnn_get_or_insert_weights_cache(
prelu_op->weights_cache, weights_ptr, aligned_total_weights_size);
}
prelu_op->channels = channels;
prelu_op->input_pixel_stride = input_stride;
prelu_op->output_pixel_stride = output_stride;
prelu_op->type = operator_type;
prelu_op->flags = flags;
prelu_op->prelu_config = prelu_config;
prelu_op->state = xnn_run_state_invalid;
*prelu_op_out = prelu_op;
return xnn_status_success;
error:
xnn_delete_operator(prelu_op);
return status;
}
enum xnn_status xnn_create_prelu_nc_f16(
size_t channels,
size_t input_stride,
size_t output_stride,
const void* negative_slope,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* prelu_op_out)
{
xnn_pack_prelu_w_fn pack_prelu_w = (xnn_pack_prelu_w_fn) xnn_pack_f16_prelu_w;
if (flags & XNN_FLAG_FP32_STATIC_WEIGHTS) {
pack_prelu_w = (xnn_pack_prelu_w_fn) xnn_pack_f32_to_f16_prelu_w;
}
const struct xnn_prelu_config* prelu_config = xnn_init_f16_prelu_config();
if (prelu_config == NULL) {
xnn_log_error("failed to create %s operator: unsupported hardware configuration",
xnn_operator_type_to_string(xnn_operator_type_prelu_nc_f16));
return xnn_status_unsupported_hardware;
}
return create_prelu_nc(
channels, input_stride, output_stride,
negative_slope, flags,
/*log2_weights_element_size=*/XNN_LOG2_SIZEOF_HALF,
pack_prelu_w,
xnn_operator_type_prelu_nc_f16,
prelu_config,
/*code_cache=*/code_cache,
/*weights_cache=*/weights_cache,
prelu_op_out);
}
enum xnn_status xnn_create_prelu_nc_f32(
size_t channels,
size_t input_stride,
size_t output_stride,
const float* negative_slope,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* prelu_op_out)
{
const struct xnn_prelu_config* prelu_config = xnn_init_f32_prelu_config();
if (prelu_config == NULL) {
xnn_log_error("failed to create %s operator: unsupported hardware configuration",
xnn_operator_type_to_string(xnn_operator_type_prelu_nc_f32));
return xnn_status_unsupported_hardware;
}
return create_prelu_nc(
channels, input_stride, output_stride,
negative_slope, flags,
/*log2_weights_element_size=*/XNN_LOG2_SIZEOF_FLOAT,
(xnn_pack_prelu_w_fn) xnn_pack_f32_prelu_w,
xnn_operator_type_prelu_nc_f32,
prelu_config,
/*code_cache=*/code_cache,
/*weights_cache=*/weights_cache,
prelu_op_out);
}
static enum xnn_status reshape_prelu_nc(
xnn_operator_t prelu_op,
enum xnn_operator_type expected_operator_type,
size_t batch_size,
uint32_t log2_element_size,
size_t num_threads)
{
if (prelu_op->type != expected_operator_type) {
xnn_log_error("failed to reshape operator: operator type mismatch (expected %s, got %s)",
xnn_operator_type_to_string(expected_operator_type),
xnn_operator_type_to_string(prelu_op->type));
return xnn_status_invalid_parameter;
}
prelu_op->state = xnn_run_state_invalid;
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error("failed to reshape %s operator: XNNPACK is not initialized",
xnn_operator_type_to_string(expected_operator_type));
return xnn_status_uninitialized;
}
if (batch_size == 0) {
prelu_op->state = xnn_run_state_skip;
return xnn_status_success;
}
if (prelu_op->weights_cache != NULL && !xnn_weights_cache_is_finalized(prelu_op->weights_cache)) {
xnn_log_error("failed to reshape %s operator: weights cache is not finalized",
xnn_operator_type_to_string(expected_operator_type));
return xnn_status_invalid_state;
}
const struct xnn_prelu_config* prelu = prelu_op->prelu_config;
const size_t channels = prelu_op->channels;
prelu_op->context.prelu = (struct prelu_context) {
.n = channels << log2_element_size,
.x_stride = prelu_op->input_pixel_stride << log2_element_size,
.w = packed_weights(prelu_op),
.y_stride = prelu_op->output_pixel_stride << log2_element_size,
.ukernel = prelu->ukernel,
};
#if XNN_TEST_MODE
const size_t batch_tile = prelu->row_tile;
#else
size_t batch_tile = batch_size;
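  // Heuristic: aim for roughly target_tiles_per_thread tiles per thread,
  // rounding the tile size to a multiple of the microkernel's row tile.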
if (num_threads > 1) {
const size_t target_tiles_per_thread = 5;
const size_t max_batch_tile = divide_round_up(batch_size, num_threads * target_tiles_per_thread);
if (max_batch_tile < batch_tile) {
const uint32_t row_tile = prelu->row_tile;
batch_tile = min(batch_tile, divide_round_up(batch_tile, max_batch_tile * row_tile) * row_tile);
}
}
#endif
prelu_op->compute[0].type = xnn_parallelization_type_1d_tile_1d;
prelu_op->compute[0].task_1d_tile_1d = (pthreadpool_task_1d_tile_1d_t) xnn_compute_prelu;
prelu_op->compute[0].range[0] = batch_size;
prelu_op->compute[0].tile[0] = batch_tile;
prelu_op->state = xnn_run_state_needs_setup;
return xnn_status_success;
}
enum xnn_status xnn_reshape_prelu_nc_f16(
xnn_operator_t prelu_op,
size_t batch_size,
pthreadpool_t threadpool)
{
return reshape_prelu_nc(
prelu_op, xnn_operator_type_prelu_nc_f16,
batch_size,
/*log2_element_size=*/XNN_LOG2_SIZEOF_HALF,
pthreadpool_get_threads_count(threadpool));
}
enum xnn_status xnn_reshape_prelu_nc_f32(
xnn_operator_t prelu_op,
size_t batch_size,
pthreadpool_t threadpool)
{
return reshape_prelu_nc(
prelu_op, xnn_operator_type_prelu_nc_f32,
batch_size,
/*log2_element_size=*/XNN_LOG2_SIZEOF_FLOAT,
pthreadpool_get_threads_count(threadpool));
}
static enum xnn_status setup_prelu_nc(
xnn_operator_t prelu_op,
enum xnn_operator_type expected_operator_type,
  const void* input,
  void* output)
{
if (prelu_op->type != expected_operator_type) {
xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
xnn_operator_type_to_string(expected_operator_type),
xnn_operator_type_to_string(prelu_op->type));
return xnn_status_invalid_parameter;
}
switch (prelu_op->state) {
case xnn_run_state_skip:
return xnn_status_success;
case xnn_run_state_invalid:
xnn_log_error(
"failed to setup %s operator: operator has not been reshaped yet",
xnn_operator_type_to_string(prelu_op->type));
return xnn_status_invalid_state;
case xnn_run_state_needs_setup:
// Operator has been reshaped, but not setup, continue with setup.
case xnn_run_state_ready:
// Operator has been reshaped, and we are setting up with different pointers.
break;
}
prelu_op->context.prelu.x = input;
prelu_op->context.prelu.y = output;
prelu_op->state = xnn_run_state_ready;
return xnn_status_success;
}
enum xnn_status xnn_setup_prelu_nc_f16(
xnn_operator_t prelu_op,
const void* input,
void* output)
{
return setup_prelu_nc(
prelu_op, xnn_operator_type_prelu_nc_f16,
input, output);
}
enum xnn_status xnn_setup_prelu_nc_f32(
xnn_operator_t prelu_op,
const float* input,
float* output)
{
return setup_prelu_nc(
prelu_op, xnn_operator_type_prelu_nc_f32,
input, output);
}
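// Usage sketch (not part of the original file): a minimal f32 PRELU round trip
// with no code or weights cache. The shapes and negative slopes are
// illustrative.
#include <stddef.h>
#include <xnnpack.h>

static enum xnn_status run_prelu_f32_example(void) {
  enum xnn_status status = xnn_initialize(/*allocator=*/NULL);
  if (status != xnn_status_success) {
    return status;
  }
  const float negative_slope[4] = { 0.25f, 0.25f, 0.1f, 0.1f };
  xnn_operator_t op = NULL;
  status = xnn_create_prelu_nc_f32(
    /*channels=*/4, /*input_stride=*/4, /*output_stride=*/4,
    negative_slope, /*flags=*/0,
    /*code_cache=*/NULL, /*weights_cache=*/NULL, &op);
  if (status != xnn_status_success) {
    return status;
  }
  const float input[2 * 4] = { 1.0f, -2.0f, 3.0f, -4.0f, -1.0f, 2.0f, -3.0f, 4.0f };
  float output[2 * 4];
  status = xnn_reshape_prelu_nc_f32(op, /*batch_size=*/2, /*threadpool=*/NULL);
  if (status == xnn_status_success) {
    status = xnn_setup_prelu_nc_f32(op, input, output);
  }
  if (status == xnn_status_success) {
    status = xnn_run_operator(op, /*threadpool=*/NULL);
  }
  xnn_delete_operator(op);
  return status;
}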
// File: XNNPACK-master/src/operators/resize-bilinear-nchw.c (repo: XNNPACK)
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <xnnpack.h>
#include <xnnpack/allocator.h>
#include <xnnpack/config.h>
#include <xnnpack/operator.h>
#include <xnnpack/operator-type.h>
#include <xnnpack/log.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/params.h>
#include <xnnpack/indirection.h>
static enum xnn_status create_resize_bilinear2d_nchw(
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
uint32_t flags,
enum xnn_operator_type operator_type,
const struct xnn_ibilinear_chw_config* ibilinear_chw_config,
xnn_operator_t* resize_op_out)
{
xnn_operator_t resize_op = NULL;
enum xnn_status status = xnn_status_uninitialized;
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
xnn_operator_type_to_string(operator_type));
goto error;
}
status = xnn_status_invalid_parameter;
if (channels == 0) {
xnn_log_error(
"failed to create %s operator with %zu channels: number of channels must be non-zero",
xnn_operator_type_to_string(operator_type), channels);
goto error;
}
if (input_pixel_stride < channels) {
xnn_log_error(
"failed to create %s operator with input pixel stride of %zu: "
"stride must be at least as large as the number of channels (%zu)",
xnn_operator_type_to_string(operator_type), input_pixel_stride, channels);
goto error;
}
if (output_pixel_stride < channels) {
xnn_log_error(
"failed to create %s operator with output pixel stride of %zu: "
"stride must be at least as large as the number of channels (%zu)",
xnn_operator_type_to_string(operator_type), output_pixel_stride, channels);
goto error;
}
status = xnn_status_out_of_memory;
resize_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
if (resize_op == NULL) {
xnn_log_error(
"failed to allocate %zu bytes for %s operator descriptor",
sizeof(struct xnn_operator), xnn_operator_type_to_string(operator_type));
goto error;
}
resize_op->channels = channels;
resize_op->input_pixel_stride = input_pixel_stride;
resize_op->output_pixel_stride = output_pixel_stride;
resize_op->type = operator_type;
resize_op->flags = flags;
resize_op->ibilinear_chw_config = ibilinear_chw_config;
resize_op->state = xnn_run_state_invalid;
*resize_op_out = resize_op;
return xnn_status_success;
error:
xnn_delete_operator(resize_op);
return status;
}
enum xnn_status xnn_create_resize_bilinear2d_nchw_f16(
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
uint32_t flags,
xnn_operator_t* resize_op_out)
{
const struct xnn_ibilinear_chw_config* ibilinear_chw_config = xnn_init_f16_ibilinear_chw_config();
if (ibilinear_chw_config == NULL) {
xnn_log_error("failed to create %s operator: unsupported hardware configuration",
xnn_operator_type_to_string(xnn_operator_type_resize_bilinear_nchw_f16));
return xnn_status_unsupported_hardware;
}
return create_resize_bilinear2d_nchw(
channels,
input_pixel_stride,
output_pixel_stride,
flags,
xnn_operator_type_resize_bilinear_nchw_f16,
ibilinear_chw_config,
resize_op_out);
}
enum xnn_status xnn_create_resize_bilinear2d_nchw_f32(
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
uint32_t flags,
xnn_operator_t* resize_op_out)
{
const struct xnn_ibilinear_chw_config* ibilinear_chw_config = xnn_init_f32_ibilinear_chw_config();
if (ibilinear_chw_config == NULL) {
xnn_log_error("failed to create %s operator: unsupported hardware configuration",
xnn_operator_type_to_string(xnn_operator_type_resize_bilinear_nchw_f32));
return xnn_status_unsupported_hardware;
}
return create_resize_bilinear2d_nchw(
channels,
input_pixel_stride,
output_pixel_stride,
flags,
xnn_operator_type_resize_bilinear_nchw_f32,
ibilinear_chw_config,
resize_op_out);
}
static enum xnn_status reshape_resize_bilinear2d_nchw(
xnn_operator_t resize_op,
enum xnn_operator_type expected_operator_type,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t output_height,
size_t output_width,
uint32_t log2_data_element_size,
uint32_t log2_weight_element_size,
xnn_indirection_init_resize_bilinear2d_chw_fn indirection_init,
size_t num_threads)
{
if (resize_op->type != expected_operator_type) {
xnn_log_error("failed to reshape operator: operator type mismatch (expected %s, got %s)",
xnn_operator_type_to_string(expected_operator_type),
xnn_operator_type_to_string(resize_op->type));
return xnn_status_invalid_parameter;
}
resize_op->state = xnn_run_state_invalid;
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error("failed to reshape %s operator: XNNPACK is not initialized",
xnn_operator_type_to_string(expected_operator_type));
return xnn_status_uninitialized;
}
if (input_width <= 1 || input_height <= 1) {
xnn_log_error(
"failed to reshape %s operator with %zux%zu input: input dimensions must be greater than 1",
xnn_operator_type_to_string(expected_operator_type), input_width, input_height);
return xnn_status_invalid_parameter;
}
if (max(input_width, input_height) >= 16777216) {
xnn_log_error(
"failed to reshape %s operator with %zux%zu input: input dimensions must be below 2**24",
xnn_operator_type_to_string(expected_operator_type), input_width, input_height);
return xnn_status_unsupported_parameter;
}
if (output_width == 0 || output_height == 0) {
xnn_log_error(
"failed to reshape %s operator with %zux%zu output: output dimensions must be non-zero",
xnn_operator_type_to_string(expected_operator_type), output_width, output_height);
return xnn_status_invalid_parameter;
}
if (max(output_width, output_height) >= 16777216) {
xnn_log_error(
"failed to reshape %s operator with %zux%zu output: output dimensions must be below 2**24",
xnn_operator_type_to_string(expected_operator_type), output_width, output_height);
return xnn_status_unsupported_parameter;
}
if (batch_size == 0) {
resize_op->state = xnn_run_state_skip;
return xnn_status_success;
}
if (output_height * output_width != resize_op->last_output_height * resize_op->last_output_width) {
const size_t indirection_buffer_size = sizeof(void*) * (output_height * output_width * 4);
const size_t packed_weights_size = (output_height * output_width * 2) << log2_weight_element_size;
const void** indirection_buffer = (const void**) xnn_reallocate_memory(resize_op->indirection_buffer, indirection_buffer_size);
if (indirection_buffer == NULL) {
xnn_log_error(
"failed to allocate %zu bytes for %s operator indirection buffer",
indirection_buffer_size, xnn_operator_type_to_string(expected_operator_type));
return xnn_status_out_of_memory;
}
resize_op->indirection_buffer = indirection_buffer;
xnn_log_debug("allocated %zu bytes for indirection buffer in %s operator",
indirection_buffer_size, xnn_operator_type_to_string(expected_operator_type));
// Note: packed weights must be SIMD-aligned, so we can't use xnn_reallocate_memory
xnn_release_simd_memory(resize_op->packed_weights.pointer);
resize_op->packed_weights.pointer = xnn_allocate_simd_memory(packed_weights_size);
if (resize_op->packed_weights.pointer == NULL) {
xnn_log_error(
"failed to allocate %zu bytes for %s operator packed weights",
packed_weights_size, xnn_operator_type_to_string(expected_operator_type));
return xnn_status_out_of_memory;
}
}
  // In CHW layout adjacent pixels within a channel plane are contiguous, so the pixel stride is a single element.
  const size_t input_pixel_stride_in_bytes = 1 << log2_data_element_size;
if (input_height != resize_op->last_input_height ||
input_width != resize_op->last_input_width ||
output_height != resize_op->last_output_height ||
output_width != resize_op->last_output_width)
{
const uint32_t flags = resize_op->flags;
// Set a dummy input first, the actual input offset is calculated in setup when we have the input pointer.
void* dummy_input = (void*) XNN_ALLOCATION_ALIGNMENT;
indirection_init(
input_pixel_stride_in_bytes,
input_height, input_width,
output_height, output_width,
dummy_input, resize_op->indirection_buffer, resize_op->packed_weights.pointer,
!!(flags & XNN_FLAG_ALIGN_CORNERS),
!!(flags & XNN_FLAG_TENSORFLOW_LEGACY_MODE));
resize_op->last_input = dummy_input;
resize_op->last_input_height = input_height;
resize_op->last_input_width = input_width;
resize_op->last_output_height = output_height;
resize_op->last_output_width = output_width;
}
const struct xnn_ibilinear_chw_config* ibilinear_chw = resize_op->ibilinear_chw_config;
// Resize bilinear packed weights can change when the operator is resized, we will not use weights cache.
assert(resize_op->weights_cache == NULL);
resize_op->context.resize_bilinear_chw = (struct resize_bilinear_chw_context) {
.output_pixels = output_height * output_width,
.channels = resize_op->channels,
.input_channel_stride = (input_height * input_width) << log2_data_element_size,
.indirect_input = resize_op->indirection_buffer,
.input_batch_stride = (resize_op->input_pixel_stride * input_height * input_width) << log2_data_element_size,
.packed_weights = resize_op->packed_weights.pointer,
.output_batch_stride = (resize_op->output_pixel_stride * output_height * output_width) << log2_data_element_size,
.output_channel_stride = (output_height * output_width) << log2_data_element_size,
.ukernel = ibilinear_chw->ukernel,
};
#if XNN_TEST_MODE
const size_t output_channel_tile = ibilinear_chw->channel_tile;
#else
size_t output_channel_tile = resize_op->channels;
if (num_threads > 1) {
const size_t target_tiles_per_thread = 4;
const size_t max_channel_tile = divide_round_up(output_channel_tile, num_threads * target_tiles_per_thread);
if (max_channel_tile < output_channel_tile) {
const uint32_t output_channel_subtile = ibilinear_chw->channel_tile;
output_channel_tile =
min(output_channel_tile,
divide_round_up(output_channel_tile, max_channel_tile * output_channel_subtile) * output_channel_subtile);
}
}
#endif
resize_op->compute[0].type = xnn_parallelization_type_2d_tile_1d;
resize_op->compute[0].task_2d_tile_1d = (pthreadpool_task_2d_tile_1d_t) xnn_compute_resize_bilinear_chw;
resize_op->compute[0].range[0] = batch_size;
resize_op->compute[0].range[1] = resize_op->channels;
resize_op->compute[0].tile[0] = output_channel_tile;
resize_op->state = xnn_run_state_needs_setup;
return xnn_status_success;
}
enum xnn_status xnn_reshape_resize_bilinear2d_nchw_f16(
xnn_operator_t resize_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t output_height,
size_t output_width,
pthreadpool_t threadpool)
{
return reshape_resize_bilinear2d_nchw(
resize_op,
xnn_operator_type_resize_bilinear_nchw_f16,
batch_size,
input_height,
input_width,
output_height,
output_width,
/*log2_data_element_size=*/XNN_LOG2_SIZEOF_HALF,
/*log2_weight_element_size=*/XNN_LOG2_SIZEOF_HALF,
(xnn_indirection_init_resize_bilinear2d_chw_fn) xnn_indirection_init_resize_bilinear2d_chw_f16,
pthreadpool_get_threads_count(threadpool));
}
enum xnn_status xnn_reshape_resize_bilinear2d_nchw_f32(
xnn_operator_t resize_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t output_height,
size_t output_width,
pthreadpool_t threadpool)
{
return reshape_resize_bilinear2d_nchw(
resize_op,
xnn_operator_type_resize_bilinear_nchw_f32,
batch_size,
input_height,
input_width,
output_height,
output_width,
/*log2_data_element_size=*/XNN_LOG2_SIZEOF_FLOAT,
/*log2_weight_element_size=*/XNN_LOG2_SIZEOF_FLOAT,
(xnn_indirection_init_resize_bilinear2d_chw_fn) xnn_indirection_init_resize_bilinear2d_chw_f32,
pthreadpool_get_threads_count(threadpool));
}
static enum xnn_status setup_resize_bilinear2d_nchw(
xnn_operator_t resize_op,
enum xnn_operator_type expected_operator_type,
const void* input,
void* output)
{
if (resize_op->type != expected_operator_type) {
xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
xnn_operator_type_to_string(expected_operator_type),
xnn_operator_type_to_string(resize_op->type));
return xnn_status_invalid_parameter;
}
switch (resize_op->state) {
case xnn_run_state_skip:
return xnn_status_success;
case xnn_run_state_invalid:
xnn_log_error(
"failed to setup %s operator: operator has not been reshaped yet",
xnn_operator_type_to_string(resize_op->type));
return xnn_status_invalid_state;
case xnn_run_state_needs_setup:
// Operator has been reshaped, but not setup, continue with setup.
case xnn_run_state_ready:
// Operator has been reshaped, and we are setting up with different pointers.
break;
}
resize_op->context.resize_bilinear_chw.input_offset =
(size_t) ((uintptr_t) input - (uintptr_t) resize_op->last_input);
resize_op->context.resize_bilinear_chw.output = output;
resize_op->state = xnn_run_state_ready;
return xnn_status_success;
}
enum xnn_status xnn_setup_resize_bilinear2d_nchw_f16(
xnn_operator_t resize_op,
const void* input,
void* output)
{
return setup_resize_bilinear2d_nchw(
resize_op,
xnn_operator_type_resize_bilinear_nchw_f16,
input,
output);
}
enum xnn_status xnn_setup_resize_bilinear2d_nchw_f32(
xnn_operator_t resize_op,
const float* input,
float* output)
{
return setup_resize_bilinear2d_nchw(
resize_op,
xnn_operator_type_resize_bilinear_nchw_f32,
input,
output);
}
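// Usage sketch (not part of the original file): upscaling a 4x4 two-channel
// NCHW f32 image to 8x8. Dimension choices are illustrative; note the reshape
// above requires input height and width greater than 1.
#include <stddef.h>
#include <xnnpack.h>

static enum xnn_status run_resize_nchw_f32_example(void) {
  enum xnn_status status = xnn_initialize(/*allocator=*/NULL);
  if (status != xnn_status_success) {
    return status;
  }
  xnn_operator_t op = NULL;
  status = xnn_create_resize_bilinear2d_nchw_f32(
    /*channels=*/2, /*input_pixel_stride=*/2, /*output_pixel_stride=*/2,
    /*flags=*/0, &op);
  if (status != xnn_status_success) {
    return status;
  }
  float input[1 * 2 * 4 * 4];   // batch x channels x height x width
  float output[1 * 2 * 8 * 8];
  for (size_t i = 0; i < sizeof(input) / sizeof(input[0]); i++) {
    input[i] = (float) i;
  }
  status = xnn_reshape_resize_bilinear2d_nchw_f32(
    op, /*batch_size=*/1,
    /*input_height=*/4, /*input_width=*/4,
    /*output_height=*/8, /*output_width=*/8,
    /*threadpool=*/NULL);
  if (status == xnn_status_success) {
    status = xnn_setup_resize_bilinear2d_nchw_f32(op, input, output);
  }
  if (status == xnn_status_success) {
    status = xnn_run_operator(op, /*threadpool=*/NULL);
  }
  xnn_delete_operator(op);
  return status;
}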
// File: XNNPACK-master/src/operators/resize-bilinear-nhwc.c (repo: XNNPACK)
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <xnnpack.h>
#include <xnnpack/allocator.h>
#include <xnnpack/config.h>
#include <xnnpack/operator.h>
#include <xnnpack/operator-type.h>
#include <xnnpack/log.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/params.h>
#include <xnnpack/indirection.h>
static enum xnn_status create_resize_bilinear2d_nhwc(
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
uint32_t flags,
enum xnn_operator_type operator_type,
const struct xnn_ibilinear_config* ibilinear_config,
xnn_operator_t* resize_op_out)
{
xnn_operator_t resize_op = NULL;
enum xnn_status status = xnn_status_uninitialized;
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
xnn_operator_type_to_string(operator_type));
goto error;
}
status = xnn_status_invalid_parameter;
if (channels == 0) {
xnn_log_error(
"failed to create %s operator with %zu channels: number of channels must be non-zero",
xnn_operator_type_to_string(operator_type), channels);
goto error;
}
if (input_pixel_stride < channels) {
xnn_log_error(
"failed to create %s operator with input pixel stride of %zu: "
"stride must be at least as large as the number of channels (%zu)",
xnn_operator_type_to_string(operator_type), input_pixel_stride, channels);
goto error;
}
if (output_pixel_stride < channels) {
xnn_log_error(
"failed to create %s operator with output pixel stride of %zu: "
"stride must be at least as large as the number of channels (%zu)",
xnn_operator_type_to_string(operator_type), output_pixel_stride, channels);
goto error;
}
status = xnn_status_out_of_memory;
resize_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
if (resize_op == NULL) {
xnn_log_error(
"failed to allocate %zu bytes for %s operator descriptor",
sizeof(struct xnn_operator), xnn_operator_type_to_string(operator_type));
goto error;
}
resize_op->channels = channels;
resize_op->input_pixel_stride = input_pixel_stride;
resize_op->output_pixel_stride = output_pixel_stride;
resize_op->type = operator_type;
resize_op->flags = flags;
resize_op->ibilinear_config = ibilinear_config;
resize_op->state = xnn_run_state_invalid;
*resize_op_out = resize_op;
return xnn_status_success;
error:
xnn_delete_operator(resize_op);
return status;
}
enum xnn_status xnn_create_resize_bilinear2d_nhwc_f16(
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
uint32_t flags,
xnn_operator_t* resize_op_out)
{
const struct xnn_ibilinear_config* ibilinear_config = xnn_init_f16_ibilinear_config();
if (ibilinear_config == NULL) {
xnn_log_error("failed to create %s operator: unsupported hardware configuration",
xnn_operator_type_to_string(xnn_operator_type_resize_bilinear_nhwc_f16));
return xnn_status_unsupported_hardware;
}
return create_resize_bilinear2d_nhwc(
channels,
input_pixel_stride,
output_pixel_stride,
flags,
xnn_operator_type_resize_bilinear_nhwc_f16,
ibilinear_config,
resize_op_out);
}
enum xnn_status xnn_create_resize_bilinear2d_nhwc_f32(
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
uint32_t flags,
xnn_operator_t* resize_op_out)
{
const struct xnn_ibilinear_config* ibilinear_config = xnn_init_f32_ibilinear_config();
if (ibilinear_config == NULL) {
xnn_log_error("failed to create %s operator: unsupported hardware configuration",
xnn_operator_type_to_string(xnn_operator_type_resize_bilinear_nhwc_f32));
return xnn_status_unsupported_hardware;
}
return create_resize_bilinear2d_nhwc(
channels,
input_pixel_stride,
output_pixel_stride,
flags,
xnn_operator_type_resize_bilinear_nhwc_f32,
ibilinear_config,
resize_op_out);
}
enum xnn_status xnn_create_resize_bilinear2d_nhwc_s8(
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
uint32_t flags,
xnn_operator_t* resize_op_out)
{
const struct xnn_ibilinear_config* ibilinear_config = xnn_init_s8_ibilinear_config();
assert(ibilinear_config != NULL);
return create_resize_bilinear2d_nhwc(
channels,
input_pixel_stride,
output_pixel_stride,
flags,
xnn_operator_type_resize_bilinear_nhwc_s8,
ibilinear_config,
resize_op_out);
}
enum xnn_status xnn_create_resize_bilinear2d_nhwc_u8(
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
uint32_t flags,
xnn_operator_t* resize_op_out)
{
const struct xnn_ibilinear_config* ibilinear_config = xnn_init_u8_ibilinear_config();
assert(ibilinear_config != NULL);
return create_resize_bilinear2d_nhwc(
channels,
input_pixel_stride,
output_pixel_stride,
flags,
xnn_operator_type_resize_bilinear_nhwc_u8,
ibilinear_config,
resize_op_out);
}
static enum xnn_status reshape_resize_bilinear2d_nhwc(
xnn_operator_t resize_op,
enum xnn_operator_type expected_operator_type,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t output_height,
size_t output_width,
uint32_t log2_data_element_size,
uint32_t log2_weight_element_size,
xnn_indirection_init_resize_bilinear2d_hwc_fn indirection_init,
size_t num_threads)
{
if (resize_op->type != expected_operator_type) {
xnn_log_error("failed to reshape operator: operator type mismatch (expected %s, got %s)",
xnn_operator_type_to_string(expected_operator_type),
xnn_operator_type_to_string(resize_op->type));
return xnn_status_invalid_parameter;
}
resize_op->state = xnn_run_state_invalid;
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error("failed to reshape %s operator: XNNPACK is not initialized",
xnn_operator_type_to_string(resize_op->type));
return xnn_status_uninitialized;
}
if (input_width == 0 || input_height == 0) {
xnn_log_error(
"failed to reshape %s operator with %zux%zu input: input dimensions must be non-zero",
xnn_operator_type_to_string(resize_op->type), input_width, input_height);
return xnn_status_invalid_parameter;
}
if (max(input_width, input_height) >= 16777216) {
xnn_log_error(
"failed to reshape %s operator with %zux%zu input: input dimensions must be below 2**24",
xnn_operator_type_to_string(resize_op->type), input_width, input_height);
return xnn_status_unsupported_parameter;
}
if (output_width == 0 || output_height == 0) {
xnn_log_error(
"failed to reshape %s operator with %zux%zu output: output dimensions must be non-zero",
xnn_operator_type_to_string(resize_op->type), output_width, output_height);
return xnn_status_invalid_parameter;
}
if (max(output_width, output_height) >= 16777216) {
xnn_log_error(
"failed to reshape %s operator with %zux%zu output: output dimensions must be below 2**24",
xnn_operator_type_to_string(resize_op->type), output_width, output_height);
return xnn_status_unsupported_parameter;
}
if (batch_size == 0) {
resize_op->state = xnn_run_state_skip;
return xnn_status_success;
}
if (output_height * output_width != resize_op->last_output_height * resize_op->last_output_width) {
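    // Each output pixel needs 4 entries in the indirection buffer (pointers to its four
    // bilinear neighbors) and 2 packed interpolation weights (the horizontal and vertical
    // fractional offsets), which is where the factors of 4 and 2 below come from.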
const size_t indirection_buffer_size = sizeof(void*) * (output_height * output_width * 4);
const size_t packed_weights_size = (output_height * output_width * 2) << log2_weight_element_size;
const void** indirection_buffer = (const void**) xnn_reallocate_memory(resize_op->indirection_buffer, indirection_buffer_size);
if (indirection_buffer == NULL) {
xnn_log_error(
"failed to allocate %zu bytes for %s operator indirection buffer",
indirection_buffer_size, xnn_operator_type_to_string(resize_op->type));
return xnn_status_out_of_memory;
}
resize_op->indirection_buffer = indirection_buffer;
xnn_log_debug("allocated %zu bytes for indirection buffer in %s operator",
indirection_buffer_size, xnn_operator_type_to_string(resize_op->type));
// Note: packed weights must be SIMD-aligned, so we can't use xnn_reallocate_memory
xnn_release_simd_memory(resize_op->packed_weights.pointer);
resize_op->packed_weights.pointer = xnn_allocate_simd_memory(packed_weights_size);
if (resize_op->packed_weights.pointer == NULL) {
xnn_log_error(
"failed to allocate %zu bytes for %s operator packed weights",
packed_weights_size, xnn_operator_type_to_string(resize_op->type));
return xnn_status_out_of_memory;
}
}
const size_t input_pixel_stride_in_bytes = resize_op->input_pixel_stride << log2_data_element_size;
if (input_height != resize_op->last_input_height ||
input_width != resize_op->last_input_width ||
output_height != resize_op->last_output_height ||
output_width != resize_op->last_output_width)
{
const uint32_t flags = resize_op->flags;
    // Set a dummy input first; the actual input offset is calculated in setup, once the real input pointer is known.
void* dummy_input = (void*) XNN_ALLOCATION_ALIGNMENT;
indirection_init(
input_pixel_stride_in_bytes,
input_height, input_width,
output_height, output_width,
dummy_input, resize_op->indirection_buffer, resize_op->packed_weights.pointer,
!!(flags & XNN_FLAG_ALIGN_CORNERS),
!!(flags & XNN_FLAG_TENSORFLOW_LEGACY_MODE));
resize_op->last_input = dummy_input;
resize_op->last_input_height = input_height;
resize_op->last_input_width = input_width;
resize_op->last_output_height = output_height;
resize_op->last_output_width = output_width;
}
const struct xnn_ibilinear_config* ibilinear = resize_op->ibilinear_config;
const size_t output_pixel_stride_in_bytes = resize_op->output_pixel_stride << log2_data_element_size;
  // Resize bilinear packed weights can change when the operator is resized, so the weights cache is not used.
assert(resize_op->weights_cache == NULL);
resize_op->context.resize_bilinear = (struct resize_bilinear_context) {
.scaled_channels = resize_op->channels << log2_data_element_size,
.indirect_input = resize_op->indirection_buffer,
.input_batch_stride = input_pixel_stride_in_bytes * input_height * input_width,
.packed_weights = resize_op->packed_weights.pointer,
.output_pixel_stride = output_pixel_stride_in_bytes,
.output_batch_stride = output_pixel_stride_in_bytes * output_height * output_width,
.log2_wsize = 1 + log2_weight_element_size /* log2(2 * sizeof(weight)) */,
.ukernel = ibilinear->ukernel,
};
const size_t output_size = output_height * output_width;
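  // Choose the parallelization tile: in test mode the microkernel's pixel tile is used directly;
  // otherwise the tile is shrunk so that each thread gets roughly target_tiles_per_thread tiles,
  // while staying a multiple of the microkernel's pixel tile so full-tile invocations dominate.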
#if XNN_TEST_MODE
const size_t output_size_tile = ibilinear->pixel_tile;
#else
size_t output_size_tile = output_size;
if (num_threads > 1) {
const size_t target_tiles_per_thread = 5;
const size_t max_output_size_tile = divide_round_up(output_size, num_threads * target_tiles_per_thread);
if (max_output_size_tile < output_size_tile) {
const uint32_t output_size_subtile = ibilinear->pixel_tile;
output_size_tile =
min(output_size_tile,
divide_round_up(output_size_tile, max_output_size_tile * output_size_subtile) * output_size_subtile);
}
}
#endif
resize_op->compute[0].type = xnn_parallelization_type_2d_tile_1d;
resize_op->compute[0].task_2d_tile_1d = (pthreadpool_task_2d_tile_1d_t) xnn_compute_resize_bilinear;
resize_op->compute[0].range[0] = batch_size;
resize_op->compute[0].range[1] = output_size;
resize_op->compute[0].tile[0] = output_size_tile;
resize_op->state = xnn_run_state_needs_setup;
return xnn_status_success;
}
enum xnn_status xnn_reshape_resize_bilinear2d_nhwc_f16(
xnn_operator_t resize_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t output_height,
size_t output_width,
pthreadpool_t threadpool)
{
return reshape_resize_bilinear2d_nhwc(
resize_op,
xnn_operator_type_resize_bilinear_nhwc_f16,
batch_size,
input_height,
input_width,
output_height,
output_width,
/*log2_data_element_size=*/XNN_LOG2_SIZEOF_HALF,
/*log2_weight_element_size=*/XNN_LOG2_SIZEOF_HALF,
(xnn_indirection_init_resize_bilinear2d_hwc_fn) xnn_indirection_init_resize_bilinear2d_hwc_f16,
pthreadpool_get_threads_count(threadpool));
}
enum xnn_status xnn_reshape_resize_bilinear2d_nhwc_f32(
xnn_operator_t resize_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t output_height,
size_t output_width,
pthreadpool_t threadpool)
{
return reshape_resize_bilinear2d_nhwc(
resize_op,
xnn_operator_type_resize_bilinear_nhwc_f32,
batch_size,
input_height,
input_width,
output_height,
output_width,
/*log2_data_element_size=*/XNN_LOG2_SIZEOF_FLOAT,
/*log2_weight_element_size=*/XNN_LOG2_SIZEOF_FLOAT,
(xnn_indirection_init_resize_bilinear2d_hwc_fn) xnn_indirection_init_resize_bilinear2d_hwc_f32,
pthreadpool_get_threads_count(threadpool));
}
enum xnn_status xnn_reshape_resize_bilinear2d_nhwc_s8(
xnn_operator_t resize_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t output_height,
size_t output_width,
pthreadpool_t threadpool)
{
return reshape_resize_bilinear2d_nhwc(
resize_op,
xnn_operator_type_resize_bilinear_nhwc_s8,
batch_size,
input_height,
input_width,
output_height,
output_width,
/*log2_data_element_size=*/XNN_LOG2_SIZEOF_INT8_T,
/*log2_weight_element_size=*/XNN_LOG2_SIZEOF_INT16_T,
(xnn_indirection_init_resize_bilinear2d_hwc_fn) xnn_indirection_init_resize_bilinear2d_hwc_q11,
pthreadpool_get_threads_count(threadpool));
}
enum xnn_status xnn_reshape_resize_bilinear2d_nhwc_u8(
xnn_operator_t resize_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t output_height,
size_t output_width,
pthreadpool_t threadpool)
{
return reshape_resize_bilinear2d_nhwc(
resize_op,
xnn_operator_type_resize_bilinear_nhwc_u8,
batch_size,
input_height,
input_width,
output_height,
output_width,
/*log2_data_element_size=*/XNN_LOG2_SIZEOF_UINT8_T,
/*log2_weight_element_size=*/XNN_LOG2_SIZEOF_INT16_T,
(xnn_indirection_init_resize_bilinear2d_hwc_fn) xnn_indirection_init_resize_bilinear2d_hwc_q11,
pthreadpool_get_threads_count(threadpool));
}
static enum xnn_status setup_resize_bilinear2d_nhwc(
xnn_operator_t resize_op,
enum xnn_operator_type expected_operator_type,
const void* input,
void* output)
{
if (resize_op->type != expected_operator_type) {
xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
xnn_operator_type_to_string(expected_operator_type),
xnn_operator_type_to_string(resize_op->type));
return xnn_status_invalid_parameter;
}
switch (resize_op->state) {
case xnn_run_state_skip:
return xnn_status_success;
case xnn_run_state_invalid:
xnn_log_error(
"failed to setup %s operator: operator has not been reshaped yet",
xnn_operator_type_to_string(resize_op->type));
return xnn_status_invalid_state;
case xnn_run_state_needs_setup:
// Operator has been reshaped, but not setup, continue with setup.
case xnn_run_state_ready:
// Operator has been reshaped, and we are setting up with different pointers.
break;
}
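  // The indirection buffer was built against a dummy pointer (saved in last_input) during
  // reshape; store the delta to the real input so the compute function can rebase each
  // indirection entry on the fly.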
resize_op->context.resize_bilinear.input_offset = (size_t) ((uintptr_t) input - (uintptr_t) resize_op->last_input);
resize_op->context.resize_bilinear.output = output;
resize_op->state = xnn_run_state_ready;
return xnn_status_success;
}
enum xnn_status xnn_setup_resize_bilinear2d_nhwc_f16(
xnn_operator_t resize_op,
const void* input,
void* output)
{
return setup_resize_bilinear2d_nhwc(
resize_op,
xnn_operator_type_resize_bilinear_nhwc_f16,
input,
output);
}
enum xnn_status xnn_setup_resize_bilinear2d_nhwc_f32(
xnn_operator_t resize_op,
const float* input,
float* output)
{
return setup_resize_bilinear2d_nhwc(
resize_op,
xnn_operator_type_resize_bilinear_nhwc_f32,
input,
output);
}
enum xnn_status xnn_setup_resize_bilinear2d_nhwc_s8(
xnn_operator_t resize_op,
const int8_t* input,
int8_t* output)
{
return setup_resize_bilinear2d_nhwc(
resize_op,
xnn_operator_type_resize_bilinear_nhwc_s8,
input,
output);
}
enum xnn_status xnn_setup_resize_bilinear2d_nhwc_u8(
xnn_operator_t resize_op,
const uint8_t* input,
uint8_t* output)
{
return setup_resize_bilinear2d_nhwc(
resize_op,
xnn_operator_type_resize_bilinear_nhwc_u8,
input,
output);
}
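// Illustrative end-to-end use of the F32 resize operator (a minimal sketch, assuming
// xnn_initialize() has already succeeded; error handling omitted):
//
//   xnn_operator_t op = NULL;
//   xnn_create_resize_bilinear2d_nhwc_f32(/*channels=*/3, /*input_pixel_stride=*/3,
//                                         /*output_pixel_stride=*/3, /*flags=*/0, &op);
//   xnn_reshape_resize_bilinear2d_nhwc_f32(op, /*batch_size=*/1, /*input_height=*/16,
//                                          /*input_width=*/16, /*output_height=*/32,
//                                          /*output_width=*/32, /*threadpool=*/NULL);
//   xnn_setup_resize_bilinear2d_nhwc_f32(op, input, output);
//   xnn_run_operator(op, /*threadpool=*/NULL);
//   xnn_delete_operator(op);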
| 17,269 | 32.862745 | 131 | c |
XNNPACK | XNNPACK-master/src/operators/rope-nthc.c |
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <xnnpack.h>
#include <xnnpack/allocator.h>
#include <xnnpack/config.h>
#include <xnnpack/operator.h>
#include <xnnpack/operator-type.h>
#include <xnnpack/log.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/params.h>
#include <xnnpack/indirection.h>
static enum xnn_status create_rope_nthc(
size_t max_sequence_size,
size_t channels,
const float* weights,
uint32_t flags,
enum xnn_operator_type operator_type,
const struct xnn_cmul_config* config,
xnn_operator_t* rope_op_out)
{
xnn_operator_t rope_op = NULL;
enum xnn_status status = xnn_status_uninitialized;
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
xnn_operator_type_to_string(operator_type));
goto error;
}
status = xnn_status_invalid_parameter;
if (max_sequence_size == 0) {
xnn_log_error(
"failed to create %s operator with %zu max sequence size: sequence size must be non-zero",
      xnn_operator_type_to_string(operator_type), max_sequence_size);
goto error;
}
if (channels == 0) {
xnn_log_error(
"failed to create %s operator with %zu channels: number of channels must be non-zero",
xnn_operator_type_to_string(operator_type), channels);
goto error;
}
status = xnn_status_unsupported_parameter;
if (channels % 2 != 0) {
xnn_log_error(
"failed to create %s operator with %zu channels: odd number of channels is not supported",
xnn_operator_type_to_string(operator_type), channels);
goto error;
}
status = xnn_status_out_of_memory;
rope_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
if (rope_op == NULL) {
xnn_log_error(
"failed to allocate %zu bytes for %s operator descriptor",
sizeof(struct xnn_operator), xnn_operator_type_to_string(operator_type));
goto error;
}
rope_op->channels = channels;
rope_op->max_sequence_size = max_sequence_size;
rope_op->input2 = weights;
rope_op->type = operator_type;
rope_op->flags = flags;
rope_op->cmul_config = config;
rope_op->state = xnn_run_state_invalid;
*rope_op_out = rope_op;
return xnn_status_success;
error:
xnn_delete_operator(rope_op);
return status;
}
enum xnn_status xnn_create_rope_nthc_f32(
size_t max_sequence_size,
size_t channels,
const float* weights,
uint32_t flags,
xnn_operator_t* rope_op_out)
{
const struct xnn_cmul_config* config = xnn_init_f32_cmul_config();
if (config == NULL) {
xnn_log_error("failed to create %s operator: unsupported hardware configuration",
xnn_operator_type_to_string(xnn_operator_type_rope_nthc_f32));
return xnn_status_unsupported_hardware;
}
return create_rope_nthc(
max_sequence_size,
channels,
weights,
flags,
xnn_operator_type_rope_nthc_f32,
config,
rope_op_out);
}
static enum xnn_status reshape_rope_nthc(
xnn_operator_t rope_op,
enum xnn_operator_type expected_operator_type,
size_t batch_size,
size_t sequence_size,
size_t heads,
uint32_t log2_data_element_size,
uint32_t log2_weight_element_size,
size_t num_threads)
{
if (rope_op->type != expected_operator_type) {
xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
xnn_operator_type_to_string(expected_operator_type),
xnn_operator_type_to_string(rope_op->type));
return xnn_status_invalid_parameter;
}
rope_op->state = xnn_run_state_invalid;
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
xnn_operator_type_to_string(rope_op->type));
return xnn_status_uninitialized;
}
if (sequence_size == 0) {
xnn_log_error(
"failed to setup %s operator with %zu sequence size: sequence size must be non-zero",
xnn_operator_type_to_string(rope_op->type), sequence_size);
return xnn_status_invalid_parameter;
}
if (sequence_size > rope_op->max_sequence_size) {
xnn_log_error(
"failed to setup %s operator with %zu sequence size: sequence size can not exceed the maximum sequence size %zu",
xnn_operator_type_to_string(rope_op->type), sequence_size, rope_op->max_sequence_size);
return xnn_status_invalid_parameter;
}
if (heads == 0) {
xnn_log_error(
"failed to setup %s operator with %zu heads: number of heads must be non-zero",
xnn_operator_type_to_string(rope_op->type), heads);
return xnn_status_invalid_parameter;
}
if (batch_size == 0) {
rope_op->state = xnn_run_state_skip;
return xnn_status_success;
}
const struct xnn_cmul_config* config = rope_op->cmul_config;
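  // RoPE interprets each pair of adjacent channels as one complex number: the complex-multiply
  // microkernel rotates channels/2 pairs per (batch, head, token) by the per-position
  // coefficients in the weights, so scaled_channels below is the byte size of channels/2
  // elements rather than of the full channel count.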
rope_op->context.rope = (struct rope_context) {
.scaled_channels = (rope_op->channels / 2) << log2_data_element_size,
.batch_stride = (sequence_size * heads * rope_op->channels) << log2_data_element_size,
.head_stride = rope_op->channels << log2_data_element_size,
.sequence_stride = (heads * rope_op->channels) << log2_data_element_size,
.weights = rope_op->input2,
.vcmul = config->ukernel,
};
rope_op->compute[0].type = xnn_parallelization_type_3d;
rope_op->compute[0].task_3d = (pthreadpool_task_3d_t) xnn_compute_rope;
rope_op->compute[0].range[0] = batch_size;
rope_op->compute[0].range[1] = heads;
rope_op->compute[0].range[2] = sequence_size;
rope_op->state = xnn_run_state_needs_setup;
return xnn_status_success;
}
enum xnn_status xnn_reshape_rope_nthc_f32(
xnn_operator_t rope_op,
size_t batch_size,
size_t sequence_size,
size_t heads,
pthreadpool_t threadpool)
{
return reshape_rope_nthc(
rope_op, xnn_operator_type_rope_nthc_f32,
batch_size, sequence_size, heads,
/*log2_data_element_size=*/XNN_LOG2_SIZEOF_FLOAT,
/*log2_weight_element_size=*/XNN_LOG2_SIZEOF_FLOAT,
pthreadpool_get_threads_count(threadpool));
}
static enum xnn_status setup_rope_nthc(
xnn_operator_t rope_op,
enum xnn_operator_type expected_operator_type,
const void* input,
void* output)
{
if (rope_op->type != expected_operator_type) {
xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
xnn_operator_type_to_string(expected_operator_type),
xnn_operator_type_to_string(rope_op->type));
return xnn_status_invalid_parameter;
}
switch (rope_op->state) {
case xnn_run_state_skip:
return xnn_status_success;
case xnn_run_state_invalid:
xnn_log_error(
"failed to setup %s operator: operator has not been reshaped yet",
xnn_operator_type_to_string(rope_op->type));
return xnn_status_invalid_state;
case xnn_run_state_needs_setup:
// Operator has been reshaped, but not setup, continue with setup.
case xnn_run_state_ready:
// Operator has been reshaped, and we are setting up with different pointers.
break;
}
rope_op->context.rope.input = input;
rope_op->context.rope.output = output;
rope_op->state = xnn_run_state_ready;
return xnn_status_success;
}
enum xnn_status xnn_setup_rope_nthc_f32(
xnn_operator_t rope_op,
const float* input,
float* output)
{
return setup_rope_nthc(
rope_op, xnn_operator_type_rope_nthc_f32,
input, output);
}
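// Illustrative use of the RoPE operator (a minimal sketch, assuming xnn_initialize() has
// succeeded and that `weights` points at the precomputed rotation coefficients the operator
// expects; error handling omitted):
//
//   xnn_operator_t rope = NULL;
//   xnn_create_rope_nthc_f32(max_sequence_size, channels, weights, /*flags=*/0, &rope);
//   xnn_reshape_rope_nthc_f32(rope, batch_size, sequence_size, heads, /*threadpool=*/NULL);
//   xnn_setup_rope_nthc_f32(rope, input, output);
//   xnn_run_operator(rope, /*threadpool=*/NULL);
//   xnn_delete_operator(rope);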
| 7,637 | 29.430279 | 119 | c |
XNNPACK | XNNPACK-master/src/operators/slice-nd.c |
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <xnnpack.h>
#include <xnnpack/allocator.h>
#include <xnnpack/log.h>
#include <xnnpack/microparams-init.h>
#include <xnnpack/normalization.h>
#include <xnnpack/operator.h>
#include <xnnpack/config.h>
static void init_slice_nd(
uint32_t flags,
enum xnn_operator_type operator_type,
const struct xnn_unary_elementwise_config* copy_config,
xnn_operator_t slice_op)
{
slice_op->type = operator_type;
slice_op->flags = flags;
slice_op->copy_config = copy_config;
slice_op->state = xnn_run_state_invalid;
}
static enum xnn_status create_slice_nd(
uint32_t flags,
enum xnn_operator_type operator_type,
xnn_operator_t* slice_op_out)
{
xnn_operator_t slice_op = NULL;
enum xnn_status status = xnn_status_uninitialized;
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error(
"failed to create %s operator: XNNPACK is not initialized",
xnn_operator_type_to_string(operator_type));
goto error;
}
status = xnn_status_unsupported_hardware;
const struct xnn_unary_elementwise_config* copy_config = xnn_init_xx_copy_config();
if (copy_config == NULL) {
xnn_log_error(
"failed to create %s operator: unsupported hardware configuration",
xnn_operator_type_to_string(operator_type));
goto error;
}
status = xnn_status_out_of_memory;
slice_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
if (slice_op == NULL) {
xnn_log_error(
"failed to allocate %zu bytes for %s operator descriptor",
sizeof(struct xnn_operator), xnn_operator_type_to_string(operator_type));
goto error;
}
init_slice_nd(
flags,
operator_type,
copy_config,
slice_op);
*slice_op_out = slice_op;
return xnn_status_success;
error:
xnn_delete_operator(slice_op);
return status;
}
enum xnn_status xnn_create_slice_nd_x8(
uint32_t flags,
xnn_operator_t *slice_op_out)
{
return create_slice_nd(flags, xnn_operator_type_slice_nd_x8, slice_op_out);
}
enum xnn_status xnn_create_slice_nd_x16(
uint32_t flags,
xnn_operator_t *slice_op_out)
{
return create_slice_nd(flags, xnn_operator_type_slice_nd_x16, slice_op_out);
}
enum xnn_status xnn_create_slice_nd_x32(
uint32_t flags,
xnn_operator_t *slice_op_out)
{
return create_slice_nd(flags, xnn_operator_type_slice_nd_x32, slice_op_out);
}
static enum xnn_status reshape_slice_nd(
xnn_operator_t slice_op,
enum xnn_operator_type expected_operator_type,
size_t num_dims,
const size_t* input_shape,
const size_t* offsets,
const size_t* sizes,
uint32_t log2_element_size,
size_t num_threads)
{
if (slice_op->type != expected_operator_type) {
xnn_log_error("failed to reshape operator: operator type mismatch (expected %s, got %s)",
xnn_operator_type_to_string(expected_operator_type),
xnn_operator_type_to_string(slice_op->type));
return xnn_status_invalid_parameter;
}
slice_op->state = xnn_run_state_invalid;
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error("failed to reshape %s operator: XNNPACK is not initialized",
xnn_operator_type_to_string(slice_op->type));
return xnn_status_uninitialized;
}
if (num_dims == 0) {
xnn_log_error(
"failed to create %s operator with %zu num_dims: num_dims must be non-zero",
xnn_operator_type_to_string(slice_op->type), num_dims);
return xnn_status_unsupported_parameter;
}
if (num_dims > XNN_MAX_TENSOR_DIMS) {
xnn_log_error(
"failed to create %s operator with %zu num_dims: num_dims must be <= %d",
xnn_operator_type_to_string(slice_op->type), num_dims, XNN_MAX_TENSOR_DIMS);
return xnn_status_unsupported_parameter;
}
for (size_t i = 0; i < num_dims; i++) {
if (input_shape[i] == 0) {
xnn_log_error(
"failed to reshape %s operator: input shape dimension #%zu is zero",
xnn_operator_type_to_string(slice_op->type), i);
return xnn_status_invalid_parameter;
}
if (offsets[i] >= input_shape[i]) {
xnn_log_error(
"failed to create %s operator with %zu offsets[%zu]: 0 <= offset < %zu",
xnn_operator_type_to_string(slice_op->type), offsets[i], i, input_shape[i]);
return xnn_status_unsupported_parameter;
}
if (sizes[i] == 0 || sizes[i] > input_shape[i]) {
xnn_log_error(
"failed to create %s operator with %zu sizes[%zu]: 0 < size <= %zu",
xnn_operator_type_to_string(slice_op->type), sizes[i], i, input_shape[i]);
return xnn_status_unsupported_parameter;
}
if (offsets[i] + sizes[i] > input_shape[i]) {
xnn_log_error(
"failed to create %s operator with %zu offsets[%zu] and %zu sizes[%zu]: offset + size <= %zu",
xnn_operator_type_to_string(slice_op->type), offsets[i], i, sizes[i], i, input_shape[i]);
return xnn_status_unsupported_parameter;
}
}
const struct xnn_unary_elementwise_config* copy_config = slice_op->copy_config;
size_t normalized_offsets[XNN_MAX_TENSOR_DIMS];
size_t normalized_input_shape[XNN_MAX_TENSOR_DIMS];
size_t normalized_output_shape[XNN_MAX_TENSOR_DIMS];
size_t num_normalized_dims;
xnn_normalize_slice(
num_dims,
offsets,
sizes,
input_shape,
normalized_offsets,
normalized_input_shape,
normalized_output_shape,
&num_normalized_dims);
assert(num_normalized_dims <= XNN_MAX_TENSOR_DIMS);
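  // Normalization merges dimensions that are copied in full into their inner neighbor, lowering
  // the rank of the copy. Illustrative example: slicing a [4, 8] tensor with offsets {1, 0} and
  // sizes {2, 8} keeps the whole inner dimension, so the slice degenerates to a 1-D copy of two
  // contiguous 8-element rows.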
slice_op->context.slice = (struct slice_context) {
.ukernel = copy_config->ukernel,
.num_normalized_dims = num_normalized_dims,
};
// TODO(b/246969669): move strides calculation into normalization to simplify code here.
for (size_t i = 0; i < XNN_MAX_TENSOR_DIMS; i++) {
slice_op->context.slice.offsets[i] = normalized_offsets[XNN_MAX_TENSOR_DIMS - 1 - i];
}
slice_op->context.slice.offsets[0] <<= log2_element_size;
size_t input_stride = normalized_input_shape[XNN_MAX_TENSOR_DIMS - 1];
size_t output_stride = normalized_output_shape[XNN_MAX_TENSOR_DIMS - 1];
for (size_t i = 1; i < XNN_MAX_TENSOR_DIMS; i++) {
slice_op->context.slice.input_stride[i - 1] = input_stride << log2_element_size;
slice_op->context.slice.output_stride[i - 1] = output_stride << log2_element_size;
input_stride *= normalized_input_shape[XNN_MAX_TENSOR_DIMS - 1 - i];
output_stride *= normalized_output_shape[XNN_MAX_TENSOR_DIMS - 1 - i];
}
slice_op->context.slice.contiguous_size = normalized_output_shape[XNN_MAX_TENSOR_DIMS - 1] << log2_element_size;
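  // Map the remaining normalized dimensions onto pthreadpool parallelism. The innermost
  // normalized dimension is always copied by the ukernel as one contiguous run, so 1-D and 2-D
  // slices both reduce to a single loop over rows, 3-D slices to a 2-D loop, and so on.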
switch (num_normalized_dims) {
case 1:
case 2:
slice_op->compute[0].type = xnn_parallelization_type_1d;
slice_op->compute[0].task_1d = (pthreadpool_task_1d_t)xnn_compute_slice_1d;
slice_op->compute[0].range[0] = normalized_output_shape[XNN_MAX_TENSOR_DIMS - 2];
break;
case 3:
slice_op->compute[0].type = xnn_parallelization_type_2d;
slice_op->compute[0].task_2d = (pthreadpool_task_2d_t) xnn_compute_slice_2d;
slice_op->compute[0].range[0] = normalized_output_shape[XNN_MAX_TENSOR_DIMS - 3];
slice_op->compute[0].range[1] = normalized_output_shape[XNN_MAX_TENSOR_DIMS - 2];
break;
case 4:
slice_op->compute[0].type = xnn_parallelization_type_3d;
slice_op->compute[0].task_3d = (pthreadpool_task_3d_t) xnn_compute_slice_3d;
slice_op->compute[0].range[0] = normalized_output_shape[XNN_MAX_TENSOR_DIMS - 4];
slice_op->compute[0].range[1] = normalized_output_shape[XNN_MAX_TENSOR_DIMS - 3];
slice_op->compute[0].range[2] = normalized_output_shape[XNN_MAX_TENSOR_DIMS - 2];
break;
case 5:
slice_op->compute[0].type = xnn_parallelization_type_4d;
slice_op->compute[0].task_4d = (pthreadpool_task_4d_t) xnn_compute_slice_4d;
slice_op->compute[0].range[0] = normalized_output_shape[XNN_MAX_TENSOR_DIMS - 5];
slice_op->compute[0].range[1] = normalized_output_shape[XNN_MAX_TENSOR_DIMS - 4];
slice_op->compute[0].range[2] = normalized_output_shape[XNN_MAX_TENSOR_DIMS - 3];
slice_op->compute[0].range[3] = normalized_output_shape[XNN_MAX_TENSOR_DIMS - 2];
break;
case 6:
// TODO(b/246969669): write normalized_output_shape in reverse order to simplify code here.
slice_op->compute[0].type = xnn_parallelization_type_5d;
slice_op->compute[0].task_5d = (pthreadpool_task_5d_t) xnn_compute_slice_5d;
slice_op->compute[0].range[0] = normalized_output_shape[XNN_MAX_TENSOR_DIMS - 6];
slice_op->compute[0].range[1] = normalized_output_shape[XNN_MAX_TENSOR_DIMS - 5];
slice_op->compute[0].range[2] = normalized_output_shape[XNN_MAX_TENSOR_DIMS - 4];
slice_op->compute[0].range[3] = normalized_output_shape[XNN_MAX_TENSOR_DIMS - 3];
slice_op->compute[0].range[4] = normalized_output_shape[XNN_MAX_TENSOR_DIMS - 2];
break;
default:
XNN_UNREACHABLE;
}
slice_op->state = xnn_run_state_needs_setup;
return xnn_status_success;
}
enum xnn_status xnn_reshape_slice_nd_x8(
xnn_operator_t slice_op,
size_t num_dims,
const size_t* input_shape,
const size_t* offsets,
const size_t* sizes,
pthreadpool_t threadpool)
{
return reshape_slice_nd(
slice_op, xnn_operator_type_slice_nd_x8,
num_dims, input_shape, offsets, sizes,
0 /* log2(element size) */,
pthreadpool_get_threads_count(threadpool));
}
enum xnn_status xnn_reshape_slice_nd_x16(
xnn_operator_t slice_op,
size_t num_dims,
const size_t* input_shape,
const size_t* offsets,
const size_t* sizes,
pthreadpool_t threadpool)
{
return reshape_slice_nd(
slice_op, xnn_operator_type_slice_nd_x16,
num_dims, input_shape, offsets, sizes,
1 /* log2(element size) */,
pthreadpool_get_threads_count(threadpool));
}
enum xnn_status xnn_reshape_slice_nd_x32(
xnn_operator_t slice_op,
size_t num_dims,
const size_t* input_shape,
const size_t* offsets,
const size_t* sizes,
pthreadpool_t threadpool)
{
return reshape_slice_nd(
slice_op, xnn_operator_type_slice_nd_x32,
num_dims, input_shape, offsets, sizes,
2 /* log2(element size) */,
pthreadpool_get_threads_count(threadpool));
}
static enum xnn_status setup_slice_nd(
xnn_operator_t slice_op,
enum xnn_operator_type expected_operator_type,
const void* input,
void* output)
{
if (slice_op->type != expected_operator_type) {
xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
xnn_operator_type_to_string(expected_operator_type),
xnn_operator_type_to_string(slice_op->type));
return xnn_status_invalid_parameter;
}
switch (slice_op->state) {
case xnn_run_state_skip:
return xnn_status_success;
case xnn_run_state_invalid:
xnn_log_error(
"failed to setup %s operator: operator has not been reshaped yet",
xnn_operator_type_to_string(slice_op->type));
return xnn_status_invalid_state;
case xnn_run_state_needs_setup:
// Operator has been reshaped, but not setup, continue with setup.
case xnn_run_state_ready:
// Operator has been reshaped, and we are setting up with different pointers.
break;
}
slice_op->context.slice.input = input;
slice_op->context.slice.output = output;
  // Pre-calculate the slice offsets into the input pointer.
  slice_op->context.slice.input =
    (void*) ((uintptr_t) slice_op->context.slice.input + slice_op->context.slice.offsets[0]);
for (size_t i = 1; i < slice_op->context.slice.num_normalized_dims; i++) {
slice_op->context.slice.input =
(void*) ((uintptr_t) slice_op->context.slice.input +
slice_op->context.slice.offsets[i] * slice_op->context.slice.input_stride[i-1]);
}
slice_op->state = xnn_run_state_ready;
return xnn_status_success;
}
enum xnn_status xnn_setup_slice_nd_x8(
xnn_operator_t slice_op,
const void* input,
void* output)
{
return setup_slice_nd(
slice_op, xnn_operator_type_slice_nd_x8,
input, output);
}
enum xnn_status xnn_setup_slice_nd_x16(
xnn_operator_t slice_op,
const void* input,
void* output)
{
return setup_slice_nd(
slice_op, xnn_operator_type_slice_nd_x16,
input, output);
}
enum xnn_status xnn_setup_slice_nd_x32(
xnn_operator_t slice_op,
const void* input,
void* output)
{
return setup_slice_nd(
slice_op, xnn_operator_type_slice_nd_x32,
input, output);
}
static enum xnn_status xnn_run_slice_nd(
enum xnn_operator_type operator_type,
size_t num_dims,
const size_t* input_shape,
const size_t* offsets,
const size_t* sizes,
const void* input,
void* output,
uint32_t log2_element_size,
uint32_t flags,
pthreadpool_t threadpool)
{
struct xnn_operator slice_op;
memset(&slice_op, 0, sizeof(slice_op));
const struct xnn_unary_elementwise_config* copy_config = xnn_init_xx_copy_config();
if (copy_config == NULL) {
xnn_log_error(
"failed to create %s operator: unsupported hardware configuration",
xnn_operator_type_to_string(operator_type));
return xnn_status_unsupported_hardware;
}
init_slice_nd(
flags,
operator_type,
copy_config,
&slice_op);
enum xnn_status status = reshape_slice_nd(
&slice_op, operator_type,
num_dims, input_shape, offsets, sizes,
log2_element_size,
pthreadpool_get_threads_count(threadpool));
if (status != xnn_status_success){
return status;
}
status = setup_slice_nd(
&slice_op, operator_type,
input, output);
if (status != xnn_status_success){
return status;
}
return xnn_run_operator(&slice_op, threadpool);
}
enum xnn_status xnn_run_slice_nd_x32(
size_t num_dims,
const size_t* input_shape,
const size_t* offsets,
const size_t* sizes,
const void* input,
void* output,
uint32_t flags,
pthreadpool_t threadpool)
{
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error(
"failed to create %s operator: XNNPACK is not initialized",
xnn_operator_type_to_string(xnn_operator_type_slice_nd_x32));
return xnn_status_uninitialized;
}
return xnn_run_slice_nd(
xnn_operator_type_slice_nd_x32,
num_dims, input_shape, offsets, sizes,
input, output,
2 /* log2(element size) */,
flags,
threadpool);
}
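// Illustrative one-shot use (a minimal sketch, assuming xnn_initialize() has succeeded; error
// handling omitted). This copies rows 1..2 and columns 2..5 of a 4x8 matrix of 32-bit elements:
//
//   const size_t shape[2] = { 4, 8 };
//   const size_t offsets[2] = { 1, 2 };
//   const size_t sizes[2] = { 2, 4 };
//   xnn_run_slice_nd_x32(2, shape, offsets, sizes, input, output, /*flags=*/0,
//                        /*threadpool=*/NULL);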
| 14,596 | 31.802247 | 114 | c |
XNNPACK | XNNPACK-master/src/operators/softmax-nc.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <fp16/fp16.h>
#include <xnnpack.h>
#include <xnnpack/allocator.h>
#include <xnnpack/config.h>
#include <xnnpack/operator.h>
#include <xnnpack/operator-type.h>
#include <xnnpack/log.h>
#include <xnnpack/microparams-init.h>
enum xnn_status xnn_create_softmax_nc_qu8(
size_t channels,
size_t input_stride,
size_t output_stride,
float input_scale,
uint8_t output_zero_point,
float output_scale,
uint32_t flags,
xnn_operator_t* softmax_op_out)
{
xnn_operator_t softmax_op = NULL;
enum xnn_status status = xnn_status_uninitialized;
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_qu8));
goto error;
}
status = xnn_status_invalid_parameter;
if (channels == 0) {
xnn_log_error(
"failed to create %s operator with %zu channels: number of channels must be non-zero",
      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_qu8), channels);
goto error;
}
if (input_stride < channels) {
xnn_log_error(
"failed to create %s operator with input element stride of %zu: "
"stride must be at least as large as the number of channels (%zu)",
      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_qu8), input_stride, channels);
goto error;
}
if (output_stride < channels) {
xnn_log_error(
"failed to create %s operator with output element stride of %zu: "
"stride must be at least as large as the number of channels (%zu)",
      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_qu8), output_stride, channels);
goto error;
}
if (input_scale <= 0.0f || !isnormal(input_scale)) {
xnn_log_error(
"failed to create %s operator with %.7g input scale: scale must be finite, normalized, and positive",
      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_qu8), input_scale);
goto error;
}
if (output_scale <= 0.0f || !isnormal(output_scale)) {
xnn_log_error(
"failed to create %s operator with %.7g output scale: scale must be finite, normalized, and positive",
      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_qu8), output_scale);
goto error;
}
status = xnn_status_unsupported_parameter;
if (output_scale != 0x1.0p-8f) {
xnn_log_error(
"failed to create %s operator with %.7g output scale: only output scale of 1/256 is supported",
      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_qu8), output_scale);
goto error;
}
if (output_zero_point != 0) {
xnn_log_error(
"failed to create %s operator with %" PRIu8 " output zero point: only output zero point of 0 is supported",
      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_qu8), output_zero_point);
goto error;
}
status = xnn_status_out_of_memory;
softmax_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
if (softmax_op == NULL) {
xnn_log_error(
"failed to allocate %zu bytes for %s operator descriptor",
      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_softmax_nc_qu8));
goto error;
}
softmax_op->lookup_table = xnn_allocate_simd_memory(256 * sizeof(uint32_t));
if (softmax_op->lookup_table == NULL) {
    xnn_log_error(
      "failed to allocate %zu bytes for %s operator lookup table",
      256 * sizeof(uint32_t), xnn_operator_type_to_string(xnn_operator_type_softmax_nc_qu8));
goto error;
}
uint32_t* lookup_table = softmax_op->lookup_table;
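  // Build the lookup table: entry i holds lrint(qscale * exp((i - 255) * input_scale)), a
  // fixed-point exponential relative to the row maximum. qscale is the smaller of
  // UINT32_MAX / channels, so that a per-row sum over up to `channels` entries cannot overflow
  // 32 bits, and 2**23 - 1, presumably a precision bound for the fixed-point normalization.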
const double qscale = fmin(((double) UINT32_MAX) / (double) channels, 8388607.0);
for (int32_t i = 0; i < 256; i++) {
const double scaled_exp_xi = qscale * exp((double) (i - 255) * (double) input_scale);
lookup_table[(uint32_t) i] = (uint32_t) lrint(scaled_exp_xi);
}
const struct xnn_lut32norm_config* lut32norm_config = xnn_init_u8_lut32norm_config();
assert(lut32norm_config != NULL);
const struct xnn_rmax_config* rmax_config = xnn_init_u8_rmax_config();
assert(rmax_config != NULL);
softmax_op->channels = channels;
softmax_op->input_pixel_stride = input_stride;
softmax_op->output_pixel_stride = output_stride;
softmax_op->type = xnn_operator_type_softmax_nc_qu8;
softmax_op->flags = flags;
softmax_op->lut32norm_config = lut32norm_config;
softmax_op->rmax_config = rmax_config;
softmax_op->state = xnn_run_state_invalid;
*softmax_op_out = softmax_op;
return xnn_status_success;
error:
xnn_delete_operator(softmax_op);
return status;
}
enum xnn_status xnn_reshape_softmax_nc_qu8(
xnn_operator_t softmax_op,
size_t batch_size,
pthreadpool_t threadpool)
{
if (softmax_op->type != xnn_operator_type_softmax_nc_qu8) {
xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
xnn_operator_type_to_string(xnn_operator_type_softmax_nc_qu8),
xnn_operator_type_to_string(softmax_op->type));
return xnn_status_invalid_parameter;
}
softmax_op->state = xnn_run_state_invalid;
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_qu8));
return xnn_status_uninitialized;
}
if (batch_size == 0) {
softmax_op->state = xnn_run_state_skip;
return xnn_status_success;
}
softmax_op->batch_size = batch_size;
softmax_op->context.u8_softmax = (struct u8_softmax_context) {
.n = softmax_op->channels,
.x_stride = softmax_op->input_pixel_stride * sizeof(uint8_t),
.t = softmax_op->lookup_table,
.y_stride = softmax_op->output_pixel_stride * sizeof(uint8_t),
.rmax_ukernel = softmax_op->rmax_config->rmax.u8,
.lut_norm_ukernel = softmax_op->lut32norm_config->lut32norm,
};
softmax_op->compute[0].type = xnn_parallelization_type_1d;
softmax_op->compute[0].task_1d = (pthreadpool_task_1d_t) xnn_compute_u8_softmax;
softmax_op->compute[0].range[0] = batch_size;
softmax_op->state = xnn_run_state_needs_setup;
return xnn_status_success;
}
enum xnn_status xnn_setup_softmax_nc_qu8(
xnn_operator_t softmax_op,
const uint8_t* input,
uint8_t* output)
{
if (softmax_op->type != xnn_operator_type_softmax_nc_qu8) {
xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
xnn_operator_type_to_string(xnn_operator_type_softmax_nc_qu8),
xnn_operator_type_to_string(softmax_op->type));
return xnn_status_invalid_parameter;
}
switch (softmax_op->state) {
case xnn_run_state_skip:
return xnn_status_success;
case xnn_run_state_invalid:
xnn_log_error(
"failed to setup %s operator: operator has not been reshaped yet",
xnn_operator_type_to_string(softmax_op->type));
return xnn_status_invalid_state;
case xnn_run_state_needs_setup:
// Operator has been reshaped, but not setup, continue with setup.
case xnn_run_state_ready:
// Operator has been reshaped, and we are setting up with different pointers.
break;
}
softmax_op->context.u8_softmax.x = input;
softmax_op->context.u8_softmax.y = output;
softmax_op->state = xnn_run_state_ready;
return xnn_status_success;
}
static enum xnn_status create_softmax_nc_floating_point(
size_t channels,
size_t input_stride,
size_t output_stride,
uint32_t flags,
const struct xnn_raddstoreexpminusmax_config* raddstoreexpminusmax_config,
const struct xnn_rmax_config* rmax_config,
const struct xnn_binary_elementwise_config* vmul_config,
enum xnn_operator_type operator_type,
xnn_operator_t* softmax_op_out)
{
xnn_operator_t softmax_op = NULL;
enum xnn_status status = xnn_status_uninitialized;
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
xnn_operator_type_to_string(operator_type));
goto error;
}
status = xnn_status_invalid_parameter;
if (channels == 0) {
xnn_log_error(
"failed to create %s operator with %zu channels: number of channels must be non-zero",
xnn_operator_type_to_string(operator_type), channels);
goto error;
}
if (input_stride < channels) {
xnn_log_error(
"failed to create %s operator with input element stride of %zu: "
"stride must be at least as large as the number of channels (%zu)",
xnn_operator_type_to_string(operator_type), input_stride, channels);
goto error;
}
if (output_stride < channels) {
xnn_log_error(
"failed to create %s operator with output element stride of %zu: "
"stride must be at least as large as the number of channels (%zu)",
xnn_operator_type_to_string(operator_type), output_stride, channels);
goto error;
}
status = xnn_status_out_of_memory;
softmax_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
if (softmax_op == NULL) {
xnn_log_error(
"failed to allocate %zu bytes for %s operator descriptor",
sizeof(struct xnn_operator), xnn_operator_type_to_string(operator_type));
goto error;
}
softmax_op->channels = channels;
softmax_op->input_pixel_stride = input_stride;
softmax_op->output_pixel_stride = output_stride;
softmax_op->type = operator_type;
softmax_op->flags = flags;
softmax_op->raddstoreexpminusmax_config = raddstoreexpminusmax_config;
softmax_op->rmax_config = rmax_config;
softmax_op->vmul_config = vmul_config;
softmax_op->state = xnn_run_state_invalid;
*softmax_op_out = softmax_op;
return xnn_status_success;
error:
xnn_delete_operator(softmax_op);
return status;
}
enum xnn_status xnn_create_softmax_nc_f16(
size_t channels,
size_t input_stride,
size_t output_stride,
uint32_t flags,
xnn_operator_t* softmax_op_out)
{
const struct xnn_raddstoreexpminusmax_config* raddstoreexpminusmax_config =
xnn_init_f16_raddstoreexpminusmax_config();
if (raddstoreexpminusmax_config == NULL) {
xnn_log_error("failed to create %s operator: unsupported hardware configuration",
xnn_operator_type_to_string(xnn_operator_type_softmax_nc_f16));
return xnn_status_unsupported_hardware;
}
const struct xnn_rmax_config* rmax_config = xnn_init_f16_rmax_config();
if (rmax_config == NULL) {
xnn_log_error("failed to create %s operator: unsupported hardware configuration",
xnn_operator_type_to_string(xnn_operator_type_softmax_nc_f16));
return xnn_status_unsupported_hardware;
}
const struct xnn_binary_elementwise_config* vmul_config = xnn_init_f16_vmul_config();
if (vmul_config == NULL) {
xnn_log_error(
"failed to create %s operator: unsupported hardware configuration",
      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_f16));
return xnn_status_unsupported_hardware;
}
return create_softmax_nc_floating_point(
channels, input_stride, output_stride,
flags,
raddstoreexpminusmax_config,
rmax_config,
vmul_config,
xnn_operator_type_softmax_nc_f16,
softmax_op_out);
}
enum xnn_status xnn_create_softmax_nc_f32(
size_t channels,
size_t input_stride,
size_t output_stride,
uint32_t flags,
xnn_operator_t* softmax_op_out)
{
const struct xnn_raddstoreexpminusmax_config* raddstoreexpminusmax_config =
xnn_init_f32_raddstoreexpminusmax_config();
if (raddstoreexpminusmax_config == NULL) {
xnn_log_error(
"failed to create %s operator: unsupported hardware configuration",
xnn_operator_type_to_string(xnn_operator_type_softmax_nc_f32));
return xnn_status_unsupported_hardware;
}
const struct xnn_rmax_config* rmax_config = xnn_init_f32_rmax_config();
if (rmax_config == NULL) {
xnn_log_error(
"failed to create %s operator: unsupported hardware configuration",
xnn_operator_type_to_string(xnn_operator_type_softmax_nc_f32));
return xnn_status_unsupported_hardware;
}
const struct xnn_binary_elementwise_config* vmul_config = xnn_init_f32_vmul_config();
if (vmul_config == NULL) {
xnn_log_error(
"failed to create %s operator: unsupported hardware configuration",
      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_f32));
return xnn_status_unsupported_hardware;
}
return create_softmax_nc_floating_point(
channels, input_stride, output_stride,
flags,
raddstoreexpminusmax_config,
rmax_config,
vmul_config,
xnn_operator_type_softmax_nc_f32,
softmax_op_out);
}
static enum xnn_status reshape_softmax_nc_floating_point(
xnn_operator_t softmax_op,
enum xnn_operator_type expected_operator_type,
size_t batch_size,
uint32_t log2_element_size,
xnn_rmax_ukernel_fn rmax,
const struct xnn_raddstoreexpminusmax_config raddstoreexpminusmax[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_binary_elementwise_config* vmul,
xnn_compute_reciprocal_fn compute_reciprocal,
const void* expminus_params,
size_t expminus_params_size,
const void* minmax_params,
size_t minmax_params_size)
{
if (vmul == NULL) {
return xnn_status_unsupported_hardware;
}
if (softmax_op->type != expected_operator_type) {
xnn_log_error("failed to reshape operator: operator type mismatch (expected %s, got %s)",
xnn_operator_type_to_string(expected_operator_type),
xnn_operator_type_to_string(softmax_op->type));
return xnn_status_invalid_parameter;
}
softmax_op->state = xnn_run_state_invalid;
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error("failed to reshape %s operator: XNNPACK is not initialized",
xnn_operator_type_to_string(expected_operator_type));
return xnn_status_uninitialized;
}
if (batch_size == 0) {
softmax_op->state = xnn_run_state_skip;
return xnn_status_success;
}
softmax_op->batch_size = batch_size;
softmax_op->context.floating_point_softmax = (struct floating_point_softmax_context) {
.n = softmax_op->channels << log2_element_size,
.x_stride = softmax_op->input_pixel_stride << log2_element_size,
.y_stride = softmax_op->output_pixel_stride << log2_element_size,
.rmax_ukernel = rmax,
.raddstoreexpminusmax_ukernel = raddstoreexpminusmax->ukernel,
.compute_reciprocal = compute_reciprocal,
.vmulc_ukernel = vmul->minmax.opc_ukernel,
};
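  // Prefer the linear (unclamped) multiply-by-scalar microkernel when the configuration
  // provides one: softmax outputs already lie in [0, 1], so no min/max clamping is required.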
  if (vmul->linear.opc_ukernel != NULL) {
    softmax_op->context.floating_point_softmax.vmulc_ukernel = vmul->linear.opc_ukernel;
  }
memcpy(&softmax_op->context.floating_point_softmax.expminus_params, expminus_params, expminus_params_size);
memcpy(&softmax_op->context.floating_point_softmax.minmax_params, minmax_params, minmax_params_size);
softmax_op->compute[0].type = xnn_parallelization_type_1d;
softmax_op->compute[0].task_1d = (pthreadpool_task_1d_t) xnn_compute_floating_point_softmax;
softmax_op->compute[0].range[0] = batch_size;
softmax_op->state = xnn_run_state_needs_setup;
return xnn_status_success;
}
static enum xnn_status setup_softmax_nc_floating_point(
xnn_operator_t softmax_op,
enum xnn_operator_type expected_operator_type,
const void* input,
void* output)
{
if (softmax_op->type != expected_operator_type) {
xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
xnn_operator_type_to_string(expected_operator_type),
xnn_operator_type_to_string(softmax_op->type));
return xnn_status_invalid_parameter;
}
switch (softmax_op->state) {
case xnn_run_state_skip:
return xnn_status_success;
case xnn_run_state_invalid:
xnn_log_error(
"failed to setup %s operator: operator has not been reshaped yet",
xnn_operator_type_to_string(softmax_op->type));
return xnn_status_invalid_state;
case xnn_run_state_needs_setup:
// Operator has been reshaped, but not setup, continue with setup.
case xnn_run_state_ready:
// Operator has been reshaped, and we are setting up with different pointers.
break;
}
softmax_op->context.floating_point_softmax.x = input;
softmax_op->context.floating_point_softmax.y = output;
softmax_op->state = xnn_run_state_ready;
return xnn_status_success;
}
static void compute_reciprocal_f16(
const uint16_t input[XNN_MIN_ELEMENTS(1)],
uint16_t output[XNN_MIN_ELEMENTS(1)])
{
*output = fp16_ieee_from_fp32_value(1.0f / fp16_ieee_to_fp32_value(*input));
}
enum xnn_status xnn_setup_softmax_nc_f16(
xnn_operator_t softmax_op,
const void* input,
void* output)
{
return setup_softmax_nc_floating_point(
softmax_op, xnn_operator_type_softmax_nc_f16,
input, output);
}
static void compute_reciprocal_f32(
const float input[XNN_MIN_ELEMENTS(1)],
float output[XNN_MIN_ELEMENTS(1)])
{
*output = 1.0f / *input;
}
enum xnn_status xnn_setup_softmax_nc_f32(
xnn_operator_t softmax_op,
const float* input,
float* output)
{
return setup_softmax_nc_floating_point(
softmax_op, xnn_operator_type_softmax_nc_f32,
input, output);
}
enum xnn_status xnn_reshape_softmax_nc_f16(
xnn_operator_t softmax_op,
size_t batch_size,
pthreadpool_t threadpool)
{
union xnn_f16_expminus_params expminus_params;
if (softmax_op->raddstoreexpminusmax_config->init.f16 != NULL) {
softmax_op->raddstoreexpminusmax_config->init.f16(&expminus_params);
}
const struct xnn_binary_elementwise_config* f16_vmul_config = softmax_op->vmul_config;
union xnn_f16_minmax_params minmax_params;
if (f16_vmul_config->init.f16_minmax != NULL) {
f16_vmul_config->init.f16_minmax(&minmax_params, UINT16_C(0xFC00), UINT16_C(0x7C00));
}
return reshape_softmax_nc_floating_point(
softmax_op, xnn_operator_type_softmax_nc_f16,
batch_size,
/*log2_element_size=*/XNN_LOG2_SIZEOF_HALF,
softmax_op->rmax_config->rmax.f16, softmax_op->raddstoreexpminusmax_config, f16_vmul_config,
(xnn_compute_reciprocal_fn) compute_reciprocal_f16,
&expminus_params, sizeof(expminus_params),
&minmax_params, sizeof(minmax_params));
}
enum xnn_status xnn_reshape_softmax_nc_f32(
xnn_operator_t softmax_op,
size_t batch_size,
pthreadpool_t threadpool)
{
const struct xnn_binary_elementwise_config* f32_vmul_config = softmax_op->vmul_config;
union xnn_f32_expminus_params expminus_params;
if (softmax_op->raddstoreexpminusmax_config->init.f32 != NULL) {
softmax_op->raddstoreexpminusmax_config->init.f32(&expminus_params);
}
union xnn_f32_minmax_params minmax_params;
if (f32_vmul_config->init.f32_minmax != NULL) {
f32_vmul_config->init.f32_minmax(&minmax_params, -INFINITY, INFINITY);
}
return reshape_softmax_nc_floating_point(
softmax_op, xnn_operator_type_softmax_nc_f32,
batch_size,
/*log2_element_size=*/XNN_LOG2_SIZEOF_FLOAT,
    softmax_op->rmax_config->rmax.f32, softmax_op->raddstoreexpminusmax_config, f32_vmul_config,
(xnn_compute_reciprocal_fn) compute_reciprocal_f32,
&expminus_params, sizeof(expminus_params),
&minmax_params, sizeof(minmax_params));
}
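// Illustrative use of the F32 softmax operator (a minimal sketch, assuming xnn_initialize()
// has succeeded; error handling omitted):
//
//   xnn_operator_t softmax = NULL;
//   xnn_create_softmax_nc_f32(/*channels=*/channels, /*input_stride=*/channels,
//                             /*output_stride=*/channels, /*flags=*/0, &softmax);
//   xnn_reshape_softmax_nc_f32(softmax, batch_size, /*threadpool=*/NULL);
//   xnn_setup_softmax_nc_f32(softmax, input, output);
//   xnn_run_operator(softmax, /*threadpool=*/NULL);
//   xnn_delete_operator(softmax);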
| 19,442 | 33.657754 | 113 | c |
XNNPACK | XNNPACK-master/src/operators/unpooling-nhwc.c |
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <xnnpack.h>
#include <xnnpack/allocator.h>
#include <xnnpack/config.h>
#include <xnnpack/operator.h>
#include <xnnpack/operator-utils.h>
#include <xnnpack/operator-type.h>
#include <xnnpack/log.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/params.h>
#include <xnnpack/indirection.h>
enum xnn_status xnn_create_unpooling2d_nhwc_x32(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t pooling_height,
uint32_t pooling_width,
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
uint32_t flags,
xnn_operator_t* unpooling_op_out)
{
xnn_operator_t unpooling_op = NULL;
enum xnn_status status = xnn_status_uninitialized;
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32));
goto error;
}
status = xnn_status_invalid_parameter;
const uint32_t pooling_size = pooling_height * pooling_width;
if (pooling_size == 0) {
xnn_log_error(
"failed to create %s operator with %" PRIu32 "x%" PRIu32 " pooling size: "
"pooling size dimensions must be non-zero",
xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32), pooling_width, pooling_height);
goto error;
}
if (pooling_size == 1) {
xnn_log_error(
"failed to create %s operator with 1 pooling element: 1x1 unpooling is meaningless",
xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32));
goto error;
}
if (channels == 0) {
xnn_log_error(
"failed to create %s operator with %zu channels: number of channels must be non-zero",
xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32), channels);
goto error;
}
if (input_pixel_stride < channels) {
xnn_log_error(
"failed to create %s operator with input pixel stride of %zu: "
"stride must be at least as large as the number of channels (%zu)",
xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32), input_pixel_stride, channels);
goto error;
}
if (output_pixel_stride < channels) {
xnn_log_error(
"failed to create %s operator with output pixel stride of %zu: "
"stride must be at least as large as the number of channels (%zu)",
xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32), output_pixel_stride, channels);
goto error;
}
status = xnn_status_out_of_memory;
unpooling_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
if (unpooling_op == NULL) {
xnn_log_error(
"failed to allocate %zu bytes for %s operator descriptor",
sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32));
goto error;
}
const struct xnn_unpool_config* unpool_config = xnn_init_x32_unpool_config();
if (unpool_config == NULL) {
xnn_log_error(
"failed to create %s operator: unsupported hardware configuration",
xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32));
    status = xnn_status_unsupported_hardware;
    goto error;
}
unpooling_op->padding_top = input_padding_top;
unpooling_op->padding_right = input_padding_right;
unpooling_op->padding_bottom = input_padding_bottom;
unpooling_op->padding_left = input_padding_left;
unpooling_op->kernel_height = pooling_height;
unpooling_op->kernel_width = pooling_width;
unpooling_op->channels = channels;
unpooling_op->input_pixel_stride = input_pixel_stride;
unpooling_op->output_pixel_stride = output_pixel_stride;
unpooling_op->type = xnn_operator_type_unpooling_nhwc_x32;
unpooling_op->flags = flags;
unpooling_op->unpool_config = unpool_config;
unpooling_op->state = xnn_run_state_invalid;
*unpooling_op_out = unpooling_op;
return xnn_status_success;
error:
xnn_delete_operator(unpooling_op);
return status;
}
enum xnn_status xnn_reshape_unpooling2d_nhwc_x32(
xnn_operator_t unpooling_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool)
{
if (unpooling_op->type != xnn_operator_type_unpooling_nhwc_x32) {
xnn_log_error("failed to reshape operator: operator type mismatch (expected %s, got %s)",
xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32),
xnn_operator_type_to_string(unpooling_op->type));
return xnn_status_invalid_parameter;
}
unpooling_op->state = xnn_run_state_invalid;
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error("failed to reshape %s operator: XNNPACK is not initialized",
xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32));
return xnn_status_uninitialized;
}
if (input_width == 0 || input_height == 0) {
xnn_log_error(
"failed to reshape %s operator with %zux%zu input: input dimensions must be non-zero",
xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32), input_width, input_height);
return xnn_status_invalid_parameter;
}
if (batch_size == 0) {
unpooling_op->state = xnn_run_state_skip;
return xnn_status_success;
}
unpooling_op->batch_size = batch_size;
unpooling_op->input_height = input_height;
unpooling_op->input_width = input_width;
unpooling_op->output_height = xnn_compute_unpooling_output_dimension(
input_height, unpooling_op->padding_top + unpooling_op->padding_bottom,
unpooling_op->kernel_height);
unpooling_op->output_width = xnn_compute_unpooling_output_dimension(
input_width, unpooling_op->padding_left + unpooling_op->padding_right,
unpooling_op->kernel_width);
if (output_height_out != NULL) {
*output_height_out = unpooling_op->output_height;
}
if (output_width_out != NULL) {
*output_width_out = unpooling_op->output_width;
}
  // Initialize indirection buffers against the previous output pointer (a dummy). Because of the
  // valid_batch_size optimization, entries for already-initialized batches are not rebuilt, so new
  // entries must use the same base pointer; setup later rebases every entry to the real output.
unpooling_op->output = unpooling_op->last_output;
size_t valid_batch_size = 0;
if (input_height == unpooling_op->last_input_height &&
input_width == unpooling_op->last_input_width)
{
valid_batch_size = unpooling_op->valid_batch_size;
if (batch_size <= valid_batch_size) {
unpooling_op->compute[0].range[0] = batch_size * input_height;
unpooling_op->state = xnn_run_state_needs_setup;
return xnn_status_success;
}
}
const size_t pooling_height = unpooling_op->kernel_height;
const size_t pooling_width = unpooling_op->kernel_width;
const size_t pooling_size = pooling_height * pooling_width;
const size_t indirection_buffer_size = sizeof(void*) * (batch_size * input_height * input_width * pooling_size);
const void** indirection_buffer = (const void**) xnn_reallocate_memory(unpooling_op->indirection_buffer, indirection_buffer_size);
if (indirection_buffer == NULL) {
xnn_log_error(
"failed to allocate %zu bytes for %s operator indirection buffer",
indirection_buffer_size, xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32));
return xnn_status_out_of_memory;
}
unpooling_op->indirection_buffer = indirection_buffer;
xnn_log_debug("allocated %zu bytes for indirection buffer in %s operator",
indirection_buffer_size, xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32));
xnn_indirection_init_unpool2d(unpooling_op, valid_batch_size, /*log2_element_size=*/XNN_LOG2_SIZEOF_FLOAT);
const size_t channels = unpooling_op->channels;
const size_t input_pixel_stride_in_bytes = unpooling_op->input_pixel_stride * sizeof(float);
unpooling_op->context.unpooling = (struct unpooling_context) {
.input_height_stride = input_width * input_pixel_stride_in_bytes,
.input_width_stride = input_pixel_stride_in_bytes,
.index_height_stride = input_width * channels * sizeof(uint32_t),
.index_width_stride = channels * sizeof(uint32_t),
.indirect_output = indirection_buffer,
.indirect_output_height_stride = input_width * pooling_size * sizeof(void*),
.indirect_output_width_stride = pooling_size * sizeof(void*),
.pooling_size = pooling_size,
.channels = channels,
.fill_value = 0,
.ukernel = unpooling_op->unpool_config->unpool,
};
unpooling_op->compute[0].type = xnn_parallelization_type_2d;
unpooling_op->compute[0].task_2d = (pthreadpool_task_2d_t) xnn_compute_unpooling;
unpooling_op->compute[0].range[0] = batch_size * input_height;
unpooling_op->compute[0].range[1] = input_width;
unpooling_op->state = xnn_run_state_needs_setup;
unpooling_op->last_input_height = input_height;
unpooling_op->last_input_width = input_width;
unpooling_op->valid_batch_size = max(valid_batch_size, batch_size);
return xnn_status_success;
}
enum xnn_status xnn_setup_unpooling2d_nhwc_x32(
xnn_operator_t unpooling_op,
const void* input,
const uint32_t* index,
void* output)
{
if (unpooling_op->type != xnn_operator_type_unpooling_nhwc_x32) {
xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32),
xnn_operator_type_to_string(unpooling_op->type));
return xnn_status_invalid_parameter;
}
switch (unpooling_op->state) {
case xnn_run_state_skip:
return xnn_status_success;
case xnn_run_state_invalid:
xnn_log_error(
"failed to setup %s operator: operator has not been reshaped yet",
xnn_operator_type_to_string(unpooling_op->type));
return xnn_status_invalid_state;
case xnn_run_state_needs_setup:
    // Operator has been reshaped, but not set up; continue with setup.
case xnn_run_state_ready:
// Operator has been reshaped, and we are setting up with different pointers.
break;
}
const size_t pooling_height = unpooling_op->kernel_height;
const size_t pooling_width = unpooling_op->kernel_width;
const size_t pooling_size = pooling_height * pooling_width;
const size_t batch_size = unpooling_op->valid_batch_size;
const size_t input_height = unpooling_op->input_height;
const size_t input_width = unpooling_op->input_width;
const size_t indirection_buffer_num_elements = batch_size * input_height * input_width * pooling_size;
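  // The indirection buffer was built against last_output; rather than re-initializing it,
  // rebase every entry by the delta between the new and the previous output pointer.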
for (size_t i = 0; i < indirection_buffer_num_elements; i++) {
unpooling_op->context.unpooling.indirect_output[i] =
(void*) ((uintptr_t) unpooling_op->context.unpooling.indirect_output[i] +
((uintptr_t) output - (uintptr_t) unpooling_op->last_output));
}
unpooling_op->context.unpooling.input = input;
unpooling_op->context.unpooling.index = index;
unpooling_op->state = xnn_run_state_ready;
unpooling_op->last_output = output;
return xnn_status_success;
}
| 11,335 | 36.913043 | 132 | c |
| XNNPACK | XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-1x16c4-minmax-neondot.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_1x16c4__neondot(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int32x4_t vzp0 = vdupq_n_s32(quantization_params[0].zero_point);
// Loop over groups of 16 columns.
do {
// Initialize accumulators with bias. 16 bias values are loaded from the
    // weight matrix at the start of the group of 16 columns.
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;
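    // The int32 values packed ahead of the int8 weights are per-column sum terms; scaling them by
    // the activation zero point folds the dynamic-quantization zero-point correction into the accumulators.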
vacc0x0123 = vmulq_s32(vacc0x0123, vzp0);
vacc0x4567 = vmulq_s32(vacc0x4567, vzp0);
vacc0x89AB = vmulq_s32(vacc0x89AB, vzp0);
vacc0xCDEF = vmulq_s32(vacc0xCDEF, vzp0);
    // Inner accumulation loop along the K dimension for this group of 16 columns.
size_t k = kc;
    // 2x partially unrolled loop to load 8 bytes at a time.
while (k >= 8 * sizeof(int8_t)) {
// Load a 1x8 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;
      // Load an 8x16 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 1x8 * 8x16 --> 1x16.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0);
vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb4567x89AB, va0x01234567, 1);
vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
k -= 8 * sizeof(int8_t);
}
    // Handle up to 4 final positions of `k`.
if XNN_UNLIKELY(k != 0) {
// Load a 1x4 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 4;
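      // vld1_s8 reads 8 bytes but only the low 4 lanes feed the dot products below (lane 0);
      // the 4-byte overread is permitted by the XNN_OOB_READS annotation on this kernel.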
// Load a 4x16 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 1x4 * 4x16 --> 1x16.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0);
vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
}
float32x4_t vout0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vout0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vout0x89AB = vcvtq_f32_s32(vacc0x89AB);
float32x4_t vout0xCDEF = vcvtq_f32_s32(vacc0xCDEF);
const float32x4_t vscale0 = vdupq_n_f32(quantization_params[0].scale);
vout0x0123 = vmulq_f32(vout0x0123, vscale0);
vout0x4567 = vmulq_f32(vout0x4567, vscale0);
vout0x89AB = vmulq_f32(vout0x89AB, vscale0);
vout0xCDEF = vmulq_f32(vout0xCDEF, vscale0);
const float32x4_t vbias0123 = vld1q_f32(w); w = (const float*) w + 4;
const float32x4_t vbias4567 = vld1q_f32(w); w = (const float*) w + 4;
const float32x4_t vbias89AB = vld1q_f32(w); w = (const float*) w + 4;
const float32x4_t vbiasCDEF = vld1q_f32(w); w = (const float*) w + 4;
vout0x0123 = vaddq_f32(vout0x0123, vbias0123);
vout0x4567 = vaddq_f32(vout0x4567, vbias4567);
vout0x89AB = vaddq_f32(vout0x89AB, vbias89AB);
vout0xCDEF = vaddq_f32(vout0xCDEF, vbiasCDEF);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vout0x0123 = vmaxq_f32(vout0x0123, vmin);
vout0x0123 = vminq_f32(vout0x0123, vmax);
vout0x4567 = vmaxq_f32(vout0x4567, vmin);
vout0x4567 = vminq_f32(vout0x4567, vmax);
vout0x89AB = vmaxq_f32(vout0x89AB, vmin);
vout0x89AB = vminq_f32(vout0x89AB, vmax);
vout0xCDEF = vmaxq_f32(vout0xCDEF, vmin);
vout0xCDEF = vminq_f32(vout0xCDEF, vmax);
if XNN_LIKELY(nc >= 16) {
vst1q_f32(&c0[0], vout0x0123);
vst1q_f32(&c0[4], vout0x4567);
vst1q_f32(&c0[8], vout0x89AB);
vst1q_f32(&c0[12], vout0xCDEF);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
nc -= 16;
} else {
if (nc & 8) {
vst1q_f32(c0, vout0x0123); c0 += 4;
vout0x0123 = vout0x89AB;
vst1q_f32(c0, vout0x4567); c0 += 4;
vout0x4567 = vout0xCDEF;
}
if (nc & 4) {
vst1q_f32(c0, vout0x0123); c0 += 4;
vout0x0123 = vout0x4567;
}
float32x2_t vout0x01 = vget_low_f32(vout0x0123);
if (nc & 2) {
vst1_f32(c0, vout0x01); c0 += 2;
vout0x01 = vget_high_f32(vout0x0123);
}
if (nc & 1) {
vst1_lane_f32(c0, vout0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 6,671 | 38.714286 | 109 | c |
| XNNPACK | XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-1x2-minmax-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_1x2__scalar(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
float* c0 = c;
const int32_t vzp0 = quantization_params[0].zero_point;
do {
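    // Per group of 2 columns, the packed weight blob holds: 2 int32 zero-point-correction terms,
    // kc int8 weights (2 per k step), and 2 float biases consumed after the accumulation loop.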
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
w = (const int32_t*) w + 2;
vacc0x0 *= vzp0;
vacc0x1 *= vzp0;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
w = (const int8_t*) w + 2;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
k -= sizeof(int8_t);
} while (k != 0);
float vout0x0 = (float) vacc0x0;
float vout0x1 = (float) vacc0x1;
const float vscale0 = quantization_params[0].scale;
vout0x0 *= vscale0;
vout0x1 *= vscale0;
const float vbias0 = unaligned_indexed_load_f32(w, 0);
const float vbias1 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
const float vmax = params->scalar.max;
const float vmin = params->scalar.min;
vout0x0 += vbias0;
vout0x0 = math_max_f32(vout0x0, vmin);
vout0x0 = math_min_f32(vout0x0, vmax);
vout0x1 += vbias1;
vout0x1 = math_max_f32(vout0x1, vmin);
vout0x1 = math_min_f32(vout0x1, vmax);
if XNN_LIKELY(nc >= 2) {
c0[0] = vout0x0;
c0[1] = vout0x1;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
nc -= 2;
} else {
if (nc & 1) {
c0[0] = (float) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 2,445 | 24.747368 | 95 | c |
| XNNPACK | XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-1x4-minmax-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_1x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
float* c0 = c;
const int32_t vzp0 = quantization_params[0].zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
w = (const int32_t*) w + 4;
vacc0x0 *= vzp0;
vacc0x1 *= vzp0;
vacc0x2 *= vzp0;
vacc0x3 *= vzp0;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
k -= sizeof(int8_t);
} while (k != 0);
float vout0x0 = (float) vacc0x0;
float vout0x1 = (float) vacc0x1;
float vout0x2 = (float) vacc0x2;
float vout0x3 = (float) vacc0x3;
const float vscale0 = quantization_params[0].scale;
vout0x0 *= vscale0;
vout0x1 *= vscale0;
vout0x2 *= vscale0;
vout0x3 *= vscale0;
const float vbias0 = ((const float*) w)[0];
const float vbias1 = ((const float*) w)[1];
const float vbias2 = ((const float*) w)[2];
const float vbias3 = ((const float*) w)[3];
w = (const float*) w + 4;
const float vmax = params->scalar.max;
const float vmin = params->scalar.min;
vout0x0 += vbias0;
vout0x0 = math_max_f32(vout0x0, vmin);
vout0x0 = math_min_f32(vout0x0, vmax);
vout0x1 += vbias1;
vout0x1 = math_max_f32(vout0x1, vmin);
vout0x1 = math_min_f32(vout0x1, vmax);
vout0x2 += vbias2;
vout0x2 = math_max_f32(vout0x2, vmin);
vout0x2 = math_min_f32(vout0x2, vmax);
vout0x3 += vbias3;
vout0x3 = math_max_f32(vout0x3, vmin);
vout0x3 = math_min_f32(vout0x3, vmax);
if XNN_LIKELY(nc >= 4) {
c0[0] = vout0x0;
c0[1] = vout0x1;
c0[2] = vout0x2;
c0[3] = vout0x3;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = (float) vout0x0;
c0[1] = (float) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c0[0] = (float) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 3,306 | 26.106557 | 95 | c |
| XNNPACK | XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-1x4c8-minmax-avx-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_1x4c8__avx_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_cvtsi32_si128((int) quantization_params[0].zero_point);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
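    // Keep the ksum*zp term in a distinct 32-bit lane of each accumulator (lane 0, 1, 2, 3);
    // the _mm_hadd_epi32 reduction below then lands each term in its matching output column.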
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
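    // Pairwise horizontal adds reduce the four per-column accumulators to a single vector:
    // lane n of vacc0x0123 ends up holding the complete dot product for output column n.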
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0 = _mm_broadcast_ss(&quantization_params[0].scale);
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 3,884 | 31.107438 | 109 | c |
| XNNPACK | XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-1x4c8-minmax-avx-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_1x4c8__avx_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_cvtsi32_si128((int) quantization_params[0].zero_point);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0 = _mm_broadcast_ss(&quantization_params[0].scale);
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 4,002 | 31.024 | 109 | c |
| XNNPACK | XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-1x4c8-minmax-sse2-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_1x4c8__sse2_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_cvtsi32_si128((int) quantization_params[0].zero_point);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
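    // SSE2 lacks _mm_mullo_epi32, so the 32-bit ksum * zp products are emulated with 16-bit
    // multiplies: mullo/mulhi on the split halves plus a sign correction for negative zero points.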
const __m128i vksum_lo = _mm_srli_epi32(_mm_slli_epi32(vksum, 16), 16);
const __m128i vksum_hi = _mm_srli_epi32(vksum, 16);
__m128i vzpprodksumhi0 = _mm_mulhi_epu16(vzp0, vksum_lo);
const __m128i vzpprodksumlo0 = _mm_mullo_epi16(vzp0, vksum_lo);
vzpprodksumhi0 = _mm_add_epi16(vzpprodksumhi0, _mm_mullo_epi16(vzp0, vksum_hi));
vzpprodksumhi0 = _mm_sub_epi16(vzpprodksumhi0, _mm_and_si128(_mm_srai_epi16(vzp0, 15), vksum_lo));
vzpprodksumhi0 = _mm_slli_si128(vzpprodksumhi0, 2);
const __m128i vksumzp0 = _mm_or_si128(vzpprodksumhi0, vzpprodksumlo0);
const __m128i vksum010 = _mm_unpacklo_epi32(vksumzp0, vzero);
const __m128i vksum230 = _mm_unpackhi_epi32(vksumzp0, vzero);
__m128i vacc0x0 = _mm_unpacklo_epi64(vksum010, vzero);
__m128i vacc0x1 = _mm_unpackhi_epi64(vksum010, vzero);
__m128i vacc0x2 = _mm_unpacklo_epi64(vksum230, vzero);
__m128i vacc0x3 = _mm_unpackhi_epi64(vksum230, vzero);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
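      // Sign-extend int8 to int16 the SSE2 way: duplicate each byte with unpack, then
      // arithmetic-shift right by 8 (SSE2 has no _mm_cvtepi8_epi16).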
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
__m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0 = _mm_load1_ps(&quantization_params[0].scale);
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 4,794 | 34.518519 | 119 | c |
| XNNPACK | XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-1x4c8-minmax-sse2-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_1x4c8__sse2_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_cvtsi32_si128((int) quantization_params[0].zero_point);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vksum_lo = _mm_srli_epi32(_mm_slli_epi32(vksum, 16), 16);
const __m128i vksum_hi = _mm_srli_epi32(vksum, 16);
__m128i vzpprodksumhi0 = _mm_mulhi_epu16(vzp0, vksum_lo);
const __m128i vzpprodksumlo0 = _mm_mullo_epi16(vzp0, vksum_lo);
vzpprodksumhi0 = _mm_add_epi16(vzpprodksumhi0, _mm_mullo_epi16(vzp0, vksum_hi));
vzpprodksumhi0 = _mm_sub_epi16(vzpprodksumhi0, _mm_and_si128(_mm_srai_epi16(vzp0, 15), vksum_lo));
vzpprodksumhi0 = _mm_slli_si128(vzpprodksumhi0, 2);
const __m128i vksumzp0 = _mm_or_si128(vzpprodksumhi0, vzpprodksumlo0);
const __m128i vksum010 = _mm_unpacklo_epi32(vksumzp0, vzero);
const __m128i vksum230 = _mm_unpackhi_epi32(vksumzp0, vzero);
__m128i vacc0x0 = _mm_unpacklo_epi64(vksum010, vzero);
__m128i vacc0x1 = _mm_unpackhi_epi64(vksum010, vzero);
__m128i vacc0x2 = _mm_unpacklo_epi64(vksum230, vzero);
__m128i vacc0x3 = _mm_unpackhi_epi64(vksum230, vzero);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
__m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0 = _mm_load1_ps(&quantization_params[0].scale);
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 4,888 | 34.686131 | 119 | c |
| XNNPACK | XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-1x4c8-minmax-sse41-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_1x4c8__sse41_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_cvtsi32_si128((int) quantization_params[0].zero_point);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0 = _mm_load1_ps(&quantization_params[0].scale);
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 3,829 | 31.735043 | 109 | c |
| XNNPACK | XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-1x4c8-minmax-sse41-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_1x4c8__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_cvtsi32_si128((int) quantization_params[0].zero_point);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0 = _mm_load1_ps(&quantization_params[0].scale);
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 3,947 | 31.628099 | 109 | c |
| XNNPACK | XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-1x4c8-minmax-xop-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_1x4c8__xop_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_cvtsi32_si128((int) quantization_params[0].zero_point);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
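      // XOP's _mm_maddd_epi16 fuses the SSE pair _mm_add_epi32(acc, _mm_madd_epi16(a, b))
      // into a single multiply-add-accumulate instruction.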
vacc0x0 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0);
vacc0x1 = _mm_maddd_epi16(vxa0, vxb1, vacc0x1);
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x2 = _mm_maddd_epi16(vxa0, vxb2, vacc0x2);
vacc0x3 = _mm_maddd_epi16(vxa0, vxb3, vacc0x3);
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0 = _mm_broadcast_ss(&quantization_params[0].scale);
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 3,828 | 30.644628 | 109 | c |
| XNNPACK | XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-1x4c8-minmax-xop-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_1x4c8__xop_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_cvtsi32_si128((int) quantization_params[0].zero_point);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0);
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x1 = _mm_maddd_epi16(vxa0, vxb1, vacc0x1);
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x2 = _mm_maddd_epi16(vxa0, vxb2, vacc0x2);
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x3 = _mm_maddd_epi16(vxa0, vxb3, vacc0x3);
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0 = _mm_broadcast_ss(&quantization_params[0].scale);
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 3,946 | 30.576 | 109 | c |
| XNNPACK | XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-1x8-minmax-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_1x8__scalar(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
float* c0 = c;
const int32_t vzp0 = quantization_params[0].zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
int32_t vacc0x4 = ((const int32_t*) w)[4];
int32_t vacc0x5 = ((const int32_t*) w)[5];
int32_t vacc0x6 = ((const int32_t*) w)[6];
int32_t vacc0x7 = ((const int32_t*) w)[7];
w = (const int32_t*) w + 8;
vacc0x0 *= vzp0;
vacc0x1 *= vzp0;
vacc0x2 *= vzp0;
vacc0x3 *= vzp0;
vacc0x4 *= vzp0;
vacc0x5 *= vzp0;
vacc0x6 *= vzp0;
vacc0x7 *= vzp0;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
const int32_t vb4 = (int32_t) ((const int8_t*) w)[4];
const int32_t vb5 = (int32_t) ((const int8_t*) w)[5];
const int32_t vb6 = (int32_t) ((const int8_t*) w)[6];
const int32_t vb7 = (int32_t) ((const int8_t*) w)[7];
w = (const int8_t*) w + 8;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
vacc0x4 += va0 * vb4;
vacc0x5 += va0 * vb5;
vacc0x6 += va0 * vb6;
vacc0x7 += va0 * vb7;
k -= sizeof(int8_t);
} while (k != 0);
float vout0x0 = (float) vacc0x0;
float vout0x1 = (float) vacc0x1;
float vout0x2 = (float) vacc0x2;
float vout0x3 = (float) vacc0x3;
float vout0x4 = (float) vacc0x4;
float vout0x5 = (float) vacc0x5;
float vout0x6 = (float) vacc0x6;
float vout0x7 = (float) vacc0x7;
const float vscale0 = quantization_params[0].scale;
vout0x0 *= vscale0;
vout0x1 *= vscale0;
vout0x2 *= vscale0;
vout0x3 *= vscale0;
vout0x4 *= vscale0;
vout0x5 *= vscale0;
vout0x6 *= vscale0;
vout0x7 *= vscale0;
const float vbias0 = ((const float*) w)[0];
const float vbias1 = ((const float*) w)[1];
const float vbias2 = ((const float*) w)[2];
const float vbias3 = ((const float*) w)[3];
const float vbias4 = ((const float*) w)[4];
const float vbias5 = ((const float*) w)[5];
const float vbias6 = ((const float*) w)[6];
const float vbias7 = ((const float*) w)[7];
w = (const float*) w + 8;
const float vmax = params->scalar.max;
const float vmin = params->scalar.min;
vout0x0 += vbias0;
vout0x0 = math_max_f32(vout0x0, vmin);
vout0x0 = math_min_f32(vout0x0, vmax);
vout0x1 += vbias1;
vout0x1 = math_max_f32(vout0x1, vmin);
vout0x1 = math_min_f32(vout0x1, vmax);
vout0x2 += vbias2;
vout0x2 = math_max_f32(vout0x2, vmin);
vout0x2 = math_min_f32(vout0x2, vmax);
vout0x3 += vbias3;
vout0x3 = math_max_f32(vout0x3, vmin);
vout0x3 = math_min_f32(vout0x3, vmax);
vout0x4 += vbias4;
vout0x4 = math_max_f32(vout0x4, vmin);
vout0x4 = math_min_f32(vout0x4, vmax);
vout0x5 += vbias5;
vout0x5 = math_max_f32(vout0x5, vmin);
vout0x5 = math_min_f32(vout0x5, vmax);
vout0x6 += vbias6;
vout0x6 = math_max_f32(vout0x6, vmin);
vout0x6 = math_min_f32(vout0x6, vmax);
vout0x7 += vbias7;
vout0x7 = math_max_f32(vout0x7, vmin);
vout0x7 = math_min_f32(vout0x7, vmax);
if XNN_LIKELY(nc >= 8) {
c0[0] = vout0x0;
c0[1] = vout0x1;
c0[2] = vout0x2;
c0[3] = vout0x3;
c0[4] = vout0x4;
c0[5] = vout0x5;
c0[6] = vout0x6;
c0[7] = vout0x7;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
nc -= 8;
} else {
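    // Partial-width store: write the remaining outputs in power-of-two chunks, shifting the
    // surviving values down after each chunk so the next store always starts at vout0x0.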
if (nc & 4) {
c0[0] = (float) vout0x0;
c0[1] = (float) vout0x1;
c0[2] = (float) vout0x2;
c0[3] = (float) vout0x3;
vout0x0 = vout0x4;
vout0x1 = vout0x5;
vout0x2 = vout0x6;
c0 += 4;
}
if (nc & 2) {
c0[0] = (float) vout0x0;
c0[1] = (float) vout0x1;
vout0x0 = vout0x2;
vout0x1 = vout0x3;
vout0x2 = vout0x4;
vout0x3 = vout0x5;
vout0x4 = vout0x6;
c0 += 2;
}
if (nc & 1) {
c0[0] = (float) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 5,260 | 28.227778 | 95 | c |
| XNNPACK | XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-1x8c2s4-minmax-neon-mlal.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_1x8c2s4__neon_mlal(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
float* c0 = c;
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int32x4_t vzp01 = vld1q_dup_s32(&quantization_params[0].zero_point);
vacc0x0123 = vmulq_s32(vacc0x0123, vzp01);
vacc0x4567 = vmulq_s32(vacc0x4567, vzp01);
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
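      // Rotate the activations by 2 bytes so the next pair of k channels lines up with the
      // next packed weight block; this is the "s4" shuffle that reuses loaded activations.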
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
if (k != 0) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
}
float32x4_t vout0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vout0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale01 = vld1q_dup_f32(&quantization_params[0].scale);
const float32x4_t vbias0123 = vld1q_f32(w); w = (const float*) w + 4;
const float32x4_t vbias4567 = vld1q_f32(w); w = (const float*) w + 4;
    #if XNN_ARCH_ARM64
      vout0x0123 = vfmaq_f32(vbias0123, vout0x0123, vscale01);
      vout0x4567 = vfmaq_f32(vbias4567, vout0x4567, vscale01);
    #else
      vout0x0123 = vmlaq_f32(vbias0123, vout0x0123, vscale01);
      vout0x4567 = vmlaq_f32(vbias4567, vout0x4567, vscale01);
    #endif
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
vout0x0123 = vmaxq_f32(vout0x0123, vmin);
vout0x4567 = vmaxq_f32(vout0x4567, vmin);
vout0x0123 = vminq_f32(vout0x0123, vmax);
vout0x4567 = vminq_f32(vout0x4567, vmax);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(&c0[0], vout0x0123);
vst1q_f32(&c0[4], vout0x4567);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c0, vout0x0123); c0 += 4;
vout0x0123 = vout0x4567;
}
float32x2_t vout0x01 = vget_low_f32(vout0x0123);
if (nc & 2) {
vst1_f32(c0, vout0x01); c0 += 2;
vout0x01 = vget_high_f32(vout0x0123);
}
if (nc & 1) {
vst1_lane_f32(c0, vout0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,843 | 40.946524 | 109 | c |
| XNNPACK | XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-1x8c4-minmax-neondot.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_1x8c4__neondot(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int32x4_t vzp0 = vdupq_n_s32(quantization_params[0].zero_point);
// Loop over groups of 8 columns.
do {
    // Initialize accumulators with the 8 int32 per-column terms packed at the
    // start of the group of 8 columns. These are weight column sums used for
    // the zero-point correction below, not the float bias, which is added
    // later in the epilogue.
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
vacc0x0123 = vmulq_s32(vacc0x0123, vzp0);
vacc0x4567 = vmulq_s32(vacc0x4567, vzp0);
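    // Note on the initialization above: for dynamically quantized activations,
    //   sum_k (a[k] - zp) * b[k][n] == sum_k a[k]*b[k][n] + zp * ksum[n]
    // when ksum[n] is packed as the negated per-column weight sum. The int32
    // header of the packed weights holds this ksum term, so multiplying it by
    // the per-row zero point seeds the accumulators with the zero-point
    // correction; the dot-product loop below then adds the raw a*b products.
    // (The sign convention of ksum is fixed by the weight-packing routine.)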
    // Inner accumulation loop along the K dimension for this group of 8 columns.
size_t k = kc;
    // Loop partially unrolled 2x to load 8 bytes of activations at a time.
while (k >= 8 * sizeof(int8_t)) {
// Load a 1x8 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;
      // Load an 8x8 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 1x8 * 8x8 --> 1x8.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
k -= 8 * sizeof(int8_t);
}
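    // Each vdotq_lane_s32 above computes, per 32-bit lane, the dot product of
    // a 4-byte group of weights with the 4-byte group of activations selected
    // by the lane index, and adds it to the accumulator -- i.e. 4 columns
    // advance 4 K-positions per instruction.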
    // Handle the remaining 4 positions of `k` (kc is rounded up to a multiple
    // of 4, so the remainder is exactly 4). The 8-byte load below reads past
    // the 4 consumed bytes; the kernel is annotated XNN_OOB_READS for this.
if XNN_UNLIKELY(k != 0) {
// Load a 1x4 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 4;
// Load a 4x8 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 1x4 * 4x8 --> 1x8.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
}
float32x4_t vout0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vout0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0 = vdupq_n_f32(quantization_params[0].scale);
vout0x0123 = vmulq_f32(vout0x0123, vscale0);
vout0x4567 = vmulq_f32(vout0x4567, vscale0);
const float32x4_t vbias0123 = vld1q_f32(w); w = (const float*) w + 4;
const float32x4_t vbias4567 = vld1q_f32(w); w = (const float*) w + 4;
vout0x0123 = vaddq_f32(vout0x0123, vbias0123);
vout0x4567 = vaddq_f32(vout0x4567, vbias4567);
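    // Epilogue: with per-row dynamic quantization, the float output is
    //   out[n] = clamp(a_scale * acc[n] + bias[n], min, max)
    // where a_scale is the runtime activation scale and bias[n] comes from
    // the float block of the packed weights.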
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vout0x0123 = vmaxq_f32(vout0x0123, vmin);
vout0x0123 = vminq_f32(vout0x0123, vmax);
vout0x4567 = vmaxq_f32(vout0x4567, vmin);
vout0x4567 = vminq_f32(vout0x4567, vmax);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(&c0[0], vout0x0123);
vst1q_f32(&c0[4], vout0x4567);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c0, vout0x0123); c0 += 4;
vout0x0123 = vout0x4567;
}
float32x2_t vout0x01 = vget_low_f32(vout0x0123);
if (nc & 2) {
vst1_f32(c0, vout0x01); c0 += 2;
vout0x01 = vget_high_f32(vout0x0123);
}
if (nc & 1) {
vst1_lane_f32(c0, vout0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,609
| 33.924242
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-2x16c4-minmax-neondot.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_2x16c4__neondot(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
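  // When mr == 1 the second row aliases the first: row 1 is computed
  // redundantly from the same inputs and stored over the same outputs, which
  // keeps the main loop branch-free at the cost of duplicate work for the
  // last odd row.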
const int32x4_t vzp0 = vdupq_n_s32(quantization_params[0].zero_point);
const int32x4_t vzp1 = vdupq_n_s32(quantization_params[1].zero_point);
// Loop over groups of 16 columns.
do {
    // Initialize accumulators with the 16 int32 per-column terms packed at the
    // start of the group of 16 columns. These are weight column sums used for
    // the zero-point correction below, not the float bias, which is added
    // later in the epilogue.
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc1x0123 = vacc0x0123;
int32x4_t vacc1x4567 = vacc0x4567;
int32x4_t vacc1x89AB = vacc0x89AB;
int32x4_t vacc1xCDEF = vacc0xCDEF;
vacc0x0123 = vmulq_s32(vacc0x0123, vzp0);
vacc0x4567 = vmulq_s32(vacc0x4567, vzp0);
vacc0x89AB = vmulq_s32(vacc0x89AB, vzp0);
vacc0xCDEF = vmulq_s32(vacc0xCDEF, vzp0);
vacc1x0123 = vmulq_s32(vacc1x0123, vzp1);
vacc1x4567 = vmulq_s32(vacc1x4567, vzp1);
vacc1x89AB = vmulq_s32(vacc1x89AB, vzp1);
vacc1xCDEF = vmulq_s32(vacc1xCDEF, vzp1);
    // Inner accumulation loop along the K dimension for this group of 16 columns.
size_t k = kc;
    // Loop partially unrolled 2x to load 8 bytes of activations at a time.
while (k >= 8 * sizeof(int8_t)) {
// Load a 2x8 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;
const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 8;
      // Load an 8x16 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 2x8 * 8x16 --> 2x16.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0);
vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
vacc1x89AB = vdotq_lane_s32(vacc1x89AB, vb0123x89AB, va1x01234567, 0);
vacc1xCDEF = vdotq_lane_s32(vacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb4567x89AB, va0x01234567, 1);
vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb4567x0123, va1x01234567, 1);
vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb4567x4567, va1x01234567, 1);
vacc1x89AB = vdotq_lane_s32(vacc1x89AB, vb4567x89AB, va1x01234567, 1);
vacc1xCDEF = vdotq_lane_s32(vacc1xCDEF, vb4567xCDEF, va1x01234567, 1);
k -= 8 * sizeof(int8_t);
}
    // Handle the remaining 4 positions of `k` (kc is rounded up to a multiple
    // of 4, so the remainder is exactly 4). The 8-byte loads below read past
    // the 4 consumed bytes; the kernel is annotated XNN_OOB_READS for this.
if XNN_UNLIKELY(k != 0) {
// Load a 2x4 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 4;
const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 4;
// Load a 4x16 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 2x4 * 4x16 --> 2x16.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0);
vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
vacc1x89AB = vdotq_lane_s32(vacc1x89AB, vb0123x89AB, va1x01234567, 0);
vacc1xCDEF = vdotq_lane_s32(vacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
}
float32x4_t vout0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vout0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vout0x89AB = vcvtq_f32_s32(vacc0x89AB);
float32x4_t vout0xCDEF = vcvtq_f32_s32(vacc0xCDEF);
float32x4_t vout1x0123 = vcvtq_f32_s32(vacc1x0123);
float32x4_t vout1x4567 = vcvtq_f32_s32(vacc1x4567);
float32x4_t vout1x89AB = vcvtq_f32_s32(vacc1x89AB);
float32x4_t vout1xCDEF = vcvtq_f32_s32(vacc1xCDEF);
const float32x4_t vscale0 = vdupq_n_f32(quantization_params[0].scale);
vout0x0123 = vmulq_f32(vout0x0123, vscale0);
vout0x4567 = vmulq_f32(vout0x4567, vscale0);
vout0x89AB = vmulq_f32(vout0x89AB, vscale0);
vout0xCDEF = vmulq_f32(vout0xCDEF, vscale0);
const float32x4_t vscale1 = vdupq_n_f32(quantization_params[1].scale);
vout1x0123 = vmulq_f32(vout1x0123, vscale1);
vout1x4567 = vmulq_f32(vout1x4567, vscale1);
vout1x89AB = vmulq_f32(vout1x89AB, vscale1);
vout1xCDEF = vmulq_f32(vout1xCDEF, vscale1);
const float32x4_t vbias0123 = vld1q_f32(w); w = (const float*) w + 4;
const float32x4_t vbias4567 = vld1q_f32(w); w = (const float*) w + 4;
const float32x4_t vbias89AB = vld1q_f32(w); w = (const float*) w + 4;
const float32x4_t vbiasCDEF = vld1q_f32(w); w = (const float*) w + 4;
vout0x0123 = vaddq_f32(vout0x0123, vbias0123);
vout0x4567 = vaddq_f32(vout0x4567, vbias4567);
vout0x89AB = vaddq_f32(vout0x89AB, vbias89AB);
vout0xCDEF = vaddq_f32(vout0xCDEF, vbiasCDEF);
vout1x0123 = vaddq_f32(vout1x0123, vbias0123);
vout1x4567 = vaddq_f32(vout1x4567, vbias4567);
vout1x89AB = vaddq_f32(vout1x89AB, vbias89AB);
vout1xCDEF = vaddq_f32(vout1xCDEF, vbiasCDEF);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vout0x0123 = vmaxq_f32(vout0x0123, vmin);
vout0x0123 = vminq_f32(vout0x0123, vmax);
vout0x4567 = vmaxq_f32(vout0x4567, vmin);
vout0x4567 = vminq_f32(vout0x4567, vmax);
vout0x89AB = vmaxq_f32(vout0x89AB, vmin);
vout0x89AB = vminq_f32(vout0x89AB, vmax);
vout0xCDEF = vmaxq_f32(vout0xCDEF, vmin);
vout0xCDEF = vminq_f32(vout0xCDEF, vmax);
vout1x0123 = vmaxq_f32(vout1x0123, vmin);
vout1x0123 = vminq_f32(vout1x0123, vmax);
vout1x4567 = vmaxq_f32(vout1x4567, vmin);
vout1x4567 = vminq_f32(vout1x4567, vmax);
vout1x89AB = vmaxq_f32(vout1x89AB, vmin);
vout1x89AB = vminq_f32(vout1x89AB, vmax);
vout1xCDEF = vmaxq_f32(vout1xCDEF, vmin);
vout1xCDEF = vminq_f32(vout1xCDEF, vmax);
if XNN_LIKELY(nc >= 16) {
vst1q_f32(&c1[0], vout1x0123);
vst1q_f32(&c1[4], vout1x4567);
vst1q_f32(&c1[8], vout1x89AB);
vst1q_f32(&c1[12], vout1xCDEF);
vst1q_f32(&c0[0], vout0x0123);
vst1q_f32(&c0[4], vout0x4567);
vst1q_f32(&c0[8], vout0x89AB);
vst1q_f32(&c0[12], vout0xCDEF);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
nc -= 16;
} else {
if (nc & 8) {
vst1q_f32(c1, vout1x0123); c1 += 4;
vout1x0123 = vout1x89AB;
vst1q_f32(c0, vout0x0123); c0 += 4;
vout0x0123 = vout0x89AB;
vst1q_f32(c1, vout1x4567); c1 += 4;
vout1x4567 = vout1xCDEF;
vst1q_f32(c0, vout0x4567); c0 += 4;
vout0x4567 = vout0xCDEF;
}
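      // After each partial store the surviving high registers are funneled
      // down into the low ones, so the narrower nc & 4 / nc & 2 / nc & 1
      // cases below always operate on the lowest registers.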
if (nc & 4) {
vst1q_f32(c1, vout1x0123); c1 += 4;
vout1x0123 = vout1x4567;
vst1q_f32(c0, vout0x0123); c0 += 4;
vout0x0123 = vout0x4567;
}
float32x2_t vout1x01 = vget_low_f32(vout1x0123);
float32x2_t vout0x01 = vget_low_f32(vout0x0123);
if (nc & 2) {
vst1_f32(c1, vout1x01); c1 += 2;
vst1_f32(c0, vout0x01); c0 += 2;
vout1x01 = vget_high_f32(vout1x0123);
vout0x01 = vget_high_f32(vout0x0123);
}
if (nc & 1) {
vst1_lane_f32(c1, vout1x01, 0);
vst1_lane_f32(c0, vout0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,026
| 41.850427
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-2x2-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_2x2__scalar(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
const int32_t vzp0 = quantization_params[0].zero_point;
const int32_t vzp1 = quantization_params[1].zero_point;
do {
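    // The accumulators below are loaded with unaligned accesses: this scalar
    // kernel does not round kc up, so with nr == 2 the int32/float sections
    // of the packed weights need not stay 4-byte aligned after an odd-length
    // run of int8 weights.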
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
w = (const int32_t*) w + 2;
vacc0x0 *= vzp0;
vacc0x1 *= vzp0;
vacc1x0 *= vzp1;
vacc1x1 *= vzp1;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t va1 = (int32_t) *a1++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
w = (const int8_t*) w + 2;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
k -= sizeof(int8_t);
} while (k != 0);
float vout0x0 = (float) vacc0x0;
float vout0x1 = (float) vacc0x1;
float vout1x0 = (float) vacc1x0;
float vout1x1 = (float) vacc1x1;
const float vscale0 = quantization_params[0].scale;
const float vscale1 = quantization_params[1].scale;
vout0x0 *= vscale0;
vout1x0 *= vscale1;
vout0x1 *= vscale0;
vout1x1 *= vscale1;
const float vbias0 = unaligned_indexed_load_f32(w, 0);
const float vbias1 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
const float vmax = params->scalar.max;
const float vmin = params->scalar.min;
vout0x0 += vbias0;
vout0x0 = math_max_f32(vout0x0, vmin);
vout0x0 = math_min_f32(vout0x0, vmax);
vout1x0 += vbias0;
vout1x0 = math_max_f32(vout1x0, vmin);
vout1x0 = math_min_f32(vout1x0, vmax);
vout0x1 += vbias1;
vout0x1 = math_max_f32(vout0x1, vmin);
vout0x1 = math_min_f32(vout0x1, vmax);
vout1x1 += vbias1;
vout1x1 = math_max_f32(vout1x1, vmin);
vout1x1 = math_min_f32(vout1x1, vmax);
if XNN_LIKELY(nc >= 2) {
c1[0] = vout1x0;
c1[1] = vout1x1;
c0[0] = vout0x0;
c0[1] = vout0x1;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
nc -= 2;
} else {
if (nc & 1) {
c1[0] = (float) vout1x0;
c0[0] = (float) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 3,464
| 26.72
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-2x4-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_2x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
const int32_t vzp0 = quantization_params[0].zero_point;
const int32_t vzp1 = quantization_params[1].zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc1x2 = vacc0x2;
int32_t vacc1x3 = vacc0x3;
w = (const int32_t*) w + 4;
vacc0x0 *= vzp0;
vacc0x1 *= vzp0;
vacc0x2 *= vzp0;
vacc0x3 *= vzp0;
vacc1x0 *= vzp1;
vacc1x1 *= vzp1;
vacc1x2 *= vzp1;
vacc1x3 *= vzp1;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t va1 = (int32_t) *a1++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc1x2 += va1 * vb2;
vacc1x3 += va1 * vb3;
k -= sizeof(int8_t);
} while (k != 0);
float vout0x0 = (float) vacc0x0;
float vout0x1 = (float) vacc0x1;
float vout0x2 = (float) vacc0x2;
float vout0x3 = (float) vacc0x3;
float vout1x0 = (float) vacc1x0;
float vout1x1 = (float) vacc1x1;
float vout1x2 = (float) vacc1x2;
float vout1x3 = (float) vacc1x3;
const float vscale0 = quantization_params[0].scale;
const float vscale1 = quantization_params[1].scale;
vout0x0 *= vscale0;
vout1x0 *= vscale1;
vout0x1 *= vscale0;
vout1x1 *= vscale1;
vout0x2 *= vscale0;
vout1x2 *= vscale1;
vout0x3 *= vscale0;
vout1x3 *= vscale1;
const float vbias0 = ((const float*) w)[0];
const float vbias1 = ((const float*) w)[1];
const float vbias2 = ((const float*) w)[2];
const float vbias3 = ((const float*) w)[3];
w = (const float*) w + 4;
const float vmax = params->scalar.max;
const float vmin = params->scalar.min;
vout0x0 += vbias0;
vout0x0 = math_max_f32(vout0x0, vmin);
vout0x0 = math_min_f32(vout0x0, vmax);
vout1x0 += vbias0;
vout1x0 = math_max_f32(vout1x0, vmin);
vout1x0 = math_min_f32(vout1x0, vmax);
vout0x1 += vbias1;
vout0x1 = math_max_f32(vout0x1, vmin);
vout0x1 = math_min_f32(vout0x1, vmax);
vout1x1 += vbias1;
vout1x1 = math_max_f32(vout1x1, vmin);
vout1x1 = math_min_f32(vout1x1, vmax);
vout0x2 += vbias2;
vout0x2 = math_max_f32(vout0x2, vmin);
vout0x2 = math_min_f32(vout0x2, vmax);
vout1x2 += vbias2;
vout1x2 = math_max_f32(vout1x2, vmin);
vout1x2 = math_min_f32(vout1x2, vmax);
vout0x3 += vbias3;
vout0x3 = math_max_f32(vout0x3, vmin);
vout0x3 = math_min_f32(vout0x3, vmax);
vout1x3 += vbias3;
vout1x3 = math_max_f32(vout1x3, vmin);
vout1x3 = math_min_f32(vout1x3, vmax);
if XNN_LIKELY(nc >= 4) {
c1[0] = vout1x0;
c1[1] = vout1x1;
c1[2] = vout1x2;
c1[3] = vout1x3;
c0[0] = vout0x0;
c0[1] = vout0x1;
c0[2] = vout0x2;
c0[3] = vout0x3;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
c1[0] = (float) vout1x0;
c1[1] = (float) vout1x1;
vout1x0 = vout1x2;
c1 += 2;
c0[0] = (float) vout0x0;
c0[1] = (float) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c1[0] = (float) vout1x0;
c0[0] = (float) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
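The scalar kernels above expose the quantization algebra directly, so it can be checked in isolation. Below is a minimal standalone sketch (plain C, not an XNNPACK API; the ksum sign convention is an assumption inferred from the kernel's `acc = zp * ksum + sum(a*b)` form) that verifies the zero-point correction against a dequantize-first reference:

// Standalone sketch: verifies that seeding the accumulator with zp * ksum
// (ksum assumed to be the negated per-column weight sum) reproduces the
// dequantized reference sum_k (a[k] - zp) * b[k][n]. Illustrative only.
#include <assert.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  enum { KC = 5, NR = 4 };
  const int8_t a[KC] = {3, -7, 12, 0, 25};   // quantized activations
  const int8_t b[KC][NR] = {                 // quantized weights
    {1, -2, 3, -4}, {5, 6, -7, 8}, {-9, 1, 2, 3}, {4, -5, 6, 7}, {8, 9, -1, 2}};
  const int32_t zp = 11;                     // activation zero point
  const float a_scale = 0.035f;              // activation scale
  const float bias[NR] = {0.5f, -1.0f, 2.0f, 0.0f};
  for (int n = 0; n < NR; n++) {
    // ksum packed as the negated column sum (assumed sign convention).
    int32_t ksum = 0;
    for (int k = 0; k < KC; k++) ksum -= b[k][n];
    // Kernel-style accumulation: seed with zp * ksum, then add raw products.
    int32_t acc = zp * ksum;
    for (int k = 0; k < KC; k++) acc += (int32_t) a[k] * (int32_t) b[k][n];
    const float out = a_scale * (float) acc + bias[n];
    // Reference: dequantize the activations first.
    float ref = bias[n];
    for (int k = 0; k < KC; k++) ref += a_scale * (float) (a[k] - zp) * (float) b[k][n];
    assert(fabsf(out - ref) < 1e-4f);
    printf("column %d: %f\n", n, out);
  }
  return 0;
}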
| 4,981
| 27.632184
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-2x4c8-minmax-avx-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_2x4c8__avx_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
const __m128i vksumzp1 = _mm_mullo_epi32(vksum, vzp1);
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
__m128i vacc1x0 = _mm_blend_epi16(vksumzp1, vzero, 0xFC);
__m128i vacc1x1 = _mm_blend_epi16(vksumzp1, vzero, 0xF3);
__m128i vacc1x2 = _mm_blend_epi16(vksumzp1, vzero, 0xCF);
__m128i vacc1x3 = _mm_blend_epi16(vksumzp1, vzero, 0x3F);
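    // The blends above isolate exactly one 32-bit lane of the ksum*zp
    // correction in each accumulator (mask 0xFC keeps lane 0, 0xF3 lane 1,
    // 0xCF lane 2, 0x3F lane 3). Each accumulator gathers partial sums for a
    // single output column across its 4 lanes, which are later reduced with
    // _mm_hadd_epi32; seeding only one lane ensures the correction is counted
    // once in that reduction.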
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 5,786
| 35.859873
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-2x4c8-minmax-avx-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_2x4c8__avx_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
const __m128i vksumzp1 = _mm_mullo_epi32(vksum, vzp1);
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
__m128i vacc1x0 = _mm_blend_epi16(vksumzp1, vzero, 0xFC);
__m128i vacc1x1 = _mm_blend_epi16(vksumzp1, vzero, 0xF3);
__m128i vacc1x2 = _mm_blend_epi16(vksumzp1, vzero, 0xCF);
__m128i vacc1x3 = _mm_blend_epi16(vksumzp1, vzero, 0x3F);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 5,904
| 35.677019
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-2x4c8-minmax-sse2-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_2x4c8__sse2_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
const __m128i vksum_lo = _mm_srli_epi32(_mm_slli_epi32(vksum, 16), 16);
const __m128i vksum_hi = _mm_srli_epi32(vksum, 16);
__m128i vzpprodksumhi0 = _mm_mulhi_epu16(vzp0, vksum_lo);
const __m128i vzpprodksumlo0 = _mm_mullo_epi16(vzp0, vksum_lo);
__m128i vzpprodksumhi1 = _mm_mulhi_epu16(vzp1, vksum_lo);
const __m128i vzpprodksumlo1 = _mm_mullo_epi16(vzp1, vksum_lo);
vzpprodksumhi0 = _mm_add_epi16(vzpprodksumhi0, _mm_mullo_epi16(vzp0, vksum_hi));
vzpprodksumhi0 = _mm_sub_epi16(vzpprodksumhi0, _mm_and_si128(_mm_srai_epi16(vzp0, 15), vksum_lo));
vzpprodksumhi1 = _mm_add_epi16(vzpprodksumhi1, _mm_mullo_epi16(vzp1, vksum_hi));
vzpprodksumhi1 = _mm_sub_epi16(vzpprodksumhi1, _mm_and_si128(_mm_srai_epi16(vzp1, 15), vksum_lo));
vzpprodksumhi0 = _mm_slli_si128(vzpprodksumhi0, 2);
vzpprodksumhi1 = _mm_slli_si128(vzpprodksumhi1, 2);
const __m128i vksumzp0 = _mm_or_si128(vzpprodksumhi0, vzpprodksumlo0);
const __m128i vksumzp1 = _mm_or_si128(vzpprodksumhi1, vzpprodksumlo1);
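    // SSE2 has no 32-bit _mm_mullo_epi32 (that is SSE4.1), so the ksum * zp
    // products above are synthesized from 16-bit halves: mullo/mulhi give the
    // low and high partial products, the cross term and a sign fix-up are
    // folded into the high half, and the halves are recombined with a 2-byte
    // shift and OR.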
const __m128i vksum010 = _mm_unpacklo_epi32(vksumzp0, vzero);
const __m128i vksum230 = _mm_unpackhi_epi32(vksumzp0, vzero);
const __m128i vksum011 = _mm_unpacklo_epi32(vksumzp1, vzero);
const __m128i vksum231 = _mm_unpackhi_epi32(vksumzp1, vzero);
__m128i vacc0x0 = _mm_unpacklo_epi64(vksum010, vzero);
__m128i vacc0x1 = _mm_unpackhi_epi64(vksum010, vzero);
__m128i vacc1x0 = _mm_unpacklo_epi64(vksum011, vzero);
__m128i vacc1x1 = _mm_unpackhi_epi64(vksum011, vzero);
__m128i vacc0x2 = _mm_unpacklo_epi64(vksum230, vzero);
__m128i vacc0x3 = _mm_unpackhi_epi64(vksum230, vzero);
__m128i vacc1x2 = _mm_unpacklo_epi64(vksum231, vzero);
__m128i vacc1x3 = _mm_unpackhi_epi64(vksum231, vzero);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
a1 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);
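      // Two SSE2 sign-extension idioms: activations use unpack-with-self plus
      // an arithmetic shift right by 8; weights compare against zero to
      // materialize sign bytes and interleave them, which sign-extends both
      // halves of the 16-byte load without shifts.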
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
const __m128i vacc1x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x0, vacc1x2), _mm_unpackhi_epi32(vacc1x0, vacc1x2));
const __m128i vacc1x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x1, vacc1x3), _mm_unpackhi_epi32(vacc1x1, vacc1x3));
__m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
__m128i vacc1x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x02, vacc1x13), _mm_unpackhi_epi32(vacc1x02, vacc1x13));
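    // Horizontal reduction without SSSE3's _mm_hadd_epi32: pairs of
    // accumulators are interleaved with unpacklo/unpackhi and added, which
    // transposes the 4 partial sums of each column into a single lane.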
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 7,407
| 40.617978
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-2x4c8-minmax-sse2-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_2x4c8__sse2_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
const __m128i vksum_lo = _mm_srli_epi32(_mm_slli_epi32(vksum, 16), 16);
const __m128i vksum_hi = _mm_srli_epi32(vksum, 16);
__m128i vzpprodksumhi0 = _mm_mulhi_epu16(vzp0, vksum_lo);
const __m128i vzpprodksumlo0 = _mm_mullo_epi16(vzp0, vksum_lo);
__m128i vzpprodksumhi1 = _mm_mulhi_epu16(vzp1, vksum_lo);
const __m128i vzpprodksumlo1 = _mm_mullo_epi16(vzp1, vksum_lo);
vzpprodksumhi0 = _mm_add_epi16(vzpprodksumhi0, _mm_mullo_epi16(vzp0, vksum_hi));
vzpprodksumhi0 = _mm_sub_epi16(vzpprodksumhi0, _mm_and_si128(_mm_srai_epi16(vzp0, 15), vksum_lo));
vzpprodksumhi1 = _mm_add_epi16(vzpprodksumhi1, _mm_mullo_epi16(vzp1, vksum_hi));
vzpprodksumhi1 = _mm_sub_epi16(vzpprodksumhi1, _mm_and_si128(_mm_srai_epi16(vzp1, 15), vksum_lo));
vzpprodksumhi0 = _mm_slli_si128(vzpprodksumhi0, 2);
vzpprodksumhi1 = _mm_slli_si128(vzpprodksumhi1, 2);
const __m128i vksumzp0 = _mm_or_si128(vzpprodksumhi0, vzpprodksumlo0);
const __m128i vksumzp1 = _mm_or_si128(vzpprodksumhi1, vzpprodksumlo1);
const __m128i vksum010 = _mm_unpacklo_epi32(vksumzp0, vzero);
const __m128i vksum230 = _mm_unpackhi_epi32(vksumzp0, vzero);
const __m128i vksum011 = _mm_unpacklo_epi32(vksumzp1, vzero);
const __m128i vksum231 = _mm_unpackhi_epi32(vksumzp1, vzero);
__m128i vacc0x0 = _mm_unpacklo_epi64(vksum010, vzero);
__m128i vacc0x1 = _mm_unpackhi_epi64(vksum010, vzero);
__m128i vacc1x0 = _mm_unpacklo_epi64(vksum011, vzero);
__m128i vacc1x1 = _mm_unpackhi_epi64(vksum011, vzero);
__m128i vacc0x2 = _mm_unpacklo_epi64(vksum230, vzero);
__m128i vacc0x3 = _mm_unpackhi_epi64(vksum230, vzero);
__m128i vacc1x2 = _mm_unpacklo_epi64(vksum231, vzero);
__m128i vacc1x3 = _mm_unpackhi_epi64(vksum231, vzero);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
a1 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
const __m128i vacc1x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x0, vacc1x2), _mm_unpackhi_epi32(vacc1x0, vacc1x2));
const __m128i vacc1x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x1, vacc1x3), _mm_unpackhi_epi32(vacc1x1, vacc1x3));
__m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
__m128i vacc1x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x02, vacc1x13), _mm_unpackhi_epi32(vacc1x02, vacc1x13));
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 7,501
| 40.677778
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-2x4c8-minmax-sse41-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_2x4c8__sse41_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
const __m128i vksumzp1 = _mm_mullo_epi32(vksum, vzp1);
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
__m128i vacc1x0 = _mm_blend_epi16(vksumzp1, vzero, 0xFC);
__m128i vacc1x1 = _mm_blend_epi16(vksumzp1, vzero, 0xF3);
__m128i vacc1x2 = _mm_blend_epi16(vksumzp1, vzero, 0xCF);
__m128i vacc1x3 = _mm_blend_epi16(vksumzp1, vzero, 0x3F);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
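      // With SSE4.1, _mm_cvtepi8_epi16 sign-extends the low 8 bytes directly;
      // the high 8 bytes still use the unpackhi/shift idiom because there is
      // no high-half variant of the convert.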
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 5,735
| 36.490196
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-2x4c8-minmax-sse41-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_2x4c8__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
const __m128i vksumzp1 = _mm_mullo_epi32(vksum, vzp1);
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
__m128i vacc1x0 = _mm_blend_epi16(vksumzp1, vzero, 0xFC);
__m128i vacc1x1 = _mm_blend_epi16(vksumzp1, vzero, 0xF3);
__m128i vacc1x2 = _mm_blend_epi16(vksumzp1, vzero, 0xCF);
__m128i vacc1x3 = _mm_blend_epi16(vksumzp1, vzero, 0x3F);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 5,853
| 36.286624
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-2x4c8-minmax-xop-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_2x4c8__xop_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
const __m128i vksumzp1 = _mm_mullo_epi32(vksum, vzp1);
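    // Blend each column's ksum*zero_point into its own int32 lane (each blend mask keeps
    // two 16-bit lanes) and zero the rest, so the final horizontal reduction drops each
    // correction onto its own output column.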
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
__m128i vacc1x0 = _mm_blend_epi16(vksumzp1, vzero, 0xFC);
__m128i vacc1x1 = _mm_blend_epi16(vksumzp1, vzero, 0xF3);
__m128i vacc1x2 = _mm_blend_epi16(vksumzp1, vzero, 0xCF);
__m128i vacc1x3 = _mm_blend_epi16(vksumzp1, vzero, 0x3F);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
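      // Sign-extend the low 8 weights with pmovsxbw; for the high 8, duplicate the bytes
      // into 16-bit lanes and arithmetic-shift right by 8.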
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0);
vacc0x1 = _mm_maddd_epi16(vxa0, vxb1, vacc0x1);
vacc1x0 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0);
vacc1x1 = _mm_maddd_epi16(vxa1, vxb1, vacc1x1);
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x2 = _mm_maddd_epi16(vxa0, vxb2, vacc0x2);
vacc0x3 = _mm_maddd_epi16(vxa0, vxb3, vacc0x3);
vacc1x2 = _mm_maddd_epi16(vxa1, vxb2, vacc1x2);
vacc1x3 = _mm_maddd_epi16(vxa1, vxb3, vacc1x3);
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 5,674
| 35.146497
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-2x4c8-minmax-xop-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_2x4c8__xop_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
const __m128i vksumzp1 = _mm_mullo_epi32(vksum, vzp1);
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
__m128i vacc1x0 = _mm_blend_epi16(vksumzp1, vzero, 0xFC);
__m128i vacc1x1 = _mm_blend_epi16(vksumzp1, vzero, 0xF3);
__m128i vacc1x2 = _mm_blend_epi16(vksumzp1, vzero, 0xCF);
__m128i vacc1x3 = _mm_blend_epi16(vksumzp1, vzero, 0x3F);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
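      // XOP's _mm_maddd_epi16 fuses the 16-bit multiply-add of adjacent pairs with the
      // 32-bit accumulate, replacing SSE's _mm_madd_epi16 + _mm_add_epi32 pair.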
vacc0x0 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0);
vacc1x0 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0);
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x1 = _mm_maddd_epi16(vxa0, vxb1, vacc0x1);
vacc1x1 = _mm_maddd_epi16(vxa1, vxb1, vacc1x1);
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x2 = _mm_maddd_epi16(vxa0, vxb2, vacc0x2);
vacc1x2 = _mm_maddd_epi16(vxa1, vxb2, vacc1x2);
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x3 = _mm_maddd_epi16(vxa0, vxb3, vacc0x3);
vacc1x3 = _mm_maddd_epi16(vxa1, vxb3, vacc1x3);
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 5,792
| 34.981366
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-2x8-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_2x8__scalar(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
const int32_t vzp0 = quantization_params[0].zero_point;
const int32_t vzp1 = quantization_params[1].zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
int32_t vacc0x4 = ((const int32_t*) w)[4];
int32_t vacc0x5 = ((const int32_t*) w)[5];
int32_t vacc0x6 = ((const int32_t*) w)[6];
int32_t vacc0x7 = ((const int32_t*) w)[7];
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc1x2 = vacc0x2;
int32_t vacc1x3 = vacc0x3;
int32_t vacc1x4 = vacc0x4;
int32_t vacc1x5 = vacc0x5;
int32_t vacc1x6 = vacc0x6;
int32_t vacc1x7 = vacc0x7;
w = (const int32_t*) w + 8;
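    // The int32 values loaded above are per-column sums of the weights along K; scaling
    // them by each row's zero point folds the activation zero-point correction into the
    // integer accumulation.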
vacc0x0 *= vzp0;
vacc0x1 *= vzp0;
vacc0x2 *= vzp0;
vacc0x3 *= vzp0;
vacc0x4 *= vzp0;
vacc0x5 *= vzp0;
vacc0x6 *= vzp0;
vacc0x7 *= vzp0;
vacc1x0 *= vzp1;
vacc1x1 *= vzp1;
vacc1x2 *= vzp1;
vacc1x3 *= vzp1;
vacc1x4 *= vzp1;
vacc1x5 *= vzp1;
vacc1x6 *= vzp1;
vacc1x7 *= vzp1;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t va1 = (int32_t) *a1++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
const int32_t vb4 = (int32_t) ((const int8_t*) w)[4];
const int32_t vb5 = (int32_t) ((const int8_t*) w)[5];
const int32_t vb6 = (int32_t) ((const int8_t*) w)[6];
const int32_t vb7 = (int32_t) ((const int8_t*) w)[7];
w = (const int8_t*) w + 8;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
vacc0x4 += va0 * vb4;
vacc0x5 += va0 * vb5;
vacc0x6 += va0 * vb6;
vacc0x7 += va0 * vb7;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc1x2 += va1 * vb2;
vacc1x3 += va1 * vb3;
vacc1x4 += va1 * vb4;
vacc1x5 += va1 * vb5;
vacc1x6 += va1 * vb6;
vacc1x7 += va1 * vb7;
k -= sizeof(int8_t);
} while (k != 0);
float vout0x0 = (float) vacc0x0;
float vout0x1 = (float) vacc0x1;
float vout0x2 = (float) vacc0x2;
float vout0x3 = (float) vacc0x3;
float vout0x4 = (float) vacc0x4;
float vout0x5 = (float) vacc0x5;
float vout0x6 = (float) vacc0x6;
float vout0x7 = (float) vacc0x7;
float vout1x0 = (float) vacc1x0;
float vout1x1 = (float) vacc1x1;
float vout1x2 = (float) vacc1x2;
float vout1x3 = (float) vacc1x3;
float vout1x4 = (float) vacc1x4;
float vout1x5 = (float) vacc1x5;
float vout1x6 = (float) vacc1x6;
float vout1x7 = (float) vacc1x7;
const float vscale0 = quantization_params[0].scale;
const float vscale1 = quantization_params[1].scale;
vout0x0 *= vscale0;
vout1x0 *= vscale1;
vout0x1 *= vscale0;
vout1x1 *= vscale1;
vout0x2 *= vscale0;
vout1x2 *= vscale1;
vout0x3 *= vscale0;
vout1x3 *= vscale1;
vout0x4 *= vscale0;
vout1x4 *= vscale1;
vout0x5 *= vscale0;
vout1x5 *= vscale1;
vout0x6 *= vscale0;
vout1x6 *= vscale1;
vout0x7 *= vscale0;
vout1x7 *= vscale1;
const float vbias0 = ((const float*) w)[0];
const float vbias1 = ((const float*) w)[1];
const float vbias2 = ((const float*) w)[2];
const float vbias3 = ((const float*) w)[3];
const float vbias4 = ((const float*) w)[4];
const float vbias5 = ((const float*) w)[5];
const float vbias6 = ((const float*) w)[6];
const float vbias7 = ((const float*) w)[7];
w = (const float*) w + 8;
const float vmax = params->scalar.max;
const float vmin = params->scalar.min;
vout0x0 += vbias0;
vout0x0 = math_max_f32(vout0x0, vmin);
vout0x0 = math_min_f32(vout0x0, vmax);
vout1x0 += vbias0;
vout1x0 = math_max_f32(vout1x0, vmin);
vout1x0 = math_min_f32(vout1x0, vmax);
vout0x1 += vbias1;
vout0x1 = math_max_f32(vout0x1, vmin);
vout0x1 = math_min_f32(vout0x1, vmax);
vout1x1 += vbias1;
vout1x1 = math_max_f32(vout1x1, vmin);
vout1x1 = math_min_f32(vout1x1, vmax);
vout0x2 += vbias2;
vout0x2 = math_max_f32(vout0x2, vmin);
vout0x2 = math_min_f32(vout0x2, vmax);
vout1x2 += vbias2;
vout1x2 = math_max_f32(vout1x2, vmin);
vout1x2 = math_min_f32(vout1x2, vmax);
vout0x3 += vbias3;
vout0x3 = math_max_f32(vout0x3, vmin);
vout0x3 = math_min_f32(vout0x3, vmax);
vout1x3 += vbias3;
vout1x3 = math_max_f32(vout1x3, vmin);
vout1x3 = math_min_f32(vout1x3, vmax);
vout0x4 += vbias4;
vout0x4 = math_max_f32(vout0x4, vmin);
vout0x4 = math_min_f32(vout0x4, vmax);
vout1x4 += vbias4;
vout1x4 = math_max_f32(vout1x4, vmin);
vout1x4 = math_min_f32(vout1x4, vmax);
vout0x5 += vbias5;
vout0x5 = math_max_f32(vout0x5, vmin);
vout0x5 = math_min_f32(vout0x5, vmax);
vout1x5 += vbias5;
vout1x5 = math_max_f32(vout1x5, vmin);
vout1x5 = math_min_f32(vout1x5, vmax);
vout0x6 += vbias6;
vout0x6 = math_max_f32(vout0x6, vmin);
vout0x6 = math_min_f32(vout0x6, vmax);
vout1x6 += vbias6;
vout1x6 = math_max_f32(vout1x6, vmin);
vout1x6 = math_min_f32(vout1x6, vmax);
vout0x7 += vbias7;
vout0x7 = math_max_f32(vout0x7, vmin);
vout0x7 = math_min_f32(vout0x7, vmax);
vout1x7 += vbias7;
vout1x7 = math_max_f32(vout1x7, vmin);
vout1x7 = math_min_f32(vout1x7, vmax);
if XNN_LIKELY(nc >= 8) {
c1[0] = vout1x0;
c1[1] = vout1x1;
c1[2] = vout1x2;
c1[3] = vout1x3;
c1[4] = vout1x4;
c1[5] = vout1x5;
c1[6] = vout1x6;
c1[7] = vout1x7;
c0[0] = vout0x0;
c0[1] = vout0x1;
c0[2] = vout0x2;
c0[3] = vout0x3;
c0[4] = vout0x4;
c0[5] = vout0x5;
c0[6] = vout0x6;
c0[7] = vout0x7;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
nc -= 8;
} else {
if (nc & 4) {
c1[0] = (float) vout1x0;
c1[1] = (float) vout1x1;
c1[2] = (float) vout1x2;
c1[3] = (float) vout1x3;
vout1x0 = vout1x4;
vout1x1 = vout1x5;
vout1x2 = vout1x6;
c1 += 4;
c0[0] = (float) vout0x0;
c0[1] = (float) vout0x1;
c0[2] = (float) vout0x2;
c0[3] = (float) vout0x3;
vout0x0 = vout0x4;
vout0x1 = vout0x5;
vout0x2 = vout0x6;
c0 += 4;
}
if (nc & 2) {
c1[0] = (float) vout1x0;
c1[1] = (float) vout1x1;
vout1x0 = vout1x2;
vout1x1 = vout1x3;
vout1x2 = vout1x4;
vout1x3 = vout1x5;
vout1x4 = vout1x6;
c1 += 2;
c0[0] = (float) vout0x0;
c0[1] = (float) vout0x1;
vout0x0 = vout0x2;
vout0x1 = vout0x3;
vout0x2 = vout0x4;
vout0x3 = vout0x5;
vout0x4 = vout0x6;
c0 += 2;
}
if (nc & 1) {
c1[0] = (float) vout1x0;
c0[0] = (float) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 8,365
| 28.878571
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-2x8c2s4-minmax-neon-mlal.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_2x8c2s4__neon_mlal(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int32x4_t vzp01 = vld1q_s32(&quantization_params[0].zero_point);
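    // This load spans both rows' {zero_point, scale} pairs, so the zero points land in
    // int32 lanes 0 and 2; hence lane 0 of the low and high halves below.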
int32x4_t vacc1x0123 = vmulq_lane_s32(vacc0x0123, vget_high_s32(vzp01), 0);
int32x4_t vacc1x4567 = vmulq_lane_s32(vacc0x4567, vget_high_s32(vzp01), 0);
vacc0x0123 = vmulq_lane_s32(vacc0x0123, vget_low_s32(vzp01), 0);
vacc0x4567 = vmulq_lane_s32(vacc0x4567, vget_low_s32(vzp01), 0);
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0x1);
vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0x1);
vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
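      // Rotate the activations left by two bytes so each output column picks up its next
      // k-pair: the c2s4 scheme covers all 8 k values in 4 shuffle steps of 2 channels each.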
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
va1x0 = vext_s8(va1x0, va1x0, 2);
va1x1 = vext_s8(va1x1, va1x1, 2);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0x1);
vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0x1);
vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
va1x0 = vext_s8(va1x0, va1x0, 2);
va1x1 = vext_s8(va1x1, va1x1, 2);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0x1);
vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0x1);
vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
va1x0 = vext_s8(va1x0, va1x0, 2);
va1x1 = vext_s8(va1x1, va1x1, 2);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0x1);
vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0x1);
vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
k -= 16 * sizeof(int8_t);
}
if (k != 0) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
va0x0 = vext_s8(va0x0, va0x0, 2);
va1x0 = vext_s8(va1x0, va1x0, 2);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
va0x0 = vext_s8(va0x0, va0x0, 2);
va1x0 = vext_s8(va1x0, va1x0, 2);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
va0x0 = vext_s8(va0x0, va0x0, 2);
va1x0 = vext_s8(va1x0, va1x0, 2);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
}
float32x4_t vout0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vout0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vout1x0123 = vcvtq_f32_s32(vacc1x0123);
float32x4_t vout1x4567 = vcvtq_f32_s32(vacc1x4567);
const float32x4_t vscale01 = vreinterpretq_f32_s32(vld1q_s32(&quantization_params[0].zero_point));
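    // Reinterpreted as floats, the same {zero_point, scale} pairs put scale0 and scale1
    // in lanes 1 and 3, read below as lane 1 of the low and high halves.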
const float32x4_t vbias0123 = vld1q_f32(w); w = (const float*) w + 4;
const float32x4_t vbias4567 = vld1q_f32(w); w = (const float*) w + 4;
#if XNN_ARCH_ARM64
vout0x0123 = vfmaq_lane_f32(vbias0123, vout0x0123, vget_low_f32(vscale01), 1);
#else
vout0x0123 = vmlaq_lane_f32(vbias0123, vout0x0123, vget_low_f32(vscale01), 1);
#endif
#if XNN_ARCH_ARM64
vout0x4567 = vfmaq_lane_f32(vbias4567, vout0x4567, vget_low_f32(vscale01), 1);
#else
vout0x4567 = vmlaq_lane_f32(vbias4567, vout0x4567, vget_low_f32(vscale01), 1);
#endif
#if XNN_ARCH_ARM64
vout1x0123 = vfmaq_lane_f32(vbias0123, vout1x0123, vget_high_f32(vscale01), 1);
#else
vout1x0123 = vmlaq_lane_f32(vbias0123, vout1x0123, vget_high_f32(vscale01), 1);
#endif
#if XNN_ARCH_ARM64
vout1x4567 = vfmaq_lane_f32(vbias4567, vout1x4567, vget_high_f32(vscale01), 1);
#else
vout1x4567 = vmlaq_lane_f32(vbias4567, vout1x4567, vget_high_f32(vscale01), 1);
#endif
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
vout0x0123 = vmaxq_f32(vout0x0123, vmin);
vout0x4567 = vmaxq_f32(vout0x4567, vmin);
vout0x0123 = vminq_f32(vout0x0123, vmax);
vout0x4567 = vminq_f32(vout0x4567, vmax);
vout1x0123 = vmaxq_f32(vout1x0123, vmin);
vout1x4567 = vmaxq_f32(vout1x4567, vmin);
vout1x0123 = vminq_f32(vout1x0123, vmax);
vout1x4567 = vminq_f32(vout1x4567, vmax);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(&c1[0], vout1x0123);
vst1q_f32(&c1[4], vout1x4567);
vst1q_f32(&c0[0], vout0x0123);
vst1q_f32(&c0[4], vout0x4567);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c1, vout1x0123); c1 += 4;
vout1x0123 = vout1x4567;
vst1q_f32(c0, vout0x0123); c0 += 4;
vout0x0123 = vout0x4567;
}
float32x2_t vout1x01 = vget_low_f32(vout1x0123);
float32x2_t vout0x01 = vget_low_f32(vout0x0123);
if (nc & 2) {
vst1_f32(c1, vout1x01); c1 += 2;
vst1_f32(c0, vout0x01); c0 += 2;
vout1x01 = vget_high_f32(vout1x0123);
vout0x01 = vget_high_f32(vout0x0123);
}
if (nc & 1) {
vst1_lane_f32(c1, vout1x01, 0);
vst1_lane_f32(c0, vout0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 12,442
| 44.578755
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-2x8c4-minmax-neondot.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_2x8c4__neondot(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
const int32x4_t vzp0 = vdupq_n_s32(quantization_params[0].zero_point);
const int32x4_t vzp1 = vdupq_n_s32(quantization_params[1].zero_point);
// Loop over groups of 8 columns.
do {
    // Initialize accumulators with the packed per-column weight sums (ksum). 8 int32
    // sums are loaded from the weight matrix at the start of the group of 8 columns;
    // they are scaled by each row's zero point below, and the float bias is added
    // after the inner loop.
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc1x0123 = vacc0x0123;
int32x4_t vacc1x4567 = vacc0x4567;
vacc0x0123 = vmulq_s32(vacc0x0123, vzp0);
vacc0x4567 = vmulq_s32(vacc0x4567, vzp0);
vacc1x0123 = vmulq_s32(vacc1x0123, vzp1);
vacc1x4567 = vmulq_s32(vacc1x4567, vzp1);
    // Inner accumulation loop over K, accumulating into the 8 columns.
size_t k = kc;
    // 2x partially-unrolled loop, loading 8 activation bytes at a time.
while (k >= 8 * sizeof(int8_t)) {
// Load a 2x8 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;
const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 8;
      // Load an 8x8 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 2x8 * 8x8 --> 2x8.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb4567x0123, va1x01234567, 1);
vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb4567x4567, va1x01234567, 1);
k -= 8 * sizeof(int8_t);
}
    // Handle the final 4 positions of `k`, if any.
if XNN_UNLIKELY(k != 0) {
// Load a 2x4 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 4;
const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 4;
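      // vld1_s8 reads 8 bytes even though only 4 remain; the over-read is permitted by
      // XNN_OOB_READS, and only lane 0 of the dot products consumes the valid bytes.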
// Load a 4x8 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 2x4 * 4x8 --> 2x8.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
}
float32x4_t vout0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vout0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vout1x0123 = vcvtq_f32_s32(vacc1x0123);
float32x4_t vout1x4567 = vcvtq_f32_s32(vacc1x4567);
const float32x4_t vscale0 = vdupq_n_f32(quantization_params[0].scale);
vout0x0123 = vmulq_f32(vout0x0123, vscale0);
vout0x4567 = vmulq_f32(vout0x4567, vscale0);
const float32x4_t vscale1 = vdupq_n_f32(quantization_params[1].scale);
vout1x0123 = vmulq_f32(vout1x0123, vscale1);
vout1x4567 = vmulq_f32(vout1x4567, vscale1);
const float32x4_t vbias0123 = vld1q_f32(w); w = (const float*) w + 4;
const float32x4_t vbias4567 = vld1q_f32(w); w = (const float*) w + 4;
vout0x0123 = vaddq_f32(vout0x0123, vbias0123);
vout0x4567 = vaddq_f32(vout0x4567, vbias4567);
vout1x0123 = vaddq_f32(vout1x0123, vbias0123);
vout1x4567 = vaddq_f32(vout1x4567, vbias4567);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vout0x0123 = vmaxq_f32(vout0x0123, vmin);
vout0x0123 = vminq_f32(vout0x0123, vmax);
vout0x4567 = vmaxq_f32(vout0x4567, vmin);
vout0x4567 = vminq_f32(vout0x4567, vmax);
vout1x0123 = vmaxq_f32(vout1x0123, vmin);
vout1x0123 = vminq_f32(vout1x0123, vmax);
vout1x4567 = vmaxq_f32(vout1x4567, vmin);
vout1x4567 = vminq_f32(vout1x4567, vmax);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(&c1[0], vout1x0123);
vst1q_f32(&c1[4], vout1x4567);
vst1q_f32(&c0[0], vout0x0123);
vst1q_f32(&c0[4], vout0x4567);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c1, vout1x0123); c1 += 4;
vout1x0123 = vout1x4567;
vst1q_f32(c0, vout0x0123); c0 += 4;
vout0x0123 = vout0x4567;
}
float32x2_t vout1x01 = vget_low_f32(vout1x0123);
float32x2_t vout0x01 = vget_low_f32(vout0x0123);
if (nc & 2) {
vst1_f32(c1, vout1x01); c1 += 2;
vst1_f32(c0, vout0x01); c0 += 2;
vout1x01 = vget_high_f32(vout1x0123);
vout0x01 = vget_high_f32(vout0x0123);
}
if (nc & 1) {
vst1_lane_f32(c1, vout1x01, 0);
vst1_lane_f32(c0, vout0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 6,611
| 37.44186
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-3x16c4-minmax-neondot.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_3x16c4__neondot(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int32x4_t vzp0 = vdupq_n_s32(quantization_params[0].zero_point);
const int32x4_t vzp1 = vdupq_n_s32(quantization_params[1].zero_point);
const int32x4_t vzp2 = vdupq_n_s32(quantization_params[2].zero_point);
// Loop over groups of 16 columns.
do {
    // Initialize accumulators with the packed per-column weight sums (ksum). 16 int32
    // sums are loaded from the weight matrix at the start of the group of 16 columns;
    // they are scaled by each row's zero point below, and the float bias is added
    // after the inner loop.
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc1x0123 = vacc0x0123;
int32x4_t vacc1x4567 = vacc0x4567;
int32x4_t vacc1x89AB = vacc0x89AB;
int32x4_t vacc1xCDEF = vacc0xCDEF;
int32x4_t vacc2x0123 = vacc0x0123;
int32x4_t vacc2x4567 = vacc0x4567;
int32x4_t vacc2x89AB = vacc0x89AB;
int32x4_t vacc2xCDEF = vacc0xCDEF;
vacc0x0123 = vmulq_s32(vacc0x0123, vzp0);
vacc0x4567 = vmulq_s32(vacc0x4567, vzp0);
vacc0x89AB = vmulq_s32(vacc0x89AB, vzp0);
vacc0xCDEF = vmulq_s32(vacc0xCDEF, vzp0);
vacc1x0123 = vmulq_s32(vacc1x0123, vzp1);
vacc1x4567 = vmulq_s32(vacc1x4567, vzp1);
vacc1x89AB = vmulq_s32(vacc1x89AB, vzp1);
vacc1xCDEF = vmulq_s32(vacc1xCDEF, vzp1);
vacc2x0123 = vmulq_s32(vacc2x0123, vzp2);
vacc2x4567 = vmulq_s32(vacc2x4567, vzp2);
vacc2x89AB = vmulq_s32(vacc2x89AB, vzp2);
vacc2xCDEF = vmulq_s32(vacc2xCDEF, vzp2);
    // Inner accumulation loop over K, accumulating into the 16 columns.
size_t k = kc;
    // 2x partially-unrolled loop, loading 8 activation bytes at a time.
while (k >= 8 * sizeof(int8_t)) {
// Load a 3x8 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;
const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 8;
const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 8;
      // Load an 8x16 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 3x8 * 8x16 --> 3x16.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0);
vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
vacc1x89AB = vdotq_lane_s32(vacc1x89AB, vb0123x89AB, va1x01234567, 0);
vacc1xCDEF = vdotq_lane_s32(vacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
vacc2x89AB = vdotq_lane_s32(vacc2x89AB, vb0123x89AB, va2x01234567, 0);
vacc2xCDEF = vdotq_lane_s32(vacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb4567x89AB, va0x01234567, 1);
vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb4567x0123, va1x01234567, 1);
vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb4567x4567, va1x01234567, 1);
vacc1x89AB = vdotq_lane_s32(vacc1x89AB, vb4567x89AB, va1x01234567, 1);
vacc1xCDEF = vdotq_lane_s32(vacc1xCDEF, vb4567xCDEF, va1x01234567, 1);
vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb4567x0123, va2x01234567, 1);
vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb4567x4567, va2x01234567, 1);
vacc2x89AB = vdotq_lane_s32(vacc2x89AB, vb4567x89AB, va2x01234567, 1);
vacc2xCDEF = vdotq_lane_s32(vacc2xCDEF, vb4567xCDEF, va2x01234567, 1);
k -= 8 * sizeof(int8_t);
}
    // Handle the final 4 positions of `k`, if any.
if XNN_UNLIKELY(k != 0) {
// Load a 3x4 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 4;
const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 4;
const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 4;
// Load a 4x16 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 3x4 * 4x16 --> 3x16.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0);
vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
vacc1x89AB = vdotq_lane_s32(vacc1x89AB, vb0123x89AB, va1x01234567, 0);
vacc1xCDEF = vdotq_lane_s32(vacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
vacc2x89AB = vdotq_lane_s32(vacc2x89AB, vb0123x89AB, va2x01234567, 0);
vacc2xCDEF = vdotq_lane_s32(vacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
}
float32x4_t vout0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vout0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vout0x89AB = vcvtq_f32_s32(vacc0x89AB);
float32x4_t vout0xCDEF = vcvtq_f32_s32(vacc0xCDEF);
float32x4_t vout1x0123 = vcvtq_f32_s32(vacc1x0123);
float32x4_t vout1x4567 = vcvtq_f32_s32(vacc1x4567);
float32x4_t vout1x89AB = vcvtq_f32_s32(vacc1x89AB);
float32x4_t vout1xCDEF = vcvtq_f32_s32(vacc1xCDEF);
float32x4_t vout2x0123 = vcvtq_f32_s32(vacc2x0123);
float32x4_t vout2x4567 = vcvtq_f32_s32(vacc2x4567);
float32x4_t vout2x89AB = vcvtq_f32_s32(vacc2x89AB);
float32x4_t vout2xCDEF = vcvtq_f32_s32(vacc2xCDEF);
const float32x4_t vscale0 = vdupq_n_f32(quantization_params[0].scale);
vout0x0123 = vmulq_f32(vout0x0123, vscale0);
vout0x4567 = vmulq_f32(vout0x4567, vscale0);
vout0x89AB = vmulq_f32(vout0x89AB, vscale0);
vout0xCDEF = vmulq_f32(vout0xCDEF, vscale0);
const float32x4_t vscale1 = vdupq_n_f32(quantization_params[1].scale);
vout1x0123 = vmulq_f32(vout1x0123, vscale1);
vout1x4567 = vmulq_f32(vout1x4567, vscale1);
vout1x89AB = vmulq_f32(vout1x89AB, vscale1);
vout1xCDEF = vmulq_f32(vout1xCDEF, vscale1);
const float32x4_t vscale2 = vdupq_n_f32(quantization_params[2].scale);
vout2x0123 = vmulq_f32(vout2x0123, vscale2);
vout2x4567 = vmulq_f32(vout2x4567, vscale2);
vout2x89AB = vmulq_f32(vout2x89AB, vscale2);
vout2xCDEF = vmulq_f32(vout2xCDEF, vscale2);
const float32x4_t vbias0123 = vld1q_f32(w); w = (const float*) w + 4;
const float32x4_t vbias4567 = vld1q_f32(w); w = (const float*) w + 4;
const float32x4_t vbias89AB = vld1q_f32(w); w = (const float*) w + 4;
const float32x4_t vbiasCDEF = vld1q_f32(w); w = (const float*) w + 4;
vout0x0123 = vaddq_f32(vout0x0123, vbias0123);
vout0x4567 = vaddq_f32(vout0x4567, vbias4567);
vout0x89AB = vaddq_f32(vout0x89AB, vbias89AB);
vout0xCDEF = vaddq_f32(vout0xCDEF, vbiasCDEF);
vout1x0123 = vaddq_f32(vout1x0123, vbias0123);
vout1x4567 = vaddq_f32(vout1x4567, vbias4567);
vout1x89AB = vaddq_f32(vout1x89AB, vbias89AB);
vout1xCDEF = vaddq_f32(vout1xCDEF, vbiasCDEF);
vout2x0123 = vaddq_f32(vout2x0123, vbias0123);
vout2x4567 = vaddq_f32(vout2x4567, vbias4567);
vout2x89AB = vaddq_f32(vout2x89AB, vbias89AB);
vout2xCDEF = vaddq_f32(vout2xCDEF, vbiasCDEF);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vout0x0123 = vmaxq_f32(vout0x0123, vmin);
vout0x0123 = vminq_f32(vout0x0123, vmax);
vout0x4567 = vmaxq_f32(vout0x4567, vmin);
vout0x4567 = vminq_f32(vout0x4567, vmax);
vout0x89AB = vmaxq_f32(vout0x89AB, vmin);
vout0x89AB = vminq_f32(vout0x89AB, vmax);
vout0xCDEF = vmaxq_f32(vout0xCDEF, vmin);
vout0xCDEF = vminq_f32(vout0xCDEF, vmax);
vout1x0123 = vmaxq_f32(vout1x0123, vmin);
vout1x0123 = vminq_f32(vout1x0123, vmax);
vout1x4567 = vmaxq_f32(vout1x4567, vmin);
vout1x4567 = vminq_f32(vout1x4567, vmax);
vout1x89AB = vmaxq_f32(vout1x89AB, vmin);
vout1x89AB = vminq_f32(vout1x89AB, vmax);
vout1xCDEF = vmaxq_f32(vout1xCDEF, vmin);
vout1xCDEF = vminq_f32(vout1xCDEF, vmax);
vout2x0123 = vmaxq_f32(vout2x0123, vmin);
vout2x0123 = vminq_f32(vout2x0123, vmax);
vout2x4567 = vmaxq_f32(vout2x4567, vmin);
vout2x4567 = vminq_f32(vout2x4567, vmax);
vout2x89AB = vmaxq_f32(vout2x89AB, vmin);
vout2x89AB = vminq_f32(vout2x89AB, vmax);
vout2xCDEF = vmaxq_f32(vout2xCDEF, vmin);
vout2xCDEF = vminq_f32(vout2xCDEF, vmax);
if XNN_LIKELY(nc >= 16) {
vst1q_f32(&c2[0], vout2x0123);
vst1q_f32(&c2[4], vout2x4567);
vst1q_f32(&c2[8], vout2x89AB);
vst1q_f32(&c2[12], vout2xCDEF);
vst1q_f32(&c1[0], vout1x0123);
vst1q_f32(&c1[4], vout1x4567);
vst1q_f32(&c1[8], vout1x89AB);
vst1q_f32(&c1[12], vout1xCDEF);
vst1q_f32(&c0[0], vout0x0123);
vst1q_f32(&c0[4], vout0x4567);
vst1q_f32(&c0[8], vout0x89AB);
vst1q_f32(&c0[12], vout0xCDEF);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
nc -= 16;
} else {
if (nc & 8) {
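        // Store the low 8 of the 16 columns, then slide the upper halves down for the
        // narrower tail cases below.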
vst1q_f32(c2, vout2x0123); c2 += 4;
vout2x0123 = vout2x89AB;
vst1q_f32(c1, vout1x0123); c1 += 4;
vout1x0123 = vout1x89AB;
vst1q_f32(c0, vout0x0123); c0 += 4;
vout0x0123 = vout0x89AB;
vst1q_f32(c2, vout2x4567); c2 += 4;
vout2x4567 = vout2xCDEF;
vst1q_f32(c1, vout1x4567); c1 += 4;
vout1x4567 = vout1xCDEF;
vst1q_f32(c0, vout0x4567); c0 += 4;
vout0x4567 = vout0xCDEF;
}
if (nc & 4) {
vst1q_f32(c2, vout2x0123); c2 += 4;
vout2x0123 = vout2x4567;
vst1q_f32(c1, vout1x0123); c1 += 4;
vout1x0123 = vout1x4567;
vst1q_f32(c0, vout0x0123); c0 += 4;
vout0x0123 = vout0x4567;
}
float32x2_t vout2x01 = vget_low_f32(vout2x0123);
float32x2_t vout1x01 = vget_low_f32(vout1x0123);
float32x2_t vout0x01 = vget_low_f32(vout0x0123);
if (nc & 2) {
vst1_f32(c2, vout2x01); c2 += 2;
vst1_f32(c1, vout1x01); c1 += 2;
vst1_f32(c0, vout0x01); c0 += 2;
vout2x01 = vget_high_f32(vout2x0123);
vout1x01 = vget_high_f32(vout1x0123);
vout0x01 = vget_high_f32(vout0x0123);
}
if (nc & 1) {
vst1_lane_f32(c2, vout2x01, 0);
vst1_lane_f32(c1, vout1x01, 0);
vst1_lane_f32(c0, vout0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 13,380
| 43.603333
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-3x4c8-minmax-avx-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_3x4c8__avx_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
__m128i vzp2 = _mm_cvtsi32_si128((int) quantization_params[2].zero_point);
vzp2 = _mm_shuffle_epi32(vzp2, _MM_SHUFFLE(0, 0, 0, 0));
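    // The zero points for rows 0-1 come from a single 16-byte load of quantization_params;
    // row 2's pair lies beyond that load, so its zero point is broadcast separately.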
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
const __m128i vksumzp1 = _mm_mullo_epi32(vksum, vzp1);
const __m128i vksumzp2 = _mm_mullo_epi32(vksum, vzp2);
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
__m128i vacc1x0 = _mm_blend_epi16(vksumzp1, vzero, 0xFC);
__m128i vacc1x1 = _mm_blend_epi16(vksumzp1, vzero, 0xF3);
__m128i vacc1x2 = _mm_blend_epi16(vksumzp1, vzero, 0xCF);
__m128i vacc1x3 = _mm_blend_epi16(vksumzp1, vzero, 0x3F);
__m128i vacc2x0 = _mm_blend_epi16(vksumzp2, vzero, 0xFC);
__m128i vacc2x1 = _mm_blend_epi16(vksumzp2, vzero, 0xF3);
__m128i vacc2x2 = _mm_blend_epi16(vksumzp2, vzero, 0xCF);
__m128i vacc2x3 = _mm_blend_epi16(vksumzp2, vzero, 0x3F);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
a2 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vout2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vscale2 = _mm_broadcast_ss(&quantization_params[2].scale);
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
vout2x0123 = _mm_mul_ps(vout2x0123, vscale2);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
vout2x0123 = _mm_add_ps(vout2x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
vout2x0123 = _mm_max_ps(vout2x0123, vmin);
vout2x0123 = _mm_min_ps(vout2x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c2, vout2x0123);
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c2, vout2x0123);
vout2x0123 = _mm_unpackhi_ps(vout2x0123, vout2x0123);
c2 += 2;
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c2, vout2x0123);
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 7,631
| 38.544041
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-3x4c8-minmax-avx-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_3x4c8__avx_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
__m128i vzp2 = _mm_cvtsi32_si128((int) quantization_params[2].zero_point);
vzp2 = _mm_shuffle_epi32(vzp2, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
const __m128i vksumzp1 = _mm_mullo_epi32(vksum, vzp1);
const __m128i vksumzp2 = _mm_mullo_epi32(vksum, vzp2);
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
__m128i vacc1x0 = _mm_blend_epi16(vksumzp1, vzero, 0xFC);
__m128i vacc1x1 = _mm_blend_epi16(vksumzp1, vzero, 0xF3);
__m128i vacc1x2 = _mm_blend_epi16(vksumzp1, vzero, 0xCF);
__m128i vacc1x3 = _mm_blend_epi16(vksumzp1, vzero, 0x3F);
__m128i vacc2x0 = _mm_blend_epi16(vksumzp2, vzero, 0xFC);
__m128i vacc2x1 = _mm_blend_epi16(vksumzp2, vzero, 0xF3);
__m128i vacc2x2 = _mm_blend_epi16(vksumzp2, vzero, 0xCF);
__m128i vacc2x3 = _mm_blend_epi16(vksumzp2, vzero, 0x3F);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
a2 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
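    // Two rounds of horizontal adds collapse the four per-column accumulators
    // of each row into a single vector holding the four column sums.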
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vout2x0123 = _mm_cvtepi32_ps(vacc2x0123);
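    // Each xnn_qd8_quantization_params entry packs an int32 zero_point next
    // to a float scale, so one unaligned 16-byte load covers rows 0 and 1:
    // lanes 1 and 3 hold their scales (lanes 0 and 2 held the zero points).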
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vscale2 = _mm_broadcast_ss(&quantization_params[2].scale);
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
vout2x0123 = _mm_mul_ps(vout2x0123, vscale2);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
vout2x0123 = _mm_add_ps(vout2x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
vout2x0123 = _mm_max_ps(vout2x0123, vmin);
vout2x0123 = _mm_min_ps(vout2x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c2, vout2x0123);
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c2, vout2x0123);
vout2x0123 = _mm_unpackhi_ps(vout2x0123, vout2x0123);
c2 += 2;
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c2, vout2x0123);
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 7,749 | 38.340102 | 109 | c |
XNNPACK | XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-3x4c8-minmax-sse2-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_3x4c8__sse2_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
__m128i vzp2 = _mm_cvtsi32_si128((int) quantization_params[2].zero_point);
vzp2 = _mm_shuffle_epi32(vzp2, _MM_SHUFFLE(0, 0, 0, 0));
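    // SSE2 lacks _mm_mullo_epi32, so the 32-bit products ksum * zero_point
    // are emulated from 16-bit halves: mullo/mulhi on the low halves plus a
    // cross term, with a fix-up for the signedness of the zero point.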
const __m128i vksum_lo = _mm_srli_epi32(_mm_slli_epi32(vksum, 16), 16);
const __m128i vksum_hi = _mm_srli_epi32(vksum, 16);
__m128i vzpprodksumhi0 = _mm_mulhi_epu16(vzp0, vksum_lo);
const __m128i vzpprodksumlo0 = _mm_mullo_epi16(vzp0, vksum_lo);
__m128i vzpprodksumhi1 = _mm_mulhi_epu16(vzp1, vksum_lo);
const __m128i vzpprodksumlo1 = _mm_mullo_epi16(vzp1, vksum_lo);
__m128i vzpprodksumhi2 = _mm_mulhi_epu16(vzp2, vksum_lo);
const __m128i vzpprodksumlo2 = _mm_mullo_epi16(vzp2, vksum_lo);
vzpprodksumhi0 = _mm_add_epi16(vzpprodksumhi0, _mm_mullo_epi16(vzp0, vksum_hi));
vzpprodksumhi0 = _mm_sub_epi16(vzpprodksumhi0, _mm_and_si128(_mm_srai_epi16(vzp0, 15), vksum_lo));
vzpprodksumhi1 = _mm_add_epi16(vzpprodksumhi1, _mm_mullo_epi16(vzp1, vksum_hi));
vzpprodksumhi1 = _mm_sub_epi16(vzpprodksumhi1, _mm_and_si128(_mm_srai_epi16(vzp1, 15), vksum_lo));
vzpprodksumhi2 = _mm_add_epi16(vzpprodksumhi2, _mm_mullo_epi16(vzp2, vksum_hi));
vzpprodksumhi2 = _mm_sub_epi16(vzpprodksumhi2, _mm_and_si128(_mm_srai_epi16(vzp2, 15), vksum_lo));
vzpprodksumhi0 = _mm_slli_si128(vzpprodksumhi0, 2);
vzpprodksumhi1 = _mm_slli_si128(vzpprodksumhi1, 2);
vzpprodksumhi2 = _mm_slli_si128(vzpprodksumhi2, 2);
const __m128i vksumzp0 = _mm_or_si128(vzpprodksumhi0, vzpprodksumlo0);
const __m128i vksumzp1 = _mm_or_si128(vzpprodksumhi1, vzpprodksumlo1);
const __m128i vksumzp2 = _mm_or_si128(vzpprodksumhi2, vzpprodksumlo2);
const __m128i vksum010 = _mm_unpacklo_epi32(vksumzp0, vzero);
const __m128i vksum230 = _mm_unpackhi_epi32(vksumzp0, vzero);
const __m128i vksum011 = _mm_unpacklo_epi32(vksumzp1, vzero);
const __m128i vksum231 = _mm_unpackhi_epi32(vksumzp1, vzero);
const __m128i vksum012 = _mm_unpacklo_epi32(vksumzp2, vzero);
const __m128i vksum232 = _mm_unpackhi_epi32(vksumzp2, vzero);
__m128i vacc0x0 = _mm_unpacklo_epi64(vksum010, vzero);
__m128i vacc0x1 = _mm_unpackhi_epi64(vksum010, vzero);
__m128i vacc1x0 = _mm_unpacklo_epi64(vksum011, vzero);
__m128i vacc1x1 = _mm_unpackhi_epi64(vksum011, vzero);
__m128i vacc2x0 = _mm_unpacklo_epi64(vksum012, vzero);
__m128i vacc2x1 = _mm_unpackhi_epi64(vksum012, vzero);
__m128i vacc0x2 = _mm_unpacklo_epi64(vksum230, vzero);
__m128i vacc0x3 = _mm_unpackhi_epi64(vksum230, vzero);
__m128i vacc1x2 = _mm_unpacklo_epi64(vksum231, vzero);
__m128i vacc1x3 = _mm_unpackhi_epi64(vksum231, vzero);
__m128i vacc2x2 = _mm_unpacklo_epi64(vksum232, vzero);
__m128i vacc2x3 = _mm_unpackhi_epi64(vksum232, vzero);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
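      // Sign-extend int8 activations to int16 the SSE2 way: duplicate each
      // byte into both halves of a 16-bit lane, then arithmetic-shift right
      // by 8.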
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8);
a2 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
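      // vsb01 = (0 > vb01) is the per-byte sign mask; unpacking the weight
      // bytes against it sign-extends int8 to int16 without SSE4.1.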
const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
const __m128i vacc1x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x0, vacc1x2), _mm_unpackhi_epi32(vacc1x0, vacc1x2));
const __m128i vacc1x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x1, vacc1x3), _mm_unpackhi_epi32(vacc1x1, vacc1x3));
const __m128i vacc2x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x0, vacc2x2), _mm_unpackhi_epi32(vacc2x0, vacc2x2));
const __m128i vacc2x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x1, vacc2x3), _mm_unpackhi_epi32(vacc2x1, vacc2x3));
__m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
__m128i vacc1x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x02, vacc1x13), _mm_unpackhi_epi32(vacc1x02, vacc1x13));
__m128i vacc2x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x02, vacc2x13), _mm_unpackhi_epi32(vacc2x02, vacc2x13));
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vout2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vscale2 = _mm_load1_ps(&quantization_params[2].scale);
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
vout2x0123 = _mm_mul_ps(vout2x0123, vscale2);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
vout2x0123 = _mm_add_ps(vout2x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
vout2x0123 = _mm_max_ps(vout2x0123, vmin);
vout2x0123 = _mm_min_ps(vout2x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c2, vout2x0123);
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c2, vout2x0123);
vout2x0123 = _mm_unpackhi_ps(vout2x0123, vout2x0123);
c2 += 2;
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c2, vout2x0123);
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 9,955 | 44.049774 | 119 | c |
XNNPACK | XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-3x4c8-minmax-sse2-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_3x4c8__sse2_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
__m128i vzp2 = _mm_cvtsi32_si128((int) quantization_params[2].zero_point);
vzp2 = _mm_shuffle_epi32(vzp2, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vksum_lo = _mm_srli_epi32(_mm_slli_epi32(vksum, 16), 16);
const __m128i vksum_hi = _mm_srli_epi32(vksum, 16);
__m128i vzpprodksumhi0 = _mm_mulhi_epu16(vzp0, vksum_lo);
const __m128i vzpprodksumlo0 = _mm_mullo_epi16(vzp0, vksum_lo);
__m128i vzpprodksumhi1 = _mm_mulhi_epu16(vzp1, vksum_lo);
const __m128i vzpprodksumlo1 = _mm_mullo_epi16(vzp1, vksum_lo);
__m128i vzpprodksumhi2 = _mm_mulhi_epu16(vzp2, vksum_lo);
const __m128i vzpprodksumlo2 = _mm_mullo_epi16(vzp2, vksum_lo);
vzpprodksumhi0 = _mm_add_epi16(vzpprodksumhi0, _mm_mullo_epi16(vzp0, vksum_hi));
vzpprodksumhi0 = _mm_sub_epi16(vzpprodksumhi0, _mm_and_si128(_mm_srai_epi16(vzp0, 15), vksum_lo));
vzpprodksumhi1 = _mm_add_epi16(vzpprodksumhi1, _mm_mullo_epi16(vzp1, vksum_hi));
vzpprodksumhi1 = _mm_sub_epi16(vzpprodksumhi1, _mm_and_si128(_mm_srai_epi16(vzp1, 15), vksum_lo));
vzpprodksumhi2 = _mm_add_epi16(vzpprodksumhi2, _mm_mullo_epi16(vzp2, vksum_hi));
vzpprodksumhi2 = _mm_sub_epi16(vzpprodksumhi2, _mm_and_si128(_mm_srai_epi16(vzp2, 15), vksum_lo));
vzpprodksumhi0 = _mm_slli_si128(vzpprodksumhi0, 2);
vzpprodksumhi1 = _mm_slli_si128(vzpprodksumhi1, 2);
vzpprodksumhi2 = _mm_slli_si128(vzpprodksumhi2, 2);
const __m128i vksumzp0 = _mm_or_si128(vzpprodksumhi0, vzpprodksumlo0);
const __m128i vksumzp1 = _mm_or_si128(vzpprodksumhi1, vzpprodksumlo1);
const __m128i vksumzp2 = _mm_or_si128(vzpprodksumhi2, vzpprodksumlo2);
const __m128i vksum010 = _mm_unpacklo_epi32(vksumzp0, vzero);
const __m128i vksum230 = _mm_unpackhi_epi32(vksumzp0, vzero);
const __m128i vksum011 = _mm_unpacklo_epi32(vksumzp1, vzero);
const __m128i vksum231 = _mm_unpackhi_epi32(vksumzp1, vzero);
const __m128i vksum012 = _mm_unpacklo_epi32(vksumzp2, vzero);
const __m128i vksum232 = _mm_unpackhi_epi32(vksumzp2, vzero);
__m128i vacc0x0 = _mm_unpacklo_epi64(vksum010, vzero);
__m128i vacc0x1 = _mm_unpackhi_epi64(vksum010, vzero);
__m128i vacc1x0 = _mm_unpacklo_epi64(vksum011, vzero);
__m128i vacc1x1 = _mm_unpackhi_epi64(vksum011, vzero);
__m128i vacc2x0 = _mm_unpacklo_epi64(vksum012, vzero);
__m128i vacc2x1 = _mm_unpackhi_epi64(vksum012, vzero);
__m128i vacc0x2 = _mm_unpacklo_epi64(vksum230, vzero);
__m128i vacc0x3 = _mm_unpackhi_epi64(vksum230, vzero);
__m128i vacc1x2 = _mm_unpacklo_epi64(vksum231, vzero);
__m128i vacc1x3 = _mm_unpackhi_epi64(vksum231, vzero);
__m128i vacc2x2 = _mm_unpacklo_epi64(vksum232, vzero);
__m128i vacc2x3 = _mm_unpackhi_epi64(vksum232, vzero);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8);
a2 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
const __m128i vacc1x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x0, vacc1x2), _mm_unpackhi_epi32(vacc1x0, vacc1x2));
const __m128i vacc1x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x1, vacc1x3), _mm_unpackhi_epi32(vacc1x1, vacc1x3));
const __m128i vacc2x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x0, vacc2x2), _mm_unpackhi_epi32(vacc2x0, vacc2x2));
const __m128i vacc2x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x1, vacc2x3), _mm_unpackhi_epi32(vacc2x1, vacc2x3));
__m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
__m128i vacc1x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x02, vacc1x13), _mm_unpackhi_epi32(vacc1x02, vacc1x13));
__m128i vacc2x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x02, vacc2x13), _mm_unpackhi_epi32(vacc2x02, vacc2x13));
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vout2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vscale2 = _mm_load1_ps(&quantization_params[2].scale);
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
vout2x0123 = _mm_mul_ps(vout2x0123, vscale2);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
vout2x0123 = _mm_add_ps(vout2x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
vout2x0123 = _mm_max_ps(vout2x0123, vmin);
vout2x0123 = _mm_min_ps(vout2x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c2, vout2x0123);
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c2, vout2x0123);
vout2x0123 = _mm_unpackhi_ps(vout2x0123, vout2x0123);
c2 += 2;
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c2, vout2x0123);
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 10,049 | 44.067265 | 119 | c |
XNNPACK | XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-3x4c8-minmax-sse41-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_3x4c8__sse41_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
__m128i vzp2 = _mm_cvtsi32_si128((int) quantization_params[2].zero_point);
vzp2 = _mm_shuffle_epi32(vzp2, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
const __m128i vksumzp1 = _mm_mullo_epi32(vksum, vzp1);
const __m128i vksumzp2 = _mm_mullo_epi32(vksum, vzp2);
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
__m128i vacc1x0 = _mm_blend_epi16(vksumzp1, vzero, 0xFC);
__m128i vacc1x1 = _mm_blend_epi16(vksumzp1, vzero, 0xF3);
__m128i vacc1x2 = _mm_blend_epi16(vksumzp1, vzero, 0xCF);
__m128i vacc1x3 = _mm_blend_epi16(vksumzp1, vzero, 0x3F);
__m128i vacc2x0 = _mm_blend_epi16(vksumzp2, vzero, 0xFC);
__m128i vacc2x1 = _mm_blend_epi16(vksumzp2, vzero, 0xF3);
__m128i vacc2x2 = _mm_blend_epi16(vksumzp2, vzero, 0xCF);
__m128i vacc2x3 = _mm_blend_epi16(vksumzp2, vzero, 0x3F);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
a2 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
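      // ld128: one 16-byte load carries two weight columns; the low half is
      // widened with _mm_cvtepi8_epi16 and the high half with the
      // duplicate-and-shift sign extension.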
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vout2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vscale2 = _mm_load1_ps(&quantization_params[2].scale);
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
vout2x0123 = _mm_mul_ps(vout2x0123, vscale2);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
vout2x0123 = _mm_add_ps(vout2x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
vout2x0123 = _mm_max_ps(vout2x0123, vmin);
vout2x0123 = _mm_min_ps(vout2x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c2, vout2x0123);
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c2, vout2x0123);
vout2x0123 = _mm_unpackhi_ps(vout2x0123, vout2x0123);
c2 += 2;
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c2, vout2x0123);
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 7,576 | 39.089947 | 109 | c |
XNNPACK | XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-3x4c8-minmax-sse41-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_3x4c8__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
__m128i vzp2 = _mm_cvtsi32_si128((int) quantization_params[2].zero_point);
vzp2 = _mm_shuffle_epi32(vzp2, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
const __m128i vksumzp1 = _mm_mullo_epi32(vksum, vzp1);
const __m128i vksumzp2 = _mm_mullo_epi32(vksum, vzp2);
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
__m128i vacc1x0 = _mm_blend_epi16(vksumzp1, vzero, 0xFC);
__m128i vacc1x1 = _mm_blend_epi16(vksumzp1, vzero, 0xF3);
__m128i vacc1x2 = _mm_blend_epi16(vksumzp1, vzero, 0xCF);
__m128i vacc1x3 = _mm_blend_epi16(vksumzp1, vzero, 0x3F);
__m128i vacc2x0 = _mm_blend_epi16(vksumzp2, vzero, 0xFC);
__m128i vacc2x1 = _mm_blend_epi16(vksumzp2, vzero, 0xF3);
__m128i vacc2x2 = _mm_blend_epi16(vksumzp2, vzero, 0xCF);
__m128i vacc2x3 = _mm_blend_epi16(vksumzp2, vzero, 0x3F);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
a2 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vout2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vscale2 = _mm_load1_ps(&quantization_params[2].scale);
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
vout2x0123 = _mm_mul_ps(vout2x0123, vscale2);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
vout2x0123 = _mm_add_ps(vout2x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
vout2x0123 = _mm_max_ps(vout2x0123, vmin);
vout2x0123 = _mm_min_ps(vout2x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c2, vout2x0123);
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c2, vout2x0123);
vout2x0123 = _mm_unpackhi_ps(vout2x0123, vout2x0123);
c2 += 2;
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c2, vout2x0123);
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 7,694 | 38.870466 | 109 | c |
XNNPACK | XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-3x4c8-minmax-xop-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_3x4c8__xop_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
__m128i vzp2 = _mm_cvtsi32_si128((int) quantization_params[2].zero_point);
vzp2 = _mm_shuffle_epi32(vzp2, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
const __m128i vksumzp1 = _mm_mullo_epi32(vksum, vzp1);
const __m128i vksumzp2 = _mm_mullo_epi32(vksum, vzp2);
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
__m128i vacc1x0 = _mm_blend_epi16(vksumzp1, vzero, 0xFC);
__m128i vacc1x1 = _mm_blend_epi16(vksumzp1, vzero, 0xF3);
__m128i vacc1x2 = _mm_blend_epi16(vksumzp1, vzero, 0xCF);
__m128i vacc1x3 = _mm_blend_epi16(vksumzp1, vzero, 0x3F);
__m128i vacc2x0 = _mm_blend_epi16(vksumzp2, vzero, 0xFC);
__m128i vacc2x1 = _mm_blend_epi16(vksumzp2, vzero, 0xF3);
__m128i vacc2x2 = _mm_blend_epi16(vksumzp2, vzero, 0xCF);
__m128i vacc2x3 = _mm_blend_epi16(vksumzp2, vzero, 0x3F);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
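      // XOP's _mm_maddd_epi16 fuses the 16-bit multiply-add-pairs with the
      // 32-bit accumulate, replacing the _mm_madd_epi16 + _mm_add_epi32
      // pairs used in the SSE4.1 variants of this kernel.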
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
a2 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0);
vacc0x1 = _mm_maddd_epi16(vxa0, vxb1, vacc0x1);
vacc1x0 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0);
vacc1x1 = _mm_maddd_epi16(vxa1, vxb1, vacc1x1);
vacc2x0 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0);
vacc2x1 = _mm_maddd_epi16(vxa2, vxb1, vacc2x1);
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x2 = _mm_maddd_epi16(vxa0, vxb2, vacc0x2);
vacc0x3 = _mm_maddd_epi16(vxa0, vxb3, vacc0x3);
vacc1x2 = _mm_maddd_epi16(vxa1, vxb2, vacc1x2);
vacc1x3 = _mm_maddd_epi16(vxa1, vxb3, vacc1x3);
vacc2x2 = _mm_maddd_epi16(vxa2, vxb2, vacc2x2);
vacc2x3 = _mm_maddd_epi16(vxa2, vxb3, vacc2x3);
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vout2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vscale2 = _mm_broadcast_ss(&quantization_params[2].scale);
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
vout2x0123 = _mm_mul_ps(vout2x0123, vscale2);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
vout2x0123 = _mm_add_ps(vout2x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
vout2x0123 = _mm_max_ps(vout2x0123, vmin);
vout2x0123 = _mm_min_ps(vout2x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c2, vout2x0123);
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c2, vout2x0123);
vout2x0123 = _mm_unpackhi_ps(vout2x0123, vout2x0123);
c2 += 2;
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c2, vout2x0123);
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 7,463 | 37.673575 | 109 | c |
XNNPACK | XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-3x4c8-minmax-xop-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_3x4c8__xop_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
__m128i vzp2 = _mm_cvtsi32_si128((int) quantization_params[2].zero_point);
vzp2 = _mm_shuffle_epi32(vzp2, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
const __m128i vksumzp1 = _mm_mullo_epi32(vksum, vzp1);
const __m128i vksumzp2 = _mm_mullo_epi32(vksum, vzp2);
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
__m128i vacc1x0 = _mm_blend_epi16(vksumzp1, vzero, 0xFC);
__m128i vacc1x1 = _mm_blend_epi16(vksumzp1, vzero, 0xF3);
__m128i vacc1x2 = _mm_blend_epi16(vksumzp1, vzero, 0xCF);
__m128i vacc1x3 = _mm_blend_epi16(vksumzp1, vzero, 0x3F);
__m128i vacc2x0 = _mm_blend_epi16(vksumzp2, vzero, 0xFC);
__m128i vacc2x1 = _mm_blend_epi16(vksumzp2, vzero, 0xF3);
__m128i vacc2x2 = _mm_blend_epi16(vksumzp2, vzero, 0xCF);
__m128i vacc2x3 = _mm_blend_epi16(vksumzp2, vzero, 0x3F);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
a2 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0);
vacc1x0 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0);
vacc2x0 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0);
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x1 = _mm_maddd_epi16(vxa0, vxb1, vacc0x1);
vacc1x1 = _mm_maddd_epi16(vxa1, vxb1, vacc1x1);
vacc2x1 = _mm_maddd_epi16(vxa2, vxb1, vacc2x1);
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x2 = _mm_maddd_epi16(vxa0, vxb2, vacc0x2);
vacc1x2 = _mm_maddd_epi16(vxa1, vxb2, vacc1x2);
vacc2x2 = _mm_maddd_epi16(vxa2, vxb2, vacc2x2);
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x3 = _mm_maddd_epi16(vxa0, vxb3, vacc0x3);
vacc1x3 = _mm_maddd_epi16(vxa1, vxb3, vacc1x3);
vacc2x3 = _mm_maddd_epi16(vxa2, vxb3, vacc2x3);
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vout2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vscale2 = _mm_broadcast_ss(&quantization_params[2].scale);
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
vout2x0123 = _mm_mul_ps(vout2x0123, vscale2);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
vout2x0123 = _mm_add_ps(vout2x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
vout2x0123 = _mm_max_ps(vout2x0123, vmin);
vout2x0123 = _mm_min_ps(vout2x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c2, vout2x0123);
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c2, vout2x0123);
vout2x0123 = _mm_unpackhi_ps(vout2x0123, vout2x0123);
c2 += 2;
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c2, vout2x0123);
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 7,581 | 37.48731 | 109 | c |
XNNPACK | XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-4x16c4-minmax-neondot.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_4x16c4__neondot(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const int32x4_t vzp0 = vdupq_n_s32(quantization_params[0].zero_point);
const int32x4_t vzp1 = vdupq_n_s32(quantization_params[1].zero_point);
const int32x4_t vzp2 = vdupq_n_s32(quantization_params[2].zero_point);
const int32x4_t vzp3 = vdupq_n_s32(quantization_params[3].zero_point);
// Loop over groups of 16 columns.
do {
    // Initialize accumulators with the packed per-column weight sums: 16
    // int32 sums are loaded from the start of this group of 16 columns,
    // then scaled by each row's zero point below (the float bias proper is
    // added after the matrix product).
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc1x0123 = vacc0x0123;
int32x4_t vacc1x4567 = vacc0x4567;
int32x4_t vacc1x89AB = vacc0x89AB;
int32x4_t vacc1xCDEF = vacc0xCDEF;
int32x4_t vacc2x0123 = vacc0x0123;
int32x4_t vacc2x4567 = vacc0x4567;
int32x4_t vacc2x89AB = vacc0x89AB;
int32x4_t vacc2xCDEF = vacc0xCDEF;
int32x4_t vacc3x0123 = vacc0x0123;
int32x4_t vacc3x4567 = vacc0x4567;
int32x4_t vacc3x89AB = vacc0x89AB;
int32x4_t vacc3xCDEF = vacc0xCDEF;
vacc0x0123 = vmulq_s32(vacc0x0123, vzp0);
vacc0x4567 = vmulq_s32(vacc0x4567, vzp0);
vacc0x89AB = vmulq_s32(vacc0x89AB, vzp0);
vacc0xCDEF = vmulq_s32(vacc0xCDEF, vzp0);
vacc1x0123 = vmulq_s32(vacc1x0123, vzp1);
vacc1x4567 = vmulq_s32(vacc1x4567, vzp1);
vacc1x89AB = vmulq_s32(vacc1x89AB, vzp1);
vacc1xCDEF = vmulq_s32(vacc1xCDEF, vzp1);
vacc2x0123 = vmulq_s32(vacc2x0123, vzp2);
vacc2x4567 = vmulq_s32(vacc2x4567, vzp2);
vacc2x89AB = vmulq_s32(vacc2x89AB, vzp2);
vacc2xCDEF = vmulq_s32(vacc2xCDEF, vzp2);
vacc3x0123 = vmulq_s32(vacc3x0123, vzp3);
vacc3x4567 = vmulq_s32(vacc3x4567, vzp3);
vacc3x89AB = vmulq_s32(vacc3x89AB, vzp3);
vacc3xCDEF = vmulq_s32(vacc3xCDEF, vzp3);
    // Inner accumulation loop along kc, feeding all 16 columns.
size_t k = kc;
// 2x partial unrolled loop to load 8 bytes at a time.
while (k >= 8 * sizeof(int8_t)) {
// Load a 4x8 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;
const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 8;
const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 8;
const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 8;
// Load a 8x16 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 4x8 * 8x16 --> 4x16.
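      // Each vdotq_lane_s32 accumulates four 4-wide int8 dot products; the
      // lane index selects which group of 4 activation bytes is applied
      // across all four output columns.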
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0);
vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
vacc1x89AB = vdotq_lane_s32(vacc1x89AB, vb0123x89AB, va1x01234567, 0);
vacc1xCDEF = vdotq_lane_s32(vacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
vacc2x89AB = vdotq_lane_s32(vacc2x89AB, vb0123x89AB, va2x01234567, 0);
vacc2xCDEF = vdotq_lane_s32(vacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
vacc3x89AB = vdotq_lane_s32(vacc3x89AB, vb0123x89AB, va3x01234567, 0);
vacc3xCDEF = vdotq_lane_s32(vacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb4567x89AB, va0x01234567, 1);
vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb4567x0123, va1x01234567, 1);
vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb4567x4567, va1x01234567, 1);
vacc1x89AB = vdotq_lane_s32(vacc1x89AB, vb4567x89AB, va1x01234567, 1);
vacc1xCDEF = vdotq_lane_s32(vacc1xCDEF, vb4567xCDEF, va1x01234567, 1);
vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb4567x0123, va2x01234567, 1);
vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb4567x4567, va2x01234567, 1);
vacc2x89AB = vdotq_lane_s32(vacc2x89AB, vb4567x89AB, va2x01234567, 1);
vacc2xCDEF = vdotq_lane_s32(vacc2xCDEF, vb4567xCDEF, va2x01234567, 1);
vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb4567x0123, va3x01234567, 1);
vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb4567x4567, va3x01234567, 1);
vacc3x89AB = vdotq_lane_s32(vacc3x89AB, vb4567x89AB, va3x01234567, 1);
vacc3xCDEF = vdotq_lane_s32(vacc3xCDEF, vb4567xCDEF, va3x01234567, 1);
k -= 8 * sizeof(int8_t);
}
// Handle up to 4 final positions of `k`
if XNN_UNLIKELY(k != 0) {
// Load a 4x4 block of activations.
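      // vld1_s8 still reads 8 bytes, but only the low 4 feed lane 0 of the
      // vdotq_lane_s32 calls below; the over-read is permitted by
      // XNN_OOB_READS, and the pointers advance by just 4.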
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 4;
const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 4;
const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 4;
const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 4;
// Load a 4x16 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 4x4 * 4x16 --> 4x16.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0);
vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
vacc1x89AB = vdotq_lane_s32(vacc1x89AB, vb0123x89AB, va1x01234567, 0);
vacc1xCDEF = vdotq_lane_s32(vacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
vacc2x89AB = vdotq_lane_s32(vacc2x89AB, vb0123x89AB, va2x01234567, 0);
vacc2xCDEF = vdotq_lane_s32(vacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
vacc3x89AB = vdotq_lane_s32(vacc3x89AB, vb0123x89AB, va3x01234567, 0);
vacc3xCDEF = vdotq_lane_s32(vacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
}
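    // Convert the int32 dot-product accumulators to float for dequantization.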
float32x4_t vout0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vout0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vout0x89AB = vcvtq_f32_s32(vacc0x89AB);
float32x4_t vout0xCDEF = vcvtq_f32_s32(vacc0xCDEF);
float32x4_t vout1x0123 = vcvtq_f32_s32(vacc1x0123);
float32x4_t vout1x4567 = vcvtq_f32_s32(vacc1x4567);
float32x4_t vout1x89AB = vcvtq_f32_s32(vacc1x89AB);
float32x4_t vout1xCDEF = vcvtq_f32_s32(vacc1xCDEF);
float32x4_t vout2x0123 = vcvtq_f32_s32(vacc2x0123);
float32x4_t vout2x4567 = vcvtq_f32_s32(vacc2x4567);
float32x4_t vout2x89AB = vcvtq_f32_s32(vacc2x89AB);
float32x4_t vout2xCDEF = vcvtq_f32_s32(vacc2xCDEF);
float32x4_t vout3x0123 = vcvtq_f32_s32(vacc3x0123);
float32x4_t vout3x4567 = vcvtq_f32_s32(vacc3x4567);
float32x4_t vout3x89AB = vcvtq_f32_s32(vacc3x89AB);
float32x4_t vout3xCDEF = vcvtq_f32_s32(vacc3xCDEF);
const float32x4_t vscale0 = vdupq_n_f32(quantization_params[0].scale);
vout0x0123 = vmulq_f32(vout0x0123, vscale0);
vout0x4567 = vmulq_f32(vout0x4567, vscale0);
vout0x89AB = vmulq_f32(vout0x89AB, vscale0);
vout0xCDEF = vmulq_f32(vout0xCDEF, vscale0);
const float32x4_t vscale1 = vdupq_n_f32(quantization_params[1].scale);
vout1x0123 = vmulq_f32(vout1x0123, vscale1);
vout1x4567 = vmulq_f32(vout1x4567, vscale1);
vout1x89AB = vmulq_f32(vout1x89AB, vscale1);
vout1xCDEF = vmulq_f32(vout1xCDEF, vscale1);
const float32x4_t vscale2 = vdupq_n_f32(quantization_params[2].scale);
vout2x0123 = vmulq_f32(vout2x0123, vscale2);
vout2x4567 = vmulq_f32(vout2x4567, vscale2);
vout2x89AB = vmulq_f32(vout2x89AB, vscale2);
vout2xCDEF = vmulq_f32(vout2xCDEF, vscale2);
const float32x4_t vscale3 = vdupq_n_f32(quantization_params[3].scale);
vout3x0123 = vmulq_f32(vout3x0123, vscale3);
vout3x4567 = vmulq_f32(vout3x4567, vscale3);
vout3x89AB = vmulq_f32(vout3x89AB, vscale3);
vout3xCDEF = vmulq_f32(vout3xCDEF, vscale3);
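    // Per-channel float biases follow the int8 weights in the packed buffer; add them after scaling.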
const float32x4_t vbias0123 = vld1q_f32(w); w = (const float*) w + 4;
const float32x4_t vbias4567 = vld1q_f32(w); w = (const float*) w + 4;
const float32x4_t vbias89AB = vld1q_f32(w); w = (const float*) w + 4;
const float32x4_t vbiasCDEF = vld1q_f32(w); w = (const float*) w + 4;
vout0x0123 = vaddq_f32(vout0x0123, vbias0123);
vout0x4567 = vaddq_f32(vout0x4567, vbias4567);
vout0x89AB = vaddq_f32(vout0x89AB, vbias89AB);
vout0xCDEF = vaddq_f32(vout0xCDEF, vbiasCDEF);
vout1x0123 = vaddq_f32(vout1x0123, vbias0123);
vout1x4567 = vaddq_f32(vout1x4567, vbias4567);
vout1x89AB = vaddq_f32(vout1x89AB, vbias89AB);
vout1xCDEF = vaddq_f32(vout1xCDEF, vbiasCDEF);
vout2x0123 = vaddq_f32(vout2x0123, vbias0123);
vout2x4567 = vaddq_f32(vout2x4567, vbias4567);
vout2x89AB = vaddq_f32(vout2x89AB, vbias89AB);
vout2xCDEF = vaddq_f32(vout2xCDEF, vbiasCDEF);
vout3x0123 = vaddq_f32(vout3x0123, vbias0123);
vout3x4567 = vaddq_f32(vout3x4567, vbias4567);
vout3x89AB = vaddq_f32(vout3x89AB, vbias89AB);
vout3xCDEF = vaddq_f32(vout3xCDEF, vbiasCDEF);
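    // Clamp to the output range from params (duplicated across all lanes).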
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vout0x0123 = vmaxq_f32(vout0x0123, vmin);
vout0x0123 = vminq_f32(vout0x0123, vmax);
vout0x4567 = vmaxq_f32(vout0x4567, vmin);
vout0x4567 = vminq_f32(vout0x4567, vmax);
vout0x89AB = vmaxq_f32(vout0x89AB, vmin);
vout0x89AB = vminq_f32(vout0x89AB, vmax);
vout0xCDEF = vmaxq_f32(vout0xCDEF, vmin);
vout0xCDEF = vminq_f32(vout0xCDEF, vmax);
vout1x0123 = vmaxq_f32(vout1x0123, vmin);
vout1x0123 = vminq_f32(vout1x0123, vmax);
vout1x4567 = vmaxq_f32(vout1x4567, vmin);
vout1x4567 = vminq_f32(vout1x4567, vmax);
vout1x89AB = vmaxq_f32(vout1x89AB, vmin);
vout1x89AB = vminq_f32(vout1x89AB, vmax);
vout1xCDEF = vmaxq_f32(vout1xCDEF, vmin);
vout1xCDEF = vminq_f32(vout1xCDEF, vmax);
vout2x0123 = vmaxq_f32(vout2x0123, vmin);
vout2x0123 = vminq_f32(vout2x0123, vmax);
vout2x4567 = vmaxq_f32(vout2x4567, vmin);
vout2x4567 = vminq_f32(vout2x4567, vmax);
vout2x89AB = vmaxq_f32(vout2x89AB, vmin);
vout2x89AB = vminq_f32(vout2x89AB, vmax);
vout2xCDEF = vmaxq_f32(vout2xCDEF, vmin);
vout2xCDEF = vminq_f32(vout2xCDEF, vmax);
vout3x0123 = vmaxq_f32(vout3x0123, vmin);
vout3x0123 = vminq_f32(vout3x0123, vmax);
vout3x4567 = vmaxq_f32(vout3x4567, vmin);
vout3x4567 = vminq_f32(vout3x4567, vmax);
vout3x89AB = vmaxq_f32(vout3x89AB, vmin);
vout3x89AB = vminq_f32(vout3x89AB, vmax);
vout3xCDEF = vmaxq_f32(vout3xCDEF, vmin);
vout3xCDEF = vminq_f32(vout3xCDEF, vmax);
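    // Store a full 16-column tile when possible; otherwise the tail writes 8, 4, 2, and 1
    // columns, shifting the surviving lanes down after each partial store.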
if XNN_LIKELY(nc >= 16) {
vst1q_f32(&c3[0], vout3x0123);
vst1q_f32(&c3[4], vout3x4567);
vst1q_f32(&c3[8], vout3x89AB);
vst1q_f32(&c3[12], vout3xCDEF);
vst1q_f32(&c2[0], vout2x0123);
vst1q_f32(&c2[4], vout2x4567);
vst1q_f32(&c2[8], vout2x89AB);
vst1q_f32(&c2[12], vout2xCDEF);
vst1q_f32(&c1[0], vout1x0123);
vst1q_f32(&c1[4], vout1x4567);
vst1q_f32(&c1[8], vout1x89AB);
vst1q_f32(&c1[12], vout1xCDEF);
vst1q_f32(&c0[0], vout0x0123);
vst1q_f32(&c0[4], vout0x4567);
vst1q_f32(&c0[8], vout0x89AB);
vst1q_f32(&c0[12], vout0xCDEF);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
nc -= 16;
} else {
if (nc & 8) {
vst1q_f32(c3, vout3x0123); c3 += 4;
vout3x0123 = vout3x89AB;
vst1q_f32(c2, vout2x0123); c2 += 4;
vout2x0123 = vout2x89AB;
vst1q_f32(c1, vout1x0123); c1 += 4;
vout1x0123 = vout1x89AB;
vst1q_f32(c0, vout0x0123); c0 += 4;
vout0x0123 = vout0x89AB;
vst1q_f32(c3, vout3x4567); c3 += 4;
vout3x4567 = vout3xCDEF;
vst1q_f32(c2, vout2x4567); c2 += 4;
vout2x4567 = vout2xCDEF;
vst1q_f32(c1, vout1x4567); c1 += 4;
vout1x4567 = vout1xCDEF;
vst1q_f32(c0, vout0x4567); c0 += 4;
vout0x4567 = vout0xCDEF;
}
if (nc & 4) {
vst1q_f32(c3, vout3x0123); c3 += 4;
vout3x0123 = vout3x4567;
vst1q_f32(c2, vout2x0123); c2 += 4;
vout2x0123 = vout2x4567;
vst1q_f32(c1, vout1x0123); c1 += 4;
vout1x0123 = vout1x4567;
vst1q_f32(c0, vout0x0123); c0 += 4;
vout0x0123 = vout0x4567;
}
float32x2_t vout3x01 = vget_low_f32(vout3x0123);
float32x2_t vout2x01 = vget_low_f32(vout2x0123);
float32x2_t vout1x01 = vget_low_f32(vout1x0123);
float32x2_t vout0x01 = vget_low_f32(vout0x0123);
if (nc & 2) {
vst1_f32(c3, vout3x01); c3 += 2;
vst1_f32(c2, vout2x01); c2 += 2;
vst1_f32(c1, vout1x01); c1 += 2;
vst1_f32(c0, vout0x01); c0 += 2;
vout3x01 = vget_high_f32(vout3x0123);
vout2x01 = vget_high_f32(vout2x0123);
vout1x01 = vget_high_f32(vout1x0123);
vout0x01 = vget_high_f32(vout0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vout3x01, 0);
vst1_lane_f32(c2, vout2x01, 0);
vst1_lane_f32(c1, vout1x01, 0);
vst1_lane_f32(c0, vout0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 16,735
| 44.726776
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-4x4-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const int32_t vzp0 = quantization_params[0].zero_point;
const int32_t vzp1 = quantization_params[1].zero_point;
const int32_t vzp2 = quantization_params[2].zero_point;
const int32_t vzp3 = quantization_params[3].zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc1x2 = vacc0x2;
int32_t vacc1x3 = vacc0x3;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
int32_t vacc2x2 = vacc0x2;
int32_t vacc2x3 = vacc0x3;
int32_t vacc3x0 = vacc0x0;
int32_t vacc3x1 = vacc0x1;
int32_t vacc3x2 = vacc0x2;
int32_t vacc3x3 = vacc0x3;
w = (const int32_t*) w + 4;
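    // Zero-point correction, sketched:
    //   sum_k (a[k] - zp) * b[k] = sum_k a[k]*b[k] - zp * sum_k b[k]
    // The packed buffer starts with a per-column weight-sum term (its sign folded in at
    // packing time, by assumption), so multiplying it by each row's zero point lets the
    // inner loop accumulate raw int8 products only.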
vacc0x0 *= vzp0;
vacc0x1 *= vzp0;
vacc0x2 *= vzp0;
vacc0x3 *= vzp0;
vacc1x0 *= vzp1;
vacc1x1 *= vzp1;
vacc1x2 *= vzp1;
vacc1x3 *= vzp1;
vacc2x0 *= vzp2;
vacc2x1 *= vzp2;
vacc2x2 *= vzp2;
vacc2x3 *= vzp2;
vacc3x0 *= vzp3;
vacc3x1 *= vzp3;
vacc3x2 *= vzp3;
vacc3x3 *= vzp3;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t va1 = (int32_t) *a1++;
const int32_t va2 = (int32_t) *a2++;
const int32_t va3 = (int32_t) *a3++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc1x2 += va1 * vb2;
vacc1x3 += va1 * vb3;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
vacc2x2 += va2 * vb2;
vacc2x3 += va2 * vb3;
vacc3x0 += va3 * vb0;
vacc3x1 += va3 * vb1;
vacc3x2 += va3 * vb2;
vacc3x3 += va3 * vb3;
k -= sizeof(int8_t);
} while (k != 0);
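    // Convert the int32 accumulators to float and apply each row's dynamic-quantization scale.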
float vout0x0 = (float) vacc0x0;
float vout0x1 = (float) vacc0x1;
float vout0x2 = (float) vacc0x2;
float vout0x3 = (float) vacc0x3;
float vout1x0 = (float) vacc1x0;
float vout1x1 = (float) vacc1x1;
float vout1x2 = (float) vacc1x2;
float vout1x3 = (float) vacc1x3;
float vout2x0 = (float) vacc2x0;
float vout2x1 = (float) vacc2x1;
float vout2x2 = (float) vacc2x2;
float vout2x3 = (float) vacc2x3;
float vout3x0 = (float) vacc3x0;
float vout3x1 = (float) vacc3x1;
float vout3x2 = (float) vacc3x2;
float vout3x3 = (float) vacc3x3;
const float vscale0 = quantization_params[0].scale;
const float vscale1 = quantization_params[1].scale;
const float vscale2 = quantization_params[2].scale;
const float vscale3 = quantization_params[3].scale;
vout0x0 *= vscale0;
vout1x0 *= vscale1;
vout2x0 *= vscale2;
vout3x0 *= vscale3;
vout0x1 *= vscale0;
vout1x1 *= vscale1;
vout2x1 *= vscale2;
vout3x1 *= vscale3;
vout0x2 *= vscale0;
vout1x2 *= vscale1;
vout2x2 *= vscale2;
vout3x2 *= vscale3;
vout0x3 *= vscale0;
vout1x3 *= vscale1;
vout2x3 *= vscale2;
vout3x3 *= vscale3;
const float vbias0 = ((const float*) w)[0];
const float vbias1 = ((const float*) w)[1];
const float vbias2 = ((const float*) w)[2];
const float vbias3 = ((const float*) w)[3];
w = (const float*) w + 4;
const float vmax = params->scalar.max;
const float vmin = params->scalar.min;
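    // Add the per-channel bias, then clamp each output to [vmin, vmax].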
vout0x0 += vbias0;
vout0x0 = math_max_f32(vout0x0, vmin);
vout0x0 = math_min_f32(vout0x0, vmax);
vout1x0 += vbias0;
vout1x0 = math_max_f32(vout1x0, vmin);
vout1x0 = math_min_f32(vout1x0, vmax);
vout2x0 += vbias0;
vout2x0 = math_max_f32(vout2x0, vmin);
vout2x0 = math_min_f32(vout2x0, vmax);
vout3x0 += vbias0;
vout3x0 = math_max_f32(vout3x0, vmin);
vout3x0 = math_min_f32(vout3x0, vmax);
vout0x1 += vbias1;
vout0x1 = math_max_f32(vout0x1, vmin);
vout0x1 = math_min_f32(vout0x1, vmax);
vout1x1 += vbias1;
vout1x1 = math_max_f32(vout1x1, vmin);
vout1x1 = math_min_f32(vout1x1, vmax);
vout2x1 += vbias1;
vout2x1 = math_max_f32(vout2x1, vmin);
vout2x1 = math_min_f32(vout2x1, vmax);
vout3x1 += vbias1;
vout3x1 = math_max_f32(vout3x1, vmin);
vout3x1 = math_min_f32(vout3x1, vmax);
vout0x2 += vbias2;
vout0x2 = math_max_f32(vout0x2, vmin);
vout0x2 = math_min_f32(vout0x2, vmax);
vout1x2 += vbias2;
vout1x2 = math_max_f32(vout1x2, vmin);
vout1x2 = math_min_f32(vout1x2, vmax);
vout2x2 += vbias2;
vout2x2 = math_max_f32(vout2x2, vmin);
vout2x2 = math_min_f32(vout2x2, vmax);
vout3x2 += vbias2;
vout3x2 = math_max_f32(vout3x2, vmin);
vout3x2 = math_min_f32(vout3x2, vmax);
vout0x3 += vbias3;
vout0x3 = math_max_f32(vout0x3, vmin);
vout0x3 = math_min_f32(vout0x3, vmax);
vout1x3 += vbias3;
vout1x3 = math_max_f32(vout1x3, vmin);
vout1x3 = math_min_f32(vout1x3, vmax);
vout2x3 += vbias3;
vout2x3 = math_max_f32(vout2x3, vmin);
vout2x3 = math_min_f32(vout2x3, vmax);
vout3x3 += vbias3;
vout3x3 = math_max_f32(vout3x3, vmin);
vout3x3 = math_min_f32(vout3x3, vmax);
if XNN_LIKELY(nc >= 4) {
c3[0] = vout3x0;
c3[1] = vout3x1;
c3[2] = vout3x2;
c3[3] = vout3x3;
c2[0] = vout2x0;
c2[1] = vout2x1;
c2[2] = vout2x2;
c2[3] = vout2x3;
c1[0] = vout1x0;
c1[1] = vout1x1;
c1[2] = vout1x2;
c1[3] = vout1x3;
c0[0] = vout0x0;
c0[1] = vout0x1;
c0[2] = vout0x2;
c0[3] = vout0x3;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = (float) vout3x0;
c3[1] = (float) vout3x1;
vout3x0 = vout3x2;
c3 += 2;
c2[0] = (float) vout2x0;
c2[1] = (float) vout2x1;
vout2x0 = vout2x2;
c2 += 2;
c1[0] = (float) vout1x0;
c1[1] = (float) vout1x1;
vout1x0 = vout1x2;
c1 += 2;
c0[0] = (float) vout0x0;
c0[1] = (float) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c3[0] = (float) vout3x0;
c2[0] = (float) vout2x0;
c1[0] = (float) vout1x0;
c0[0] = (float) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 8,330
| 28.967626
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-4x4c8-minmax-avx-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_4x4c8__avx_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
const __m128i vzp23 = _mm_loadu_si128((const __m128i*) (quantization_params + 2));
const __m128i vzp2 = _mm_shuffle_epi32(vzp23, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp3 = _mm_shuffle_epi32(vzp23, _MM_SHUFFLE(2, 2, 2, 2));
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
const __m128i vksumzp1 = _mm_mullo_epi32(vksum, vzp1);
const __m128i vksumzp2 = _mm_mullo_epi32(vksum, vzp2);
const __m128i vksumzp3 = _mm_mullo_epi32(vksum, vzp3);
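    // Scatter the ksum*zp correction terms: each blend mask keeps only 32-bit element N of
    // vksumzpM, so accumulator vaccMxN starts with the correction for output column N in a
    // single lane and zeros elsewhere.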
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
__m128i vacc1x0 = _mm_blend_epi16(vksumzp1, vzero, 0xFC);
__m128i vacc1x1 = _mm_blend_epi16(vksumzp1, vzero, 0xF3);
__m128i vacc1x2 = _mm_blend_epi16(vksumzp1, vzero, 0xCF);
__m128i vacc1x3 = _mm_blend_epi16(vksumzp1, vzero, 0x3F);
__m128i vacc2x0 = _mm_blend_epi16(vksumzp2, vzero, 0xFC);
__m128i vacc2x1 = _mm_blend_epi16(vksumzp2, vzero, 0xF3);
__m128i vacc2x2 = _mm_blend_epi16(vksumzp2, vzero, 0xCF);
__m128i vacc2x3 = _mm_blend_epi16(vksumzp2, vzero, 0x3F);
__m128i vacc3x0 = _mm_blend_epi16(vksumzp3, vzero, 0xFC);
__m128i vacc3x1 = _mm_blend_epi16(vksumzp3, vzero, 0xF3);
__m128i vacc3x2 = _mm_blend_epi16(vksumzp3, vzero, 0xCF);
__m128i vacc3x3 = _mm_blend_epi16(vksumzp3, vzero, 0x3F);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
a3 += 8;
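      // Load 16 weight bytes (two column groups). pmovsxbw sign-extends the low 8 bytes;
      // the high 8 bytes are sign-extended by interleaving the vector with itself and
      // arithmetic-shifting each 16-bit lane right by 8.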
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
vacc3x0 = _mm_add_epi32(vacc3x0, _mm_madd_epi16(vxa3, vxb0));
vacc3x1 = _mm_add_epi32(vacc3x1, _mm_madd_epi16(vxa3, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));
vacc3x2 = _mm_add_epi32(vacc3x2, _mm_madd_epi16(vxa3, vxb2));
vacc3x3 = _mm_add_epi32(vacc3x3, _mm_madd_epi16(vxa3, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
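    // Reduce: each vaccMxN holds 4 partial sums for one (row, column) pair; the pairwise
    // hadd tree collapses them and packs columns 0..3 of each row into a single vector.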
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);
const __m128i vacc3x01 = _mm_hadd_epi32(vacc3x0, vacc3x1);
const __m128i vacc3x23 = _mm_hadd_epi32(vacc3x2, vacc3x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);
__m128i vacc3x0123 = _mm_hadd_epi32(vacc3x01, vacc3x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vout2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vout3x0123 = _mm_cvtepi32_ps(vacc3x0123);
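    // quantization_params stores {zero_point, scale} pairs per row, so lanes 1 and 3 of
    // each 16-byte load carry the float scales.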
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vscale23 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) (quantization_params + 2)));
const __m128 vscale2 = _mm_shuffle_ps(vscale23, vscale23, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale3 = _mm_shuffle_ps(vscale23, vscale23, _MM_SHUFFLE(3, 3, 3, 3));
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
vout2x0123 = _mm_mul_ps(vout2x0123, vscale2);
vout3x0123 = _mm_mul_ps(vout3x0123, vscale3);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
vout2x0123 = _mm_add_ps(vout2x0123, vbias0123);
vout3x0123 = _mm_add_ps(vout3x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
vout2x0123 = _mm_max_ps(vout2x0123, vmin);
vout2x0123 = _mm_min_ps(vout2x0123, vmax);
vout3x0123 = _mm_max_ps(vout3x0123, vmin);
vout3x0123 = _mm_min_ps(vout3x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c3, vout3x0123);
_mm_storeu_ps(c2, vout2x0123);
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vout3x0123);
vout3x0123 = _mm_unpackhi_ps(vout3x0123, vout3x0123);
c3 += 2;
_mm_storel_pi((__m64*) c2, vout2x0123);
vout2x0123 = _mm_unpackhi_ps(vout2x0123, vout2x0123);
c2 += 2;
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vout3x0123);
_mm_store_ss(c2, vout2x0123);
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 9,567
| 40.781659
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-4x4c8-minmax-avx-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_4x4c8__avx_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
const __m128i vzp23 = _mm_loadu_si128((const __m128i*) (quantization_params + 2));
const __m128i vzp2 = _mm_shuffle_epi32(vzp23, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp3 = _mm_shuffle_epi32(vzp23, _MM_SHUFFLE(2, 2, 2, 2));
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
const __m128i vksumzp1 = _mm_mullo_epi32(vksum, vzp1);
const __m128i vksumzp2 = _mm_mullo_epi32(vksum, vzp2);
const __m128i vksumzp3 = _mm_mullo_epi32(vksum, vzp3);
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
__m128i vacc1x0 = _mm_blend_epi16(vksumzp1, vzero, 0xFC);
__m128i vacc1x1 = _mm_blend_epi16(vksumzp1, vzero, 0xF3);
__m128i vacc1x2 = _mm_blend_epi16(vksumzp1, vzero, 0xCF);
__m128i vacc1x3 = _mm_blend_epi16(vksumzp1, vzero, 0x3F);
__m128i vacc2x0 = _mm_blend_epi16(vksumzp2, vzero, 0xFC);
__m128i vacc2x1 = _mm_blend_epi16(vksumzp2, vzero, 0xF3);
__m128i vacc2x2 = _mm_blend_epi16(vksumzp2, vzero, 0xCF);
__m128i vacc2x3 = _mm_blend_epi16(vksumzp2, vzero, 0x3F);
__m128i vacc3x0 = _mm_blend_epi16(vksumzp3, vzero, 0xFC);
__m128i vacc3x1 = _mm_blend_epi16(vksumzp3, vzero, 0xF3);
__m128i vacc3x2 = _mm_blend_epi16(vksumzp3, vzero, 0xCF);
__m128i vacc3x3 = _mm_blend_epi16(vksumzp3, vzero, 0x3F);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
a3 += 8;
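      // ld64 variant: fetch each 8-byte weight column group separately and sign-extend it
      // with pmovsxbw (the ld128 variant above loads two groups at once).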
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
vacc3x0 = _mm_add_epi32(vacc3x0, _mm_madd_epi16(vxa3, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
vacc3x1 = _mm_add_epi32(vacc3x1, _mm_madd_epi16(vxa3, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
vacc3x2 = _mm_add_epi32(vacc3x2, _mm_madd_epi16(vxa3, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));
vacc3x3 = _mm_add_epi32(vacc3x3, _mm_madd_epi16(vxa3, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);
const __m128i vacc3x01 = _mm_hadd_epi32(vacc3x0, vacc3x1);
const __m128i vacc3x23 = _mm_hadd_epi32(vacc3x2, vacc3x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);
__m128i vacc3x0123 = _mm_hadd_epi32(vacc3x01, vacc3x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vout2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vout3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vscale23 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) (quantization_params + 2)));
const __m128 vscale2 = _mm_shuffle_ps(vscale23, vscale23, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale3 = _mm_shuffle_ps(vscale23, vscale23, _MM_SHUFFLE(3, 3, 3, 3));
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
vout2x0123 = _mm_mul_ps(vout2x0123, vscale2);
vout3x0123 = _mm_mul_ps(vout3x0123, vscale3);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
vout2x0123 = _mm_add_ps(vout2x0123, vbias0123);
vout3x0123 = _mm_add_ps(vout3x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
vout2x0123 = _mm_max_ps(vout2x0123, vmin);
vout2x0123 = _mm_min_ps(vout2x0123, vmax);
vout3x0123 = _mm_max_ps(vout3x0123, vmin);
vout3x0123 = _mm_min_ps(vout3x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c3, vout3x0123);
_mm_storeu_ps(c2, vout2x0123);
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vout3x0123);
vout3x0123 = _mm_unpackhi_ps(vout3x0123, vout3x0123);
c3 += 2;
_mm_storel_pi((__m64*) c2, vout2x0123);
vout2x0123 = _mm_unpackhi_ps(vout2x0123, vout2x0123);
c2 += 2;
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vout3x0123);
_mm_store_ss(c2, vout2x0123);
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 9,685
| 40.570815
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-4x4c8-minmax-sse2-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_4x4c8__sse2_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
const __m128i vzp23 = _mm_loadu_si128((const __m128i*) (quantization_params + 2));
const __m128i vzp2 = _mm_shuffle_epi32(vzp23, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp3 = _mm_shuffle_epi32(vzp23, _MM_SHUFFLE(2, 2, 2, 2));
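    // SSE2 has no pmulld, so the 32-bit ksum * zero_point products are emulated with
    // 16-bit multiplies: split ksum into 16-bit halves, combine the mullo/mulhi partial
    // products, and correct the high half for the sign of the zero point.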
const __m128i vksum_lo = _mm_srli_epi32(_mm_slli_epi32(vksum, 16), 16);
const __m128i vksum_hi = _mm_srli_epi32(vksum, 16);
__m128i vzpprodksumhi0 = _mm_mulhi_epu16(vzp0, vksum_lo);
const __m128i vzpprodksumlo0 = _mm_mullo_epi16(vzp0, vksum_lo);
__m128i vzpprodksumhi1 = _mm_mulhi_epu16(vzp1, vksum_lo);
const __m128i vzpprodksumlo1 = _mm_mullo_epi16(vzp1, vksum_lo);
__m128i vzpprodksumhi2 = _mm_mulhi_epu16(vzp2, vksum_lo);
const __m128i vzpprodksumlo2 = _mm_mullo_epi16(vzp2, vksum_lo);
__m128i vzpprodksumhi3 = _mm_mulhi_epu16(vzp3, vksum_lo);
const __m128i vzpprodksumlo3 = _mm_mullo_epi16(vzp3, vksum_lo);
vzpprodksumhi0 = _mm_add_epi16(vzpprodksumhi0, _mm_mullo_epi16(vzp0, vksum_hi));
vzpprodksumhi0 = _mm_sub_epi16(vzpprodksumhi0, _mm_and_si128(_mm_srai_epi16(vzp0, 15), vksum_lo));
vzpprodksumhi1 = _mm_add_epi16(vzpprodksumhi1, _mm_mullo_epi16(vzp1, vksum_hi));
vzpprodksumhi1 = _mm_sub_epi16(vzpprodksumhi1, _mm_and_si128(_mm_srai_epi16(vzp1, 15), vksum_lo));
vzpprodksumhi2 = _mm_add_epi16(vzpprodksumhi2, _mm_mullo_epi16(vzp2, vksum_hi));
vzpprodksumhi2 = _mm_sub_epi16(vzpprodksumhi2, _mm_and_si128(_mm_srai_epi16(vzp2, 15), vksum_lo));
vzpprodksumhi3 = _mm_add_epi16(vzpprodksumhi3, _mm_mullo_epi16(vzp3, vksum_hi));
vzpprodksumhi3 = _mm_sub_epi16(vzpprodksumhi3, _mm_and_si128(_mm_srai_epi16(vzp3, 15), vksum_lo));
vzpprodksumhi0 = _mm_slli_si128(vzpprodksumhi0, 2);
vzpprodksumhi1 = _mm_slli_si128(vzpprodksumhi1, 2);
vzpprodksumhi2 = _mm_slli_si128(vzpprodksumhi2, 2);
vzpprodksumhi3 = _mm_slli_si128(vzpprodksumhi3, 2);
const __m128i vksumzp0 = _mm_or_si128(vzpprodksumhi0, vzpprodksumlo0);
const __m128i vksumzp1 = _mm_or_si128(vzpprodksumhi1, vzpprodksumlo1);
const __m128i vksumzp2 = _mm_or_si128(vzpprodksumhi2, vzpprodksumlo2);
const __m128i vksumzp3 = _mm_or_si128(vzpprodksumhi3, vzpprodksumlo3);
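    // SSE2 also lacks a blend: scatter the correction terms with unpacks instead, leaving
    // 32-bit element N of vksumzpM in lane 0 of vaccMxN.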
const __m128i vksum010 = _mm_unpacklo_epi32(vksumzp0, vzero);
const __m128i vksum230 = _mm_unpackhi_epi32(vksumzp0, vzero);
const __m128i vksum011 = _mm_unpacklo_epi32(vksumzp1, vzero);
const __m128i vksum231 = _mm_unpackhi_epi32(vksumzp1, vzero);
const __m128i vksum012 = _mm_unpacklo_epi32(vksumzp2, vzero);
const __m128i vksum232 = _mm_unpackhi_epi32(vksumzp2, vzero);
const __m128i vksum013 = _mm_unpacklo_epi32(vksumzp3, vzero);
const __m128i vksum233 = _mm_unpackhi_epi32(vksumzp3, vzero);
__m128i vacc0x0 = _mm_unpacklo_epi64(vksum010, vzero);
__m128i vacc0x1 = _mm_unpackhi_epi64(vksum010, vzero);
__m128i vacc1x0 = _mm_unpacklo_epi64(vksum011, vzero);
__m128i vacc1x1 = _mm_unpackhi_epi64(vksum011, vzero);
__m128i vacc2x0 = _mm_unpacklo_epi64(vksum012, vzero);
__m128i vacc2x1 = _mm_unpackhi_epi64(vksum012, vzero);
__m128i vacc3x0 = _mm_unpacklo_epi64(vksum013, vzero);
__m128i vacc3x1 = _mm_unpackhi_epi64(vksum013, vzero);
__m128i vacc0x2 = _mm_unpacklo_epi64(vksum230, vzero);
__m128i vacc0x3 = _mm_unpackhi_epi64(vksum230, vzero);
__m128i vacc1x2 = _mm_unpacklo_epi64(vksum231, vzero);
__m128i vacc1x3 = _mm_unpackhi_epi64(vksum231, vzero);
__m128i vacc2x2 = _mm_unpacklo_epi64(vksum232, vzero);
__m128i vacc2x3 = _mm_unpackhi_epi64(vksum232, vzero);
__m128i vacc3x2 = _mm_unpacklo_epi64(vksum233, vzero);
__m128i vacc3x3 = _mm_unpackhi_epi64(vksum233, vzero);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
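      // SSE2 sign-extension idiom: interleave the byte vector with itself, then
      // arithmetic-shift each 16-bit lane right by 8.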
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_srai_epi16(_mm_unpacklo_epi8(va3, va3), 8);
a3 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
vacc3x0 = _mm_add_epi32(vacc3x0, _mm_madd_epi16(vxa3, vxb0));
vacc3x1 = _mm_add_epi32(vacc3x1, _mm_madd_epi16(vxa3, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));
vacc3x2 = _mm_add_epi32(vacc3x2, _mm_madd_epi16(vxa3, vxb2));
vacc3x3 = _mm_add_epi32(vacc3x3, _mm_madd_epi16(vxa3, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
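    // SSE2 has no phaddd; reduce the four partial sums per (row, column) pair with an
    // unpack/add transpose instead.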
const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
const __m128i vacc1x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x0, vacc1x2), _mm_unpackhi_epi32(vacc1x0, vacc1x2));
const __m128i vacc1x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x1, vacc1x3), _mm_unpackhi_epi32(vacc1x1, vacc1x3));
const __m128i vacc2x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x0, vacc2x2), _mm_unpackhi_epi32(vacc2x0, vacc2x2));
const __m128i vacc2x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x1, vacc2x3), _mm_unpackhi_epi32(vacc2x1, vacc2x3));
const __m128i vacc3x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc3x0, vacc3x2), _mm_unpackhi_epi32(vacc3x0, vacc3x2));
const __m128i vacc3x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc3x1, vacc3x3), _mm_unpackhi_epi32(vacc3x1, vacc3x3));
__m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
__m128i vacc1x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x02, vacc1x13), _mm_unpackhi_epi32(vacc1x02, vacc1x13));
__m128i vacc2x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x02, vacc2x13), _mm_unpackhi_epi32(vacc2x02, vacc2x13));
__m128i vacc3x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc3x02, vacc3x13), _mm_unpackhi_epi32(vacc3x02, vacc3x13));
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vout2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vout3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vscale23 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) (quantization_params + 2)));
const __m128 vscale2 = _mm_shuffle_ps(vscale23, vscale23, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale3 = _mm_shuffle_ps(vscale23, vscale23, _MM_SHUFFLE(3, 3, 3, 3));
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
vout2x0123 = _mm_mul_ps(vout2x0123, vscale2);
vout3x0123 = _mm_mul_ps(vout3x0123, vscale3);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
vout2x0123 = _mm_add_ps(vout2x0123, vbias0123);
vout3x0123 = _mm_add_ps(vout3x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
vout2x0123 = _mm_max_ps(vout2x0123, vmin);
vout2x0123 = _mm_min_ps(vout2x0123, vmax);
vout3x0123 = _mm_max_ps(vout3x0123, vmin);
vout3x0123 = _mm_min_ps(vout3x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c3, vout3x0123);
_mm_storeu_ps(c2, vout2x0123);
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vout3x0123);
vout3x0123 = _mm_unpackhi_ps(vout3x0123, vout3x0123);
c3 += 2;
_mm_storel_pi((__m64*) c2, vout2x0123);
vout2x0123 = _mm_unpackhi_ps(vout2x0123, vout2x0123);
c2 += 2;
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vout3x0123);
_mm_store_ss(c2, vout2x0123);
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 12,602
| 46.738636
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-4x4c8-minmax-sse2-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_4x4c8__sse2_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
const __m128i vzp23 = _mm_loadu_si128((const __m128i*) (quantization_params + 2));
const __m128i vzp2 = _mm_shuffle_epi32(vzp23, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp3 = _mm_shuffle_epi32(vzp23, _MM_SHUFFLE(2, 2, 2, 2));
const __m128i vksum_lo = _mm_srli_epi32(_mm_slli_epi32(vksum, 16), 16);
const __m128i vksum_hi = _mm_srli_epi32(vksum, 16);
__m128i vzpprodksumhi0 = _mm_mulhi_epu16(vzp0, vksum_lo);
const __m128i vzpprodksumlo0 = _mm_mullo_epi16(vzp0, vksum_lo);
__m128i vzpprodksumhi1 = _mm_mulhi_epu16(vzp1, vksum_lo);
const __m128i vzpprodksumlo1 = _mm_mullo_epi16(vzp1, vksum_lo);
__m128i vzpprodksumhi2 = _mm_mulhi_epu16(vzp2, vksum_lo);
const __m128i vzpprodksumlo2 = _mm_mullo_epi16(vzp2, vksum_lo);
__m128i vzpprodksumhi3 = _mm_mulhi_epu16(vzp3, vksum_lo);
const __m128i vzpprodksumlo3 = _mm_mullo_epi16(vzp3, vksum_lo);
vzpprodksumhi0 = _mm_add_epi16(vzpprodksumhi0, _mm_mullo_epi16(vzp0, vksum_hi));
vzpprodksumhi0 = _mm_sub_epi16(vzpprodksumhi0, _mm_and_si128(_mm_srai_epi16(vzp0, 15), vksum_lo));
vzpprodksumhi1 = _mm_add_epi16(vzpprodksumhi1, _mm_mullo_epi16(vzp1, vksum_hi));
vzpprodksumhi1 = _mm_sub_epi16(vzpprodksumhi1, _mm_and_si128(_mm_srai_epi16(vzp1, 15), vksum_lo));
vzpprodksumhi2 = _mm_add_epi16(vzpprodksumhi2, _mm_mullo_epi16(vzp2, vksum_hi));
vzpprodksumhi2 = _mm_sub_epi16(vzpprodksumhi2, _mm_and_si128(_mm_srai_epi16(vzp2, 15), vksum_lo));
vzpprodksumhi3 = _mm_add_epi16(vzpprodksumhi3, _mm_mullo_epi16(vzp3, vksum_hi));
vzpprodksumhi3 = _mm_sub_epi16(vzpprodksumhi3, _mm_and_si128(_mm_srai_epi16(vzp3, 15), vksum_lo));
vzpprodksumhi0 = _mm_slli_si128(vzpprodksumhi0, 2);
vzpprodksumhi1 = _mm_slli_si128(vzpprodksumhi1, 2);
vzpprodksumhi2 = _mm_slli_si128(vzpprodksumhi2, 2);
vzpprodksumhi3 = _mm_slli_si128(vzpprodksumhi3, 2);
const __m128i vksumzp0 = _mm_or_si128(vzpprodksumhi0, vzpprodksumlo0);
const __m128i vksumzp1 = _mm_or_si128(vzpprodksumhi1, vzpprodksumlo1);
const __m128i vksumzp2 = _mm_or_si128(vzpprodksumhi2, vzpprodksumlo2);
const __m128i vksumzp3 = _mm_or_si128(vzpprodksumhi3, vzpprodksumlo3);
const __m128i vksum010 = _mm_unpacklo_epi32(vksumzp0, vzero);
const __m128i vksum230 = _mm_unpackhi_epi32(vksumzp0, vzero);
const __m128i vksum011 = _mm_unpacklo_epi32(vksumzp1, vzero);
const __m128i vksum231 = _mm_unpackhi_epi32(vksumzp1, vzero);
const __m128i vksum012 = _mm_unpacklo_epi32(vksumzp2, vzero);
const __m128i vksum232 = _mm_unpackhi_epi32(vksumzp2, vzero);
const __m128i vksum013 = _mm_unpacklo_epi32(vksumzp3, vzero);
const __m128i vksum233 = _mm_unpackhi_epi32(vksumzp3, vzero);
__m128i vacc0x0 = _mm_unpacklo_epi64(vksum010, vzero);
__m128i vacc0x1 = _mm_unpackhi_epi64(vksum010, vzero);
__m128i vacc1x0 = _mm_unpacklo_epi64(vksum011, vzero);
__m128i vacc1x1 = _mm_unpackhi_epi64(vksum011, vzero);
__m128i vacc2x0 = _mm_unpacklo_epi64(vksum012, vzero);
__m128i vacc2x1 = _mm_unpackhi_epi64(vksum012, vzero);
__m128i vacc3x0 = _mm_unpacklo_epi64(vksum013, vzero);
__m128i vacc3x1 = _mm_unpackhi_epi64(vksum013, vzero);
__m128i vacc0x2 = _mm_unpacklo_epi64(vksum230, vzero);
__m128i vacc0x3 = _mm_unpackhi_epi64(vksum230, vzero);
__m128i vacc1x2 = _mm_unpacklo_epi64(vksum231, vzero);
__m128i vacc1x3 = _mm_unpackhi_epi64(vksum231, vzero);
__m128i vacc2x2 = _mm_unpacklo_epi64(vksum232, vzero);
__m128i vacc2x3 = _mm_unpackhi_epi64(vksum232, vzero);
__m128i vacc3x2 = _mm_unpacklo_epi64(vksum233, vzero);
__m128i vacc3x3 = _mm_unpackhi_epi64(vksum233, vzero);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_srai_epi16(_mm_unpacklo_epi8(va3, va3), 8);
a3 += 8;
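      // ld64 variant: load each 8-byte weight column group separately and sign-extend it
      // with the same duplicate-and-shift idiom.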
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
vacc3x0 = _mm_add_epi32(vacc3x0, _mm_madd_epi16(vxa3, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
vacc3x1 = _mm_add_epi32(vacc3x1, _mm_madd_epi16(vxa3, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
vacc3x2 = _mm_add_epi32(vacc3x2, _mm_madd_epi16(vxa3, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));
vacc3x3 = _mm_add_epi32(vacc3x3, _mm_madd_epi16(vxa3, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
const __m128i vacc1x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x0, vacc1x2), _mm_unpackhi_epi32(vacc1x0, vacc1x2));
const __m128i vacc1x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x1, vacc1x3), _mm_unpackhi_epi32(vacc1x1, vacc1x3));
const __m128i vacc2x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x0, vacc2x2), _mm_unpackhi_epi32(vacc2x0, vacc2x2));
const __m128i vacc2x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x1, vacc2x3), _mm_unpackhi_epi32(vacc2x1, vacc2x3));
const __m128i vacc3x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc3x0, vacc3x2), _mm_unpackhi_epi32(vacc3x0, vacc3x2));
const __m128i vacc3x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc3x1, vacc3x3), _mm_unpackhi_epi32(vacc3x1, vacc3x3));
__m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
__m128i vacc1x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x02, vacc1x13), _mm_unpackhi_epi32(vacc1x02, vacc1x13));
__m128i vacc2x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x02, vacc2x13), _mm_unpackhi_epi32(vacc2x02, vacc2x13));
__m128i vacc3x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc3x02, vacc3x13), _mm_unpackhi_epi32(vacc3x02, vacc3x13));
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vout2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vout3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vscale23 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) (quantization_params + 2)));
const __m128 vscale2 = _mm_shuffle_ps(vscale23, vscale23, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale3 = _mm_shuffle_ps(vscale23, vscale23, _MM_SHUFFLE(3, 3, 3, 3));
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
vout2x0123 = _mm_mul_ps(vout2x0123, vscale2);
vout3x0123 = _mm_mul_ps(vout3x0123, vscale3);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
vout2x0123 = _mm_add_ps(vout2x0123, vbias0123);
vout3x0123 = _mm_add_ps(vout3x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
vout2x0123 = _mm_max_ps(vout2x0123, vmin);
vout2x0123 = _mm_min_ps(vout2x0123, vmax);
vout3x0123 = _mm_max_ps(vout3x0123, vmin);
vout3x0123 = _mm_min_ps(vout3x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c3, vout3x0123);
_mm_storeu_ps(c2, vout2x0123);
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vout3x0123);
vout3x0123 = _mm_unpackhi_ps(vout3x0123, vout3x0123);
c3 += 2;
_mm_storel_pi((__m64*) c2, vout2x0123);
vout2x0123 = _mm_unpackhi_ps(vout2x0123, vout2x0123);
c2 += 2;
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vout3x0123);
_mm_store_ss(c2, vout2x0123);
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 12,696
| 46.733083
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-4x4c8-minmax-sse41-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_4x4c8__sse41_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
const __m128i vzp23 = _mm_loadu_si128((const __m128i*) (quantization_params + 2));
const __m128i vzp2 = _mm_shuffle_epi32(vzp23, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp3 = _mm_shuffle_epi32(vzp23, _MM_SHUFFLE(2, 2, 2, 2));
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
const __m128i vksumzp1 = _mm_mullo_epi32(vksum, vzp1);
const __m128i vksumzp2 = _mm_mullo_epi32(vksum, vzp2);
const __m128i vksumzp3 = _mm_mullo_epi32(vksum, vzp3);
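    // Lane-isolation trick: each accumulator vaccMxN should carry the
    // zero-point correction for output column N only, so the other three
    // 32-bit lanes are zeroed with 16-bit blends (masks 0xFC/0xF3/0xCF/0x3F
    // each pass through exactly one 32-bit lane). After the horizontal
    // reduction below, lane N of the combined vector then holds one
    // correction term plus the column-N dot product.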
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
__m128i vacc1x0 = _mm_blend_epi16(vksumzp1, vzero, 0xFC);
__m128i vacc1x1 = _mm_blend_epi16(vksumzp1, vzero, 0xF3);
__m128i vacc1x2 = _mm_blend_epi16(vksumzp1, vzero, 0xCF);
__m128i vacc1x3 = _mm_blend_epi16(vksumzp1, vzero, 0x3F);
__m128i vacc2x0 = _mm_blend_epi16(vksumzp2, vzero, 0xFC);
__m128i vacc2x1 = _mm_blend_epi16(vksumzp2, vzero, 0xF3);
__m128i vacc2x2 = _mm_blend_epi16(vksumzp2, vzero, 0xCF);
__m128i vacc2x3 = _mm_blend_epi16(vksumzp2, vzero, 0x3F);
__m128i vacc3x0 = _mm_blend_epi16(vksumzp3, vzero, 0xFC);
__m128i vacc3x1 = _mm_blend_epi16(vksumzp3, vzero, 0xF3);
__m128i vacc3x2 = _mm_blend_epi16(vksumzp3, vzero, 0xCF);
__m128i vacc3x3 = _mm_blend_epi16(vksumzp3, vzero, 0x3F);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
a3 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
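      // Sign-extend 16 packed int8 weights: the low 8 bytes via
      // _mm_cvtepi8_epi16, the high 8 by duplicating each byte into a 16-bit
      // lane (unpackhi with itself) and arithmetic-shifting right by 8.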
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
vacc3x0 = _mm_add_epi32(vacc3x0, _mm_madd_epi16(vxa3, vxb0));
vacc3x1 = _mm_add_epi32(vacc3x1, _mm_madd_epi16(vxa3, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));
vacc3x2 = _mm_add_epi32(vacc3x2, _mm_madd_epi16(vxa3, vxb2));
vacc3x3 = _mm_add_epi32(vacc3x3, _mm_madd_epi16(vxa3, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
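    // Reduce the four per-column accumulators of each row: two rounds of
    // _mm_hadd_epi32 leave the complete dot product for output column n in
    // 32-bit lane n of vaccMx0123.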
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);
const __m128i vacc3x01 = _mm_hadd_epi32(vacc3x0, vacc3x1);
const __m128i vacc3x23 = _mm_hadd_epi32(vacc3x2, vacc3x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);
__m128i vacc3x0123 = _mm_hadd_epi32(vacc3x01, vacc3x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vout2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vout3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vscale23 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) (quantization_params + 2)));
const __m128 vscale2 = _mm_shuffle_ps(vscale23, vscale23, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale3 = _mm_shuffle_ps(vscale23, vscale23, _MM_SHUFFLE(3, 3, 3, 3));
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
vout2x0123 = _mm_mul_ps(vout2x0123, vscale2);
vout3x0123 = _mm_mul_ps(vout3x0123, vscale3);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
vout2x0123 = _mm_add_ps(vout2x0123, vbias0123);
vout3x0123 = _mm_add_ps(vout3x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
vout2x0123 = _mm_max_ps(vout2x0123, vmin);
vout2x0123 = _mm_min_ps(vout2x0123, vmax);
vout3x0123 = _mm_max_ps(vout3x0123, vmin);
vout3x0123 = _mm_min_ps(vout3x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c3, vout3x0123);
_mm_storeu_ps(c2, vout2x0123);
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vout3x0123);
vout3x0123 = _mm_unpackhi_ps(vout3x0123, vout3x0123);
c3 += 2;
_mm_storel_pi((__m64*) c2, vout2x0123);
vout2x0123 = _mm_unpackhi_ps(vout2x0123, vout2x0123);
c2 += 2;
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vout3x0123);
_mm_store_ss(c2, vout2x0123);
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 9,516
| 41.297778
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-4x4c8-minmax-sse41-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_4x4c8__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
const __m128i vzp23 = _mm_loadu_si128((const __m128i*) (quantization_params + 2));
const __m128i vzp2 = _mm_shuffle_epi32(vzp23, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp3 = _mm_shuffle_epi32(vzp23, _MM_SHUFFLE(2, 2, 2, 2));
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
const __m128i vksumzp1 = _mm_mullo_epi32(vksum, vzp1);
const __m128i vksumzp2 = _mm_mullo_epi32(vksum, vzp2);
const __m128i vksumzp3 = _mm_mullo_epi32(vksum, vzp3);
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
__m128i vacc1x0 = _mm_blend_epi16(vksumzp1, vzero, 0xFC);
__m128i vacc1x1 = _mm_blend_epi16(vksumzp1, vzero, 0xF3);
__m128i vacc1x2 = _mm_blend_epi16(vksumzp1, vzero, 0xCF);
__m128i vacc1x3 = _mm_blend_epi16(vksumzp1, vzero, 0x3F);
__m128i vacc2x0 = _mm_blend_epi16(vksumzp2, vzero, 0xFC);
__m128i vacc2x1 = _mm_blend_epi16(vksumzp2, vzero, 0xF3);
__m128i vacc2x2 = _mm_blend_epi16(vksumzp2, vzero, 0xCF);
__m128i vacc2x3 = _mm_blend_epi16(vksumzp2, vzero, 0x3F);
__m128i vacc3x0 = _mm_blend_epi16(vksumzp3, vzero, 0xFC);
__m128i vacc3x1 = _mm_blend_epi16(vksumzp3, vzero, 0xF3);
__m128i vacc3x2 = _mm_blend_epi16(vksumzp3, vzero, 0xCF);
__m128i vacc3x3 = _mm_blend_epi16(vksumzp3, vzero, 0x3F);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
a3 += 8;
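      // ld64 variant: each column's 8 weights are fetched with a separate
      // 8-byte load and sign-extended directly with _mm_cvtepi8_epi16,
      // instead of the single 16-byte load split in the ld128 kernel above.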
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
vacc3x0 = _mm_add_epi32(vacc3x0, _mm_madd_epi16(vxa3, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
vacc3x1 = _mm_add_epi32(vacc3x1, _mm_madd_epi16(vxa3, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
vacc3x2 = _mm_add_epi32(vacc3x2, _mm_madd_epi16(vxa3, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));
vacc3x3 = _mm_add_epi32(vacc3x3, _mm_madd_epi16(vxa3, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);
const __m128i vacc3x01 = _mm_hadd_epi32(vacc3x0, vacc3x1);
const __m128i vacc3x23 = _mm_hadd_epi32(vacc3x2, vacc3x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);
__m128i vacc3x0123 = _mm_hadd_epi32(vacc3x01, vacc3x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vout2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vout3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vscale23 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) (quantization_params + 2)));
const __m128 vscale2 = _mm_shuffle_ps(vscale23, vscale23, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale3 = _mm_shuffle_ps(vscale23, vscale23, _MM_SHUFFLE(3, 3, 3, 3));
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
vout2x0123 = _mm_mul_ps(vout2x0123, vscale2);
vout3x0123 = _mm_mul_ps(vout3x0123, vscale3);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
vout2x0123 = _mm_add_ps(vout2x0123, vbias0123);
vout3x0123 = _mm_add_ps(vout3x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
vout2x0123 = _mm_max_ps(vout2x0123, vmin);
vout2x0123 = _mm_min_ps(vout2x0123, vmax);
vout3x0123 = _mm_max_ps(vout3x0123, vmin);
vout3x0123 = _mm_min_ps(vout3x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c3, vout3x0123);
_mm_storeu_ps(c2, vout2x0123);
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vout3x0123);
vout3x0123 = _mm_unpackhi_ps(vout3x0123, vout3x0123);
c3 += 2;
_mm_storel_pi((__m64*) c2, vout2x0123);
vout2x0123 = _mm_unpackhi_ps(vout2x0123, vout2x0123);
c2 += 2;
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vout3x0123);
_mm_store_ss(c2, vout2x0123);
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 9,634
| 41.074236
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-4x4c8-minmax-xop-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_4x4c8__xop_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
const __m128i vzp23 = _mm_loadu_si128((const __m128i*) (quantization_params + 2));
const __m128i vzp2 = _mm_shuffle_epi32(vzp23, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp3 = _mm_shuffle_epi32(vzp23, _MM_SHUFFLE(2, 2, 2, 2));
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
const __m128i vksumzp1 = _mm_mullo_epi32(vksum, vzp1);
const __m128i vksumzp2 = _mm_mullo_epi32(vksum, vzp2);
const __m128i vksumzp3 = _mm_mullo_epi32(vksum, vzp3);
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
__m128i vacc1x0 = _mm_blend_epi16(vksumzp1, vzero, 0xFC);
__m128i vacc1x1 = _mm_blend_epi16(vksumzp1, vzero, 0xF3);
__m128i vacc1x2 = _mm_blend_epi16(vksumzp1, vzero, 0xCF);
__m128i vacc1x3 = _mm_blend_epi16(vksumzp1, vzero, 0x3F);
__m128i vacc2x0 = _mm_blend_epi16(vksumzp2, vzero, 0xFC);
__m128i vacc2x1 = _mm_blend_epi16(vksumzp2, vzero, 0xF3);
__m128i vacc2x2 = _mm_blend_epi16(vksumzp2, vzero, 0xCF);
__m128i vacc2x3 = _mm_blend_epi16(vksumzp2, vzero, 0x3F);
__m128i vacc3x0 = _mm_blend_epi16(vksumzp3, vzero, 0xFC);
__m128i vacc3x1 = _mm_blend_epi16(vksumzp3, vzero, 0xF3);
__m128i vacc3x2 = _mm_blend_epi16(vksumzp3, vzero, 0xCF);
__m128i vacc3x3 = _mm_blend_epi16(vksumzp3, vzero, 0x3F);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
a3 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
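      // XOP: _mm_maddd_epi16 fuses the SSE4.1 _mm_madd_epi16 + _mm_add_epi32
      // pair into a single multiply-add-accumulate instruction.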
vacc0x0 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0);
vacc0x1 = _mm_maddd_epi16(vxa0, vxb1, vacc0x1);
vacc1x0 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0);
vacc1x1 = _mm_maddd_epi16(vxa1, vxb1, vacc1x1);
vacc2x0 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0);
vacc2x1 = _mm_maddd_epi16(vxa2, vxb1, vacc2x1);
vacc3x0 = _mm_maddd_epi16(vxa3, vxb0, vacc3x0);
vacc3x1 = _mm_maddd_epi16(vxa3, vxb1, vacc3x1);
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x2 = _mm_maddd_epi16(vxa0, vxb2, vacc0x2);
vacc0x3 = _mm_maddd_epi16(vxa0, vxb3, vacc0x3);
vacc1x2 = _mm_maddd_epi16(vxa1, vxb2, vacc1x2);
vacc1x3 = _mm_maddd_epi16(vxa1, vxb3, vacc1x3);
vacc2x2 = _mm_maddd_epi16(vxa2, vxb2, vacc2x2);
vacc2x3 = _mm_maddd_epi16(vxa2, vxb3, vacc2x3);
vacc3x2 = _mm_maddd_epi16(vxa3, vxb2, vacc3x2);
vacc3x3 = _mm_maddd_epi16(vxa3, vxb3, vacc3x3);
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);
const __m128i vacc3x01 = _mm_hadd_epi32(vacc3x0, vacc3x1);
const __m128i vacc3x23 = _mm_hadd_epi32(vacc3x2, vacc3x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);
__m128i vacc3x0123 = _mm_hadd_epi32(vacc3x01, vacc3x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vout2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vout3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vscale23 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) (quantization_params + 2)));
const __m128 vscale2 = _mm_shuffle_ps(vscale23, vscale23, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale3 = _mm_shuffle_ps(vscale23, vscale23, _MM_SHUFFLE(3, 3, 3, 3));
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
vout2x0123 = _mm_mul_ps(vout2x0123, vscale2);
vout3x0123 = _mm_mul_ps(vout3x0123, vscale3);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
vout2x0123 = _mm_add_ps(vout2x0123, vbias0123);
vout3x0123 = _mm_add_ps(vout3x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
vout2x0123 = _mm_max_ps(vout2x0123, vmin);
vout2x0123 = _mm_min_ps(vout2x0123, vmax);
vout3x0123 = _mm_max_ps(vout3x0123, vmin);
vout3x0123 = _mm_min_ps(vout3x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c3, vout3x0123);
_mm_storeu_ps(c2, vout2x0123);
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vout3x0123);
vout3x0123 = _mm_unpackhi_ps(vout3x0123, vout3x0123);
c3 += 2;
_mm_storel_pi((__m64*) c2, vout2x0123);
vout2x0123 = _mm_unpackhi_ps(vout2x0123, vout2x0123);
c2 += 2;
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vout3x0123);
_mm_store_ss(c2, vout2x0123);
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 9,343
| 39.803493
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-4x4c8-minmax-xop-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_4x4c8__xop_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
const __m128i vksum = _mm_load_si128((const __m128i*) w);
const __m128i vzero = _mm_setzero_si128();
const __m128i vzp01 = _mm_loadu_si128((const __m128i*) quantization_params);
const __m128i vzp0 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp1 = _mm_shuffle_epi32(vzp01, _MM_SHUFFLE(2, 2, 2, 2));
const __m128i vzp23 = _mm_loadu_si128((const __m128i*) (quantization_params + 2));
const __m128i vzp2 = _mm_shuffle_epi32(vzp23, _MM_SHUFFLE(0, 0, 0, 0));
const __m128i vzp3 = _mm_shuffle_epi32(vzp23, _MM_SHUFFLE(2, 2, 2, 2));
const __m128i vksumzp0 = _mm_mullo_epi32(vksum, vzp0);
const __m128i vksumzp1 = _mm_mullo_epi32(vksum, vzp1);
const __m128i vksumzp2 = _mm_mullo_epi32(vksum, vzp2);
const __m128i vksumzp3 = _mm_mullo_epi32(vksum, vzp3);
__m128i vacc0x0 = _mm_blend_epi16(vksumzp0, vzero, 0xFC);
__m128i vacc0x1 = _mm_blend_epi16(vksumzp0, vzero, 0xF3);
__m128i vacc0x2 = _mm_blend_epi16(vksumzp0, vzero, 0xCF);
__m128i vacc0x3 = _mm_blend_epi16(vksumzp0, vzero, 0x3F);
__m128i vacc1x0 = _mm_blend_epi16(vksumzp1, vzero, 0xFC);
__m128i vacc1x1 = _mm_blend_epi16(vksumzp1, vzero, 0xF3);
__m128i vacc1x2 = _mm_blend_epi16(vksumzp1, vzero, 0xCF);
__m128i vacc1x3 = _mm_blend_epi16(vksumzp1, vzero, 0x3F);
__m128i vacc2x0 = _mm_blend_epi16(vksumzp2, vzero, 0xFC);
__m128i vacc2x1 = _mm_blend_epi16(vksumzp2, vzero, 0xF3);
__m128i vacc2x2 = _mm_blend_epi16(vksumzp2, vzero, 0xCF);
__m128i vacc2x3 = _mm_blend_epi16(vksumzp2, vzero, 0x3F);
__m128i vacc3x0 = _mm_blend_epi16(vksumzp3, vzero, 0xFC);
__m128i vacc3x1 = _mm_blend_epi16(vksumzp3, vzero, 0xF3);
__m128i vacc3x2 = _mm_blend_epi16(vksumzp3, vzero, 0xCF);
__m128i vacc3x3 = _mm_blend_epi16(vksumzp3, vzero, 0x3F);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
a3 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0);
vacc1x0 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0);
vacc2x0 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0);
vacc3x0 = _mm_maddd_epi16(vxa3, vxb0, vacc3x0);
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x1 = _mm_maddd_epi16(vxa0, vxb1, vacc0x1);
vacc1x1 = _mm_maddd_epi16(vxa1, vxb1, vacc1x1);
vacc2x1 = _mm_maddd_epi16(vxa2, vxb1, vacc2x1);
vacc3x1 = _mm_maddd_epi16(vxa3, vxb1, vacc3x1);
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x2 = _mm_maddd_epi16(vxa0, vxb2, vacc0x2);
vacc1x2 = _mm_maddd_epi16(vxa1, vxb2, vacc1x2);
vacc2x2 = _mm_maddd_epi16(vxa2, vxb2, vacc2x2);
vacc3x2 = _mm_maddd_epi16(vxa3, vxb2, vacc3x2);
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x3 = _mm_maddd_epi16(vxa0, vxb3, vacc0x3);
vacc1x3 = _mm_maddd_epi16(vxa1, vxb3, vacc1x3);
vacc2x3 = _mm_maddd_epi16(vxa2, vxb3, vacc2x3);
vacc3x3 = _mm_maddd_epi16(vxa3, vxb3, vacc3x3);
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);
const __m128i vacc3x01 = _mm_hadd_epi32(vacc3x0, vacc3x1);
const __m128i vacc3x23 = _mm_hadd_epi32(vacc3x2, vacc3x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);
__m128i vacc3x0123 = _mm_hadd_epi32(vacc3x01, vacc3x23);
__m128 vout0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vout1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vout2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vout3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale01 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) quantization_params));
const __m128 vscale0 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale1 = _mm_shuffle_ps(vscale01, vscale01, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vscale23 = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) (quantization_params + 2)));
const __m128 vscale2 = _mm_shuffle_ps(vscale23, vscale23, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vscale3 = _mm_shuffle_ps(vscale23, vscale23, _MM_SHUFFLE(3, 3, 3, 3));
vout0x0123 = _mm_mul_ps(vout0x0123, vscale0);
vout1x0123 = _mm_mul_ps(vout1x0123, vscale1);
vout2x0123 = _mm_mul_ps(vout2x0123, vscale2);
vout3x0123 = _mm_mul_ps(vout3x0123, vscale3);
const __m128 vbias0123 = _mm_load_ps(w); w = (const float*) w + 4;
vout0x0123 = _mm_add_ps(vout0x0123, vbias0123);
vout1x0123 = _mm_add_ps(vout1x0123, vbias0123);
vout2x0123 = _mm_add_ps(vout2x0123, vbias0123);
vout3x0123 = _mm_add_ps(vout3x0123, vbias0123);
const __m128 vmax = _mm_load_ps(¶ms->sse.max[0]);
const __m128 vmin = _mm_load_ps(¶ms->sse.min[0]);
vout0x0123 = _mm_max_ps(vout0x0123, vmin);
vout0x0123 = _mm_min_ps(vout0x0123, vmax);
vout1x0123 = _mm_max_ps(vout1x0123, vmin);
vout1x0123 = _mm_min_ps(vout1x0123, vmax);
vout2x0123 = _mm_max_ps(vout2x0123, vmin);
vout2x0123 = _mm_min_ps(vout2x0123, vmax);
vout3x0123 = _mm_max_ps(vout3x0123, vmin);
vout3x0123 = _mm_min_ps(vout3x0123, vmax);
if XNN_LIKELY(nc >= 4) {
_mm_storeu_ps(c3, vout3x0123);
_mm_storeu_ps(c2, vout2x0123);
_mm_storeu_ps(c1, vout1x0123);
_mm_storeu_ps(c0, vout0x0123);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vout3x0123);
vout3x0123 = _mm_unpackhi_ps(vout3x0123, vout3x0123);
c3 += 2;
_mm_storel_pi((__m64*) c2, vout2x0123);
vout2x0123 = _mm_unpackhi_ps(vout2x0123, vout2x0123);
c2 += 2;
_mm_storel_pi((__m64*) c1, vout1x0123);
vout1x0123 = _mm_unpackhi_ps(vout1x0123, vout1x0123);
c1 += 2;
_mm_storel_pi((__m64*) c0, vout0x0123);
vout0x0123 = _mm_unpackhi_ps(vout0x0123, vout0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vout3x0123);
_mm_store_ss(c2, vout2x0123);
_mm_store_ss(c1, vout1x0123);
_mm_store_ss(c0, vout0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 9,461
| 39.609442
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qd8-f32-qs8w-gemm/gen/qd8-f32-qs8w-gemm-4x8c4-minmax-neondot.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qd8_f32_qs8w_gemm_minmax_ukernel_4x8c4__neondot(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
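  // Each vdot lane consumes 4 bytes of K, so kc is rounded up to a multiple
  // of 4; the packed activations and weights are assumed to be padded to
  // match.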
const int8_t* a0 = a;
float* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const int32x4_t vzp0 = vdupq_n_s32(quantization_params[0].zero_point);
const int32x4_t vzp1 = vdupq_n_s32(quantization_params[1].zero_point);
const int32x4_t vzp2 = vdupq_n_s32(quantization_params[2].zero_point);
const int32x4_t vzp3 = vdupq_n_s32(quantization_params[3].zero_point);
// Loop over groups of 8 columns.
do {
    // Initialize accumulators with the 8 per-column weight sums packed at
    // the start of this group of 8 columns; they are scaled by each row's
    // activation zero point below to apply the asymmetric-quantization
    // correction (the float bias itself is added after the inner loop).
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc1x0123 = vacc0x0123;
int32x4_t vacc1x4567 = vacc0x4567;
int32x4_t vacc2x0123 = vacc0x0123;
int32x4_t vacc2x4567 = vacc0x4567;
int32x4_t vacc3x0123 = vacc0x0123;
int32x4_t vacc3x4567 = vacc0x4567;
vacc0x0123 = vmulq_s32(vacc0x0123, vzp0);
vacc0x4567 = vmulq_s32(vacc0x4567, vzp0);
vacc1x0123 = vmulq_s32(vacc1x0123, vzp1);
vacc1x4567 = vmulq_s32(vacc1x4567, vzp1);
vacc2x0123 = vmulq_s32(vacc2x0123, vzp2);
vacc2x4567 = vmulq_s32(vacc2x4567, vzp2);
vacc3x0123 = vmulq_s32(vacc3x0123, vzp3);
vacc3x4567 = vmulq_s32(vacc3x4567, vzp3);
// Inner accumulation loop along the 8 columns.
size_t k = kc;
// 2x partial unrolled loop to load 8 bytes at a time.
while (k >= 8 * sizeof(int8_t)) {
// Load a 4x8 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;
const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 8;
const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 8;
const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 8;
      // Load an 8x8 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 4x8 * 8x8 --> 4x8.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb4567x0123, va1x01234567, 1);
vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb4567x4567, va1x01234567, 1);
vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb4567x0123, va2x01234567, 1);
vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb4567x4567, va2x01234567, 1);
vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb4567x0123, va3x01234567, 1);
vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb4567x4567, va3x01234567, 1);
k -= 8 * sizeof(int8_t);
}
    // Handle the final up-to-4 bytes of `k` left over after the 8-byte loop.
if XNN_UNLIKELY(k != 0) {
      // Load a 4x4 block of activations (8-byte loads; the 4 extra bytes are
      // permitted because the kernel is declared XNN_OOB_READS).
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 4;
const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 4;
const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 4;
const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 4;
// Load a 4x8 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 4x4 * 4x8 --> 4x8.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
}
float32x4_t vout0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vout0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vout1x0123 = vcvtq_f32_s32(vacc1x0123);
float32x4_t vout1x4567 = vcvtq_f32_s32(vacc1x4567);
float32x4_t vout2x0123 = vcvtq_f32_s32(vacc2x0123);
float32x4_t vout2x4567 = vcvtq_f32_s32(vacc2x4567);
float32x4_t vout3x0123 = vcvtq_f32_s32(vacc3x0123);
float32x4_t vout3x4567 = vcvtq_f32_s32(vacc3x4567);
const float32x4_t vscale0 = vdupq_n_f32(quantization_params[0].scale);
vout0x0123 = vmulq_f32(vout0x0123, vscale0);
vout0x4567 = vmulq_f32(vout0x4567, vscale0);
const float32x4_t vscale1 = vdupq_n_f32(quantization_params[1].scale);
vout1x0123 = vmulq_f32(vout1x0123, vscale1);
vout1x4567 = vmulq_f32(vout1x4567, vscale1);
const float32x4_t vscale2 = vdupq_n_f32(quantization_params[2].scale);
vout2x0123 = vmulq_f32(vout2x0123, vscale2);
vout2x4567 = vmulq_f32(vout2x4567, vscale2);
const float32x4_t vscale3 = vdupq_n_f32(quantization_params[3].scale);
vout3x0123 = vmulq_f32(vout3x0123, vscale3);
vout3x4567 = vmulq_f32(vout3x4567, vscale3);
const float32x4_t vbias0123 = vld1q_f32(w); w = (const float*) w + 4;
const float32x4_t vbias4567 = vld1q_f32(w); w = (const float*) w + 4;
vout0x0123 = vaddq_f32(vout0x0123, vbias0123);
vout0x4567 = vaddq_f32(vout0x4567, vbias4567);
vout1x0123 = vaddq_f32(vout1x0123, vbias0123);
vout1x4567 = vaddq_f32(vout1x4567, vbias4567);
vout2x0123 = vaddq_f32(vout2x0123, vbias0123);
vout2x4567 = vaddq_f32(vout2x4567, vbias4567);
vout3x0123 = vaddq_f32(vout3x0123, vbias0123);
vout3x4567 = vaddq_f32(vout3x4567, vbias4567);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vout0x0123 = vmaxq_f32(vout0x0123, vmin);
vout0x0123 = vminq_f32(vout0x0123, vmax);
vout0x4567 = vmaxq_f32(vout0x4567, vmin);
vout0x4567 = vminq_f32(vout0x4567, vmax);
vout1x0123 = vmaxq_f32(vout1x0123, vmin);
vout1x0123 = vminq_f32(vout1x0123, vmax);
vout1x4567 = vmaxq_f32(vout1x4567, vmin);
vout1x4567 = vminq_f32(vout1x4567, vmax);
vout2x0123 = vmaxq_f32(vout2x0123, vmin);
vout2x0123 = vminq_f32(vout2x0123, vmax);
vout2x4567 = vmaxq_f32(vout2x4567, vmin);
vout2x4567 = vminq_f32(vout2x4567, vmax);
vout3x0123 = vmaxq_f32(vout3x0123, vmin);
vout3x0123 = vminq_f32(vout3x0123, vmax);
vout3x4567 = vmaxq_f32(vout3x4567, vmin);
vout3x4567 = vminq_f32(vout3x4567, vmax);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(&c3[0], vout3x0123);
vst1q_f32(&c3[4], vout3x4567);
vst1q_f32(&c2[0], vout2x0123);
vst1q_f32(&c2[4], vout2x4567);
vst1q_f32(&c1[0], vout1x0123);
vst1q_f32(&c1[4], vout1x4567);
vst1q_f32(&c0[0], vout0x0123);
vst1q_f32(&c0[4], vout0x4567);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vout3x0123); c3 += 4;
vout3x0123 = vout3x4567;
vst1q_f32(c2, vout2x0123); c2 += 4;
vout2x0123 = vout2x4567;
vst1q_f32(c1, vout1x0123); c1 += 4;
vout1x0123 = vout1x4567;
vst1q_f32(c0, vout0x0123); c0 += 4;
vout0x0123 = vout0x4567;
}
float32x2_t vout3x01 = vget_low_f32(vout3x0123);
float32x2_t vout2x01 = vget_low_f32(vout2x0123);
float32x2_t vout1x01 = vget_low_f32(vout1x0123);
float32x2_t vout0x01 = vget_low_f32(vout0x0123);
if (nc & 2) {
vst1_f32(c3, vout3x01); c3 += 2;
vst1_f32(c2, vout2x01); c2 += 2;
vst1_f32(c1, vout1x01); c1 += 2;
vst1_f32(c0, vout0x01); c0 += 2;
vout3x01 = vget_high_f32(vout3x0123);
vout2x01 = vget_high_f32(vout2x0123);
vout1x01 = vget_high_f32(vout1x0123);
vout0x01 = vget_high_f32(vout0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vout3x01, 0);
vst1_lane_f32(c2, vout2x01, 0);
vst1_lane_f32(c1, vout1x01, 0);
vst1_lane_f32(c0, vout0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,614
| 41.123016
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qs16-qs8-vcvt/gen/qs16-qs8-vcvt-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs16-qs8-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs16_qs8_vcvt_ukernel__avx_x16(
size_t batch,
const int16_t* input,
int8_t* output,
const union xnn_qs16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse4.multiplier);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4.bias);
const __m128i vshuffle01 = _mm_load_si128((const __m128i*) params->sse4.shuffle01);
const __m128i vshuffle23 = _mm_load_si128((const __m128i*) params->sse4.shuffle23);
const __m128i vshuffle45 = _mm_load_si128((const __m128i*) params->sse4.shuffle45);
const __m128i vshuffle67 = _mm_load_si128((const __m128i*) params->sse4.shuffle67);
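  // Conversion scheme (as implemented below): each int16 is shuffled into
  // the upper half of a 32-bit lane, multiplied by the 32-bit multiplier
  // with _mm_mul_epi32 into a 64-bit product, and offset by vbias, which is
  // presumed to fold in the rounding term and the output zero point; the
  // requantized value is then taken from bits 32..63 of each 64-bit lane.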
for (; batch >= 16 * sizeof(int16_t); batch -= 16 * sizeof(int16_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) input); input += 8;
const __m128i vx2 = _mm_loadu_si128((const __m128i*) input); input += 8;
// Move int16 to upper part of int32
__m128i vacc0lo = _mm_shuffle_epi8(vx0, vshuffle01);
__m128i vacc0hi = _mm_shuffle_epi8(vx0, vshuffle23);
__m128i vacc1lo = _mm_shuffle_epi8(vx0, vshuffle45);
__m128i vacc1hi = _mm_shuffle_epi8(vx0, vshuffle67);
__m128i vacc2lo = _mm_shuffle_epi8(vx2, vshuffle01);
__m128i vacc2hi = _mm_shuffle_epi8(vx2, vshuffle23);
__m128i vacc3lo = _mm_shuffle_epi8(vx2, vshuffle45);
__m128i vacc3hi = _mm_shuffle_epi8(vx2, vshuffle67);
vacc0lo = _mm_mul_epi32(vacc0lo, vmultiplier);
vacc0hi = _mm_mul_epi32(vacc0hi, vmultiplier);
vacc1lo = _mm_mul_epi32(vacc1lo, vmultiplier);
vacc1hi = _mm_mul_epi32(vacc1hi, vmultiplier);
vacc2lo = _mm_mul_epi32(vacc2lo, vmultiplier);
vacc2hi = _mm_mul_epi32(vacc2hi, vmultiplier);
vacc3lo = _mm_mul_epi32(vacc3lo, vmultiplier);
vacc3hi = _mm_mul_epi32(vacc3hi, vmultiplier);
vacc0lo = _mm_add_epi64(vacc0lo, vbias);
vacc0hi = _mm_add_epi64(vacc0hi, vbias);
vacc1lo = _mm_add_epi64(vacc1lo, vbias);
vacc1hi = _mm_add_epi64(vacc1hi, vbias);
vacc2lo = _mm_add_epi64(vacc2lo, vbias);
vacc2hi = _mm_add_epi64(vacc2hi, vbias);
vacc3lo = _mm_add_epi64(vacc3lo, vbias);
vacc3hi = _mm_add_epi64(vacc3hi, vbias);
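    // Gather bits 32..63 of each 64-bit product: casting to float lanes and
    // shuffling with _MM_SHUFFLE(3, 1, 3, 1) packs the odd 32-bit lanes of
    // the lo/hi halves into a single vector.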
__m128i vacc0 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacc0lo), _mm_castsi128_ps(vacc0hi), _MM_SHUFFLE(3, 1, 3, 1)));
__m128i vacc1 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacc1lo), _mm_castsi128_ps(vacc1hi), _MM_SHUFFLE(3, 1, 3, 1)));
__m128i vacc2 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacc2lo), _mm_castsi128_ps(vacc2hi), _MM_SHUFFLE(3, 1, 3, 1)));
__m128i vacc3 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacc3lo), _mm_castsi128_ps(vacc3hi), _MM_SHUFFLE(3, 1, 3, 1)));
// Pack 8 ints into 8 shorts
vacc0 = _mm_packs_epi32(vacc0, vacc1);
vacc2 = _mm_packs_epi32(vacc2, vacc3);
// Pack 16 shorts into 16 bytes
const __m128i vy0 = _mm_packs_epi16(vacc0, vacc2);
_mm_storeu_si128((__m128i*) output, vy0); output += 16;
}
for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) input); input += 4;
__m128i vacclo = _mm_shuffle_epi8(vx, vshuffle01);
__m128i vacchi = _mm_shuffle_epi8(vx, vshuffle23);
vacclo = _mm_mul_epi32(vacclo, vmultiplier);
vacchi = _mm_mul_epi32(vacchi, vmultiplier);
vacclo = _mm_add_epi64(vacclo, vbias);
vacchi = _mm_add_epi64(vacchi, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacclo), _mm_castsi128_ps(vacchi), _MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_packs_epi32(vacc, vacc);
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storeu_si32(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
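    // 1-3 element tail: the full 16-byte load below stays safe because this
    // kernel is declared XNN_OOB_READS.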
assert(batch >= 1 * sizeof(int16_t));
assert(batch <= 3 * sizeof(int16_t));
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
__m128i vacclo = _mm_shuffle_epi8(vx, vshuffle01);
__m128i vacchi = _mm_shuffle_epi8(vx, vshuffle23);
vacclo = _mm_mul_epi32(vacclo, vmultiplier);
vacchi = _mm_mul_epi32(vacchi, vmultiplier);
vacclo = _mm_add_epi64(vacclo, vbias);
vacchi = _mm_add_epi64(vacchi, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacclo), _mm_castsi128_ps(vacchi), _MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_packs_epi32(vacc, vacc);
__m128i vy = _mm_packs_epi16(vacc, vacc);
if (batch & (2 * sizeof(int16_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int16_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 5,202
| 41.300813
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qs16-qs8-vcvt/gen/qs16-qs8-vcvt-avx-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/qs16-qs8-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs16_qs8_vcvt_ukernel__avx_x4(
size_t batch,
const int16_t* input,
int8_t* output,
const union xnn_qs16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse4.multiplier);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4.bias);
const __m128i vshuffle01 = _mm_load_si128((const __m128i*) params->sse4.shuffle01);
const __m128i vshuffle23 = _mm_load_si128((const __m128i*) params->sse4.shuffle23);
for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) input); input += 4;
__m128i vacclo = _mm_shuffle_epi8(vx, vshuffle01);
__m128i vacchi = _mm_shuffle_epi8(vx, vshuffle23);
vacclo = _mm_mul_epi32(vacclo, vmultiplier);
vacchi = _mm_mul_epi32(vacchi, vmultiplier);
vacclo = _mm_add_epi64(vacclo, vbias);
vacchi = _mm_add_epi64(vacchi, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacclo), _mm_castsi128_ps(vacchi), _MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_packs_epi32(vacc, vacc);
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storeu_si32(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int16_t));
assert(batch <= 3 * sizeof(int16_t));
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
__m128i vacclo = _mm_shuffle_epi8(vx, vshuffle01);
__m128i vacchi = _mm_shuffle_epi8(vx, vshuffle23);
vacclo = _mm_mul_epi32(vacclo, vmultiplier);
vacchi = _mm_mul_epi32(vacchi, vmultiplier);
vacclo = _mm_add_epi64(vacclo, vbias);
vacchi = _mm_add_epi64(vacchi, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacclo), _mm_castsi128_ps(vacchi), _MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_packs_epi32(vacc, vacc);
__m128i vy = _mm_packs_epi16(vacc, vacc);
if (batch & (2 * sizeof(int16_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int16_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 2,716
| 35.226667
| 129
|
c
|
XNNPACK
|
XNNPACK-master/src/qs16-qs8-vcvt/gen/qs16-qs8-vcvt-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs16-qs8-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs16_qs8_vcvt_ukernel__avx_x8(
size_t batch,
const int16_t* input,
int8_t* output,
const union xnn_qs16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse4.multiplier);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4.bias);
const __m128i vshuffle01 = _mm_load_si128((const __m128i*) params->sse4.shuffle01);
const __m128i vshuffle23 = _mm_load_si128((const __m128i*) params->sse4.shuffle23);
const __m128i vshuffle45 = _mm_load_si128((const __m128i*) params->sse4.shuffle45);
const __m128i vshuffle67 = _mm_load_si128((const __m128i*) params->sse4.shuffle67);
for (; batch >= 8 * sizeof(int16_t); batch -= 8 * sizeof(int16_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) input); input += 8;
// Move int16 to upper part of int32
__m128i vacc0lo = _mm_shuffle_epi8(vx0, vshuffle01);
__m128i vacc0hi = _mm_shuffle_epi8(vx0, vshuffle23);
__m128i vacc1lo = _mm_shuffle_epi8(vx0, vshuffle45);
__m128i vacc1hi = _mm_shuffle_epi8(vx0, vshuffle67);
vacc0lo = _mm_mul_epi32(vacc0lo, vmultiplier);
vacc0hi = _mm_mul_epi32(vacc0hi, vmultiplier);
vacc1lo = _mm_mul_epi32(vacc1lo, vmultiplier);
vacc1hi = _mm_mul_epi32(vacc1hi, vmultiplier);
vacc0lo = _mm_add_epi64(vacc0lo, vbias);
vacc0hi = _mm_add_epi64(vacc0hi, vbias);
vacc1lo = _mm_add_epi64(vacc1lo, vbias);
vacc1hi = _mm_add_epi64(vacc1hi, vbias);
__m128i vacc0 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacc0lo), _mm_castsi128_ps(vacc0hi), _MM_SHUFFLE(3, 1, 3, 1)));
__m128i vacc1 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacc1lo), _mm_castsi128_ps(vacc1hi), _MM_SHUFFLE(3, 1, 3, 1)));
// Pack 8 ints into 8 shorts
vacc0 = _mm_packs_epi32(vacc0, vacc1);
// Pack 8 shorts into 8 bytes
const __m128i vy0 = _mm_packs_epi16(vacc0, vacc0);
_mm_storel_epi64((__m128i*) output, vy0); output += 8;
}
for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) input); input += 4;
__m128i vacclo = _mm_shuffle_epi8(vx, vshuffle01);
__m128i vacchi = _mm_shuffle_epi8(vx, vshuffle23);
vacclo = _mm_mul_epi32(vacclo, vmultiplier);
vacchi = _mm_mul_epi32(vacchi, vmultiplier);
vacclo = _mm_add_epi64(vacclo, vbias);
vacchi = _mm_add_epi64(vacchi, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacclo), _mm_castsi128_ps(vacchi), _MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_packs_epi32(vacc, vacc);
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storeu_si32(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int16_t));
assert(batch <= 3 * sizeof(int16_t));
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
__m128i vacclo = _mm_shuffle_epi8(vx, vshuffle01);
__m128i vacchi = _mm_shuffle_epi8(vx, vshuffle23);
vacclo = _mm_mul_epi32(vacclo, vmultiplier);
vacchi = _mm_mul_epi32(vacchi, vmultiplier);
vacclo = _mm_add_epi64(vacclo, vbias);
vacchi = _mm_add_epi64(vacchi, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacclo), _mm_castsi128_ps(vacchi), _MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_packs_epi32(vacc, vacc);
__m128i vy = _mm_packs_epi16(vacc, vacc);
if (batch & (2 * sizeof(int16_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int16_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 4,194
| 38.205607
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qs16-qs8-vcvt/gen/qs16-qs8-vcvt-neon-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs16-qs8-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs16_qs8_vcvt_ukernel__neon_x16(
size_t batch,
const int16_t* input,
int8_t* output,
const union xnn_qs16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->neon.multiplier);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neon.output_zero_point);
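  // vshll_n_s16(x, 15) widens each int16 to int32 scaled by 2^15;
  // vqrdmulhq_s32 then applies the requantization multiplier as a saturating
  // rounding doubling high-half multiply, and the result is narrowed with
  // saturation before the output zero point is added.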
for (; batch >= 16 * sizeof(int16_t); batch -= 16 * sizeof(int16_t)) {
const int16x8_t vx0 = vld1q_s16(input); input += 8;
const int16x8_t vx1 = vld1q_s16(input); input += 8;
int32x4_t vacc_lo0 = vshll_n_s16(vget_low_s16(vx0), 15);
int32x4_t vacc_hi0 = vshll_n_s16(vget_high_s16(vx0), 15);
int32x4_t vacc_lo1 = vshll_n_s16(vget_low_s16(vx1), 15);
int32x4_t vacc_hi1 = vshll_n_s16(vget_high_s16(vx1), 15);
vacc_lo0 = vqrdmulhq_s32(vacc_lo0, vmultiplier);
vacc_hi0 = vqrdmulhq_s32(vacc_hi0, vmultiplier);
vacc_lo1 = vqrdmulhq_s32(vacc_lo1, vmultiplier);
vacc_hi1 = vqrdmulhq_s32(vacc_hi1, vmultiplier);
int16x8_t vacc0 = vcombine_s16(vqmovn_s32(vacc_lo0), vqmovn_s32(vacc_hi0));
int16x8_t vacc1 = vcombine_s16(vqmovn_s32(vacc_lo1), vqmovn_s32(vacc_hi1));
vacc0 = vqaddq_s16(vacc0, voutput_zero_point);
vacc1 = vqaddq_s16(vacc1, voutput_zero_point);
const int8x8_t vy0 = vqmovn_s16(vacc0);
const int8x8_t vy1 = vqmovn_s16(vacc1);
vst1_s8(output, vy0); output += 8;
vst1_s8(output, vy1); output += 8;
}
for (; batch >= 8 * sizeof(int16_t); batch -= 8 * sizeof(int16_t)) {
const int16x8_t vx = vld1q_s16(input); input += 8;
int32x4_t vacc_lo = vshll_n_s16(vget_low_s16(vx), 15);
int32x4_t vacc_hi = vshll_n_s16(vget_high_s16(vx), 15);
vacc_lo = vqrdmulhq_s32(vacc_lo, vmultiplier);
vacc_hi = vqrdmulhq_s32(vacc_hi, vmultiplier);
int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
vacc = vqaddq_s16(vacc, voutput_zero_point);
const int8x8_t vy = vqmovn_s16(vacc);
vst1_s8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int16_t));
assert(batch <= 7 * sizeof(int16_t));
const int16x8_t vx = vld1q_s16(input);
int32x4_t vacc_lo = vshll_n_s16(vget_low_s16(vx), 15);
int32x4_t vacc_hi = vshll_n_s16(vget_high_s16(vx), 15);
vacc_lo = vqrdmulhq_s32(vacc_lo, vmultiplier);
vacc_hi = vqrdmulhq_s32(vacc_hi, vmultiplier);
int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
vacc = vqaddq_s16(vacc, voutput_zero_point);
int8x8_t vy = vqmovn_s16(vacc);
if (batch & (4 * sizeof(int16_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vy), 0); output += 4;
vy = vext_s8(vy, vy, 4);
}
if (batch & (2 * sizeof(int16_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vy), 0); output += 2;
vy = vext_s8(vy, vy, 2);
}
if (batch & (1 * sizeof(int16_t))) {
vst1_lane_s8((void*) output, vy, 0);
}
}
}
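
Aside (editor's sketch, assuming standard vqrdmulh semantics; not XNNPACK API): the NEON kernels shift each int16 into the top of an int32 with vshll_n_s16(..., 15) and then apply vqrdmulhq_s32, which together evaluate a rounded x*multiplier >> 16. One lane in scalar form, with saturation omitted:

#include <stdint.h>

// Models vshll_n_s16(x, 15) followed by vqrdmulhq_s32(acc, multiplier),
// i.e. (2*a*b + 2^31) >> 32 with a = x << 15.
static int32_t neon_lane_model(int16_t x, int32_t multiplier) {
  const int64_t widened = (int64_t) x << 15;
  const int64_t doubled = 2 * widened * (int64_t) multiplier;
  return (int32_t) ((doubled + (INT64_C(1) << 31)) >> 32);  // == (x*m + 2^15) >> 16
}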
| 3,439 | 35.989247 | 91 | c |
XNNPACK | XNNPACK-master/src/qs16-qs8-vcvt/gen/qs16-qs8-vcvt-neon-x32.c |
// Auto-generated file. Do not edit!
// Template: src/qs16-qs8-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs16_qs8_vcvt_ukernel__neon_x32(
size_t batch,
const int16_t* input,
int8_t* output,
const union xnn_qs16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
for (; batch >= 32 * sizeof(int16_t); batch -= 32 * sizeof(int16_t)) {
const int16x8_t vx0 = vld1q_s16(input); input += 8;
const int16x8_t vx1 = vld1q_s16(input); input += 8;
const int16x8_t vx2 = vld1q_s16(input); input += 8;
const int16x8_t vx3 = vld1q_s16(input); input += 8;
int32x4_t vacc_lo0 = vshll_n_s16(vget_low_s16(vx0), 15);
int32x4_t vacc_hi0 = vshll_n_s16(vget_high_s16(vx0), 15);
int32x4_t vacc_lo1 = vshll_n_s16(vget_low_s16(vx1), 15);
int32x4_t vacc_hi1 = vshll_n_s16(vget_high_s16(vx1), 15);
int32x4_t vacc_lo2 = vshll_n_s16(vget_low_s16(vx2), 15);
int32x4_t vacc_hi2 = vshll_n_s16(vget_high_s16(vx2), 15);
int32x4_t vacc_lo3 = vshll_n_s16(vget_low_s16(vx3), 15);
int32x4_t vacc_hi3 = vshll_n_s16(vget_high_s16(vx3), 15);
vacc_lo0 = vqrdmulhq_s32(vacc_lo0, vmultiplier);
vacc_hi0 = vqrdmulhq_s32(vacc_hi0, vmultiplier);
vacc_lo1 = vqrdmulhq_s32(vacc_lo1, vmultiplier);
vacc_hi1 = vqrdmulhq_s32(vacc_hi1, vmultiplier);
vacc_lo2 = vqrdmulhq_s32(vacc_lo2, vmultiplier);
vacc_hi2 = vqrdmulhq_s32(vacc_hi2, vmultiplier);
vacc_lo3 = vqrdmulhq_s32(vacc_lo3, vmultiplier);
vacc_hi3 = vqrdmulhq_s32(vacc_hi3, vmultiplier);
int16x8_t vacc0 = vcombine_s16(vqmovn_s32(vacc_lo0), vqmovn_s32(vacc_hi0));
int16x8_t vacc1 = vcombine_s16(vqmovn_s32(vacc_lo1), vqmovn_s32(vacc_hi1));
int16x8_t vacc2 = vcombine_s16(vqmovn_s32(vacc_lo2), vqmovn_s32(vacc_hi2));
int16x8_t vacc3 = vcombine_s16(vqmovn_s32(vacc_lo3), vqmovn_s32(vacc_hi3));
vacc0 = vqaddq_s16(vacc0, voutput_zero_point);
vacc1 = vqaddq_s16(vacc1, voutput_zero_point);
vacc2 = vqaddq_s16(vacc2, voutput_zero_point);
vacc3 = vqaddq_s16(vacc3, voutput_zero_point);
const int8x8_t vy0 = vqmovn_s16(vacc0);
const int8x8_t vy1 = vqmovn_s16(vacc1);
const int8x8_t vy2 = vqmovn_s16(vacc2);
const int8x8_t vy3 = vqmovn_s16(vacc3);
vst1_s8(output, vy0); output += 8;
vst1_s8(output, vy1); output += 8;
vst1_s8(output, vy2); output += 8;
vst1_s8(output, vy3); output += 8;
}
for (; batch >= 8 * sizeof(int16_t); batch -= 8 * sizeof(int16_t)) {
const int16x8_t vx = vld1q_s16(input); input += 8;
int32x4_t vacc_lo = vshll_n_s16(vget_low_s16(vx), 15);
int32x4_t vacc_hi = vshll_n_s16(vget_high_s16(vx), 15);
vacc_lo = vqrdmulhq_s32(vacc_lo, vmultiplier);
vacc_hi = vqrdmulhq_s32(vacc_hi, vmultiplier);
int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
vacc = vqaddq_s16(vacc, voutput_zero_point);
const int8x8_t vy = vqmovn_s16(vacc);
vst1_s8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int16_t));
assert(batch <= 7 * sizeof(int16_t));
const int16x8_t vx = vld1q_s16(input);
int32x4_t vacc_lo = vshll_n_s16(vget_low_s16(vx), 15);
int32x4_t vacc_hi = vshll_n_s16(vget_high_s16(vx), 15);
vacc_lo = vqrdmulhq_s32(vacc_lo, vmultiplier);
vacc_hi = vqrdmulhq_s32(vacc_hi, vmultiplier);
int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
vacc = vqaddq_s16(vacc, voutput_zero_point);
int8x8_t vy = vqmovn_s16(vacc);
if (batch & (4 * sizeof(int16_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vy), 0); output += 4;
vy = vext_s8(vy, vy, 4);
}
if (batch & (2 * sizeof(int16_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vy), 0); output += 2;
vy = vext_s8(vy, vy, 2);
}
if (batch & (1 * sizeof(int16_t))) {
vst1_lane_s8((void*) output, vy, 0);
}
}
}
| 4,437 | 38.981982 | 91 | c |
XNNPACK | XNNPACK-master/src/qs16-qs8-vcvt/gen/qs16-qs8-vcvt-neon-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs16-qs8-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs16_qs8_vcvt_ukernel__neon_x8(
size_t batch,
const int16_t* input,
int8_t* output,
const union xnn_qs16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
for (; batch >= 8 * sizeof(int16_t); batch -= 8 * sizeof(int16_t)) {
const int16x8_t vx = vld1q_s16(input); input += 8;
int32x4_t vacc_lo = vshll_n_s16(vget_low_s16(vx), 15);
int32x4_t vacc_hi = vshll_n_s16(vget_high_s16(vx), 15);
vacc_lo = vqrdmulhq_s32(vacc_lo, vmultiplier);
vacc_hi = vqrdmulhq_s32(vacc_hi, vmultiplier);
int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
vacc = vqaddq_s16(vacc, voutput_zero_point);
const int8x8_t vy = vqmovn_s16(vacc);
vst1_s8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int16_t));
assert(batch <= 7 * sizeof(int16_t));
const int16x8_t vx = vld1q_s16(input);
int32x4_t vacc_lo = vshll_n_s16(vget_low_s16(vx), 15);
int32x4_t vacc_hi = vshll_n_s16(vget_high_s16(vx), 15);
vacc_lo = vqrdmulhq_s32(vacc_lo, vmultiplier);
vacc_hi = vqrdmulhq_s32(vacc_hi, vmultiplier);
int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
vacc = vqaddq_s16(vacc, voutput_zero_point);
int8x8_t vy = vqmovn_s16(vacc);
if (batch & (4 * sizeof(int16_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vy), 0); output += 4;
vy = vext_s8(vy, vy, 4);
}
if (batch & (2 * sizeof(int16_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vy), 0); output += 2;
vy = vext_s8(vy, vy, 2);
}
if (batch & (1 * sizeof(int16_t))) {
vst1_lane_s8((void*) output, vy, 0);
}
}
}
| 2,357 | 34.19403 | 91 | c |
XNNPACK | XNNPACK-master/src/qs16-qs8-vcvt/gen/qs16-qs8-vcvt-scalar-x1.c |
// Auto-generated file. Do not edit!
// Template: src/qs16-qs8-vcvt/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_qs16_qs8_vcvt_ukernel__scalar_x1(
size_t batch,
const int16_t* input,
int8_t* output,
const union xnn_qs16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vmultiplier = params->scalar.multiplier;
const int64_t vbias = (int64_t) params->scalar.bias;
do {
const int32_t vx = (int32_t) *input++;
int32_t vout = (int32_t) math_asr_s64(math_mulext_s32(vx, vmultiplier) + vbias, 16);
vout = math_max_s32(vout, -128);
vout = math_min_s32(vout, 127);
*output++ = (int8_t) vout;
batch -= sizeof(int16_t);
} while (batch != 0);
}
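
Aside (editor's sketch; the function name and the example multiplier are illustrative, not XNNPACK parameters): the scalar kernels reduce to a widening multiply, a bias add, an arithmetic shift by 16, and a clamp to the int8 range. A self-contained reference:

#include <stdint.h>
#include <stdio.h>

// Mirrors math_asr_s64(math_mulext_s32(x, multiplier) + bias, 16) with
// clamping to [-128, 127]; assumes arithmetic right shift of signed values.
static int8_t qs16_to_qs8_ref(int16_t x, int32_t multiplier, int64_t bias) {
  const int64_t acc = (int64_t) x * (int64_t) multiplier + bias;
  int32_t out = (int32_t) (acc >> 16);
  if (out < -128) out = -128;
  if (out > 127) out = 127;
  return (int8_t) out;
}

int main(void) {
  // Example: multiplier 256 scales by 256/65536 = 1/256; zero bias.
  printf("%d\n", qs16_to_qs8_ref(16384, 256, 0));  // 16384*256 >> 16 = 64
  return 0;
}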
| 1,068 | 25.073171 | 88 | c |
XNNPACK | XNNPACK-master/src/qs16-qs8-vcvt/gen/qs16-qs8-vcvt-scalar-x2.c |
// Auto-generated file. Do not edit!
// Template: src/qs16-qs8-vcvt/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_qs16_qs8_vcvt_ukernel__scalar_x2(
size_t batch,
const int16_t* input,
int8_t* output,
const union xnn_qs16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vmultiplier = params->scalar.multiplier;
const int64_t vbias = (int64_t) params->scalar.bias;
for (; batch >= 2 * sizeof(int16_t); batch -= 2 * sizeof(int16_t)) {
const int32_t vx0 = (int32_t) input[0];
const int32_t vx1 = (int32_t) input[1];
input += 2;
int32_t vout0 = (int32_t) math_asr_s64(math_mulext_s32(vx0, vmultiplier) + vbias, 16);
int32_t vout1 = (int32_t) math_asr_s64(math_mulext_s32(vx1, vmultiplier) + vbias, 16);
vout0 = math_max_s32(vout0, -128);
vout1 = math_max_s32(vout1, -128);
vout0 = math_min_s32(vout0, 127);
vout1 = math_min_s32(vout1, 127);
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int32_t vx = (int32_t) *input++;
int32_t vout = (int32_t) math_asr_s64(math_mulext_s32(vx, vmultiplier) + vbias, 16);
vout = math_max_s32(vout, -128);
vout = math_min_s32(vout, 127);
*output++ = (int8_t) vout;
batch -= sizeof(int16_t);
} while (batch != 0);
}
}
| 1,721 | 26.774194 | 90 | c |
XNNPACK | XNNPACK-master/src/qs16-qs8-vcvt/gen/qs16-qs8-vcvt-scalar-x4.c |
// Auto-generated file. Do not edit!
// Template: src/qs16-qs8-vcvt/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_qs16_qs8_vcvt_ukernel__scalar_x4(
size_t batch,
const int16_t* input,
int8_t* output,
const union xnn_qs16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vmultiplier = params->scalar.multiplier;
const int64_t vbias = (int64_t) params->scalar.bias;
for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) {
const int32_t vx0 = (int32_t) input[0];
const int32_t vx1 = (int32_t) input[1];
const int32_t vx2 = (int32_t) input[2];
const int32_t vx3 = (int32_t) input[3];
input += 4;
int32_t vout0 = (int32_t) math_asr_s64(math_mulext_s32(vx0, vmultiplier) + vbias, 16);
int32_t vout1 = (int32_t) math_asr_s64(math_mulext_s32(vx1, vmultiplier) + vbias, 16);
int32_t vout2 = (int32_t) math_asr_s64(math_mulext_s32(vx2, vmultiplier) + vbias, 16);
int32_t vout3 = (int32_t) math_asr_s64(math_mulext_s32(vx3, vmultiplier) + vbias, 16);
vout0 = math_max_s32(vout0, -128);
vout1 = math_max_s32(vout1, -128);
vout2 = math_max_s32(vout2, -128);
vout3 = math_max_s32(vout3, -128);
vout0 = math_min_s32(vout0, 127);
vout1 = math_min_s32(vout1, 127);
vout2 = math_min_s32(vout2, 127);
vout3 = math_min_s32(vout3, 127);
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int32_t vx = (int32_t) *input++;
int32_t vout = (int32_t) math_asr_s64(math_mulext_s32(vx, vmultiplier) + vbias, 16);
vout = math_max_s32(vout, -128);
vout = math_min_s32(vout, 127);
*output++ = (int8_t) vout;
batch -= sizeof(int16_t);
} while (batch != 0);
}
}
| 2,209 | 29.694444 | 90 | c |
XNNPACK | XNNPACK-master/src/qs16-qs8-vcvt/gen/qs16-qs8-vcvt-sse2-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs16-qs8-vcvt/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs16_qs8_vcvt_ukernel__sse2_x16(
size_t batch,
const int16_t* input,
int8_t* output,
const union xnn_qs16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_bias = _mm_load_si128((const __m128i*) params->sse2.input_bias);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
const __m128i vzero = _mm_setzero_si128();
for (; batch >= 16 * sizeof(int16_t); batch -= 16 * sizeof(int16_t)) {
__m128i vx0 = _mm_loadu_si128((const __m128i*) input); input += 8;
__m128i vx2 = _mm_loadu_si128((const __m128i*) input); input += 8;
// Add 0x8000 to convert signed inputs to unsigned.
vx0 = _mm_xor_si128(vx0, vinput_bias);
vx2 = _mm_xor_si128(vx2, vinput_bias);
// Move int16 to upper part of int32
__m128i vacce0 = _mm_unpacklo_epi16(vzero, vx0);
__m128i vacce1 = _mm_unpackhi_epi16(vzero, vx0);
__m128i vacce2 = _mm_unpacklo_epi16(vzero, vx2);
__m128i vacce3 = _mm_unpackhi_epi16(vzero, vx2);
__m128i vacco0 = _mm_shuffle_epi32(vacce0, _MM_SHUFFLE(3, 3, 1, 1));
__m128i vacco1 = _mm_shuffle_epi32(vacce1, _MM_SHUFFLE(3, 3, 1, 1));
__m128i vacco2 = _mm_shuffle_epi32(vacce2, _MM_SHUFFLE(3, 3, 1, 1));
__m128i vacco3 = _mm_shuffle_epi32(vacce3, _MM_SHUFFLE(3, 3, 1, 1));
vacce0 = _mm_mul_epu32(vacce0, vmultiplier);
vacco0 = _mm_mul_epu32(vacco0, vmultiplier);
vacce1 = _mm_mul_epu32(vacce1, vmultiplier);
vacco1 = _mm_mul_epu32(vacco1, vmultiplier);
vacce2 = _mm_mul_epu32(vacce2, vmultiplier);
vacco2 = _mm_mul_epu32(vacco2, vmultiplier);
vacce3 = _mm_mul_epu32(vacce3, vmultiplier);
vacco3 = _mm_mul_epu32(vacco3, vmultiplier);
vacce0 = _mm_add_epi64(vacce0, vbias);
vacco0 = _mm_add_epi64(vacco0, vbias);
vacce1 = _mm_add_epi64(vacce1, vbias);
vacco1 = _mm_add_epi64(vacco1, vbias);
vacce2 = _mm_add_epi64(vacce2, vbias);
vacco2 = _mm_add_epi64(vacco2, vbias);
vacce3 = _mm_add_epi64(vacce3, vbias);
vacco3 = _mm_add_epi64(vacco3, vbias);
__m128i vacc0 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacce0),
_mm_castsi128_ps(vacco0),
_MM_SHUFFLE(3, 1, 3, 1)));
__m128i vacc1 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacce1),
_mm_castsi128_ps(vacco1),
_MM_SHUFFLE(3, 1, 3, 1)));
__m128i vacc2 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacce2),
_mm_castsi128_ps(vacco2),
_MM_SHUFFLE(3, 1, 3, 1)));
__m128i vacc3 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacce3),
_mm_castsi128_ps(vacco3),
_MM_SHUFFLE(3, 1, 3, 1)));
// Shuffle order from 3,1,2,0 to 3,2,1,0
vacc0 = _mm_shuffle_epi32(vacc0, _MM_SHUFFLE(3, 1, 2, 0));
vacc1 = _mm_shuffle_epi32(vacc1, _MM_SHUFFLE(3, 1, 2, 0));
vacc2 = _mm_shuffle_epi32(vacc2, _MM_SHUFFLE(3, 1, 2, 0));
vacc3 = _mm_shuffle_epi32(vacc3, _MM_SHUFFLE(3, 1, 2, 0));
// Pack 8 ints into 8 shorts
vacc0 = _mm_packs_epi32(vacc0, vacc1);
vacc2 = _mm_packs_epi32(vacc2, vacc3);
// Pack 16 shorts into 16 bytes
const __m128i vy0 = _mm_packs_epi16(vacc0, vacc2);
_mm_storeu_si128((__m128i*) output, vy0); output += 16;
}
for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) {
__m128i vx = _mm_loadl_epi64((const __m128i*) input); input += 4;
vx = _mm_xor_si128(vx, vinput_bias);
__m128i vacce = _mm_unpacklo_epi16(vzero, vx);
__m128i vacco = _mm_shuffle_epi32(vacce, _MM_SHUFFLE(3, 3, 1, 1));
vacce = _mm_mul_epu32(vacce, vmultiplier);
vacco = _mm_mul_epu32(vacco, vmultiplier);
vacce = _mm_add_epi64(vacce, vbias);
vacco = _mm_add_epi64(vacco, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacce), _mm_castsi128_ps(vacco),
_MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_shuffle_epi32(vacc, _MM_SHUFFLE(3, 1, 2, 0));
vacc = _mm_packs_epi32(vacc, vacc);
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storeu_si32(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int16_t));
assert(batch <= 3 * sizeof(int16_t));
__m128i vx = _mm_loadl_epi64((const __m128i*) input);
vx = _mm_xor_si128(vx, vinput_bias);
__m128i vacce = _mm_unpacklo_epi16(vzero, vx);
__m128i vacco = _mm_shuffle_epi32(vacce, _MM_SHUFFLE(3, 3, 1, 1));
vacce = _mm_mul_epu32(vacce, vmultiplier);
vacco = _mm_mul_epu32(vacco, vmultiplier);
vacce = _mm_add_epi64(vacce, vbias);
vacco = _mm_add_epi64(vacco, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacce), _mm_castsi128_ps(vacco),
_MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_shuffle_epi32(vacc, _MM_SHUFFLE(3, 1, 2, 0));
vacc = _mm_packs_epi32(vacc, vacc);
__m128i vy = _mm_packs_epi16(vacc, vacc);
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(int16_t))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
vy_lo >>= 16;
output += 2;
}
if (batch & (1 * sizeof(int16_t))) {
*output = (int8_t) vy_lo;
}
}
}
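
Aside (editor's sketch; the bias algebra is an inference, not read from the params tables): SSE2 only offers an unsigned 32x32->64 multiply on even lanes (_mm_mul_epu32), so the kernel XORs inputs with 0x8000 to make them unsigned, multiplies even and odd lanes separately, and keeps the high dword of each 64-bit sum. One lane in scalar form, assuming the 64-bit bias constant folds in the -32768*multiplier correction:

#include <stdint.h>

static int32_t sse2_lane_model(int16_t x, uint32_t multiplier, uint64_t bias) {
  const uint32_t u = (uint16_t) (x ^ 0x8000);     // _mm_xor_si128 with input_bias
  const uint64_t lane = (uint64_t) u << 16;       // _mm_unpacklo_epi16(vzero, vx)
  const uint64_t acc = lane * multiplier + bias;  // _mm_mul_epu32 + _mm_add_epi64
  return (int32_t) (acc >> 32);                   // high dword via _mm_shuffle_ps
}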
| 6,239 | 41.739726 | 100 | c |
XNNPACK | XNNPACK-master/src/qs16-qs8-vcvt/gen/qs16-qs8-vcvt-sse2-x4.c |
// Auto-generated file. Do not edit!
// Template: src/qs16-qs8-vcvt/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs16_qs8_vcvt_ukernel__sse2_x4(
size_t batch,
const int16_t* input,
int8_t* output,
const union xnn_qs16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_bias = _mm_load_si128((const __m128i*) params->sse2.input_bias);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
const __m128i vzero = _mm_setzero_si128();
for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) {
__m128i vx = _mm_loadl_epi64((const __m128i*) input); input += 4;
vx = _mm_xor_si128(vx, vinput_bias);
__m128i vacce = _mm_unpacklo_epi16(vzero, vx);
__m128i vacco = _mm_shuffle_epi32(vacce, _MM_SHUFFLE(3, 3, 1, 1));
vacce = _mm_mul_epu32(vacce, vmultiplier);
vacco = _mm_mul_epu32(vacco, vmultiplier);
vacce = _mm_add_epi64(vacce, vbias);
vacco = _mm_add_epi64(vacco, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacce), _mm_castsi128_ps(vacco),
_MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_shuffle_epi32(vacc, _MM_SHUFFLE(3, 1, 2, 0));
vacc = _mm_packs_epi32(vacc, vacc);
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storeu_si32(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int16_t));
assert(batch <= 3 * sizeof(int16_t));
__m128i vx = _mm_loadl_epi64((const __m128i*) input);
vx = _mm_xor_si128(vx, vinput_bias);
__m128i vacce = _mm_unpacklo_epi16(vzero, vx);
__m128i vacco = _mm_shuffle_epi32(vacce, _MM_SHUFFLE(3, 3, 1, 1));
vacce = _mm_mul_epu32(vacce, vmultiplier);
vacco = _mm_mul_epu32(vacco, vmultiplier);
vacce = _mm_add_epi64(vacce, vbias);
vacco = _mm_add_epi64(vacco, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacce), _mm_castsi128_ps(vacco),
_MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_shuffle_epi32(vacc, _MM_SHUFFLE(3, 1, 2, 0));
vacc = _mm_packs_epi32(vacc, vacc);
__m128i vy = _mm_packs_epi16(vacc, vacc);
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(int16_t))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
vy_lo >>= 16;
output += 2;
}
if (batch & (1 * sizeof(int16_t))) {
*output = (int8_t) vy_lo;
}
}
}
| 3,015 | 36.234568 | 100 | c |
XNNPACK | XNNPACK-master/src/qs16-qs8-vcvt/gen/qs16-qs8-vcvt-sse2-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs16-qs8-vcvt/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs16_qs8_vcvt_ukernel__sse2_x8(
size_t batch,
const int16_t* input,
int8_t* output,
const union xnn_qs16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_bias = _mm_load_si128((const __m128i*) params->sse2.input_bias);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
const __m128i vzero = _mm_setzero_si128();
for (; batch >= 8 * sizeof(int16_t); batch -= 8 * sizeof(int16_t)) {
__m128i vx0 = _mm_loadu_si128((const __m128i*) input); input += 8;
// Add 0x8000 to convert signed inputs to unsigned.
vx0 = _mm_xor_si128(vx0, vinput_bias);
// Move int16 to upper part of int32
__m128i vacce0 = _mm_unpacklo_epi16(vzero, vx0);
__m128i vacce1 = _mm_unpackhi_epi16(vzero, vx0);
__m128i vacco0 = _mm_shuffle_epi32(vacce0, _MM_SHUFFLE(3, 3, 1, 1));
__m128i vacco1 = _mm_shuffle_epi32(vacce1, _MM_SHUFFLE(3, 3, 1, 1));
vacce0 = _mm_mul_epu32(vacce0, vmultiplier);
vacco0 = _mm_mul_epu32(vacco0, vmultiplier);
vacce1 = _mm_mul_epu32(vacce1, vmultiplier);
vacco1 = _mm_mul_epu32(vacco1, vmultiplier);
vacce0 = _mm_add_epi64(vacce0, vbias);
vacco0 = _mm_add_epi64(vacco0, vbias);
vacce1 = _mm_add_epi64(vacce1, vbias);
vacco1 = _mm_add_epi64(vacco1, vbias);
__m128i vacc0 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacce0),
_mm_castsi128_ps(vacco0),
_MM_SHUFFLE(3, 1, 3, 1)));
__m128i vacc1 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacce1),
_mm_castsi128_ps(vacco1),
_MM_SHUFFLE(3, 1, 3, 1)));
// Shuffle order from 3,1,2,0 to 3,2,1,0
vacc0 = _mm_shuffle_epi32(vacc0, _MM_SHUFFLE(3, 1, 2, 0));
vacc1 = _mm_shuffle_epi32(vacc1, _MM_SHUFFLE(3, 1, 2, 0));
// Pack 8 ints into 8 shorts
vacc0 = _mm_packs_epi32(vacc0, vacc1);
// Pack 8 shorts into 8 bytes
const __m128i vy0 = _mm_packs_epi16(vacc0, vacc0);
_mm_storel_epi64((__m128i*) output, vy0); output += 8;
}
for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) {
__m128i vx = _mm_loadl_epi64((const __m128i*) input); input += 4;
vx = _mm_xor_si128(vx, vinput_bias);
__m128i vacce = _mm_unpacklo_epi16(vzero, vx);
__m128i vacco = _mm_shuffle_epi32(vacce, _MM_SHUFFLE(3, 3, 1, 1));
vacce = _mm_mul_epu32(vacce, vmultiplier);
vacco = _mm_mul_epu32(vacco, vmultiplier);
vacce = _mm_add_epi64(vacce, vbias);
vacco = _mm_add_epi64(vacco, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacce), _mm_castsi128_ps(vacco),
_MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_shuffle_epi32(vacc, _MM_SHUFFLE(3, 1, 2, 0));
vacc = _mm_packs_epi32(vacc, vacc);
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storeu_si32(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int16_t));
assert(batch <= 3 * sizeof(int16_t));
__m128i vx = _mm_loadl_epi64((const __m128i*) input);
vx = _mm_xor_si128(vx, vinput_bias);
__m128i vacce = _mm_unpacklo_epi16(vzero, vx);
__m128i vacco = _mm_shuffle_epi32(vacce, _MM_SHUFFLE(3, 3, 1, 1));
vacce = _mm_mul_epu32(vacce, vmultiplier);
vacco = _mm_mul_epu32(vacco, vmultiplier);
vacce = _mm_add_epi64(vacce, vbias);
vacco = _mm_add_epi64(vacco, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacce), _mm_castsi128_ps(vacco),
_MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_shuffle_epi32(vacc, _MM_SHUFFLE(3, 1, 2, 0));
vacc = _mm_packs_epi32(vacc, vacc);
__m128i vy = _mm_packs_epi16(vacc, vacc);
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(int16_t))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
vy_lo >>= 16;
output += 2;
}
if (batch & (1 * sizeof(int16_t))) {
*output = (int8_t) vy_lo;
}
}
}
| 4,828 | 38.260163 | 100 | c |
XNNPACK | XNNPACK-master/src/qs16-qs8-vcvt/gen/qs16-qs8-vcvt-sse41-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs16-qs8-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs16_qs8_vcvt_ukernel__sse41_x16(
size_t batch,
const int16_t* input,
int8_t* output,
const union xnn_qs16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse4.multiplier);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4.bias);
const __m128i vshuffle01 = _mm_load_si128((const __m128i*) params->sse4.shuffle01);
const __m128i vshuffle23 = _mm_load_si128((const __m128i*) params->sse4.shuffle23);
const __m128i vshuffle45 = _mm_load_si128((const __m128i*) params->sse4.shuffle45);
const __m128i vshuffle67 = _mm_load_si128((const __m128i*) params->sse4.shuffle67);
for (; batch >= 16 * sizeof(int16_t); batch -= 16 * sizeof(int16_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) input); input += 8;
const __m128i vx2 = _mm_loadu_si128((const __m128i*) input); input += 8;
// Move int16 to upper part of int32
__m128i vacc0lo = _mm_shuffle_epi8(vx0, vshuffle01);
__m128i vacc0hi = _mm_shuffle_epi8(vx0, vshuffle23);
__m128i vacc1lo = _mm_shuffle_epi8(vx0, vshuffle45);
__m128i vacc1hi = _mm_shuffle_epi8(vx0, vshuffle67);
__m128i vacc2lo = _mm_shuffle_epi8(vx2, vshuffle01);
__m128i vacc2hi = _mm_shuffle_epi8(vx2, vshuffle23);
__m128i vacc3lo = _mm_shuffle_epi8(vx2, vshuffle45);
__m128i vacc3hi = _mm_shuffle_epi8(vx2, vshuffle67);
vacc0lo = _mm_mul_epi32(vacc0lo, vmultiplier);
vacc0hi = _mm_mul_epi32(vacc0hi, vmultiplier);
vacc1lo = _mm_mul_epi32(vacc1lo, vmultiplier);
vacc1hi = _mm_mul_epi32(vacc1hi, vmultiplier);
vacc2lo = _mm_mul_epi32(vacc2lo, vmultiplier);
vacc2hi = _mm_mul_epi32(vacc2hi, vmultiplier);
vacc3lo = _mm_mul_epi32(vacc3lo, vmultiplier);
vacc3hi = _mm_mul_epi32(vacc3hi, vmultiplier);
vacc0lo = _mm_add_epi64(vacc0lo, vbias);
vacc0hi = _mm_add_epi64(vacc0hi, vbias);
vacc1lo = _mm_add_epi64(vacc1lo, vbias);
vacc1hi = _mm_add_epi64(vacc1hi, vbias);
vacc2lo = _mm_add_epi64(vacc2lo, vbias);
vacc2hi = _mm_add_epi64(vacc2hi, vbias);
vacc3lo = _mm_add_epi64(vacc3lo, vbias);
vacc3hi = _mm_add_epi64(vacc3hi, vbias);
__m128i vacc0 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacc0lo), _mm_castsi128_ps(vacc0hi), _MM_SHUFFLE(3, 1, 3, 1)));
__m128i vacc1 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacc1lo), _mm_castsi128_ps(vacc1hi), _MM_SHUFFLE(3, 1, 3, 1)));
__m128i vacc2 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacc2lo), _mm_castsi128_ps(vacc2hi), _MM_SHUFFLE(3, 1, 3, 1)));
__m128i vacc3 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacc3lo), _mm_castsi128_ps(vacc3hi), _MM_SHUFFLE(3, 1, 3, 1)));
// Pack 8 ints into 8 shorts
vacc0 = _mm_packs_epi32(vacc0, vacc1);
vacc2 = _mm_packs_epi32(vacc2, vacc3);
// Pack 16 shorts into 16 bytes
const __m128i vy0 = _mm_packs_epi16(vacc0, vacc2);
_mm_storeu_si128((__m128i*) output, vy0); output += 16;
}
for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) input); input += 4;
__m128i vacclo = _mm_shuffle_epi8(vx, vshuffle01);
__m128i vacchi = _mm_shuffle_epi8(vx, vshuffle23);
vacclo = _mm_mul_epi32(vacclo, vmultiplier);
vacchi = _mm_mul_epi32(vacchi, vmultiplier);
vacclo = _mm_add_epi64(vacclo, vbias);
vacchi = _mm_add_epi64(vacchi, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacclo), _mm_castsi128_ps(vacchi), _MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_packs_epi32(vacc, vacc);
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storeu_si32(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int16_t));
assert(batch <= 3 * sizeof(int16_t));
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
__m128i vacclo = _mm_shuffle_epi8(vx, vshuffle01);
__m128i vacchi = _mm_shuffle_epi8(vx, vshuffle23);
vacclo = _mm_mul_epi32(vacclo, vmultiplier);
vacchi = _mm_mul_epi32(vacchi, vmultiplier);
vacclo = _mm_add_epi64(vacclo, vbias);
vacchi = _mm_add_epi64(vacchi, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacclo), _mm_castsi128_ps(vacchi), _MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_packs_epi32(vacc, vacc);
__m128i vy = _mm_packs_epi16(vacc, vacc);
if (batch & (2 * sizeof(int16_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int16_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
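
Aside (editor's sketch; the pshufb placement is inferred from the "Move int16 to upper part of int32" comment, not from the shuffle tables): SSE4.1 provides a signed 32x32->64 multiply (_mm_mul_epi32), so no unsigned input bias is needed; each int16 is placed in the upper half of a 32-bit lane and the high dword of the biased product is extracted. One lane in scalar form:

#include <stdint.h>

static int32_t sse41_lane_model(int16_t x, int32_t multiplier, int64_t bias) {
  // Placing the int16 in the upper half preserves its sign in bit 31.
  const int32_t lane = (int32_t) ((uint32_t) (uint16_t) x << 16);
  const int64_t acc = (int64_t) lane * multiplier + bias;  // _mm_mul_epi32 + add
  return (int32_t) (acc >> 32);  // odd dwords selected by _mm_shuffle_ps(3,1,3,1)
}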
| 5,204 | 41.317073 | 132 | c |
XNNPACK | XNNPACK-master/src/qs16-qs8-vcvt/gen/qs16-qs8-vcvt-sse41-x4.c |
// Auto-generated file. Do not edit!
// Template: src/qs16-qs8-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs16_qs8_vcvt_ukernel__sse41_x4(
size_t batch,
const int16_t* input,
int8_t* output,
const union xnn_qs16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse4.multiplier);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4.bias);
const __m128i vshuffle01 = _mm_load_si128((const __m128i*) params->sse4.shuffle01);
const __m128i vshuffle23 = _mm_load_si128((const __m128i*) params->sse4.shuffle23);
for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) input); input += 4;
__m128i vacclo = _mm_shuffle_epi8(vx, vshuffle01);
__m128i vacchi = _mm_shuffle_epi8(vx, vshuffle23);
vacclo = _mm_mul_epi32(vacclo, vmultiplier);
vacchi = _mm_mul_epi32(vacchi, vmultiplier);
vacclo = _mm_add_epi64(vacclo, vbias);
vacchi = _mm_add_epi64(vacchi, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacclo), _mm_castsi128_ps(vacchi), _MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_packs_epi32(vacc, vacc);
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storeu_si32(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int16_t));
assert(batch <= 3 * sizeof(int16_t));
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
__m128i vacclo = _mm_shuffle_epi8(vx, vshuffle01);
__m128i vacchi = _mm_shuffle_epi8(vx, vshuffle23);
vacclo = _mm_mul_epi32(vacclo, vmultiplier);
vacchi = _mm_mul_epi32(vacchi, vmultiplier);
vacclo = _mm_add_epi64(vacclo, vbias);
vacchi = _mm_add_epi64(vacchi, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacclo), _mm_castsi128_ps(vacchi), _MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_packs_epi32(vacc, vacc);
__m128i vy = _mm_packs_epi16(vacc, vacc);
if (batch & (2 * sizeof(int16_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int16_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 2,718 | 35.253333 | 129 | c |
XNNPACK | XNNPACK-master/src/qs16-qs8-vcvt/gen/qs16-qs8-vcvt-sse41-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs16-qs8-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs16_qs8_vcvt_ukernel__sse41_x8(
size_t batch,
const int16_t* input,
int8_t* output,
const union xnn_qs16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse4.multiplier);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4.bias);
const __m128i vshuffle01 = _mm_load_si128((const __m128i*) params->sse4.shuffle01);
const __m128i vshuffle23 = _mm_load_si128((const __m128i*) params->sse4.shuffle23);
const __m128i vshuffle45 = _mm_load_si128((const __m128i*) params->sse4.shuffle45);
const __m128i vshuffle67 = _mm_load_si128((const __m128i*) params->sse4.shuffle67);
for (; batch >= 8 * sizeof(int16_t); batch -= 8 * sizeof(int16_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) input); input += 8;
// Move int16 to upper part of int32
__m128i vacc0lo = _mm_shuffle_epi8(vx0, vshuffle01);
__m128i vacc0hi = _mm_shuffle_epi8(vx0, vshuffle23);
__m128i vacc1lo = _mm_shuffle_epi8(vx0, vshuffle45);
__m128i vacc1hi = _mm_shuffle_epi8(vx0, vshuffle67);
vacc0lo = _mm_mul_epi32(vacc0lo, vmultiplier);
vacc0hi = _mm_mul_epi32(vacc0hi, vmultiplier);
vacc1lo = _mm_mul_epi32(vacc1lo, vmultiplier);
vacc1hi = _mm_mul_epi32(vacc1hi, vmultiplier);
vacc0lo = _mm_add_epi64(vacc0lo, vbias);
vacc0hi = _mm_add_epi64(vacc0hi, vbias);
vacc1lo = _mm_add_epi64(vacc1lo, vbias);
vacc1hi = _mm_add_epi64(vacc1hi, vbias);
__m128i vacc0 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacc0lo), _mm_castsi128_ps(vacc0hi), _MM_SHUFFLE(3, 1, 3, 1)));
__m128i vacc1 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacc1lo), _mm_castsi128_ps(vacc1hi), _MM_SHUFFLE(3, 1, 3, 1)));
// Pack 8 ints into 8 shorts
vacc0 = _mm_packs_epi32(vacc0, vacc1);
// Pack 8 shorts into 8 bytes
const __m128i vy0 = _mm_packs_epi16(vacc0, vacc0);
_mm_storel_epi64((__m128i*) output, vy0); output += 8;
}
for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) input); input += 4;
__m128i vacclo = _mm_shuffle_epi8(vx, vshuffle01);
__m128i vacchi = _mm_shuffle_epi8(vx, vshuffle23);
vacclo = _mm_mul_epi32(vacclo, vmultiplier);
vacchi = _mm_mul_epi32(vacchi, vmultiplier);
vacclo = _mm_add_epi64(vacclo, vbias);
vacchi = _mm_add_epi64(vacchi, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacclo), _mm_castsi128_ps(vacchi), _MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_packs_epi32(vacc, vacc);
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storeu_si32(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int16_t));
assert(batch <= 3 * sizeof(int16_t));
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
__m128i vacclo = _mm_shuffle_epi8(vx, vshuffle01);
__m128i vacchi = _mm_shuffle_epi8(vx, vshuffle23);
vacclo = _mm_mul_epi32(vacclo, vmultiplier);
vacchi = _mm_mul_epi32(vacchi, vmultiplier);
vacclo = _mm_add_epi64(vacclo, vbias);
vacchi = _mm_add_epi64(vacchi, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacclo), _mm_castsi128_ps(vacchi), _MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_packs_epi32(vacc, vacc);
__m128i vy = _mm_packs_epi16(vacc, vacc);
if (batch & (2 * sizeof(int16_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int16_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 4,196 | 38.224299 | 132 | c |
XNNPACK | XNNPACK-master/src/qs16-qs8-vcvt/gen/qs16-qs8-vcvt-ssse3-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs16-qs8-vcvt/ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs16_qs8_vcvt_ukernel__ssse3_x16(
size_t batch,
const int16_t* input,
int8_t* output,
const union xnn_qs16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_bias = _mm_load_si128((const __m128i*) params->ssse3.input_bias);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->ssse3.multiplier);
const __m128i vbias = _mm_load_si128((const __m128i*) params->ssse3.bias);
const __m128i vshuffle01 = _mm_load_si128((const __m128i*) params->ssse3.shuffle01);
const __m128i vshuffle23 = _mm_load_si128((const __m128i*) params->ssse3.shuffle23);
const __m128i vshuffle45 = _mm_load_si128((const __m128i*) params->ssse3.shuffle45);
const __m128i vshuffle67 = _mm_load_si128((const __m128i*) params->ssse3.shuffle67);
for (; batch >= 16 * sizeof(int16_t); batch -= 16 * sizeof(int16_t)) {
__m128i vx0 = _mm_loadu_si128((const __m128i*) input); input += 8;
__m128i vx2 = _mm_loadu_si128((const __m128i*) input); input += 8;
// Add 0x8000 to convert signed inputs to unsigned.
vx0 = _mm_xor_si128(vx0, vinput_bias);
vx2 = _mm_xor_si128(vx2, vinput_bias);
// Move int16 to upper part of int32
__m128i vacc0lo = _mm_shuffle_epi8(vx0, vshuffle01);
__m128i vacc0hi = _mm_shuffle_epi8(vx0, vshuffle23);
__m128i vacc1lo = _mm_shuffle_epi8(vx0, vshuffle45);
__m128i vacc1hi = _mm_shuffle_epi8(vx0, vshuffle67);
__m128i vacc2lo = _mm_shuffle_epi8(vx2, vshuffle01);
__m128i vacc2hi = _mm_shuffle_epi8(vx2, vshuffle23);
__m128i vacc3lo = _mm_shuffle_epi8(vx2, vshuffle45);
__m128i vacc3hi = _mm_shuffle_epi8(vx2, vshuffle67);
vacc0lo = _mm_mul_epu32(vacc0lo, vmultiplier);
vacc0hi = _mm_mul_epu32(vacc0hi, vmultiplier);
vacc1lo = _mm_mul_epu32(vacc1lo, vmultiplier);
vacc1hi = _mm_mul_epu32(vacc1hi, vmultiplier);
vacc2lo = _mm_mul_epu32(vacc2lo, vmultiplier);
vacc2hi = _mm_mul_epu32(vacc2hi, vmultiplier);
vacc3lo = _mm_mul_epu32(vacc3lo, vmultiplier);
vacc3hi = _mm_mul_epu32(vacc3hi, vmultiplier);
vacc0lo = _mm_add_epi64(vacc0lo, vbias);
vacc0hi = _mm_add_epi64(vacc0hi, vbias);
vacc1lo = _mm_add_epi64(vacc1lo, vbias);
vacc1hi = _mm_add_epi64(vacc1hi, vbias);
vacc2lo = _mm_add_epi64(vacc2lo, vbias);
vacc2hi = _mm_add_epi64(vacc2hi, vbias);
vacc3lo = _mm_add_epi64(vacc3lo, vbias);
vacc3hi = _mm_add_epi64(vacc3hi, vbias);
__m128i vacc0 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacc0lo), _mm_castsi128_ps(vacc0hi), _MM_SHUFFLE(3, 1, 3, 1)));
__m128i vacc1 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacc1lo), _mm_castsi128_ps(vacc1hi), _MM_SHUFFLE(3, 1, 3, 1)));
__m128i vacc2 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacc2lo), _mm_castsi128_ps(vacc2hi), _MM_SHUFFLE(3, 1, 3, 1)));
__m128i vacc3 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacc3lo), _mm_castsi128_ps(vacc3hi), _MM_SHUFFLE(3, 1, 3, 1)));
// Pack 8 ints into 8 shorts
vacc0 = _mm_packs_epi32(vacc0, vacc1);
vacc2 = _mm_packs_epi32(vacc2, vacc3);
// Pack 16 shorts into 16 bytes
const __m128i vy0 = _mm_packs_epi16(vacc0, vacc2);
_mm_storeu_si128((__m128i*) output, vy0); output += 16;
}
for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) {
__m128i vx = _mm_loadu_si128((const __m128i*) input); input += 4;
vx = _mm_xor_si128(vx, vinput_bias);
__m128i vacclo = _mm_shuffle_epi8(vx, vshuffle01);
__m128i vacchi = _mm_shuffle_epi8(vx, vshuffle23);
vacclo = _mm_mul_epu32(vacclo, vmultiplier);
vacchi = _mm_mul_epu32(vacchi, vmultiplier);
vacclo = _mm_add_epi64(vacclo, vbias);
vacchi = _mm_add_epi64(vacchi, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacclo), _mm_castsi128_ps(vacchi), _MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_packs_epi32(vacc, vacc);
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storeu_si32(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int16_t));
assert(batch <= 3 * sizeof(int16_t));
__m128i vx = _mm_loadu_si128((const __m128i*) input);
vx = _mm_xor_si128(vx, vinput_bias);
__m128i vacclo = _mm_shuffle_epi8(vx, vshuffle01);
__m128i vacchi = _mm_shuffle_epi8(vx, vshuffle23);
vacclo = _mm_mul_epu32(vacclo, vmultiplier);
vacchi = _mm_mul_epu32(vacchi, vmultiplier);
vacclo = _mm_add_epi64(vacclo, vbias);
vacchi = _mm_add_epi64(vacchi, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacclo), _mm_castsi128_ps(vacchi), _MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_packs_epi32(vacc, vacc);
const __m128i vy = _mm_packs_epi16(vacc, vacc);
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(int16_t))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
vy_lo >>= 16;
output += 2;
}
if (batch & (1 * sizeof(int16_t))) {
*output = (int8_t) vy_lo;
}
}
}
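
Aside (editor's note; the bias cancellation is an inference): XOR with 0x8000 maps an int16 x to the unsigned value x + 32768, so the unsigned product picks up an extra 32768*multiplier term that the 64-bit bias constant can cancel. An exhaustive standalone check of the identity:

#include <assert.h>
#include <stdint.h>

int main(void) {
  // (x ^ 0x8000) viewed as uint16 equals x + 32768 for every int16 x, hence
  // (x ^ 0x8000)*m == x*m + 32768*m, and the bias can subtract 32768*m back out.
  for (int32_t x = INT16_MIN; x <= INT16_MAX; x++) {
    const uint16_t u = (uint16_t) ((int16_t) x ^ 0x8000);
    assert((int32_t) u == x + 32768);
  }
  return 0;
}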
| 5,548 | 41.037879 | 132 | c |
XNNPACK | XNNPACK-master/src/qs16-qs8-vcvt/gen/qs16-qs8-vcvt-ssse3-x4.c |
// Auto-generated file. Do not edit!
// Template: src/qs16-qs8-vcvt/ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs16_qs8_vcvt_ukernel__ssse3_x4(
size_t batch,
const int16_t* input,
int8_t* output,
const union xnn_qs16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_bias = _mm_load_si128((const __m128i*) params->ssse3.input_bias);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->ssse3.multiplier);
const __m128i vbias = _mm_load_si128((const __m128i*) params->ssse3.bias);
const __m128i vshuffle01 = _mm_load_si128((const __m128i*) params->ssse3.shuffle01);
const __m128i vshuffle23 = _mm_load_si128((const __m128i*) params->ssse3.shuffle23);
for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) {
__m128i vx = _mm_loadu_si128((const __m128i*) input); input += 4;
vx = _mm_xor_si128(vx, vinput_bias);
__m128i vacclo = _mm_shuffle_epi8(vx, vshuffle01);
__m128i vacchi = _mm_shuffle_epi8(vx, vshuffle23);
vacclo = _mm_mul_epu32(vacclo, vmultiplier);
vacchi = _mm_mul_epu32(vacchi, vmultiplier);
vacclo = _mm_add_epi64(vacclo, vbias);
vacchi = _mm_add_epi64(vacchi, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacclo), _mm_castsi128_ps(vacchi), _MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_packs_epi32(vacc, vacc);
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storeu_si32(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int16_t));
assert(batch <= 3 * sizeof(int16_t));
__m128i vx = _mm_loadu_si128((const __m128i*) input);
vx = _mm_xor_si128(vx, vinput_bias);
__m128i vacclo = _mm_shuffle_epi8(vx, vshuffle01);
__m128i vacchi = _mm_shuffle_epi8(vx, vshuffle23);
vacclo = _mm_mul_epu32(vacclo, vmultiplier);
vacchi = _mm_mul_epu32(vacchi, vmultiplier);
vacclo = _mm_add_epi64(vacclo, vbias);
vacchi = _mm_add_epi64(vacchi, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacclo), _mm_castsi128_ps(vacchi), _MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_packs_epi32(vacc, vacc);
const __m128i vy = _mm_packs_epi16(vacc, vacc);
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(int16_t))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
vy_lo >>= 16;
output += 2;
}
if (batch & (1 * sizeof(int16_t))) {
*output = (int8_t) vy_lo;
}
}
}
| 2,929 | 35.625 | 129 | c |
XNNPACK | XNNPACK-master/src/qs16-qs8-vcvt/gen/qs16-qs8-vcvt-ssse3-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs16-qs8-vcvt/ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs16_qs8_vcvt_ukernel__ssse3_x8(
size_t batch,
const int16_t* input,
int8_t* output,
const union xnn_qs16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_bias = _mm_load_si128((const __m128i*) params->ssse3.input_bias);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->ssse3.multiplier);
const __m128i vbias = _mm_load_si128((const __m128i*) params->ssse3.bias);
const __m128i vshuffle01 = _mm_load_si128((const __m128i*) params->ssse3.shuffle01);
const __m128i vshuffle23 = _mm_load_si128((const __m128i*) params->ssse3.shuffle23);
const __m128i vshuffle45 = _mm_load_si128((const __m128i*) params->ssse3.shuffle45);
const __m128i vshuffle67 = _mm_load_si128((const __m128i*) params->ssse3.shuffle67);
for (; batch >= 8 * sizeof(int16_t); batch -= 8 * sizeof(int16_t)) {
__m128i vx0 = _mm_loadu_si128((const __m128i*) input); input += 8;
// Add 0x8000 to convert signed inputs to unsigned.
vx0 = _mm_xor_si128(vx0, vinput_bias);
// Move int16 to upper part of int32
__m128i vacc0lo = _mm_shuffle_epi8(vx0, vshuffle01);
__m128i vacc0hi = _mm_shuffle_epi8(vx0, vshuffle23);
__m128i vacc1lo = _mm_shuffle_epi8(vx0, vshuffle45);
__m128i vacc1hi = _mm_shuffle_epi8(vx0, vshuffle67);
vacc0lo = _mm_mul_epu32(vacc0lo, vmultiplier);
vacc0hi = _mm_mul_epu32(vacc0hi, vmultiplier);
vacc1lo = _mm_mul_epu32(vacc1lo, vmultiplier);
vacc1hi = _mm_mul_epu32(vacc1hi, vmultiplier);
vacc0lo = _mm_add_epi64(vacc0lo, vbias);
vacc0hi = _mm_add_epi64(vacc0hi, vbias);
vacc1lo = _mm_add_epi64(vacc1lo, vbias);
vacc1hi = _mm_add_epi64(vacc1hi, vbias);
__m128i vacc0 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacc0lo), _mm_castsi128_ps(vacc0hi), _MM_SHUFFLE(3, 1, 3, 1)));
__m128i vacc1 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacc1lo), _mm_castsi128_ps(vacc1hi), _MM_SHUFFLE(3, 1, 3, 1)));
// Pack 8 ints into 8 shorts
vacc0 = _mm_packs_epi32(vacc0, vacc1);
// Pack 8 shorts into 8 bytes
const __m128i vy0 = _mm_packs_epi16(vacc0, vacc0);
_mm_storel_epi64((__m128i*) output, vy0); output += 8;
}
for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) {
__m128i vx = _mm_loadu_si128((const __m128i*) input); input += 4;
vx = _mm_xor_si128(vx, vinput_bias);
__m128i vacclo = _mm_shuffle_epi8(vx, vshuffle01);
__m128i vacchi = _mm_shuffle_epi8(vx, vshuffle23);
vacclo = _mm_mul_epu32(vacclo, vmultiplier);
vacchi = _mm_mul_epu32(vacchi, vmultiplier);
vacclo = _mm_add_epi64(vacclo, vbias);
vacchi = _mm_add_epi64(vacchi, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacclo), _mm_castsi128_ps(vacchi), _MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_packs_epi32(vacc, vacc);
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storeu_si32(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int16_t));
assert(batch <= 3 * sizeof(int16_t));
__m128i vx = _mm_loadu_si128((const __m128i*) input);
vx = _mm_xor_si128(vx, vinput_bias);
__m128i vacclo = _mm_shuffle_epi8(vx, vshuffle01);
__m128i vacchi = _mm_shuffle_epi8(vx, vshuffle23);
vacclo = _mm_mul_epu32(vacclo, vmultiplier);
vacchi = _mm_mul_epu32(vacchi, vmultiplier);
vacclo = _mm_add_epi64(vacclo, vbias);
vacchi = _mm_add_epi64(vacchi, vbias);
__m128i vacc = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(vacclo), _mm_castsi128_ps(vacchi), _MM_SHUFFLE(3, 1, 3, 1)));
vacc = _mm_packs_epi32(vacc, vacc);
const __m128i vy = _mm_packs_epi16(vacc, vacc);
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(int16_t))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
vy_lo >>= 16;
output += 2;
}
if (batch & (1 * sizeof(int16_t))) {
*output = (int8_t) vy_lo;
}
}
}
| 4,503 | 38.165217 | 132 | c |
XNNPACK | XNNPACK-master/src/qs16-qs8-vcvt/gen/qs16-qs8-vcvt-wasmsimd-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs16-qs8-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs16_qs8_vcvt_ukernel__wasmsimd_x16(
size_t batch,
const int16_t* input,
int8_t* output,
const union xnn_qs16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmultiplier = wasm_v128_load64_splat(params->wasmsimd.multiplier);
  const v128_t vbias = wasm_v128_load64_splat(&params->wasmsimd.bias);
for (; batch >= 16 * sizeof(int16_t); batch -= 16 * sizeof(int16_t)) {
const v128_t vx0 = wasm_i32x4_load16x4(input); input += 4;
const v128_t vx1 = wasm_i32x4_load16x4(input); input += 4;
const v128_t vx2 = wasm_i32x4_load16x4(input); input += 4;
const v128_t vx3 = wasm_i32x4_load16x4(input); input += 4;
v128_t vacc0lo = wasm_i64x2_extmul_low_i32x4(vx0, vmultiplier);
v128_t vacc0hi = wasm_i64x2_extmul_high_i32x4(vx0, vmultiplier);
v128_t vacc1lo = wasm_i64x2_extmul_low_i32x4(vx1, vmultiplier);
v128_t vacc1hi = wasm_i64x2_extmul_high_i32x4(vx1, vmultiplier);
v128_t vacc2lo = wasm_i64x2_extmul_low_i32x4(vx2, vmultiplier);
v128_t vacc2hi = wasm_i64x2_extmul_high_i32x4(vx2, vmultiplier);
v128_t vacc3lo = wasm_i64x2_extmul_low_i32x4(vx3, vmultiplier);
v128_t vacc3hi = wasm_i64x2_extmul_high_i32x4(vx3, vmultiplier);
vacc0lo = wasm_i64x2_add(vacc0lo, vbias);
vacc0hi = wasm_i64x2_add(vacc0hi, vbias);
vacc1lo = wasm_i64x2_add(vacc1lo, vbias);
vacc1hi = wasm_i64x2_add(vacc1hi, vbias);
vacc2lo = wasm_i64x2_add(vacc2lo, vbias);
vacc2hi = wasm_i64x2_add(vacc2hi, vbias);
vacc3lo = wasm_i64x2_add(vacc3lo, vbias);
vacc3hi = wasm_i64x2_add(vacc3hi, vbias);
vacc0lo = wasm_i64x2_shr(vacc0lo, 16);
vacc0hi = wasm_i64x2_shr(vacc0hi, 16);
vacc1lo = wasm_i64x2_shr(vacc1lo, 16);
vacc1hi = wasm_i64x2_shr(vacc1hi, 16);
vacc2lo = wasm_i64x2_shr(vacc2lo, 16);
vacc2hi = wasm_i64x2_shr(vacc2hi, 16);
vacc3lo = wasm_i64x2_shr(vacc3lo, 16);
vacc3hi = wasm_i64x2_shr(vacc3hi, 16);
v128_t vacc0 = wasm_v32x4_shuffle(vacc0lo, vacc0hi, 0, 2, 4, 6);
v128_t vacc1 = wasm_v32x4_shuffle(vacc1lo, vacc1hi, 0, 2, 4, 6);
v128_t vacc2 = wasm_v32x4_shuffle(vacc2lo, vacc2hi, 0, 2, 4, 6);
v128_t vacc3 = wasm_v32x4_shuffle(vacc3lo, vacc3hi, 0, 2, 4, 6);
vacc0 = wasm_i16x8_narrow_i32x4(vacc0, vacc0);
vacc1 = wasm_i16x8_narrow_i32x4(vacc1, vacc1);
vacc2 = wasm_i16x8_narrow_i32x4(vacc2, vacc2);
vacc3 = wasm_i16x8_narrow_i32x4(vacc3, vacc3);
const v128_t vy0 = wasm_i8x16_narrow_i16x8(vacc0, vacc0);
const v128_t vy1 = wasm_i8x16_narrow_i16x8(vacc1, vacc1);
const v128_t vy2 = wasm_i8x16_narrow_i16x8(vacc2, vacc2);
const v128_t vy3 = wasm_i8x16_narrow_i16x8(vacc3, vacc3);
wasm_v128_store32_lane(output, vy0, 0); output += 4;
wasm_v128_store32_lane(output, vy1, 0); output += 4;
wasm_v128_store32_lane(output, vy2, 0); output += 4;
wasm_v128_store32_lane(output, vy3, 0); output += 4;
}
for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) {
const v128_t vx = wasm_i32x4_load16x4(input); input += 4;
v128_t vacclo = wasm_i64x2_extmul_low_i32x4(vx, vmultiplier);
v128_t vacchi = wasm_i64x2_extmul_high_i32x4(vx, vmultiplier);
vacclo = wasm_i64x2_add(vacclo, vbias);
vacchi = wasm_i64x2_add(vacchi, vbias);
vacclo = wasm_i64x2_shr(vacclo, 16);
vacchi = wasm_i64x2_shr(vacchi, 16);
v128_t vacc = wasm_v32x4_shuffle(vacclo, vacchi, 0, 2, 4, 6);
vacc = wasm_i16x8_narrow_i32x4(vacc, vacc);
const v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store32_lane(output, vy, 0); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int16_t));
assert(batch <= 3 * sizeof(int16_t));
const v128_t vx = wasm_i32x4_load16x4(input);
v128_t vacclo = wasm_i64x2_extmul_low_i32x4(vx, vmultiplier);
v128_t vacchi = wasm_i64x2_extmul_high_i32x4(vx, vmultiplier);
vacclo = wasm_i64x2_add(vacclo, vbias);
vacchi = wasm_i64x2_add(vacchi, vbias);
vacclo = wasm_i64x2_shr(vacclo, 16);
vacchi = wasm_i64x2_shr(vacchi, 16);
v128_t vacc = wasm_v32x4_shuffle(vacclo, vacchi, 0, 2, 4, 6);
vacc = wasm_i16x8_narrow_i32x4(vacc, vacc);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
if (batch & (2 * sizeof(int16_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int16_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
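
Aside (editor's sketch; the name is illustrative): the WAsm SIMD path widens each int32 lane with i64x2_extmul, adds the 64-bit bias, and arithmetic-shifts by 16, matching the scalar kernels bit for bit; the clamp to int8 is then supplied by the saturating narrow instructions. One lane in scalar form:

#include <stdint.h>

static int32_t wasmsimd_lane_model(int32_t x, int32_t multiplier, int64_t bias) {
  const int64_t acc = (int64_t) x * multiplier + bias;  // i64x2_extmul + i64x2_add
  return (int32_t) (acc >> 16);                         // i64x2_shr(acc, 16)
}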
| 4,962 | 39.680328 | 91 | c |
XNNPACK | XNNPACK-master/src/qs16-qs8-vcvt/gen/qs16-qs8-vcvt-wasmsimd-x32.c |
// Auto-generated file. Do not edit!
// Template: src/qs16-qs8-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs16_qs8_vcvt_ukernel__wasmsimd_x32(
size_t batch,
const int16_t* input,
int8_t* output,
const union xnn_qs16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmultiplier = wasm_v128_load64_splat(params->wasmsimd.multiplier);
  const v128_t vbias = wasm_v128_load64_splat(&params->wasmsimd.bias);
for (; batch >= 32 * sizeof(int16_t); batch -= 32 * sizeof(int16_t)) {
const v128_t vx0 = wasm_i32x4_load16x4(input); input += 4;
const v128_t vx1 = wasm_i32x4_load16x4(input); input += 4;
const v128_t vx2 = wasm_i32x4_load16x4(input); input += 4;
const v128_t vx3 = wasm_i32x4_load16x4(input); input += 4;
const v128_t vx4 = wasm_i32x4_load16x4(input); input += 4;
const v128_t vx5 = wasm_i32x4_load16x4(input); input += 4;
const v128_t vx6 = wasm_i32x4_load16x4(input); input += 4;
const v128_t vx7 = wasm_i32x4_load16x4(input); input += 4;
v128_t vacc0lo = wasm_i64x2_extmul_low_i32x4(vx0, vmultiplier);
v128_t vacc0hi = wasm_i64x2_extmul_high_i32x4(vx0, vmultiplier);
v128_t vacc1lo = wasm_i64x2_extmul_low_i32x4(vx1, vmultiplier);
v128_t vacc1hi = wasm_i64x2_extmul_high_i32x4(vx1, vmultiplier);
v128_t vacc2lo = wasm_i64x2_extmul_low_i32x4(vx2, vmultiplier);
v128_t vacc2hi = wasm_i64x2_extmul_high_i32x4(vx2, vmultiplier);
v128_t vacc3lo = wasm_i64x2_extmul_low_i32x4(vx3, vmultiplier);
v128_t vacc3hi = wasm_i64x2_extmul_high_i32x4(vx3, vmultiplier);
v128_t vacc4lo = wasm_i64x2_extmul_low_i32x4(vx4, vmultiplier);
v128_t vacc4hi = wasm_i64x2_extmul_high_i32x4(vx4, vmultiplier);
v128_t vacc5lo = wasm_i64x2_extmul_low_i32x4(vx5, vmultiplier);
v128_t vacc5hi = wasm_i64x2_extmul_high_i32x4(vx5, vmultiplier);
v128_t vacc6lo = wasm_i64x2_extmul_low_i32x4(vx6, vmultiplier);
v128_t vacc6hi = wasm_i64x2_extmul_high_i32x4(vx6, vmultiplier);
v128_t vacc7lo = wasm_i64x2_extmul_low_i32x4(vx7, vmultiplier);
v128_t vacc7hi = wasm_i64x2_extmul_high_i32x4(vx7, vmultiplier);
vacc0lo = wasm_i64x2_add(vacc0lo, vbias);
vacc0hi = wasm_i64x2_add(vacc0hi, vbias);
vacc1lo = wasm_i64x2_add(vacc1lo, vbias);
vacc1hi = wasm_i64x2_add(vacc1hi, vbias);
vacc2lo = wasm_i64x2_add(vacc2lo, vbias);
vacc2hi = wasm_i64x2_add(vacc2hi, vbias);
vacc3lo = wasm_i64x2_add(vacc3lo, vbias);
vacc3hi = wasm_i64x2_add(vacc3hi, vbias);
vacc4lo = wasm_i64x2_add(vacc4lo, vbias);
vacc4hi = wasm_i64x2_add(vacc4hi, vbias);
vacc5lo = wasm_i64x2_add(vacc5lo, vbias);
vacc5hi = wasm_i64x2_add(vacc5hi, vbias);
vacc6lo = wasm_i64x2_add(vacc6lo, vbias);
vacc6hi = wasm_i64x2_add(vacc6hi, vbias);
vacc7lo = wasm_i64x2_add(vacc7lo, vbias);
vacc7hi = wasm_i64x2_add(vacc7hi, vbias);
vacc0lo = wasm_i64x2_shr(vacc0lo, 16);
vacc0hi = wasm_i64x2_shr(vacc0hi, 16);
vacc1lo = wasm_i64x2_shr(vacc1lo, 16);
vacc1hi = wasm_i64x2_shr(vacc1hi, 16);
vacc2lo = wasm_i64x2_shr(vacc2lo, 16);
vacc2hi = wasm_i64x2_shr(vacc2hi, 16);
vacc3lo = wasm_i64x2_shr(vacc3lo, 16);
vacc3hi = wasm_i64x2_shr(vacc3hi, 16);
vacc4lo = wasm_i64x2_shr(vacc4lo, 16);
vacc4hi = wasm_i64x2_shr(vacc4hi, 16);
vacc5lo = wasm_i64x2_shr(vacc5lo, 16);
vacc5hi = wasm_i64x2_shr(vacc5hi, 16);
vacc6lo = wasm_i64x2_shr(vacc6lo, 16);
vacc6hi = wasm_i64x2_shr(vacc6hi, 16);
vacc7lo = wasm_i64x2_shr(vacc7lo, 16);
vacc7hi = wasm_i64x2_shr(vacc7hi, 16);
v128_t vacc0 = wasm_v32x4_shuffle(vacc0lo, vacc0hi, 0, 2, 4, 6);
v128_t vacc1 = wasm_v32x4_shuffle(vacc1lo, vacc1hi, 0, 2, 4, 6);
v128_t vacc2 = wasm_v32x4_shuffle(vacc2lo, vacc2hi, 0, 2, 4, 6);
v128_t vacc3 = wasm_v32x4_shuffle(vacc3lo, vacc3hi, 0, 2, 4, 6);
v128_t vacc4 = wasm_v32x4_shuffle(vacc4lo, vacc4hi, 0, 2, 4, 6);
v128_t vacc5 = wasm_v32x4_shuffle(vacc5lo, vacc5hi, 0, 2, 4, 6);
v128_t vacc6 = wasm_v32x4_shuffle(vacc6lo, vacc6hi, 0, 2, 4, 6);
v128_t vacc7 = wasm_v32x4_shuffle(vacc7lo, vacc7hi, 0, 2, 4, 6);
vacc0 = wasm_i16x8_narrow_i32x4(vacc0, vacc0);
vacc1 = wasm_i16x8_narrow_i32x4(vacc1, vacc1);
vacc2 = wasm_i16x8_narrow_i32x4(vacc2, vacc2);
vacc3 = wasm_i16x8_narrow_i32x4(vacc3, vacc3);
vacc4 = wasm_i16x8_narrow_i32x4(vacc4, vacc4);
vacc5 = wasm_i16x8_narrow_i32x4(vacc5, vacc5);
vacc6 = wasm_i16x8_narrow_i32x4(vacc6, vacc6);
vacc7 = wasm_i16x8_narrow_i32x4(vacc7, vacc7);
const v128_t vy0 = wasm_i8x16_narrow_i16x8(vacc0, vacc0);
const v128_t vy1 = wasm_i8x16_narrow_i16x8(vacc1, vacc1);
const v128_t vy2 = wasm_i8x16_narrow_i16x8(vacc2, vacc2);
const v128_t vy3 = wasm_i8x16_narrow_i16x8(vacc3, vacc3);
const v128_t vy4 = wasm_i8x16_narrow_i16x8(vacc4, vacc4);
const v128_t vy5 = wasm_i8x16_narrow_i16x8(vacc5, vacc5);
const v128_t vy6 = wasm_i8x16_narrow_i16x8(vacc6, vacc6);
const v128_t vy7 = wasm_i8x16_narrow_i16x8(vacc7, vacc7);
wasm_v128_store32_lane(output, vy0, 0); output += 4;
wasm_v128_store32_lane(output, vy1, 0); output += 4;
wasm_v128_store32_lane(output, vy2, 0); output += 4;
wasm_v128_store32_lane(output, vy3, 0); output += 4;
wasm_v128_store32_lane(output, vy4, 0); output += 4;
wasm_v128_store32_lane(output, vy5, 0); output += 4;
wasm_v128_store32_lane(output, vy6, 0); output += 4;
wasm_v128_store32_lane(output, vy7, 0); output += 4;
}
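  // Tail: convert any remaining full group of 4 elements.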
for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) {
const v128_t vx = wasm_i32x4_load16x4(input); input += 4;
v128_t vacclo = wasm_i64x2_extmul_low_i32x4(vx, vmultiplier);
v128_t vacchi = wasm_i64x2_extmul_high_i32x4(vx, vmultiplier);
vacclo = wasm_i64x2_add(vacclo, vbias);
vacchi = wasm_i64x2_add(vacchi, vbias);
vacclo = wasm_i64x2_shr(vacclo, 16);
vacchi = wasm_i64x2_shr(vacchi, 16);
v128_t vacc = wasm_v32x4_shuffle(vacclo, vacchi, 0, 2, 4, 6);
vacc = wasm_i16x8_narrow_i32x4(vacc, vacc);
const v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store32_lane(output, vy, 0); output += 4;
}
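  // Final tail: 1-3 leftover elements; the full-width load relies on the kernel's
  // XNN_OOB_READS annotation, and lane stores keep the writes in bounds.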
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int16_t));
assert(batch <= 3 * sizeof(int16_t));
const v128_t vx = wasm_i32x4_load16x4(input);
v128_t vacclo = wasm_i64x2_extmul_low_i32x4(vx, vmultiplier);
v128_t vacchi = wasm_i64x2_extmul_high_i32x4(vx, vmultiplier);
vacclo = wasm_i64x2_add(vacclo, vbias);
vacchi = wasm_i64x2_add(vacchi, vbias);
vacclo = wasm_i64x2_shr(vacclo, 16);
vacchi = wasm_i64x2_shr(vacchi, 16);
v128_t vacc = wasm_v32x4_shuffle(vacclo, vacchi, 0, 2, 4, 6);
vacc = wasm_i16x8_narrow_i32x4(vacc, vacc);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
if (batch & (2 * sizeof(int16_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int16_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 7,434
| 43.789157
| 91
|
c
|
XNNPACK
|
XNNPACK-master/src/qs16-qs8-vcvt/gen/qs16-qs8-vcvt-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs16-qs8-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs16_qs8_vcvt_ukernel__wasmsimd_x8(
size_t batch,
const int16_t* input,
int8_t* output,
const union xnn_qs16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmultiplier = wasm_v128_load64_splat(params->wasmsimd.multiplier);
  const v128_t vbias = wasm_v128_load64_splat(&params->wasmsimd.bias);
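  // Same per-element math as the wider variants: acc = (int64_t) x * multiplier + bias, then an
  // arithmetic shift right by 16 and two saturating narrows down to int8. The main loop handles
  // 8 elements (two vectors of four) per iteration.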
for (; batch >= 8 * sizeof(int16_t); batch -= 8 * sizeof(int16_t)) {
const v128_t vx0 = wasm_i32x4_load16x4(input); input += 4;
const v128_t vx1 = wasm_i32x4_load16x4(input); input += 4;
v128_t vacc0lo = wasm_i64x2_extmul_low_i32x4(vx0, vmultiplier);
v128_t vacc0hi = wasm_i64x2_extmul_high_i32x4(vx0, vmultiplier);
v128_t vacc1lo = wasm_i64x2_extmul_low_i32x4(vx1, vmultiplier);
v128_t vacc1hi = wasm_i64x2_extmul_high_i32x4(vx1, vmultiplier);
vacc0lo = wasm_i64x2_add(vacc0lo, vbias);
vacc0hi = wasm_i64x2_add(vacc0hi, vbias);
vacc1lo = wasm_i64x2_add(vacc1lo, vbias);
vacc1hi = wasm_i64x2_add(vacc1hi, vbias);
vacc0lo = wasm_i64x2_shr(vacc0lo, 16);
vacc0hi = wasm_i64x2_shr(vacc0hi, 16);
vacc1lo = wasm_i64x2_shr(vacc1lo, 16);
vacc1hi = wasm_i64x2_shr(vacc1hi, 16);
v128_t vacc0 = wasm_v32x4_shuffle(vacc0lo, vacc0hi, 0, 2, 4, 6);
v128_t vacc1 = wasm_v32x4_shuffle(vacc1lo, vacc1hi, 0, 2, 4, 6);
vacc0 = wasm_i16x8_narrow_i32x4(vacc0, vacc0);
vacc1 = wasm_i16x8_narrow_i32x4(vacc1, vacc1);
const v128_t vy0 = wasm_i8x16_narrow_i16x8(vacc0, vacc0);
const v128_t vy1 = wasm_i8x16_narrow_i16x8(vacc1, vacc1);
wasm_v128_store32_lane(output, vy0, 0); output += 4;
wasm_v128_store32_lane(output, vy1, 0); output += 4;
}
for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) {
const v128_t vx = wasm_i32x4_load16x4(input); input += 4;
v128_t vacclo = wasm_i64x2_extmul_low_i32x4(vx, vmultiplier);
v128_t vacchi = wasm_i64x2_extmul_high_i32x4(vx, vmultiplier);
vacclo = wasm_i64x2_add(vacclo, vbias);
vacchi = wasm_i64x2_add(vacchi, vbias);
vacclo = wasm_i64x2_shr(vacclo, 16);
vacchi = wasm_i64x2_shr(vacchi, 16);
v128_t vacc = wasm_v32x4_shuffle(vacclo, vacchi, 0, 2, 4, 6);
vacc = wasm_i16x8_narrow_i32x4(vacc, vacc);
const v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store32_lane(output, vy, 0); output += 4;
}
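  // Final tail: 1-3 leftover elements, written out lane by lane.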
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int16_t));
assert(batch <= 3 * sizeof(int16_t));
const v128_t vx = wasm_i32x4_load16x4(input);
v128_t vacclo = wasm_i64x2_extmul_low_i32x4(vx, vmultiplier);
v128_t vacchi = wasm_i64x2_extmul_high_i32x4(vx, vmultiplier);
vacclo = wasm_i64x2_add(vacclo, vbias);
vacchi = wasm_i64x2_add(vacchi, vbias);
vacclo = wasm_i64x2_shr(vacclo, 16);
vacchi = wasm_i64x2_shr(vacchi, 16);
v128_t vacc = wasm_v32x4_shuffle(vacclo, vacchi, 0, 2, 4, 6);
vacc = wasm_i16x8_narrow_i32x4(vacc, vacc);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
if (batch & (2 * sizeof(int16_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int16_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 3,723
| 36.24
| 91
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-25p1c-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_25p1c__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
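  // Requantization uses the "fmagic" trick: the accumulator is scaled in float and clamped, then
  // vmagic_bias (a large constant) is added so the rounded result lands in the low mantissa bits;
  // reinterpreting the float bits as an integer and subtracting
  // vmagic_bias_less_output_zero_point yields the int8 value with the zero point applied.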
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
const int8_t* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
}
const int8_t* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
}
const int8_t* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
}
const int8_t* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
}
const int8_t* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
}
const int8_t* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
}
const int8_t* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
}
const int8_t* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
}
const int8_t* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
}
const int8_t* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
}
const int8_t* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
}
const int8_t* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
}
const int8_t* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
}
const int8_t* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
}
const int8_t* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
}
const int8_t* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
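    // Per-channel weight layout: one int32 bias followed by the 25 int8 kernel taps
    // (the pointer bump after the taps advances by sizeof(int32_t) + 25 bytes).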
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[7];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8++;
const int32_t vk8 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[8];
vacc += vi8 * vk8;
const int32_t vi9 = (int32_t) *i9++;
const int32_t vk9 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[9];
vacc += vi9 * vk9;
const int32_t vi10 = (int32_t) *i10++;
const int32_t vk10 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[10];
vacc += vi10 * vk10;
const int32_t vi11 = (int32_t) *i11++;
const int32_t vk11 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[11];
vacc += vi11 * vk11;
const int32_t vi12 = (int32_t) *i12++;
const int32_t vk12 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[12];
vacc += vi12 * vk12;
const int32_t vi13 = (int32_t) *i13++;
const int32_t vk13 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[13];
vacc += vi13 * vk13;
const int32_t vi14 = (int32_t) *i14++;
const int32_t vk14 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[14];
vacc += vi14 * vk14;
const int32_t vi15 = (int32_t) *i15++;
const int32_t vk15 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[15];
vacc += vi15 * vk15;
const int32_t vi16 = (int32_t) *i16++;
const int32_t vk16 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[16];
vacc += vi16 * vk16;
const int32_t vi17 = (int32_t) *i17++;
const int32_t vk17 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[17];
vacc += vi17 * vk17;
const int32_t vi18 = (int32_t) *i18++;
const int32_t vk18 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[18];
vacc += vi18 * vk18;
const int32_t vi19 = (int32_t) *i19++;
const int32_t vk19 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[19];
vacc += vi19 * vk19;
const int32_t vi20 = (int32_t) *i20++;
const int32_t vk20 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[20];
vacc += vi20 * vk20;
const int32_t vi21 = (int32_t) *i21++;
const int32_t vk21 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[21];
vacc += vi21 * vk21;
const int32_t vi22 = (int32_t) *i22++;
const int32_t vk22 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[22];
vacc += vi22 * vk22;
const int32_t vi23 = (int32_t) *i23++;
const int32_t vk23 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[23];
vacc += vi23 * vk23;
const int32_t vi24 = (int32_t) *i24++;
const int32_t vk24 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[24];
vacc += vi24 * vk24;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 25 * sizeof(int8_t));
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 9,951
| 37.130268
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-25p1c-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_25p1c__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vscale = params->fp32_scalar_imagic.scale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
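  // Requantization uses the "imagic" variant of the magic-bias trick: scale in float, add
  // vmagic_bias to place the rounded result in the low mantissa bits, reinterpret the float
  // bits as an integer, clamp to [vmagic_min, vmagic_max] in the integer domain, then subtract
  // vmagic_bias_less_zero_point.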
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
const int8_t* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
}
const int8_t* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
}
const int8_t* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
}
const int8_t* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
}
const int8_t* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
}
const int8_t* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
}
const int8_t* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
}
const int8_t* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
}
const int8_t* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
}
const int8_t* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
}
const int8_t* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
}
const int8_t* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
}
const int8_t* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
}
const int8_t* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
}
const int8_t* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
}
const int8_t* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[7];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8++;
const int32_t vk8 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[8];
vacc += vi8 * vk8;
const int32_t vi9 = (int32_t) *i9++;
const int32_t vk9 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[9];
vacc += vi9 * vk9;
const int32_t vi10 = (int32_t) *i10++;
const int32_t vk10 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[10];
vacc += vi10 * vk10;
const int32_t vi11 = (int32_t) *i11++;
const int32_t vk11 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[11];
vacc += vi11 * vk11;
const int32_t vi12 = (int32_t) *i12++;
const int32_t vk12 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[12];
vacc += vi12 * vk12;
const int32_t vi13 = (int32_t) *i13++;
const int32_t vk13 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[13];
vacc += vi13 * vk13;
const int32_t vi14 = (int32_t) *i14++;
const int32_t vk14 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[14];
vacc += vi14 * vk14;
const int32_t vi15 = (int32_t) *i15++;
const int32_t vk15 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[15];
vacc += vi15 * vk15;
const int32_t vi16 = (int32_t) *i16++;
const int32_t vk16 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[16];
vacc += vi16 * vk16;
const int32_t vi17 = (int32_t) *i17++;
const int32_t vk17 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[17];
vacc += vi17 * vk17;
const int32_t vi18 = (int32_t) *i18++;
const int32_t vk18 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[18];
vacc += vi18 * vk18;
const int32_t vi19 = (int32_t) *i19++;
const int32_t vk19 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[19];
vacc += vi19 * vk19;
const int32_t vi20 = (int32_t) *i20++;
const int32_t vk20 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[20];
vacc += vi20 * vk20;
const int32_t vi21 = (int32_t) *i21++;
const int32_t vk21 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[21];
vacc += vi21 * vk21;
const int32_t vi22 = (int32_t) *i22++;
const int32_t vk22 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[22];
vacc += vi22 * vk22;
const int32_t vi23 = (int32_t) *i23++;
const int32_t vk23 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[23];
vacc += vi23 * vk23;
const int32_t vi24 = (int32_t) *i24++;
const int32_t vk24 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[24];
vacc += vi24 * vk24;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 25 * sizeof(int8_t));
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 9,837
| 36.549618
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-25p1c-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_25p1c__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vscale = params->fp32_scalar_lrintf.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
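  // Requantization via lrintf(): scale in float, clamp to the output range, round to the
  // nearest integer with lrintf(), then add the output zero point.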
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
const int8_t* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
}
const int8_t* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
}
const int8_t* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
}
const int8_t* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
}
const int8_t* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
}
const int8_t* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
}
const int8_t* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
}
const int8_t* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
}
const int8_t* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
}
const int8_t* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
}
const int8_t* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
}
const int8_t* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
}
const int8_t* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
}
const int8_t* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
}
const int8_t* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
}
const int8_t* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[7];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8++;
const int32_t vk8 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[8];
vacc += vi8 * vk8;
const int32_t vi9 = (int32_t) *i9++;
const int32_t vk9 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[9];
vacc += vi9 * vk9;
const int32_t vi10 = (int32_t) *i10++;
const int32_t vk10 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[10];
vacc += vi10 * vk10;
const int32_t vi11 = (int32_t) *i11++;
const int32_t vk11 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[11];
vacc += vi11 * vk11;
const int32_t vi12 = (int32_t) *i12++;
const int32_t vk12 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[12];
vacc += vi12 * vk12;
const int32_t vi13 = (int32_t) *i13++;
const int32_t vk13 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[13];
vacc += vi13 * vk13;
const int32_t vi14 = (int32_t) *i14++;
const int32_t vk14 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[14];
vacc += vi14 * vk14;
const int32_t vi15 = (int32_t) *i15++;
const int32_t vk15 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[15];
vacc += vi15 * vk15;
const int32_t vi16 = (int32_t) *i16++;
const int32_t vk16 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[16];
vacc += vi16 * vk16;
const int32_t vi17 = (int32_t) *i17++;
const int32_t vk17 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[17];
vacc += vi17 * vk17;
const int32_t vi18 = (int32_t) *i18++;
const int32_t vk18 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[18];
vacc += vi18 * vk18;
const int32_t vi19 = (int32_t) *i19++;
const int32_t vk19 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[19];
vacc += vi19 * vk19;
const int32_t vi20 = (int32_t) *i20++;
const int32_t vk20 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[20];
vacc += vi20 * vk20;
const int32_t vi21 = (int32_t) *i21++;
const int32_t vk21 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[21];
vacc += vi21 * vk21;
const int32_t vi22 = (int32_t) *i22++;
const int32_t vk22 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[22];
vacc += vi22 * vk22;
const int32_t vi23 = (int32_t) *i23++;
const int32_t vk23 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[23];
vacc += vi23 * vk23;
const int32_t vi24 = (int32_t) *i24++;
const int32_t vk24 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[24];
vacc += vi24 * vk24;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 25 * sizeof(int8_t));
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 9,855
| 36.762452
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-25p1c-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_25p1c__wasm_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
const int8_t* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
}
const int8_t* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
}
const int8_t* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
}
const int8_t* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
}
const int8_t* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
}
const int8_t* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
}
const int8_t* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
}
const int8_t* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
}
const int8_t* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
}
const int8_t* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
}
const int8_t* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
}
const int8_t* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
}
const int8_t* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
}
const int8_t* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
}
const int8_t* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
}
const int8_t* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[7];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8++;
const int32_t vk8 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[8];
vacc += vi8 * vk8;
const int32_t vi9 = (int32_t) *i9++;
const int32_t vk9 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[9];
vacc += vi9 * vk9;
const int32_t vi10 = (int32_t) *i10++;
const int32_t vk10 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[10];
vacc += vi10 * vk10;
const int32_t vi11 = (int32_t) *i11++;
const int32_t vk11 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[11];
vacc += vi11 * vk11;
const int32_t vi12 = (int32_t) *i12++;
const int32_t vk12 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[12];
vacc += vi12 * vk12;
const int32_t vi13 = (int32_t) *i13++;
const int32_t vk13 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[13];
vacc += vi13 * vk13;
const int32_t vi14 = (int32_t) *i14++;
const int32_t vk14 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[14];
vacc += vi14 * vk14;
const int32_t vi15 = (int32_t) *i15++;
const int32_t vk15 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[15];
vacc += vi15 * vk15;
const int32_t vi16 = (int32_t) *i16++;
const int32_t vk16 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[16];
vacc += vi16 * vk16;
const int32_t vi17 = (int32_t) *i17++;
const int32_t vk17 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[17];
vacc += vi17 * vk17;
const int32_t vi18 = (int32_t) *i18++;
const int32_t vk18 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[18];
vacc += vi18 * vk18;
const int32_t vi19 = (int32_t) *i19++;
const int32_t vk19 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[19];
vacc += vi19 * vk19;
const int32_t vi20 = (int32_t) *i20++;
const int32_t vk20 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[20];
vacc += vi20 * vk20;
const int32_t vi21 = (int32_t) *i21++;
const int32_t vk21 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[21];
vacc += vi21 * vk21;
const int32_t vi22 = (int32_t) *i22++;
const int32_t vk22 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[22];
vacc += vi22 * vk22;
const int32_t vi23 = (int32_t) *i23++;
const int32_t vk23 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[23];
vacc += vi23 * vk23;
const int32_t vi24 = (int32_t) *i24++;
const int32_t vk24 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[24];
vacc += vi24 * vk24;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 25 * sizeof(int8_t));
float vfpacc = (float) vacc * vscale;
vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 9,969
| 37.199234
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-25p2c-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_25p2c__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
const int8_t* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
}
const int8_t* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
}
const int8_t* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
}
const int8_t* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
}
const int8_t* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
}
const int8_t* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
}
const int8_t* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
}
const int8_t* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
}
const int8_t* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
}
const int8_t* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
}
const int8_t* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
}
const int8_t* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
}
const int8_t* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
}
const int8_t* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
}
const int8_t* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
}
const int8_t* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
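    // Two channels per iteration: weights hold two int32 biases followed by 25 interleaved
    // pairs of int8 taps; any leftover channel is handled after this loop.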
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
const int32_t vi8x0 = (int32_t) i8[0];
const int32_t vi8x1 = (int32_t) i8[1];
i8 += 2;
const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[17];
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
const int32_t vi9x0 = (int32_t) i9[0];
const int32_t vi9x1 = (int32_t) i9[1];
i9 += 2;
const int32_t vk9x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[18];
const int32_t vk9x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[19];
vacc0 += vi9x0 * vk9x0;
vacc1 += vi9x1 * vk9x1;
const int32_t vi10x0 = (int32_t) i10[0];
const int32_t vi10x1 = (int32_t) i10[1];
i10 += 2;
const int32_t vk10x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20];
const int32_t vk10x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[21];
vacc0 += vi10x0 * vk10x0;
vacc1 += vi10x1 * vk10x1;
const int32_t vi11x0 = (int32_t) i11[0];
const int32_t vi11x1 = (int32_t) i11[1];
i11 += 2;
const int32_t vk11x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22];
const int32_t vk11x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[23];
vacc0 += vi11x0 * vk11x0;
vacc1 += vi11x1 * vk11x1;
const int32_t vi12x0 = (int32_t) i12[0];
const int32_t vi12x1 = (int32_t) i12[1];
i12 += 2;
const int32_t vk12x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24];
const int32_t vk12x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[25];
vacc0 += vi12x0 * vk12x0;
vacc1 += vi12x1 * vk12x1;
const int32_t vi13x0 = (int32_t) i13[0];
const int32_t vi13x1 = (int32_t) i13[1];
i13 += 2;
const int32_t vk13x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26];
const int32_t vk13x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[27];
vacc0 += vi13x0 * vk13x0;
vacc1 += vi13x1 * vk13x1;
const int32_t vi14x0 = (int32_t) i14[0];
const int32_t vi14x1 = (int32_t) i14[1];
i14 += 2;
const int32_t vk14x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28];
const int32_t vk14x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[29];
vacc0 += vi14x0 * vk14x0;
vacc1 += vi14x1 * vk14x1;
const int32_t vi15x0 = (int32_t) i15[0];
const int32_t vi15x1 = (int32_t) i15[1];
i15 += 2;
const int32_t vk15x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[30];
const int32_t vk15x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[31];
vacc0 += vi15x0 * vk15x0;
vacc1 += vi15x1 * vk15x1;
const int32_t vi16x0 = (int32_t) i16[0];
const int32_t vi16x1 = (int32_t) i16[1];
i16 += 2;
const int32_t vk16x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32];
const int32_t vk16x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[33];
vacc0 += vi16x0 * vk16x0;
vacc1 += vi16x1 * vk16x1;
const int32_t vi17x0 = (int32_t) i17[0];
const int32_t vi17x1 = (int32_t) i17[1];
i17 += 2;
const int32_t vk17x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34];
const int32_t vk17x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[35];
vacc0 += vi17x0 * vk17x0;
vacc1 += vi17x1 * vk17x1;
const int32_t vi18x0 = (int32_t) i18[0];
const int32_t vi18x1 = (int32_t) i18[1];
i18 += 2;
const int32_t vk18x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36];
const int32_t vk18x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[37];
vacc0 += vi18x0 * vk18x0;
vacc1 += vi18x1 * vk18x1;
const int32_t vi19x0 = (int32_t) i19[0];
const int32_t vi19x1 = (int32_t) i19[1];
i19 += 2;
const int32_t vk19x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38];
const int32_t vk19x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[39];
vacc0 += vi19x0 * vk19x0;
vacc1 += vi19x1 * vk19x1;
const int32_t vi20x0 = (int32_t) i20[0];
const int32_t vi20x1 = (int32_t) i20[1];
i20 += 2;
const int32_t vk20x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40];
const int32_t vk20x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[41];
vacc0 += vi20x0 * vk20x0;
vacc1 += vi20x1 * vk20x1;
const int32_t vi21x0 = (int32_t) i21[0];
const int32_t vi21x1 = (int32_t) i21[1];
i21 += 2;
const int32_t vk21x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[42];
const int32_t vk21x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[43];
vacc0 += vi21x0 * vk21x0;
vacc1 += vi21x1 * vk21x1;
const int32_t vi22x0 = (int32_t) i22[0];
const int32_t vi22x1 = (int32_t) i22[1];
i22 += 2;
const int32_t vk22x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44];
const int32_t vk22x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[45];
vacc0 += vi22x0 * vk22x0;
vacc1 += vi22x1 * vk22x1;
const int32_t vi23x0 = (int32_t) i23[0];
const int32_t vi23x1 = (int32_t) i23[1];
i23 += 2;
const int32_t vk23x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46];
const int32_t vk23x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[47];
vacc0 += vi23x0 * vk23x0;
vacc1 += vi23x1 * vk23x1;
const int32_t vi24x0 = (int32_t) i24[0];
const int32_t vi24x1 = (int32_t) i24[1];
i24 += 2;
const int32_t vk24x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48];
const int32_t vk24x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[49];
vacc0 += vi24x0 * vk24x0;
vacc1 += vi24x1 * vk24x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 50 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
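    // Remainder path: one channel left. Weights are stored interleaved per channel
    // pair, so the taps for this final channel sit at even int8 offsets 0, 2, ..., 48.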
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8;
const int32_t vk8 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
vacc += vi8 * vk8;
const int32_t vi9 = (int32_t) *i9;
const int32_t vk9 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[18];
vacc += vi9 * vk9;
const int32_t vi10 = (int32_t) *i10;
const int32_t vk10 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20];
vacc += vi10 * vk10;
const int32_t vi11 = (int32_t) *i11;
const int32_t vk11 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22];
vacc += vi11 * vk11;
const int32_t vi12 = (int32_t) *i12;
const int32_t vk12 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24];
vacc += vi12 * vk12;
const int32_t vi13 = (int32_t) *i13;
const int32_t vk13 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26];
vacc += vi13 * vk13;
const int32_t vi14 = (int32_t) *i14;
const int32_t vk14 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28];
vacc += vi14 * vk14;
const int32_t vi15 = (int32_t) *i15;
const int32_t vk15 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[30];
vacc += vi15 * vk15;
const int32_t vi16 = (int32_t) *i16;
const int32_t vk16 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32];
vacc += vi16 * vk16;
const int32_t vi17 = (int32_t) *i17;
const int32_t vk17 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34];
vacc += vi17 * vk17;
const int32_t vi18 = (int32_t) *i18;
const int32_t vk18 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36];
vacc += vi18 * vk18;
const int32_t vi19 = (int32_t) *i19;
const int32_t vk19 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38];
vacc += vi19 * vk19;
const int32_t vi20 = (int32_t) *i20;
const int32_t vk20 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40];
vacc += vi20 * vk20;
const int32_t vi21 = (int32_t) *i21;
const int32_t vk21 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[42];
vacc += vi21 * vk21;
const int32_t vi22 = (int32_t) *i22;
const int32_t vk22 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44];
vacc += vi22 * vk22;
const int32_t vi23 = (int32_t) *i23;
const int32_t vk23 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46];
vacc += vi23 * vk23;
const int32_t vi24 = (int32_t) *i24;
const int32_t vk24 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48];
vacc += vi24 * vk24;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 20,456 | 37.024164 | 114 | c |
XNNPACK | XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-25p2c-minmax-fp32-scalar-imagic.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_25p2c__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vscale = params->fp32_scalar_imagic.scale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
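  // "imagic" variant: rounding uses the same magic-bias trick as fmagic, but the
  // min/max clamping happens in the integer domain (vmagic_min/vmagic_max) after
  // the float bits are reinterpreted, rather than on the fp32 accumulator.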
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
const int8_t* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
}
const int8_t* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
}
const int8_t* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
}
const int8_t* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
}
const int8_t* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
}
const int8_t* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
}
const int8_t* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
}
const int8_t* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
}
const int8_t* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
}
const int8_t* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
}
const int8_t* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
}
const int8_t* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
}
const int8_t* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
}
const int8_t* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
}
const int8_t* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
}
const int8_t* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
const int32_t vi8x0 = (int32_t) i8[0];
const int32_t vi8x1 = (int32_t) i8[1];
i8 += 2;
const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[17];
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
const int32_t vi9x0 = (int32_t) i9[0];
const int32_t vi9x1 = (int32_t) i9[1];
i9 += 2;
const int32_t vk9x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[18];
const int32_t vk9x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[19];
vacc0 += vi9x0 * vk9x0;
vacc1 += vi9x1 * vk9x1;
const int32_t vi10x0 = (int32_t) i10[0];
const int32_t vi10x1 = (int32_t) i10[1];
i10 += 2;
const int32_t vk10x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20];
const int32_t vk10x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[21];
vacc0 += vi10x0 * vk10x0;
vacc1 += vi10x1 * vk10x1;
const int32_t vi11x0 = (int32_t) i11[0];
const int32_t vi11x1 = (int32_t) i11[1];
i11 += 2;
const int32_t vk11x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22];
const int32_t vk11x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[23];
vacc0 += vi11x0 * vk11x0;
vacc1 += vi11x1 * vk11x1;
const int32_t vi12x0 = (int32_t) i12[0];
const int32_t vi12x1 = (int32_t) i12[1];
i12 += 2;
const int32_t vk12x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24];
const int32_t vk12x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[25];
vacc0 += vi12x0 * vk12x0;
vacc1 += vi12x1 * vk12x1;
const int32_t vi13x0 = (int32_t) i13[0];
const int32_t vi13x1 = (int32_t) i13[1];
i13 += 2;
const int32_t vk13x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26];
const int32_t vk13x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[27];
vacc0 += vi13x0 * vk13x0;
vacc1 += vi13x1 * vk13x1;
const int32_t vi14x0 = (int32_t) i14[0];
const int32_t vi14x1 = (int32_t) i14[1];
i14 += 2;
const int32_t vk14x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28];
const int32_t vk14x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[29];
vacc0 += vi14x0 * vk14x0;
vacc1 += vi14x1 * vk14x1;
const int32_t vi15x0 = (int32_t) i15[0];
const int32_t vi15x1 = (int32_t) i15[1];
i15 += 2;
const int32_t vk15x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[30];
const int32_t vk15x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[31];
vacc0 += vi15x0 * vk15x0;
vacc1 += vi15x1 * vk15x1;
const int32_t vi16x0 = (int32_t) i16[0];
const int32_t vi16x1 = (int32_t) i16[1];
i16 += 2;
const int32_t vk16x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32];
const int32_t vk16x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[33];
vacc0 += vi16x0 * vk16x0;
vacc1 += vi16x1 * vk16x1;
const int32_t vi17x0 = (int32_t) i17[0];
const int32_t vi17x1 = (int32_t) i17[1];
i17 += 2;
const int32_t vk17x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34];
const int32_t vk17x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[35];
vacc0 += vi17x0 * vk17x0;
vacc1 += vi17x1 * vk17x1;
const int32_t vi18x0 = (int32_t) i18[0];
const int32_t vi18x1 = (int32_t) i18[1];
i18 += 2;
const int32_t vk18x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36];
const int32_t vk18x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[37];
vacc0 += vi18x0 * vk18x0;
vacc1 += vi18x1 * vk18x1;
const int32_t vi19x0 = (int32_t) i19[0];
const int32_t vi19x1 = (int32_t) i19[1];
i19 += 2;
const int32_t vk19x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38];
const int32_t vk19x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[39];
vacc0 += vi19x0 * vk19x0;
vacc1 += vi19x1 * vk19x1;
const int32_t vi20x0 = (int32_t) i20[0];
const int32_t vi20x1 = (int32_t) i20[1];
i20 += 2;
const int32_t vk20x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40];
const int32_t vk20x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[41];
vacc0 += vi20x0 * vk20x0;
vacc1 += vi20x1 * vk20x1;
const int32_t vi21x0 = (int32_t) i21[0];
const int32_t vi21x1 = (int32_t) i21[1];
i21 += 2;
const int32_t vk21x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[42];
const int32_t vk21x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[43];
vacc0 += vi21x0 * vk21x0;
vacc1 += vi21x1 * vk21x1;
const int32_t vi22x0 = (int32_t) i22[0];
const int32_t vi22x1 = (int32_t) i22[1];
i22 += 2;
const int32_t vk22x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44];
const int32_t vk22x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[45];
vacc0 += vi22x0 * vk22x0;
vacc1 += vi22x1 * vk22x1;
const int32_t vi23x0 = (int32_t) i23[0];
const int32_t vi23x1 = (int32_t) i23[1];
i23 += 2;
const int32_t vk23x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46];
const int32_t vk23x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[47];
vacc0 += vi23x0 * vk23x0;
vacc1 += vi23x1 * vk23x1;
const int32_t vi24x0 = (int32_t) i24[0];
const int32_t vi24x1 = (int32_t) i24[1];
i24 += 2;
const int32_t vk24x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48];
const int32_t vk24x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[49];
vacc0 += vi24x0 * vk24x0;
vacc1 += vi24x1 * vk24x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 50 * sizeof(int8_t));
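      // Scale in fp32 and add the magic bias; the resulting bit pattern, clamped
      // between vmagic_min and vmagic_max, yields the quantized output after
      // subtracting vmagic_bias_less_zero_point.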
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
vout0 = math_max_s32(vout0, vmagic_min);
vout1 = math_max_s32(vout1, vmagic_min);
vout0 = math_min_s32(vout0, vmagic_max);
vout1 = math_min_s32(vout1, vmagic_max);
vout0 -= vmagic_bias_less_zero_point;
vout1 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
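    // Final odd channel: read kernel taps at even offsets of the interleaved weights.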
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8;
const int32_t vk8 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
vacc += vi8 * vk8;
const int32_t vi9 = (int32_t) *i9;
const int32_t vk9 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[18];
vacc += vi9 * vk9;
const int32_t vi10 = (int32_t) *i10;
const int32_t vk10 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20];
vacc += vi10 * vk10;
const int32_t vi11 = (int32_t) *i11;
const int32_t vk11 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22];
vacc += vi11 * vk11;
const int32_t vi12 = (int32_t) *i12;
const int32_t vk12 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24];
vacc += vi12 * vk12;
const int32_t vi13 = (int32_t) *i13;
const int32_t vk13 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26];
vacc += vi13 * vk13;
const int32_t vi14 = (int32_t) *i14;
const int32_t vk14 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28];
vacc += vi14 * vk14;
const int32_t vi15 = (int32_t) *i15;
const int32_t vk15 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[30];
vacc += vi15 * vk15;
const int32_t vi16 = (int32_t) *i16;
const int32_t vk16 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32];
vacc += vi16 * vk16;
const int32_t vi17 = (int32_t) *i17;
const int32_t vk17 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34];
vacc += vi17 * vk17;
const int32_t vi18 = (int32_t) *i18;
const int32_t vk18 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36];
vacc += vi18 * vk18;
const int32_t vi19 = (int32_t) *i19;
const int32_t vk19 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38];
vacc += vi19 * vk19;
const int32_t vi20 = (int32_t) *i20;
const int32_t vk20 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40];
vacc += vi20 * vk20;
const int32_t vi21 = (int32_t) *i21;
const int32_t vk21 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[42];
vacc += vi21 * vk21;
const int32_t vi22 = (int32_t) *i22;
const int32_t vk22 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44];
vacc += vi22 * vk22;
const int32_t vi23 = (int32_t) *i23;
const int32_t vk23 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46];
vacc += vi23 * vk23;
const int32_t vi24 = (int32_t) *i24;
const int32_t vk24 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48];
vacc += vi24 * vk24;
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 20,273 | 36.405904 | 100 | c |
XNNPACK | XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-25p2c-minmax-fp32-scalar-lrintf.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_25p2c__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vscale = params->fp32_scalar_lrintf.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
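  // "lrintf" variant: instead of magic-bias bit tricks, the clamped fp32 value is
  // rounded with lrintf() (round-to-nearest in the default rounding mode) and the
  // output zero point is added as a plain integer.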
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
const int8_t* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
}
const int8_t* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
}
const int8_t* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
}
const int8_t* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
}
const int8_t* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
}
const int8_t* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
}
const int8_t* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
}
const int8_t* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
}
const int8_t* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
}
const int8_t* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
}
const int8_t* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
}
const int8_t* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
}
const int8_t* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
}
const int8_t* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
}
const int8_t* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
}
const int8_t* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
const int32_t vi8x0 = (int32_t) i8[0];
const int32_t vi8x1 = (int32_t) i8[1];
i8 += 2;
const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[17];
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
const int32_t vi9x0 = (int32_t) i9[0];
const int32_t vi9x1 = (int32_t) i9[1];
i9 += 2;
const int32_t vk9x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[18];
const int32_t vk9x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[19];
vacc0 += vi9x0 * vk9x0;
vacc1 += vi9x1 * vk9x1;
const int32_t vi10x0 = (int32_t) i10[0];
const int32_t vi10x1 = (int32_t) i10[1];
i10 += 2;
const int32_t vk10x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20];
const int32_t vk10x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[21];
vacc0 += vi10x0 * vk10x0;
vacc1 += vi10x1 * vk10x1;
const int32_t vi11x0 = (int32_t) i11[0];
const int32_t vi11x1 = (int32_t) i11[1];
i11 += 2;
const int32_t vk11x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22];
const int32_t vk11x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[23];
vacc0 += vi11x0 * vk11x0;
vacc1 += vi11x1 * vk11x1;
const int32_t vi12x0 = (int32_t) i12[0];
const int32_t vi12x1 = (int32_t) i12[1];
i12 += 2;
const int32_t vk12x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24];
const int32_t vk12x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[25];
vacc0 += vi12x0 * vk12x0;
vacc1 += vi12x1 * vk12x1;
const int32_t vi13x0 = (int32_t) i13[0];
const int32_t vi13x1 = (int32_t) i13[1];
i13 += 2;
const int32_t vk13x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26];
const int32_t vk13x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[27];
vacc0 += vi13x0 * vk13x0;
vacc1 += vi13x1 * vk13x1;
const int32_t vi14x0 = (int32_t) i14[0];
const int32_t vi14x1 = (int32_t) i14[1];
i14 += 2;
const int32_t vk14x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28];
const int32_t vk14x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[29];
vacc0 += vi14x0 * vk14x0;
vacc1 += vi14x1 * vk14x1;
const int32_t vi15x0 = (int32_t) i15[0];
const int32_t vi15x1 = (int32_t) i15[1];
i15 += 2;
const int32_t vk15x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[30];
const int32_t vk15x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[31];
vacc0 += vi15x0 * vk15x0;
vacc1 += vi15x1 * vk15x1;
const int32_t vi16x0 = (int32_t) i16[0];
const int32_t vi16x1 = (int32_t) i16[1];
i16 += 2;
const int32_t vk16x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32];
const int32_t vk16x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[33];
vacc0 += vi16x0 * vk16x0;
vacc1 += vi16x1 * vk16x1;
const int32_t vi17x0 = (int32_t) i17[0];
const int32_t vi17x1 = (int32_t) i17[1];
i17 += 2;
const int32_t vk17x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34];
const int32_t vk17x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[35];
vacc0 += vi17x0 * vk17x0;
vacc1 += vi17x1 * vk17x1;
const int32_t vi18x0 = (int32_t) i18[0];
const int32_t vi18x1 = (int32_t) i18[1];
i18 += 2;
const int32_t vk18x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36];
const int32_t vk18x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[37];
vacc0 += vi18x0 * vk18x0;
vacc1 += vi18x1 * vk18x1;
const int32_t vi19x0 = (int32_t) i19[0];
const int32_t vi19x1 = (int32_t) i19[1];
i19 += 2;
const int32_t vk19x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38];
const int32_t vk19x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[39];
vacc0 += vi19x0 * vk19x0;
vacc1 += vi19x1 * vk19x1;
const int32_t vi20x0 = (int32_t) i20[0];
const int32_t vi20x1 = (int32_t) i20[1];
i20 += 2;
const int32_t vk20x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40];
const int32_t vk20x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[41];
vacc0 += vi20x0 * vk20x0;
vacc1 += vi20x1 * vk20x1;
const int32_t vi21x0 = (int32_t) i21[0];
const int32_t vi21x1 = (int32_t) i21[1];
i21 += 2;
const int32_t vk21x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[42];
const int32_t vk21x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[43];
vacc0 += vi21x0 * vk21x0;
vacc1 += vi21x1 * vk21x1;
const int32_t vi22x0 = (int32_t) i22[0];
const int32_t vi22x1 = (int32_t) i22[1];
i22 += 2;
const int32_t vk22x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44];
const int32_t vk22x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[45];
vacc0 += vi22x0 * vk22x0;
vacc1 += vi22x1 * vk22x1;
const int32_t vi23x0 = (int32_t) i23[0];
const int32_t vi23x1 = (int32_t) i23[1];
i23 += 2;
const int32_t vk23x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46];
const int32_t vk23x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[47];
vacc0 += vi23x0 * vk23x0;
vacc1 += vi23x1 * vk23x1;
const int32_t vi24x0 = (int32_t) i24[0];
const int32_t vi24x1 = (int32_t) i24[1];
i24 += 2;
const int32_t vk24x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48];
const int32_t vk24x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[49];
vacc0 += vi24x0 * vk24x0;
vacc1 += vi24x1 * vk24x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 50 * sizeof(int8_t));
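      // Clamp in fp32, round to the nearest integer with lrintf, then shift by the
      // output zero point to produce the int8 result.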
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
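    // Remainder: a single trailing channel, using the even-offset taps of the
    // interleaved per-pair weight layout.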
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8;
const int32_t vk8 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
vacc += vi8 * vk8;
const int32_t vi9 = (int32_t) *i9;
const int32_t vk9 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[18];
vacc += vi9 * vk9;
const int32_t vi10 = (int32_t) *i10;
const int32_t vk10 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20];
vacc += vi10 * vk10;
const int32_t vi11 = (int32_t) *i11;
const int32_t vk11 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22];
vacc += vi11 * vk11;
const int32_t vi12 = (int32_t) *i12;
const int32_t vk12 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24];
vacc += vi12 * vk12;
const int32_t vi13 = (int32_t) *i13;
const int32_t vk13 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26];
vacc += vi13 * vk13;
const int32_t vi14 = (int32_t) *i14;
const int32_t vk14 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28];
vacc += vi14 * vk14;
const int32_t vi15 = (int32_t) *i15;
const int32_t vk15 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[30];
vacc += vi15 * vk15;
const int32_t vi16 = (int32_t) *i16;
const int32_t vk16 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32];
vacc += vi16 * vk16;
const int32_t vi17 = (int32_t) *i17;
const int32_t vk17 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34];
vacc += vi17 * vk17;
const int32_t vi18 = (int32_t) *i18;
const int32_t vk18 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36];
vacc += vi18 * vk18;
const int32_t vi19 = (int32_t) *i19;
const int32_t vk19 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38];
vacc += vi19 * vk19;
const int32_t vi20 = (int32_t) *i20;
const int32_t vk20 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40];
vacc += vi20 * vk20;
const int32_t vi21 = (int32_t) *i21;
const int32_t vk21 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[42];
vacc += vi21 * vk21;
const int32_t vi22 = (int32_t) *i22;
const int32_t vk22 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44];
vacc += vi22 * vk22;
const int32_t vi23 = (int32_t) *i23;
const int32_t vk23 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46];
vacc += vi23 * vk23;
const int32_t vi24 = (int32_t) *i24;
const int32_t vk24 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48];
vacc += vi24 * vk24;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 20,352 | 36.830855 | 99 | c |
XNNPACK | XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-25p2c-minmax-fp32-wasm-fmagic.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_25p2c__wasm_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
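  // WASM build of the fmagic kernel: identical math to the scalar version, but the
  // clamps use __builtin_wasm_max_f32/__builtin_wasm_min_f32, which lower directly
  // to the WebAssembly f32.max/f32.min instructions.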
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
const int8_t* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
}
const int8_t* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
}
const int8_t* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
}
const int8_t* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
}
const int8_t* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
}
const int8_t* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
}
const int8_t* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
}
const int8_t* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
}
const int8_t* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
}
const int8_t* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
}
const int8_t* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
}
const int8_t* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
}
const int8_t* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
}
const int8_t* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
}
const int8_t* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
}
const int8_t* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
const int32_t vi8x0 = (int32_t) i8[0];
const int32_t vi8x1 = (int32_t) i8[1];
i8 += 2;
const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[17];
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
const int32_t vi9x0 = (int32_t) i9[0];
const int32_t vi9x1 = (int32_t) i9[1];
i9 += 2;
const int32_t vk9x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[18];
const int32_t vk9x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[19];
vacc0 += vi9x0 * vk9x0;
vacc1 += vi9x1 * vk9x1;
const int32_t vi10x0 = (int32_t) i10[0];
const int32_t vi10x1 = (int32_t) i10[1];
i10 += 2;
const int32_t vk10x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20];
const int32_t vk10x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[21];
vacc0 += vi10x0 * vk10x0;
vacc1 += vi10x1 * vk10x1;
const int32_t vi11x0 = (int32_t) i11[0];
const int32_t vi11x1 = (int32_t) i11[1];
i11 += 2;
const int32_t vk11x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22];
const int32_t vk11x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[23];
vacc0 += vi11x0 * vk11x0;
vacc1 += vi11x1 * vk11x1;
const int32_t vi12x0 = (int32_t) i12[0];
const int32_t vi12x1 = (int32_t) i12[1];
i12 += 2;
const int32_t vk12x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24];
const int32_t vk12x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[25];
vacc0 += vi12x0 * vk12x0;
vacc1 += vi12x1 * vk12x1;
const int32_t vi13x0 = (int32_t) i13[0];
const int32_t vi13x1 = (int32_t) i13[1];
i13 += 2;
const int32_t vk13x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26];
const int32_t vk13x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[27];
vacc0 += vi13x0 * vk13x0;
vacc1 += vi13x1 * vk13x1;
const int32_t vi14x0 = (int32_t) i14[0];
const int32_t vi14x1 = (int32_t) i14[1];
i14 += 2;
const int32_t vk14x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28];
const int32_t vk14x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[29];
vacc0 += vi14x0 * vk14x0;
vacc1 += vi14x1 * vk14x1;
const int32_t vi15x0 = (int32_t) i15[0];
const int32_t vi15x1 = (int32_t) i15[1];
i15 += 2;
const int32_t vk15x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[30];
const int32_t vk15x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[31];
vacc0 += vi15x0 * vk15x0;
vacc1 += vi15x1 * vk15x1;
const int32_t vi16x0 = (int32_t) i16[0];
const int32_t vi16x1 = (int32_t) i16[1];
i16 += 2;
const int32_t vk16x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32];
const int32_t vk16x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[33];
vacc0 += vi16x0 * vk16x0;
vacc1 += vi16x1 * vk16x1;
const int32_t vi17x0 = (int32_t) i17[0];
const int32_t vi17x1 = (int32_t) i17[1];
i17 += 2;
const int32_t vk17x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34];
const int32_t vk17x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[35];
vacc0 += vi17x0 * vk17x0;
vacc1 += vi17x1 * vk17x1;
const int32_t vi18x0 = (int32_t) i18[0];
const int32_t vi18x1 = (int32_t) i18[1];
i18 += 2;
const int32_t vk18x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36];
const int32_t vk18x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[37];
vacc0 += vi18x0 * vk18x0;
vacc1 += vi18x1 * vk18x1;
const int32_t vi19x0 = (int32_t) i19[0];
const int32_t vi19x1 = (int32_t) i19[1];
i19 += 2;
const int32_t vk19x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38];
const int32_t vk19x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[39];
vacc0 += vi19x0 * vk19x0;
vacc1 += vi19x1 * vk19x1;
const int32_t vi20x0 = (int32_t) i20[0];
const int32_t vi20x1 = (int32_t) i20[1];
i20 += 2;
const int32_t vk20x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40];
const int32_t vk20x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[41];
vacc0 += vi20x0 * vk20x0;
vacc1 += vi20x1 * vk20x1;
const int32_t vi21x0 = (int32_t) i21[0];
const int32_t vi21x1 = (int32_t) i21[1];
i21 += 2;
const int32_t vk21x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[42];
const int32_t vk21x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[43];
vacc0 += vi21x0 * vk21x0;
vacc1 += vi21x1 * vk21x1;
const int32_t vi22x0 = (int32_t) i22[0];
const int32_t vi22x1 = (int32_t) i22[1];
i22 += 2;
const int32_t vk22x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44];
const int32_t vk22x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[45];
vacc0 += vi22x0 * vk22x0;
vacc1 += vi22x1 * vk22x1;
const int32_t vi23x0 = (int32_t) i23[0];
const int32_t vi23x1 = (int32_t) i23[1];
i23 += 2;
const int32_t vk23x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46];
const int32_t vk23x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[47];
vacc0 += vi23x0 * vk23x0;
vacc1 += vi23x1 * vk23x1;
const int32_t vi24x0 = (int32_t) i24[0];
const int32_t vi24x1 = (int32_t) i24[1];
i24 += 2;
const int32_t vk24x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48];
const int32_t vk24x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[49];
vacc0 += vi24x0 * vk24x0;
vacc1 += vi24x1 * vk24x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 50 * sizeof(int8_t));
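      // Same fmagic requantization: scale, clamp via the WASM f32 min/max builtins,
      // add the magic bias, and recover the integer from the float bit pattern.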
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
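    // Trailing channel: taps at even offsets 0, 2, ..., 48 of the interleaved weights.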
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8;
const int32_t vk8 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
vacc += vi8 * vk8;
const int32_t vi9 = (int32_t) *i9;
const int32_t vk9 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[18];
vacc += vi9 * vk9;
const int32_t vi10 = (int32_t) *i10;
const int32_t vk10 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20];
vacc += vi10 * vk10;
const int32_t vi11 = (int32_t) *i11;
const int32_t vk11 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22];
vacc += vi11 * vk11;
const int32_t vi12 = (int32_t) *i12;
const int32_t vk12 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24];
vacc += vi12 * vk12;
const int32_t vi13 = (int32_t) *i13;
const int32_t vk13 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26];
vacc += vi13 * vk13;
const int32_t vi14 = (int32_t) *i14;
const int32_t vk14 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28];
vacc += vi14 * vk14;
const int32_t vi15 = (int32_t) *i15;
const int32_t vk15 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[30];
vacc += vi15 * vk15;
const int32_t vi16 = (int32_t) *i16;
const int32_t vk16 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32];
vacc += vi16 * vk16;
const int32_t vi17 = (int32_t) *i17;
const int32_t vk17 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34];
vacc += vi17 * vk17;
const int32_t vi18 = (int32_t) *i18;
const int32_t vk18 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36];
vacc += vi18 * vk18;
const int32_t vi19 = (int32_t) *i19;
const int32_t vk19 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38];
vacc += vi19 * vk19;
const int32_t vi20 = (int32_t) *i20;
const int32_t vk20 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40];
vacc += vi20 * vk20;
const int32_t vi21 = (int32_t) *i21;
const int32_t vk21 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[42];
vacc += vi21 * vk21;
const int32_t vi22 = (int32_t) *i22;
const int32_t vk22 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44];
vacc += vi22 * vk22;
const int32_t vi23 = (int32_t) *i23;
const int32_t vk23 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46];
vacc += vi23 * vk23;
const int32_t vi24 = (int32_t) *i24;
const int32_t vk24 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48];
vacc += vi24 * vk24;
float vfpacc = (float) vacc * vscale;
vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 20,514
| 37.13197
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-25p8c-minmax-rndnu-neon-mla8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_qs8_dwconv_minmax_rndnu_ukernel_25p8c__neon_mla8_ld64(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
const int8_t* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
}
const int8_t* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
}
const int8_t* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
}
const int8_t* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
}
const int8_t* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
}
const int8_t* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
}
const int8_t* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
}
const int8_t* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
}
const int8_t* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
}
const int8_t* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
}
const int8_t* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
}
const int8_t* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
}
const int8_t* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
}
const int8_t* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
}
const int8_t* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
}
const int8_t* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
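    // Per 8-channel tile the packed weights hold 8 int32 biases followed by
    // the 25 kernel taps, 8 int8 values each.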
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi8x01234567 = vld1_s8(i8); i8 += 8;
const int8x8_t vk8x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);
const int8x8_t vi9x01234567 = vld1_s8(i9); i9 += 8;
const int8x8_t vk9x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi9x01234567, vk9x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi10x01234567 = vld1_s8(i10); i10 += 8;
const int8x8_t vk10x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi10x01234567, vk10x01234567);
const int8x8_t vi11x01234567 = vld1_s8(i11); i11 += 8;
const int8x8_t vk11x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi11x01234567, vk11x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi12x01234567 = vld1_s8(i12); i12 += 8;
const int8x8_t vk12x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi12x01234567, vk12x01234567);
const int8x8_t vi13x01234567 = vld1_s8(i13); i13 += 8;
const int8x8_t vk13x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi13x01234567, vk13x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi14x01234567 = vld1_s8(i14); i14 += 8;
const int8x8_t vk14x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi14x01234567, vk14x01234567);
const int8x8_t vi15x01234567 = vld1_s8(i15); i15 += 8;
const int8x8_t vk15x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi15x01234567, vk15x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi16x01234567 = vld1_s8(i16); i16 += 8;
const int8x8_t vk16x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi16x01234567, vk16x01234567);
const int8x8_t vi17x01234567 = vld1_s8(i17); i17 += 8;
const int8x8_t vk17x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi17x01234567, vk17x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi18x01234567 = vld1_s8(i18); i18 += 8;
const int8x8_t vk18x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi18x01234567, vk18x01234567);
const int8x8_t vi19x01234567 = vld1_s8(i19); i19 += 8;
const int8x8_t vk19x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi19x01234567, vk19x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi20x01234567 = vld1_s8(i20); i20 += 8;
const int8x8_t vk20x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi20x01234567, vk20x01234567);
const int8x8_t vi21x01234567 = vld1_s8(i21); i21 += 8;
const int8x8_t vk21x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi21x01234567, vk21x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi22x01234567 = vld1_s8(i22); i22 += 8;
const int8x8_t vk22x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi22x01234567, vk22x01234567);
const int8x8_t vi23x01234567 = vld1_s8(i23); i23 += 8;
const int8x8_t vk23x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi23x01234567, vk23x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi24x01234567 = vld1_s8(i24); i24 += 8;
const int8x8_t vk24x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi24x01234567, vk24x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
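      // Requantize with the rndnu scheme: saturating pre-shift, doubling
      // multiply-high by the fixed-point multiplier, then a rounding
      // post-shift.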
vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
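    // Remainder: up to 7 channels. The 8-byte vector loads may read past the
    // valid channels (the kernel is declared XNN_OOB_READS); the tail store
    // below is split into 4-, 2-, and 1-byte pieces.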
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0);
const int8x8_t vk0x01234567 = vld1_s8(w);
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1);
const int8x8_t vk1x01234567 = vld1_s8((const void*) ((const int8_t*) w + 8));
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2);
const int8x8_t vk2x01234567 = vld1_s8((const void*) ((const int8_t*) w + 16));
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3);
const int8x8_t vk3x01234567 = vld1_s8((const void*) ((const int8_t*) w + 24));
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4);
const int8x8_t vk4x01234567 = vld1_s8((const void*) ((const int8_t*) w + 32));
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5);
const int8x8_t vk5x01234567 = vld1_s8((const void*) ((const int8_t*) w + 40));
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6);
const int8x8_t vk6x01234567 = vld1_s8((const void*) ((const int8_t*) w + 48));
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
const int8x8_t vi7x01234567 = vld1_s8(i7);
const int8x8_t vk7x01234567 = vld1_s8((const void*) ((const int8_t*) w + 56));
vprod01234567 = vmlal_s8(vprod01234567, vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi8x01234567 = vld1_s8(i8);
const int8x8_t vk8x01234567 = vld1_s8((const void*) ((const int8_t*) w + 64));
vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);
const int8x8_t vi9x01234567 = vld1_s8(i9);
const int8x8_t vk9x01234567 = vld1_s8((const void*) ((const int8_t*) w + 72));
vprod01234567 = vmlal_s8(vprod01234567, vi9x01234567, vk9x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi10x01234567 = vld1_s8(i10);
const int8x8_t vk10x01234567 = vld1_s8((const void*) ((const int8_t*) w + 80));
vprod01234567 = vmull_s8(vi10x01234567, vk10x01234567);
const int8x8_t vi11x01234567 = vld1_s8(i11);
const int8x8_t vk11x01234567 = vld1_s8((const void*) ((const int8_t*) w + 88));
vprod01234567 = vmlal_s8(vprod01234567, vi11x01234567, vk11x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi12x01234567 = vld1_s8(i12);
const int8x8_t vk12x01234567 = vld1_s8((const void*) ((const int8_t*) w + 96));
vprod01234567 = vmull_s8(vi12x01234567, vk12x01234567);
const int8x8_t vi13x01234567 = vld1_s8(i13);
const int8x8_t vk13x01234567 = vld1_s8((const void*) ((const int8_t*) w + 104));
vprod01234567 = vmlal_s8(vprod01234567, vi13x01234567, vk13x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi14x01234567 = vld1_s8(i14);
const int8x8_t vk14x01234567 = vld1_s8((const void*) ((const int8_t*) w + 112));
vprod01234567 = vmull_s8(vi14x01234567, vk14x01234567);
const int8x8_t vi15x01234567 = vld1_s8(i15);
const int8x8_t vk15x01234567 = vld1_s8((const void*) ((const int8_t*) w + 120));
vprod01234567 = vmlal_s8(vprod01234567, vi15x01234567, vk15x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi16x01234567 = vld1_s8(i16);
const int8x8_t vk16x01234567 = vld1_s8((const void*) ((const int8_t*) w + 128));
vprod01234567 = vmull_s8(vi16x01234567, vk16x01234567);
const int8x8_t vi17x01234567 = vld1_s8(i17);
const int8x8_t vk17x01234567 = vld1_s8((const void*) ((const int8_t*) w + 136));
vprod01234567 = vmlal_s8(vprod01234567, vi17x01234567, vk17x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi18x01234567 = vld1_s8(i18);
const int8x8_t vk18x01234567 = vld1_s8((const void*) ((const int8_t*) w + 144));
vprod01234567 = vmull_s8(vi18x01234567, vk18x01234567);
const int8x8_t vi19x01234567 = vld1_s8(i19);
const int8x8_t vk19x01234567 = vld1_s8((const void*) ((const int8_t*) w + 152));
vprod01234567 = vmlal_s8(vprod01234567, vi19x01234567, vk19x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi20x01234567 = vld1_s8(i20);
const int8x8_t vk20x01234567 = vld1_s8((const void*) ((const int8_t*) w + 160));
vprod01234567 = vmull_s8(vi20x01234567, vk20x01234567);
const int8x8_t vi21x01234567 = vld1_s8(i21);
const int8x8_t vk21x01234567 = vld1_s8((const void*) ((const int8_t*) w + 168));
vprod01234567 = vmlal_s8(vprod01234567, vi21x01234567, vk21x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi22x01234567 = vld1_s8(i22);
const int8x8_t vk22x01234567 = vld1_s8((const void*) ((const int8_t*) w + 176));
vprod01234567 = vmull_s8(vi22x01234567, vk22x01234567);
const int8x8_t vi23x01234567 = vld1_s8(i23);
const int8x8_t vk23x01234567 = vld1_s8((const void*) ((const int8_t*) w + 184));
vprod01234567 = vmlal_s8(vprod01234567, vi23x01234567, vk23x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi24x01234567 = vld1_s8(i24);
const int8x8_t vk24x01234567 = vld1_s8((const void*) ((const int8_t*) w + 192));
vprod01234567 = vmull_s8(vi24x01234567, vk24x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
}
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 22,319
| 39.804388
| 94
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-5f5m5l16c8s8r-minmax-rndnu-neon-mla8-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qs8_dwconv_minmax_rndnu_ukernel_5f5m5l16c8s8r__neon_mla8_ld128(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
do {
const void* w = weights;
// First pass to process 5 inputs.
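    // The first pass accumulates bias plus the first 5 taps into the int32
    // `buffer`; each middle pass adds 5 more taps; the last pass adds the
    // remaining taps and requantizes.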
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
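      // Channels are rounded up to the 8-element subtile so `buffer` holds a
      // whole number of vectors; padded lanes are computed but never stored.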
for (; c >= 16; c -= 16) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vaccCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x16_t vi0x0123456789ABCDEF = vld1q_s8(i0); i0 += 16;
const int8x16_t vk0x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
int16x8_t vprod01234567 = vmull_s8(vget_low_s8(vi0x0123456789ABCDEF), vget_low_s8(vk0x0123456789ABCDEF));
int16x8_t vprod89ABCDEF = vmull_s8(vget_high_s8(vi0x0123456789ABCDEF), vget_high_s8(vk0x0123456789ABCDEF));
const int8x16_t vi1x0123456789ABCDEF = vld1q_s8(i1); i1 += 16;
const int8x16_t vk1x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi1x0123456789ABCDEF), vget_low_s8(vk1x0123456789ABCDEF));
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi1x0123456789ABCDEF), vget_high_s8(vk1x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x16_t vi2x0123456789ABCDEF = vld1q_s8(i2); i2 += 16;
const int8x16_t vk2x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmull_s8(vget_low_s8(vi2x0123456789ABCDEF), vget_low_s8(vk2x0123456789ABCDEF));
vprod89ABCDEF = vmull_s8(vget_high_s8(vi2x0123456789ABCDEF), vget_high_s8(vk2x0123456789ABCDEF));
const int8x16_t vi3x0123456789ABCDEF = vld1q_s8(i3); i3 += 16;
const int8x16_t vk3x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi3x0123456789ABCDEF), vget_low_s8(vk3x0123456789ABCDEF));
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi3x0123456789ABCDEF), vget_high_s8(vk3x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x16_t vi4x0123456789ABCDEF = vld1q_s8(i4); i4 += 16;
const int8x16_t vk4x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmull_s8(vget_low_s8(vi4x0123456789ABCDEF), vget_low_s8(vk4x0123456789ABCDEF));
vprod89ABCDEF = vmull_s8(vget_high_s8(vi4x0123456789ABCDEF), vget_high_s8(vk4x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c -= 8;
} while (c != 0);
}
}
// Middle pass to process 5 inputs in each iteration.
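    // Each middle pass reloads the running int32 sums from `buffer`, adds 5
    // more taps, and stores them back.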
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 16; c -= 16) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
int32x4_t vacc89AB = vld1q_s32(b + 8);
int32x4_t vaccCDEF = vld1q_s32(b + 12);
const int8x16_t vi0x0123456789ABCDEF = vld1q_s8(i0); i0 += 16;
const int8x16_t vk0x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
int16x8_t vprod01234567 = vmull_s8(vget_low_s8(vi0x0123456789ABCDEF), vget_low_s8(vk0x0123456789ABCDEF));
int16x8_t vprod89ABCDEF = vmull_s8(vget_high_s8(vi0x0123456789ABCDEF), vget_high_s8(vk0x0123456789ABCDEF));
const int8x16_t vi1x0123456789ABCDEF = vld1q_s8(i1); i1 += 16;
const int8x16_t vk1x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi1x0123456789ABCDEF), vget_low_s8(vk1x0123456789ABCDEF));
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi1x0123456789ABCDEF), vget_high_s8(vk1x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x16_t vi2x0123456789ABCDEF = vld1q_s8(i2); i2 += 16;
const int8x16_t vk2x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmull_s8(vget_low_s8(vi2x0123456789ABCDEF), vget_low_s8(vk2x0123456789ABCDEF));
vprod89ABCDEF = vmull_s8(vget_high_s8(vi2x0123456789ABCDEF), vget_high_s8(vk2x0123456789ABCDEF));
const int8x16_t vi3x0123456789ABCDEF = vld1q_s8(i3); i3 += 16;
const int8x16_t vk3x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi3x0123456789ABCDEF), vget_low_s8(vk3x0123456789ABCDEF));
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi3x0123456789ABCDEF), vget_high_s8(vk3x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x16_t vi4x0123456789ABCDEF = vld1q_s8(i4); i4 += 16;
const int8x16_t vk4x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmull_s8(vget_low_s8(vi4x0123456789ABCDEF), vget_low_s8(vk4x0123456789ABCDEF));
vprod89ABCDEF = vmull_s8(vget_high_s8(vi4x0123456789ABCDEF), vget_high_s8(vk4x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c -= 8;
} while (c != 0);
}
}
// Last pass to process up to 5 inputs.
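    // The last pass consumes the buffered sums, adds the final taps, and
    // requantizes to int8.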
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 16; c -= 16) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
int32x4_t vacc89AB = vld1q_s32(b); b += 4;
int32x4_t vaccCDEF = vld1q_s32(b); b += 4;
const int8x16_t vi0x0123456789ABCDEF = vld1q_s8(i0); i0 += 16;
const int8x16_t vk0x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
int16x8_t vprod01234567 = vmull_s8(vget_low_s8(vi0x0123456789ABCDEF), vget_low_s8(vk0x0123456789ABCDEF));
int16x8_t vprod89ABCDEF = vmull_s8(vget_high_s8(vi0x0123456789ABCDEF), vget_high_s8(vk0x0123456789ABCDEF));
const int8x16_t vi1x0123456789ABCDEF = vld1q_s8(i1); i1 += 16;
const int8x16_t vk1x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi1x0123456789ABCDEF), vget_low_s8(vk1x0123456789ABCDEF));
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi1x0123456789ABCDEF), vget_high_s8(vk1x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x16_t vi2x0123456789ABCDEF = vld1q_s8(i2); i2 += 16;
const int8x16_t vk2x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmull_s8(vget_low_s8(vi2x0123456789ABCDEF), vget_low_s8(vk2x0123456789ABCDEF));
vprod89ABCDEF = vmull_s8(vget_high_s8(vi2x0123456789ABCDEF), vget_high_s8(vk2x0123456789ABCDEF));
const int8x16_t vi3x0123456789ABCDEF = vld1q_s8(i3); i3 += 16;
const int8x16_t vk3x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi3x0123456789ABCDEF), vget_low_s8(vk3x0123456789ABCDEF));
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi3x0123456789ABCDEF), vget_high_s8(vk3x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x16_t vi4x0123456789ABCDEF = vld1q_s8(i4); i4 += 16;
const int8x16_t vk4x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmull_s8(vget_low_s8(vi4x0123456789ABCDEF), vget_low_s8(vk4x0123456789ABCDEF));
vprod89ABCDEF = vmull_s8(vget_high_s8(vi4x0123456789ABCDEF), vget_high_s8(vk4x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
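        // Same rndnu requantization as the unipass kernels: saturating
        // pre-shift, doubling multiply-high, rounding post-shift.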
vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
vacc89AB = vqshlq_s32(vacc89AB, vright_pre_shift);
vaccCDEF = vqshlq_s32(vaccCDEF, vright_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
vacc89AB = vrshlq_s32(vacc89AB, vright_post_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vright_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(c != 0) {
do {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(c >= 8) {
vst1_s8(output, vout01234567); output += 8;
c -= 8;
} else {
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
c = 0;
}
} while (c != 0);
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 22,781
| 41.823308
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-5f5m5l16c8s8r-minmax-rndnu-neon-mla8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qs8_dwconv_minmax_rndnu_ukernel_5f5m5l16c8s8r__neon_mla8_ld64(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 16; c -= 16) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vaccCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;
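      // The ld64 variant loads each 16-channel tile as two 8-byte halves
      // (vld1_s8) instead of one 16-byte vld1q_s8 load.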
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
int16x8_t vprod89ABCDEF = vmull_s8(vi0x89ABCDEF, vk0x89ABCDEF);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vi1x89ABCDEF, vk1x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vprod89ABCDEF = vmull_s8(vi2x89ABCDEF, vk2x89ABCDEF);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vi3x89ABCDEF, vk3x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vprod89ABCDEF = vmull_s8(vi4x89ABCDEF, vk4x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c -= 8;
} while (c != 0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 16; c -= 16) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
int32x4_t vacc89AB = vld1q_s32(b + 8);
int32x4_t vaccCDEF = vld1q_s32(b + 12);
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
int16x8_t vprod89ABCDEF = vmull_s8(vi0x89ABCDEF, vk0x89ABCDEF);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vi1x89ABCDEF, vk1x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vprod89ABCDEF = vmull_s8(vi2x89ABCDEF, vk2x89ABCDEF);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vi3x89ABCDEF, vk3x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vprod89ABCDEF = vmull_s8(vi4x89ABCDEF, vk4x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c -= 8;
} while (c != 0);
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 16; c -= 16) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
int32x4_t vacc89AB = vld1q_s32(b); b += 4;
int32x4_t vaccCDEF = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
int16x8_t vprod89ABCDEF = vmull_s8(vi0x89ABCDEF, vk0x89ABCDEF);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vi1x89ABCDEF, vk1x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vprod89ABCDEF = vmull_s8(vi2x89ABCDEF, vk2x89ABCDEF);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vi3x89ABCDEF, vk3x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vprod89ABCDEF = vmull_s8(vi4x89ABCDEF, vk4x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
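      // Requantize the int32 accumulators with the rndnu-style scheme: a
      // saturating pre-shift (vqshlq_s32), a saturating doubling multiply
      // returning the high half (vqdmulhq_s32) against the fixed-point
      // multiplier, and a rounding post-shift (vrshlq_s32) together apply the
      // requantization scale.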
vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
vacc89AB = vqshlq_s32(vacc89AB, vright_pre_shift);
vaccCDEF = vqshlq_s32(vaccCDEF, vright_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
vacc89AB = vrshlq_s32(vacc89AB, vright_post_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vright_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(c != 0) {
do {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
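        // Tail store: write all 8 lanes when at least 8 channels remain;
        // otherwise store 4-, 2-, then 1-byte pieces via lane stores, rotating
        // the vector with vext_s8 after each partial store.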
if XNN_LIKELY(c >= 8) {
vst1_s8(output, vout01234567); output += 8;
c -= 8;
} else {
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
c = 0;
}
} while (c != 0);
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 23,215 | 40.309609 | 105 | c | XNNPACK | XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-5f5m5l1c1s1r-minmax-fp32-scalar-fmagic.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_5f5m5l1c1s1r__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const void* w = weights;
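    // Multipass layout: the first pass reads an int32 bias plus 5 int8 weights
    // per channel and writes int32 accumulators to `buffer`; each middle pass
    // reads 5 more int8 weights per channel and accumulates into `buffer` in
    // place; the last pass adds its 5 taps, requantizes, and stores the int8
    // outputs.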
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(b++);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
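// A minimal standalone sketch of the fmagic rounding used in the last pass
// above, assuming IEEE-754 binary32 floats and the default round-to-nearest-
// even mode; `bits_of` and `fmagic_round` are illustrative helpers, not
// XNNPACK API.
#include <stdint.h>
#include <string.h>

// Reinterpret the bits of a float as uint32 (mirrors float_as_uint32).
static uint32_t bits_of(float f) {
  uint32_t u;
  memcpy(&u, &f, sizeof u);
  return u;
}

// Round x to the nearest int32, valid for |x| < 2^22: adding the magic bias
// 0x1.8p+23 forces x into a binade where the ulp is 1, so the float addition
// itself performs round-to-nearest-even, and the low mantissa bits of the
// sum encode round(x); subtracting the bias' bit pattern recovers it.
static int32_t fmagic_round(float x) {
  const float vmagic_bias = 12582912.0f;  // 0x1.8p+23, bit pattern 0x4B400000
  return (int32_t) bits_of(x + vmagic_bias) - (int32_t) UINT32_C(0x4B400000);
}
// In the kernel, vmagic_bias_less_output_zero_point folds the output zero
// point into the same subtraction, so vout = round(vfpacc) + zero_point.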
| 7,403 | 33.277778 | 114 | c | XNNPACK | XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-5f5m5l1c1s1r-minmax-fp32-scalar-imagic.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_5f5m5l1c1s1r__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float vscale = params->fp32_scalar_imagic.scale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(b++);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
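// Note on the imagic variant above: the clamp runs in the integer domain after
// the magic-bias add; vmagic_min and vmagic_max hold the bit patterns of the
// biased clamp bounds, which works because the bit patterns of positive
// IEEE-754 floats are monotonic in the float value. This trades the two float
// min/max operations of the fmagic variant for integer min/max.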
| 7,291 | 32.603687 | 100 | c | XNNPACK | XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-5f5m5l1c1s1r-minmax-fp32-scalar-lrintf.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_5f5m5l1c1s1r__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float vscale = params->fp32_scalar_lrintf.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(b++);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
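// Note on the lrintf variant above: rounding is delegated to lrintf(), which
// honors the current floating-point rounding mode (round-to-nearest-even by
// default); the clamp happens in float space before rounding, and the output
// zero point is added back as a plain integer afterwards.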
| 7,307 | 32.833333 | 98 | c | XNNPACK | XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-5f5m5l1c1s1r-minmax-fp32-wasm-fmagic.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_5f5m5l1c1s1r__wasm_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(b++);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
float vfpacc = (float) vacc * vscale;
vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
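// Note on the wasm variant above: it matches the scalar fmagic kernel except
// that the float clamps use __builtin_wasm_max_f32/__builtin_wasm_min_f32,
// which lower to the wasm f32.max/f32.min instructions.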
| 7,421 | 33.361111 | 114 | c | XNNPACK | XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-5f5m5l2c1s1r-minmax-fp32-scalar-fmagic.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_5f5m5l2c1s1r__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 10 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 10 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 10 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
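// Note on the 2c1s1r variant above: the channel loop is unrolled by two
// (vacc0/vacc1), with a scalar remainder block handling an odd trailing
// channel; per channel pair the first-pass weights are laid out as
// [2 x int32 bias][10 x int8 weights], and later passes read [10 x int8].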
| 13,905 | 32.670702 | 114 | c | XNNPACK | XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-5f5m5l2c1s1r-minmax-fp32-scalar-imagic.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_5f5m5l2c1s1r__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float vscale = params->fp32_scalar_imagic.scale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 10 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 10 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 10 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
vout0 = math_max_s32(vout0, vmagic_min);
vout1 = math_max_s32(vout1, vmagic_min);
vout0 = math_min_s32(vout0, vmagic_max);
vout1 = math_min_s32(vout1, vmagic_max);
vout0 -= vmagic_bias_less_zero_point;
vout1 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 13,728 | 31.923261 | 100 | c | XNNPACK | XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-5f5m5l2c1s1r-minmax-fp32-scalar-lrintf.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_5f5m5l2c1s1r__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float vscale = params->fp32_scalar_lrintf.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 10 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 10 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 10 * sizeof(int8_t));
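        // Requantization (fp32 path): scale the int32 accumulator as a
        // float, clamp against the output bounds expressed relative to the
        // zero point, round with lrintf (round-to-nearest-even under the
        // default rounding mode), then re-add the output zero point.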
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 13,801 | 32.418886 | 99 | c | XNNPACK | XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-5f5m5l2c1s1r-minmax-fp32-wasm-fmagic.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_5f5m5l2c1s1r__wasm_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 10 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 10 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 10 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point);
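        // fmagic rounding trick: after clamping, adding the magic bias
        // pushes the value's integer part into the low mantissa bits of the
        // float; reinterpreting the bits as an integer and subtracting
        // magic_bias_less_output_zero_point yields the rounded result with
        // the output zero point already applied, avoiding an explicit
        // float-to-int conversion instruction.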
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
float vfpacc = (float) vacc * vscale;
vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 13,963 | 32.811138 | 114 | c | XNNPACK | XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-5f5m5l4c1s1r-minmax-fp32-scalar-fmagic.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_5f5m5l4c1s1r__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
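      // Packed-weight layout (first pass): each group of 4 channels stores
      // 4 int32 biases followed by 5x4 int8 kernel taps, so `w` advances by
      // 4 * sizeof(int32_t) + 20 * sizeof(int8_t) per iteration. The middle
      // and last passes read only int8 taps because the biases were already
      // consumed into `buffer` here.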
for (; c >= 4; c -= 4) {
int32_t vacc0 = ((const int32_t*) w)[0];
int32_t vacc1 = ((const int32_t*) w)[1];
int32_t vacc2 = ((const int32_t*) w)[2];
int32_t vacc3 = ((const int32_t*) w)[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 20 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *((const int32_t*) w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 20 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
b += 4;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 20 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
float vfpacc2 = (float) vacc2;
float vfpacc3 = (float) vacc3;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc2 *= vscale;
vfpacc3 *= vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point;
int32_t vout3 = (int32_t) float_as_uint32(vfpacc3) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b++;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 19,947 | 36.425891 | 114 | c | XNNPACK | XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-5f5m5l4c1s1r-minmax-fp32-scalar-imagic.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_5f5m5l4c1s1r__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float vscale = params->fp32_scalar_imagic.scale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = ((const int32_t*) w)[0];
int32_t vacc1 = ((const int32_t*) w)[1];
int32_t vacc2 = ((const int32_t*) w)[2];
int32_t vacc3 = ((const int32_t*) w)[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 20 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *((const int32_t*) w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 20 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
b += 4;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 20 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
float vfpacc2 = (float) vacc2;
float vfpacc3 = (float) vacc3;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc2 *= vscale;
vfpacc3 *= vscale;
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
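        // imagic variant: the same magic-bias reinterpretation as fmagic,
        // but the min/max clamp is performed in the integer domain on the
        // reinterpreted bits (vmagic_min/vmagic_max) before subtracting the
        // combined bias-and-zero-point constant.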
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
int32_t vout2 = (int32_t) float_as_uint32(vfpacc2);
int32_t vout3 = (int32_t) float_as_uint32(vfpacc3);
vout0 = math_max_s32(vout0, vmagic_min);
vout1 = math_max_s32(vout1, vmagic_min);
vout2 = math_max_s32(vout2, vmagic_min);
vout3 = math_max_s32(vout3, vmagic_min);
vout0 = math_min_s32(vout0, vmagic_max);
vout1 = math_min_s32(vout1, vmagic_max);
vout2 = math_min_s32(vout2, vmagic_max);
vout3 = math_min_s32(vout3, vmagic_max);
vout0 -= vmagic_bias_less_zero_point;
vout1 -= vmagic_bias_less_zero_point;
vout2 -= vmagic_bias_less_zero_point;
vout3 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b++;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 19,706 | 35.562152 | 100 | c |