| repo (string, 1-152 chars, nullable) | file (string, 14-221 chars) | code (string, 501-25k chars) | file_length (int64, 501-25k) | avg_line_length (float64, 20-99.5) | max_line_length (int64, 21-134) | extension_type (string, 2 classes) |
|---|---|---|---|---|---|---|
| null | pytorch-main/aten/src/ATen/native/cpu/SoftmaxKernel.h |
#pragma once
#include <ATen/native/DispatchStub.h>
#include <cstdint>
namespace at {
class Tensor;
namespace native {
using forward_fn = void (*)(const Tensor&, const Tensor&);
using backward_fn = void(*)(const Tensor &, const Tensor &, const Tensor&);
DECLARE_DISPATCH(forward_fn, softmax_lastdim_kernel);
DECLARE_DISPATCH(forward_fn, log_softmax_lastdim_kernel);
DECLARE_DISPATCH(backward_fn, softmax_backward_lastdim_kernel);
DECLARE_DISPATCH(backward_fn, log_softmax_backward_lastdim_kernel);
using forward_fn_with_dim = void(*)(const Tensor &, const Tensor &, const int64_t);
using backward_fn_with_dim =
void (*)(const Tensor&, const Tensor&, const Tensor&, const int64_t);
DECLARE_DISPATCH(forward_fn_with_dim, softmax_kernel);
DECLARE_DISPATCH(forward_fn_with_dim, log_softmax_kernel);
DECLARE_DISPATCH(backward_fn_with_dim, softmax_backward_kernel);
DECLARE_DISPATCH(backward_fn_with_dim, log_softmax_backward_kernel);
}
}
| 943 | 31.551724 | 83 | h |
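The header above only declares dispatch stubs; the actual kernels are registered per CPU capability elsewhere. Below is a minimal sketch, not taken from the PyTorch sources, of how a stub declared with DECLARE_DISPATCH is typically defined, registered, and called; the implementation name `softmax_lastdim_kernel_impl` and the file names are hypothetical placeholders.

```cpp
// Sketch only. Assumes the usual DispatchStub workflow; the impl function and
// source-file names are placeholders, not the actual PyTorch files.

// --- SoftMax.cpp (generic code, compiled once) ---
DEFINE_DISPATCH(softmax_lastdim_kernel);   // creates the stub object declared above

// --- SoftMaxKernel.cpp (compiled once per CPU_CAPABILITY, e.g. AVX2/AVX512) ---
static void softmax_lastdim_kernel_impl(const Tensor& out, const Tensor& in) {
  // vectorized softmax over the last dimension would go here
}
REGISTER_DISPATCH(softmax_lastdim_kernel, &softmax_lastdim_kernel_impl);

// --- call site ---
// The stub dispatches on device type and, on CPU, on the best registered ISA:
//   softmax_lastdim_kernel(kCPU, output, input);
```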
| null | pytorch-main/aten/src/ATen/native/cpu/WeightNormKernel.h |
#pragma once
#include <ATen/native/DispatchStub.h>
#include <cstdint>
namespace at {
class TensorBase;
}
namespace at { namespace native {
using weight_norm_fn = void(*)(
TensorBase&, TensorBase&, const TensorBase&, const TensorBase&, int64_t);
using weight_norm_backward_fn = void(*)(
TensorBase&, TensorBase&, const TensorBase&, const TensorBase&,
const TensorBase&, const TensorBase&, int64_t);
DECLARE_DISPATCH(weight_norm_fn, weight_norm_stub);
DECLARE_DISPATCH(weight_norm_backward_fn, weight_norm_backward_stub);
}} // namespace at::native
| 565 | 25.952381 | 77 | h |
| null | pytorch-main/aten/src/ATen/native/cpu/avx_mathfun.h |
#pragma once
/*
AVX implementation of sin, cos, sincos, exp and log
Based on "sse_mathfun.h", by Julien Pommier
http://gruntthepeon.free.fr/ssemath/
Copyright (C) 2012 Giovanni Garberoglio
Interdisciplinary Laboratory for Computational Science (LISC)
Fondazione Bruno Kessler and University of Trento
via Sommarive, 18
I-38123 Trento (Italy)
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
(this is the zlib license)
*/
#include <ATen/native/cpu/Intrinsics.h>
/* The original source of this file has been modified. */
#if defined(CPU_CAPABILITY_AVX2)
#if defined(__GNUC__)
# define ALIGN32_BEG __attribute__((aligned(32)))
#elif defined(_WIN32)
# define ALIGN32_BEG __declspec(align(32))
#endif
typedef __m256 v8sf; // vector of 8 float (avx2)
typedef __m256i v8si; // vector of 8 int (avx2)
/* declare some AVX constants -- why can't I figure a better way to do that? */
#define _PS256_CONST(Name, Val) \
static const ALIGN32_BEG float _ps256_##Name[8] = { Val, Val, Val, Val, Val, Val, Val, Val }
#define _PI32_CONST256(Name, Val) \
static const ALIGN32_BEG int _pi32_256_##Name[8] = { Val, Val, Val, Val, Val, Val, Val, Val }
#define _PS256_CONST_TYPE(Name, Type, Val) \
static const ALIGN32_BEG Type _ps256_##Name[8] = { Val, Val, Val, Val, Val, Val, Val, Val }
_PS256_CONST(1 , 1.0f);
_PS256_CONST(0p5, 0.5f);
/* the smallest non denormalized float number */
_PS256_CONST_TYPE(min_norm_pos, int, 0x00800000);
_PS256_CONST_TYPE(mant_mask, int, 0x7f800000);
_PS256_CONST_TYPE(inv_mant_mask, int, ~0x7f800000);
_PS256_CONST_TYPE(sign_mask, int, (int)0x80000000);
_PS256_CONST_TYPE(inv_sign_mask, int, ~0x80000000);
_PI32_CONST256(0, 0);
_PI32_CONST256(1, 1);
_PI32_CONST256(inv1, ~1);
_PI32_CONST256(2, 2);
_PI32_CONST256(4, 4);
_PI32_CONST256(0x7f, 0x7f);
_PS256_CONST(cephes_SQRTHF, 0.707106781186547524);
_PS256_CONST(cephes_log_p0, 7.0376836292E-2);
_PS256_CONST(cephes_log_p1, - 1.1514610310E-1);
_PS256_CONST(cephes_log_p2, 1.1676998740E-1);
_PS256_CONST(cephes_log_p3, - 1.2420140846E-1);
_PS256_CONST(cephes_log_p4, + 1.4249322787E-1);
_PS256_CONST(cephes_log_p5, - 1.6668057665E-1);
_PS256_CONST(cephes_log_p6, + 2.0000714765E-1);
_PS256_CONST(cephes_log_p7, - 2.4999993993E-1);
_PS256_CONST(cephes_log_p8, + 3.3333331174E-1);
_PS256_CONST(cephes_log_q1, -2.12194440e-4);
_PS256_CONST(cephes_log_q2, 0.693359375);
/* natural logarithm computed for 8 simultaneous floats
return NaN for x <= 0
*/
inline v8sf log256_ps(v8sf x) {
v8si imm0;
v8sf one = *(v8sf*)_ps256_1;
//v8sf invalid_mask = _mm256_cmple_ps(x, _mm256_setzero_ps());
v8sf invalid_mask = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_LE_OS);
x = _mm256_max_ps(x, *(v8sf*)_ps256_min_norm_pos); /* cut off denormalized stuff */
// can be done with AVX2
imm0 = _mm256_srli_epi32(_mm256_castps_si256(x), 23);
/* keep only the fractional part */
x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_mant_mask);
x = _mm256_or_ps(x, *(v8sf*)_ps256_0p5);
// this is again another AVX2 instruction
imm0 = _mm256_sub_epi32(imm0, *(v8si*)_pi32_256_0x7f);
v8sf e = _mm256_cvtepi32_ps(imm0);
e = _mm256_add_ps(e, one);
/* part2:
if( x < SQRTHF ) {
e -= 1;
x = x + x - 1.0;
} else { x = x - 1.0; }
*/
//v8sf mask = _mm256_cmplt_ps(x, *(v8sf*)_ps256_cephes_SQRTHF);
v8sf mask = _mm256_cmp_ps(x, *(v8sf*)_ps256_cephes_SQRTHF, _CMP_LT_OS);
v8sf tmp = _mm256_and_ps(x, mask);
x = _mm256_sub_ps(x, one);
e = _mm256_sub_ps(e, _mm256_and_ps(one, mask));
x = _mm256_add_ps(x, tmp);
v8sf z = _mm256_mul_ps(x,x);
v8sf y = *(v8sf*)_ps256_cephes_log_p0;
y = _mm256_mul_ps(y, x);
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p1);
y = _mm256_mul_ps(y, x);
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p2);
y = _mm256_mul_ps(y, x);
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p3);
y = _mm256_mul_ps(y, x);
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p4);
y = _mm256_mul_ps(y, x);
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p5);
y = _mm256_mul_ps(y, x);
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p6);
y = _mm256_mul_ps(y, x);
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p7);
y = _mm256_mul_ps(y, x);
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p8);
y = _mm256_mul_ps(y, x);
y = _mm256_mul_ps(y, z);
tmp = _mm256_mul_ps(e, *(v8sf*)_ps256_cephes_log_q1);
y = _mm256_add_ps(y, tmp);
tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
y = _mm256_sub_ps(y, tmp);
tmp = _mm256_mul_ps(e, *(v8sf*)_ps256_cephes_log_q2);
x = _mm256_add_ps(x, y);
x = _mm256_add_ps(x, tmp);
x = _mm256_or_ps(x, invalid_mask); // negative arg will be NAN
return x;
}
_PS256_CONST(exp_hi, 88.3762626647949f);
_PS256_CONST(exp_lo, -88.3762626647949f);
_PS256_CONST(cephes_LOG2EF, 1.44269504088896341);
_PS256_CONST(cephes_exp_C1, 0.693359375);
_PS256_CONST(cephes_exp_C2, -2.12194440e-4);
_PS256_CONST(cephes_exp_p0, 1.9875691500E-4);
_PS256_CONST(cephes_exp_p1, 1.3981999507E-3);
_PS256_CONST(cephes_exp_p2, 8.3334519073E-3);
_PS256_CONST(cephes_exp_p3, 4.1665795894E-2);
_PS256_CONST(cephes_exp_p4, 1.6666665459E-1);
_PS256_CONST(cephes_exp_p5, 5.0000001201E-1);
inline v8sf exp256_ps(v8sf x) {
v8sf tmp = _mm256_setzero_ps(), fx;
v8si imm0;
v8sf one = *(v8sf*)_ps256_1;
x = _mm256_min_ps(x, *(v8sf*)_ps256_exp_hi);
x = _mm256_max_ps(x, *(v8sf*)_ps256_exp_lo);
/* express exp(x) as exp(g + n*log(2)) */
fx = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_LOG2EF);
fx = _mm256_add_ps(fx, *(v8sf*)_ps256_0p5);
/* how to perform a floorf with SSE: just below */
//imm0 = _mm256_cvttps_epi32(fx);
//tmp = _mm256_cvtepi32_ps(imm0);
tmp = _mm256_floor_ps(fx);
/* if greater, subtract 1 */
//v8sf mask = _mm256_cmpgt_ps(tmp, fx);
v8sf mask = _mm256_cmp_ps(tmp, fx, _CMP_GT_OS);
mask = _mm256_and_ps(mask, one);
fx = _mm256_sub_ps(tmp, mask);
tmp = _mm256_mul_ps(fx, *(v8sf*)_ps256_cephes_exp_C1);
v8sf z = _mm256_mul_ps(fx, *(v8sf*)_ps256_cephes_exp_C2);
x = _mm256_sub_ps(x, tmp);
x = _mm256_sub_ps(x, z);
z = _mm256_mul_ps(x,x);
v8sf y = *(v8sf*)_ps256_cephes_exp_p0;
y = _mm256_mul_ps(y, x);
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p1);
y = _mm256_mul_ps(y, x);
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p2);
y = _mm256_mul_ps(y, x);
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p3);
y = _mm256_mul_ps(y, x);
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p4);
y = _mm256_mul_ps(y, x);
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p5);
y = _mm256_mul_ps(y, z);
y = _mm256_add_ps(y, x);
y = _mm256_add_ps(y, one);
/* build 2^n */
imm0 = _mm256_cvttps_epi32(fx);
// another two AVX2 instructions
imm0 = _mm256_add_epi32(imm0, *(v8si*)_pi32_256_0x7f);
imm0 = _mm256_slli_epi32(imm0, 23);
v8sf pow2n = _mm256_castsi256_ps(imm0);
y = _mm256_mul_ps(y, pow2n);
return y;
}
_PS256_CONST(minus_cephes_DP1, -0.78515625);
_PS256_CONST(minus_cephes_DP2, -2.4187564849853515625e-4);
_PS256_CONST(minus_cephes_DP3, -3.77489497744594108e-8);
_PS256_CONST(sincof_p0, -1.9515295891E-4);
_PS256_CONST(sincof_p1, 8.3321608736E-3);
_PS256_CONST(sincof_p2, -1.6666654611E-1);
_PS256_CONST(coscof_p0, 2.443315711809948E-005);
_PS256_CONST(coscof_p1, -1.388731625493765E-003);
_PS256_CONST(coscof_p2, 4.166664568298827E-002);
_PS256_CONST(cephes_FOPI, 1.27323954473516); // 4 / M_PI
/* evaluation of 8 sines at once using AVX intrinsics
The code is the exact rewriting of the cephes sinf function.
Precision is excellent as long as x < 8192 (I did not bother to
take into account the special handling they have for greater values
-- it does not return garbage for arguments over 8192, though, but
the extra precision is missing).
Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the
surprising but correct result.
*/
inline v8sf sin256_ps(v8sf x) { // any x
v8sf xmm1, xmm2 = _mm256_setzero_ps(), xmm3, sign_bit, y;
v8si imm0, imm2;
sign_bit = x;
/* take the absolute value */
x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_sign_mask);
/* extract the sign bit (upper one) */
sign_bit = _mm256_and_ps(sign_bit, *(v8sf*)_ps256_sign_mask);
/* scale by 4/Pi */
y = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_FOPI);
/*
Here we start a series of integer operations, which are in the
realm of AVX2.
If we don't have AVX, let's perform them using SSE2 directives
*/
/* store the integer part of y in mm0 */
imm2 = _mm256_cvttps_epi32(y);
/* j=(j+1) & (~1) (see the cephes sources) */
// another two AVX2 instruction
imm2 = _mm256_add_epi32(imm2, *(v8si*)_pi32_256_1);
imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_inv1);
y = _mm256_cvtepi32_ps(imm2);
/* get the swap sign flag */
imm0 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_4);
imm0 = _mm256_slli_epi32(imm0, 29);
/* get the polynom selection mask
there is one polynom for 0 <= x <= Pi/4
and another one for Pi/4<x<=Pi/2
Both branches will be computed.
*/
imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_2);
imm2 = _mm256_cmpeq_epi32(imm2,*(v8si*)_pi32_256_0);
v8sf swap_sign_bit = _mm256_castsi256_ps(imm0);
v8sf poly_mask = _mm256_castsi256_ps(imm2);
sign_bit = _mm256_xor_ps(sign_bit, swap_sign_bit);
/* The magic pass: "Extended precision modular arithmetic"
x = ((x - y * DP1) - y * DP2) - y * DP3; */
xmm1 = *(v8sf*)_ps256_minus_cephes_DP1;
xmm2 = *(v8sf*)_ps256_minus_cephes_DP2;
xmm3 = *(v8sf*)_ps256_minus_cephes_DP3;
xmm1 = _mm256_mul_ps(y, xmm1);
xmm2 = _mm256_mul_ps(y, xmm2);
xmm3 = _mm256_mul_ps(y, xmm3);
x = _mm256_add_ps(x, xmm1);
x = _mm256_add_ps(x, xmm2);
x = _mm256_add_ps(x, xmm3);
/* Evaluate the first polynom (0 <= x <= Pi/4) */
y = *(v8sf*)_ps256_coscof_p0;
v8sf z = _mm256_mul_ps(x,x);
y = _mm256_mul_ps(y, z);
y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p1);
y = _mm256_mul_ps(y, z);
y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p2);
y = _mm256_mul_ps(y, z);
y = _mm256_mul_ps(y, z);
v8sf tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
y = _mm256_sub_ps(y, tmp);
y = _mm256_add_ps(y, *(v8sf*)_ps256_1);
/* Evaluate the second polynom (Pi/4 <= x <= Pi/2) */
v8sf y2 = *(v8sf*)_ps256_sincof_p0;
y2 = _mm256_mul_ps(y2, z);
y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p1);
y2 = _mm256_mul_ps(y2, z);
y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p2);
y2 = _mm256_mul_ps(y2, z);
y2 = _mm256_mul_ps(y2, x);
y2 = _mm256_add_ps(y2, x);
/* select the correct result from the two polynoms */
xmm3 = poly_mask;
y2 = _mm256_and_ps(xmm3, y2); //, xmm3);
y = _mm256_andnot_ps(xmm3, y);
y = _mm256_add_ps(y,y2);
/* update the sign */
y = _mm256_xor_ps(y, sign_bit);
return y;
}
/* almost the same as sin_ps */
inline v8sf cos256_ps(v8sf x) { // any x
v8sf xmm1, xmm2 = _mm256_setzero_ps(), xmm3, y;
v8si imm0, imm2;
/* take the absolute value */
x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_sign_mask);
/* scale by 4/Pi */
y = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_FOPI);
/* store the integer part of y in mm0 */
imm2 = _mm256_cvttps_epi32(y);
/* j=(j+1) & (~1) (see the cephes sources) */
imm2 = _mm256_add_epi32(imm2, *(v8si*)_pi32_256_1);
imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_inv1);
y = _mm256_cvtepi32_ps(imm2);
imm2 = _mm256_sub_epi32(imm2, *(v8si*)_pi32_256_2);
/* get the swap sign flag */
imm0 = _mm256_andnot_si256(imm2, *(v8si*)_pi32_256_4);
imm0 = _mm256_slli_epi32(imm0, 29);
/* get the polynom selection mask */
imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_2);
imm2 = _mm256_cmpeq_epi32(imm2, *(v8si*)_pi32_256_0);
v8sf sign_bit = _mm256_castsi256_ps(imm0);
v8sf poly_mask = _mm256_castsi256_ps(imm2);
/* The magic pass: "Extended precision modular arithmetic"
x = ((x - y * DP1) - y * DP2) - y * DP3; */
xmm1 = *(v8sf*)_ps256_minus_cephes_DP1;
xmm2 = *(v8sf*)_ps256_minus_cephes_DP2;
xmm3 = *(v8sf*)_ps256_minus_cephes_DP3;
xmm1 = _mm256_mul_ps(y, xmm1);
xmm2 = _mm256_mul_ps(y, xmm2);
xmm3 = _mm256_mul_ps(y, xmm3);
x = _mm256_add_ps(x, xmm1);
x = _mm256_add_ps(x, xmm2);
x = _mm256_add_ps(x, xmm3);
/* Evaluate the first polynom (0 <= x <= Pi/4) */
y = *(v8sf*)_ps256_coscof_p0;
v8sf z = _mm256_mul_ps(x,x);
y = _mm256_mul_ps(y, z);
y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p1);
y = _mm256_mul_ps(y, z);
y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p2);
y = _mm256_mul_ps(y, z);
y = _mm256_mul_ps(y, z);
v8sf tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
y = _mm256_sub_ps(y, tmp);
y = _mm256_add_ps(y, *(v8sf*)_ps256_1);
/* Evaluate the second polynom (Pi/4 <= x <= Pi/2) */
v8sf y2 = *(v8sf*)_ps256_sincof_p0;
y2 = _mm256_mul_ps(y2, z);
y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p1);
y2 = _mm256_mul_ps(y2, z);
y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p2);
y2 = _mm256_mul_ps(y2, z);
y2 = _mm256_mul_ps(y2, x);
y2 = _mm256_add_ps(y2, x);
/* select the correct result from the two polynoms */
xmm3 = poly_mask;
y2 = _mm256_and_ps(xmm3, y2); //, xmm3);
y = _mm256_andnot_ps(xmm3, y);
y = _mm256_add_ps(y,y2);
/* update the sign */
y = _mm256_xor_ps(y, sign_bit);
return y;
}
/* since sin256_ps and cos256_ps are almost identical, sincos256_ps could replace both of them..
it is almost as fast, and gives you a free cosine with your sine */
inline void sincos256_ps(v8sf x, v8sf *s, v8sf *c) {
v8sf xmm1, xmm2, xmm3 = _mm256_setzero_ps(), sign_bit_sin, y;
v8si imm0, imm2, imm4;
sign_bit_sin = x;
/* take the absolute value */
x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_sign_mask);
/* extract the sign bit (upper one) */
sign_bit_sin = _mm256_and_ps(sign_bit_sin, *(v8sf*)_ps256_sign_mask);
/* scale by 4/Pi */
y = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_FOPI);
/* store the integer part of y in imm2 */
imm2 = _mm256_cvttps_epi32(y);
/* j=(j+1) & (~1) (see the cephes sources) */
imm2 = _mm256_add_epi32(imm2, *(v8si*)_pi32_256_1);
imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_inv1);
y = _mm256_cvtepi32_ps(imm2);
imm4 = imm2;
/* get the swap sign flag for the sine */
imm0 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_4);
imm0 = _mm256_slli_epi32(imm0, 29);
//v8sf swap_sign_bit_sin = _mm256_castsi256_ps(imm0);
/* get the polynom selection mask for the sine*/
imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_2);
imm2 = _mm256_cmpeq_epi32(imm2, *(v8si*)_pi32_256_0);
//v8sf poly_mask = _mm256_castsi256_ps(imm2);
v8sf swap_sign_bit_sin = _mm256_castsi256_ps(imm0);
v8sf poly_mask = _mm256_castsi256_ps(imm2);
/* The magic pass: "Extended precision modular arithmetic"
x = ((x - y * DP1) - y * DP2) - y * DP3; */
xmm1 = *(v8sf*)_ps256_minus_cephes_DP1;
xmm2 = *(v8sf*)_ps256_minus_cephes_DP2;
xmm3 = *(v8sf*)_ps256_minus_cephes_DP3;
xmm1 = _mm256_mul_ps(y, xmm1);
xmm2 = _mm256_mul_ps(y, xmm2);
xmm3 = _mm256_mul_ps(y, xmm3);
x = _mm256_add_ps(x, xmm1);
x = _mm256_add_ps(x, xmm2);
x = _mm256_add_ps(x, xmm3);
imm4 = _mm256_sub_epi32(imm4, *(v8si*)_pi32_256_2);
imm4 = _mm256_andnot_si256(imm4, *(v8si*)_pi32_256_4);
imm4 = _mm256_slli_epi32(imm4, 29);
v8sf sign_bit_cos = _mm256_castsi256_ps(imm4);
sign_bit_sin = _mm256_xor_ps(sign_bit_sin, swap_sign_bit_sin);
/* Evaluate the first polynom (0 <= x <= Pi/4) */
v8sf z = _mm256_mul_ps(x,x);
y = *(v8sf*)_ps256_coscof_p0;
y = _mm256_mul_ps(y, z);
y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p1);
y = _mm256_mul_ps(y, z);
y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p2);
y = _mm256_mul_ps(y, z);
y = _mm256_mul_ps(y, z);
v8sf tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
y = _mm256_sub_ps(y, tmp);
y = _mm256_add_ps(y, *(v8sf*)_ps256_1);
/* Evaluate the second polynom (Pi/4 <= x <= Pi/2) */
v8sf y2 = *(v8sf*)_ps256_sincof_p0;
y2 = _mm256_mul_ps(y2, z);
y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p1);
y2 = _mm256_mul_ps(y2, z);
y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p2);
y2 = _mm256_mul_ps(y2, z);
y2 = _mm256_mul_ps(y2, x);
y2 = _mm256_add_ps(y2, x);
/* select the correct result from the two polynoms */
xmm3 = poly_mask;
v8sf ysin2 = _mm256_and_ps(xmm3, y2);
v8sf ysin1 = _mm256_andnot_ps(xmm3, y);
y2 = _mm256_sub_ps(y2,ysin2);
y = _mm256_sub_ps(y, ysin1);
xmm1 = _mm256_add_ps(ysin1,ysin2);
xmm2 = _mm256_add_ps(y,y2);
/* update the sign */
*s = _mm256_xor_ps(xmm1, sign_bit_sin);
*c = _mm256_xor_ps(xmm2, sign_bit_cos);
}
#endif // CPU_CAPABILITY_AVX2
| 17,448 | 32.363289 | 96 | h |
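The routines above evaluate log, exp, sin and cos on eight packed floats using the Cephes polynomial approximations. A rough usage sketch follows, assuming the header is on the include path, the compiler targets AVX2 (e.g. -mavx2), and CPU_CAPABILITY_AVX2 is defined as the per-capability build normally does:

```cpp
// Hedged sketch: exercising exp256_ps on 8 floats and comparing with std::exp.
// Defining CPU_CAPABILITY_AVX2 by hand stands in for what the build system does.
#define CPU_CAPABILITY_AVX2
#include <ATen/native/cpu/avx_mathfun.h>
#include <cmath>
#include <cstdio>

int main() {
  alignas(32) float in[8]  = {-2.f, -1.f, -0.5f, 0.f, 0.5f, 1.f, 2.f, 4.f};
  alignas(32) float out[8];

  v8sf x = _mm256_load_ps(in);          // v8sf == __m256, see the typedef above
  _mm256_store_ps(out, exp256_ps(x));   // 8 exponentials in one call

  for (int i = 0; i < 8; ++i) {
    std::printf("exp(%g) ~= %g (std::exp: %g)\n", in[i], out[i], std::exp(in[i]));
  }
  return 0;
}
```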
| null | pytorch-main/aten/src/ATen/native/cpu/mixed_data_type.h |
#pragma once
#include <ATen/core/Tensor.h>
namespace at { namespace native {
inline ScalarType first_type() {
return ScalarType::Undefined;
}
template <typename... Args>
inline ScalarType first_type(const Tensor& arg, const Args&... parameters) {
return arg.defined() ? arg.scalar_type() : first_type(parameters...);
}
template <typename... Args>
inline bool is_mixed_type(const Tensor& input, const Args&... parameters) {
const auto parameter_type = first_type(parameters...);
return ((parameter_type != ScalarType::Undefined) &&
(parameter_type != input.scalar_type()));
}
// currently on CPU, mixed data type is only supported
// when input is 'BFloat16' and parameters are 'Float'
inline void check_mixed_data_type(const Tensor& input) {
TORCH_CHECK(input.scalar_type() == ScalarType::BFloat16,
"mixed dtype (CPU): all inputs must share same datatype.");
}
template <typename... Args>
inline void check_mixed_data_type(const Tensor& input, const Tensor& parameter, const Args&... parameters) {
TORCH_CHECK(!parameter.defined() || parameter.scalar_type() == ScalarType::Float,
"mixed dtype (CPU): expect parameter to have scalar type of Float");
check_mixed_data_type(input, parameters...);
}
inline ScalarType param_scalar_type(const Tensor& t, bool is_mixed_type) {
return is_mixed_type ? ScalarType::Float : t.scalar_type();
}
}} // namespace at::native
| 1,408 | 32.547619 | 108 | h |
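A small sketch of how these helpers are meant to be combined inside an at::native CPU kernel (the tensors `input`, `weight`, `bias` are placeholders from the caller): a BFloat16 input with Float parameters counts as mixed type and parameters are then handled in Float, while any other combination is rejected.

```cpp
// Sketch only; input/weight/bias are placeholder tensors.
const bool mixed_type = is_mixed_type(input, weight, bias);
if (mixed_type) {
  // On CPU, only BFloat16 input + Float parameters is accepted.
  check_mixed_data_type(input, weight, bias);
}
// Scalar type to use when reading/accumulating the parameters:
const auto param_type = param_scalar_type(input, mixed_type);
```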
| null | pytorch-main/aten/src/ATen/native/cpu/moments_utils.h |
#pragma once
#include <array>
#include <cstring>
#include <numeric>
#include <utility>
#include <vector>
#include <ATen/Parallel.h>
#include <ATen/OpMathType.h>
#include <ATen/cpu/vec/vec.h>
#include <ATen/native/cpu/utils.h>
#include <c10/util/SmallVector.h>
#include <c10/util/irange.h>
namespace at {
namespace native {
inline namespace CPU_CAPABILITY {
template<typename T> using acc_t = at::opmath_type<T>;
constexpr int64_t kChunkSize = 16;
template <typename T>
void AddMoments(
int64_t m0_add,
const T& m1_add,
const T& m2_add,
int64_t& m0,
T& m1,
T& m2) {
const int64_t n = m0 + m0_add;
const T c = n == 0 ? static_cast<T>(0) : static_cast<T>(m0_add) / static_cast<T>(n);
const T delta = m1_add - m1;
m1 += c * delta;
m2 += m2_add + delta * delta * c * static_cast<T>(m0);
m0 = n;
}
template <typename T>
C10_ALWAYS_INLINE void AddMomentsVec(
int64_t m0_add,
const vec::Vectorized<T>& m1_add,
const vec::Vectorized<T>& m2_add,
int64_t& m0,
vec::Vectorized<T>& m1,
vec::Vectorized<T>& m2) {
using Vec = vec::Vectorized<T>;
const int64_t n = m0 + m0_add;
const T c = n == 0 ? static_cast<T>(0) : static_cast<T>(m0_add) / static_cast<T>(n);
const Vec c_vec(c);
const Vec delta = m1_add - m1;
m1 += c_vec * delta;
m2 += m2_add + delta * delta * c_vec * Vec(static_cast<T>(m0));
m0 = n;
}
template <typename T>
inline void UpdateMomentsVec(
int64_t m0,
const T* X_ptr,
const std::array<vec::Vectorized<acc_t<T>>, kChunkSize>& c_vecs,
int64_t& m0_stk0,
vec::Vectorized<acc_t<T>>& m1_stk0,
vec::Vectorized<acc_t<T>>& m2_stk0) {
using Vec = vec::Vectorized<acc_t<T>>;
Vec m1_vec(0);
Vec m2_vec(0);
for (const auto j : c10::irange(m0)) {
const Vec x_vec = Vec::loadu(X_ptr + j * Vec::size());
const Vec delta_vec = x_vec - m1_vec;
m1_vec += delta_vec * c_vecs[j];
m2_vec += delta_vec * (x_vec - m1_vec);
}
AddMomentsVec(m0, m1_vec, m2_vec, m0_stk0, m1_stk0, m2_stk0);
}
// each bfloat16 vector will be converted to two float vectors,
// and accumulated successively on m1_stk0/m2_stk0.
template <>
inline void UpdateMomentsVec<BFloat16>(
int64_t m0,
const BFloat16* X_ptr,
const std::array<vec::Vectorized<float>, kChunkSize>& c_vecs,
int64_t& m0_stk0,
vec::Vectorized<float>& m1_stk0,
vec::Vectorized<float>& m2_stk0) {
using bVec = vec::Vectorized<BFloat16>;
using fVec = vec::Vectorized<float>;
fVec m1_fvec0(0), m1_fvec1(0);
fVec m2_fvec0(0), m2_fvec1(0);
for (const auto j : c10::irange(m0)) {
const bVec x_bvec = bVec::loadu(X_ptr + j * bVec::size());
fVec x_fvec0, x_fvec1;
std::tie(x_fvec0, x_fvec1) = convert_bfloat16_float(x_bvec);
const fVec delta_fvec0 = x_fvec0 - m1_fvec0;
const fVec delta_fvec1 = x_fvec1 - m1_fvec1;
m1_fvec0 += delta_fvec0 * c_vecs[j];
m1_fvec1 += delta_fvec1 * c_vecs[j];
m2_fvec0 += delta_fvec0 * (x_fvec0 - m1_fvec0);
m2_fvec1 += delta_fvec1 * (x_fvec1 - m1_fvec1);
}
AddMomentsVec(m0, m1_fvec0, m2_fvec0, m0_stk0, m1_stk0, m2_stk0);
AddMomentsVec(m0, m1_fvec1, m2_fvec1, m0_stk0, m1_stk0, m2_stk0);
}
// Compute rowwise moments by Welford algorithm and cascade sum to improve
// numerical stability.
// https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
// https://en.wikipedia.org/wiki/Pairwise_summation
template <typename T, int64_t kMaxDepth>
std::pair<acc_t<T>, acc_t<T>> RowwiseMomentsImpl(const T* X, int64_t N, int64_t ddof = 0) {
using T_ACC = acc_t<T>;
constexpr int64_t kVecSize = vec::Vectorized<T>::size();
constexpr int64_t kAccVecSize = vec::Vectorized<T_ACC>::size();
const int64_t n = N / kVecSize;
const int64_t m = divup(n, kChunkSize);
const int64_t depth = utils::CeilLog2(m);
using Vec = vec::Vectorized<T_ACC>;
const Vec kZeroVec(T_ACC(0));
c10::SmallVector<int64_t, kMaxDepth> m0_stk(depth, 0);
c10::SmallVector<Vec, kMaxDepth> m1_stk(depth, kZeroVec);
c10::SmallVector<Vec, kMaxDepth> m2_stk(depth, kZeroVec);
for (const auto i : c10::irange(m)) {
const T* X_ptr = X + i * kChunkSize * kVecSize;
const int64_t m0 = std::min(kChunkSize, n - i * kChunkSize);
static std::array<Vec, kChunkSize> c_vecs = ([]() {
std::array<Vec, kChunkSize> result;
for (const auto i : c10::irange(kChunkSize)) {
result[i] = Vec(T_ACC(1) / static_cast<T_ACC>(i + 1));
}
return result;
})();
UpdateMomentsVec(m0, X_ptr, c_vecs, m0_stk[0], m1_stk[0], m2_stk[0]);
int64_t mask = i + 1;
for (int64_t j = 1; j < depth && (mask & 1) == 0; ++j) {
AddMomentsVec(
m0_stk[j - 1],
m1_stk[j - 1],
m2_stk[j - 1],
m0_stk[j],
m1_stk[j],
m2_stk[j]);
m0_stk[j - 1] = 0;
m1_stk[j - 1] = kZeroVec;
m2_stk[j - 1] = kZeroVec;
mask >>= 1;
}
}
for (const auto i : c10::irange(1, depth)) {
AddMomentsVec(
m0_stk[i], m1_stk[i], m2_stk[i], m0_stk[0], m1_stk[0], m2_stk[0]);
}
std::array<T_ACC, kAccVecSize> m1_arr{};
std::array<T_ACC, kAccVecSize> m2_arr{};
m1_stk[0].store(m1_arr.data());
m2_stk[0].store(m2_arr.data());
int64_t m0 = 0;
T_ACC m1 = 0;
T_ACC m2 = 0;
for (int64_t i = n * kVecSize; i < N; ++i) {
T_ACC x = static_cast<T_ACC>(X[i]);
const T_ACC delta = x - m1;
++m0;
m1 += delta / static_cast<T_ACC>(m0);
m2 += delta * (x - m1);
}
// for BFloat16, each vector in m1_arr/m2_arr holds 2*n accumulated result
int64_t m0_add = n * kVecSize / kAccVecSize;
for (const auto i : c10::irange(kAccVecSize)) {
AddMoments(m0_add, m1_arr[i], m2_arr[i], m0, m1, m2);
}
return std::make_pair(m1, m2 / static_cast<T_ACC>(N - ddof));
}
template <typename T>
std::pair<acc_t<T>, acc_t<T>> RowwiseMoments(const T* X, int64_t N, int64_t ddof = 0) {
using Vec = vec::Vectorized<T>;
constexpr int64_t kVecSize = Vec::size();
const int64_t n = N / kVecSize;
const int64_t m = divup(n, kChunkSize);
const int64_t depth = utils::CeilLog2(m);
if (depth <= 4) {
return RowwiseMomentsImpl<T, 4>(X, N, ddof);
} else if (depth <= 8) {
return RowwiseMomentsImpl<T, 8>(X, N, ddof);
} else if (depth <= 16) {
return RowwiseMomentsImpl<T, 16>(X, N, ddof);
} else if (depth <= 32) {
return RowwiseMomentsImpl<T, 32>(X, N, ddof);
} else {
return RowwiseMomentsImpl<T, 64>(X, N, ddof);
}
}
} // namespace CPU_CAPABILITY
} // namespace native
} // namespace at
| 6,480 | 30.461165 | 91 | h |
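AddMoments and AddMomentsVec implement the standard parallel (Chan-style) merge of two Welford accumulators; writing the merge out makes the `c` and `delta` terms in the code explicit. With counts n_a, n_b, means mu_a, mu_b and sums of squared deviations M_a, M_b:

```latex
\[
\begin{aligned}
n &= n_a + n_b, \qquad \delta = \mu_b - \mu_a, \qquad c = \frac{n_b}{n},\\
\mu &= \mu_a + c\,\delta,\\
M &= M_a + M_b + \delta^{2}\, c\, n_a .
\end{aligned}
\]
```

In the code, m0/m1/m2 play the roles of n_a/mu_a/M_a and m0_add/m1_add/m2_add those of n_b/mu_b/M_b; RowwiseMomentsImpl finally returns the mean together with M / (N - ddof).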
| null | pytorch-main/aten/src/ATen/native/cpu/utils.h |
#pragma once
#include <ATen/Parallel.h>
#include <ATen/cpu/vec/vec.h>
#include <c10/util/llvmMathExtras.h>
#ifdef USE_FBGEMM
#include <fbgemm/Fbgemm.h>
#endif
namespace at {
namespace native {
inline namespace CPU_CAPABILITY {
template <typename T>
inline T data_index_init(T offset) {
return offset;
}
template <typename T, typename... Args>
inline T data_index_init(T offset, T& x, const T& X, Args&&... args) {
offset = data_index_init(offset, std::forward<Args>(args)...);
x = offset % X;
return offset / X;
}
inline bool data_index_step() {
return true;
}
template <typename T, typename... Args>
inline bool data_index_step(T& x, const T& X, Args&&... args) {
if (data_index_step(std::forward<Args>(args)...)) {
x = ((x + 1) == X) ? 0 : (x + 1);
return x == 0;
}
return false;
}
// Helper struct for bfloat16 vectorization
// Useful when you need float as immediate dtype or accumulate dtype
using namespace vec;
struct Vec2 {
Vectorized<float> val0, val1;
Vec2(Vectorized<float> v0, Vectorized<float> v1) : val0(v0), val1(v1) {}
Vec2(float v) : val0(v), val1(v) {}
static Vec2 loadu(const BFloat16* ptr) {
Vectorized<float> v0, v1;
std::tie(v0, v1) = convert_bfloat16_float(Vectorized<BFloat16>::loadu(ptr));
return {v0, v1};
}
static Vec2 loadu(const float* ptr) {
return {Vectorized<float>::loadu(ptr), Vectorized<float>::loadu(ptr + Vectorized<float>::size())};
}
void store(BFloat16* ptr) const {
Vectorized<BFloat16> val = convert_float_bfloat16(val0, val1);
val.store(ptr);
}
void store(float* ptr) const {
val0.store(ptr);
val1.store(ptr + Vectorized<float>::size());
}
};
inline Vec2 operator+(const Vec2& a, const Vec2& b) { return {a.val0 + b.val0, a.val1 + b.val1}; }
inline Vec2 operator*(const Vec2& a, const Vec2& b) { return {a.val0 * b.val0, a.val1 * b.val1}; }
inline Vec2 operator-(const Vec2& a, const Vec2& b) { return {a.val0 - b.val0, a.val1 - b.val1}; }
inline Vec2 operator/(const Vec2& a, const Vec2& b) { return {a.val0 / b.val0, a.val1 / b.val1}; }
inline Vec2 maximum(const Vec2& a, const Vec2& b) { return {vec::maximum(a.val0, b.val0), vec::maximum(a.val1, b.val1)}; }
inline Vec2 minimum(const Vec2& a, const Vec2& b) { return {vec::minimum(a.val0, b.val0), vec::minimum(a.val1, b.val1)}; }
template <typename scalar_t> struct VectorizedType { using type = Vectorized<scalar_t>; };
template <> struct VectorizedType<BFloat16> { using type = Vec2; };
template <typename scalar_t> using VecType = typename VectorizedType<scalar_t>::type;
// Helper for mixed data type parameter Vec::load
inline std::tuple<Vectorized<float>, Vectorized<float>> load2f(const BFloat16* ptr) {
return convert_bfloat16_float(Vectorized<BFloat16>::loadu(ptr));
}
inline std::tuple<Vectorized<float>, Vectorized<float>> load2f(const float* ptr) {
using Vec = Vectorized<float>;
return std::make_tuple(Vec::loadu(ptr), Vec::loadu(ptr + Vec::size()));
}
inline std::tuple<Vectorized<float>, Vectorized<float>> load2f(const BFloat16* ptr, int64_t count) {
return convert_bfloat16_float(Vectorized<BFloat16>::loadu(ptr, count));
}
inline std::tuple<Vectorized<float>, Vectorized<float>> load2f(const float* ptr, int64_t count) {
using Vec = Vectorized<float>;
if (count > Vec::size()) {
return std::make_tuple(Vec::loadu(ptr), Vec::loadu(ptr + Vec::size(), count - Vec::size()));
} else {
return std::make_tuple(Vec::loadu(ptr, count), Vec(0));
}
}
} // namespace
namespace utils {
template <typename T>
T CeilLog2(const T& x) {
if (x <= 2) {
return 1;
}
// Last set bit is floor(log2(x)), floor + 1 is ceil
// except when x is an exact power of 2, so subtract 1 first
return static_cast<T>(llvm::findLastSet(static_cast<uint64_t>(x) - 1)) + 1;
}
// matrix transpose:
// src has shape of M by N, with leading dimension of ld_src
// dst has shape of N by M, with leading dimension of ld_dst
template <typename T>
inline void transpose(int64_t M, int64_t N, const T* src, int64_t ld_src, T* dst, int64_t ld_dst) {
for (int64_t j = 0; j < N; j++) {
for (int64_t i = 0; i < M; i++) {
dst[j * ld_dst + i] = src[i * ld_src + j];
}
}
}
#ifdef USE_FBGEMM
template <>
inline void transpose<float>(int64_t M, int64_t N, const float* src, int64_t ld_src, float* dst, int64_t ld_dst) {
TORCH_CHECK(fbgemm::fbgemmSupportedCPU(), "Your CPU does not support FBGEMM.");
fbgemm::transpose_simd<float>(M, N, src, ld_src, dst, ld_dst);
}
#endif
template <typename index_t, typename F>
inline void parallel_sparse_csr(
const TensorAccessor<index_t, 1>& crow_acc,
const int64_t M,
const int64_t nnz,
const F& f) {
TORCH_CHECK(crow_acc.size(0) == M + 1);
// directly parallel on `M` may lead to load imbalance,
// statically determine thread partition here to average payload
// for each thread.
int num_threads = at::get_num_threads();
std::vector<int64_t> thread_splits(num_threads + 1, M);
int64_t thread_average_payload = std::max((int64_t)1, divup(nnz, num_threads));
thread_splits[0] = 0;
int64_t sum = 0;
int64_t t = 1;
for (const auto m : c10::irange(M)) {
int64_t row_start = crow_acc[m];
int64_t row_end = crow_acc[m + 1];
sum += row_end - row_start;
if (sum > t * thread_average_payload) {
thread_splits[t] = m;
t++;
}
}
// need to restore the last index,
// due to rounding error when calculating `thread_average_payload`.
thread_splits[num_threads] = M;
at::parallel_for(0, num_threads, 1, [&](int64_t cbegin, int64_t cend) {
int tid = at::get_thread_num();
int64_t begin = thread_splits[tid];
int64_t end = thread_splits[tid + 1];
f(begin, end);
});
}
} // namespace utils
} // namespace native
} // namespace at
| 5,775 | 31.632768 | 122 | h |
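data_index_init and data_index_step support the flat-index decomposition idiom used inside at::parallel_for loops. Below is a hedged sketch of that pattern; `begin`, `end`, `N`, `H`, `W` and the loop body are placeholders, and the last (w, W) pair varies fastest.

```cpp
// Sketch of the usual iteration pattern over a flattened (N, H, W) index space.
int64_t n = 0, h = 0, w = 0;
at::native::data_index_init(begin, n, N, h, H, w, W);  // decompose the starting flat index
for (int64_t i = begin; i < end; ++i) {
  // ... process element (n, h, w) ...
  // advance to the next flat index, carrying from w up through h to n
  at::native::data_index_step(n, N, h, H, w, W);
}
```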
| null | pytorch-main/aten/src/ATen/native/cpu/zmath.h |
#pragma once
// Complex number math operations that act as no-ops for other dtypes.
#include <c10/util/complex.h>
#include <c10/util/math_compat.h>
#include <c10/util/MathConstants.h>
#include <ATen/NumericUtils.h>
namespace at { namespace native {
inline namespace CPU_CAPABILITY {
template <typename SCALAR_TYPE, typename VALUE_TYPE=SCALAR_TYPE>
inline VALUE_TYPE zabs (SCALAR_TYPE z) {
return z;
}
template<>
inline c10::complex<float> zabs <c10::complex<float>> (c10::complex<float> z) {
return c10::complex<float>(std::abs(z));
}
template<>
inline float zabs <c10::complex<float>, float> (c10::complex<float> z) {
return std::abs(z);
}
template<>
inline c10::complex<double> zabs <c10::complex<double>> (c10::complex<double> z) {
return c10::complex<double>(std::abs(z));
}
template<>
inline double zabs <c10::complex<double>, double> (c10::complex<double> z) {
return std::abs(z);
}
// This overload corresponds to non-complex dtypes.
// The function is consistent with its NumPy equivalent
// for non-complex dtypes where `pi` is returned for
// negative real numbers and `0` is returned for 0 or positive
// real numbers.
// Note: `nan` is propagated.
template <typename SCALAR_TYPE, typename VALUE_TYPE=SCALAR_TYPE>
inline VALUE_TYPE angle_impl (SCALAR_TYPE z) {
if (at::_isnan(z)) {
return z;
}
return z < 0 ? c10::pi<double> : 0;
}
template<>
inline c10::complex<float> angle_impl <c10::complex<float>> (c10::complex<float> z) {
return c10::complex<float>(std::arg(z), 0.0);
}
template<>
inline float angle_impl <c10::complex<float>, float> (c10::complex<float> z) {
return std::arg(z);
}
template<>
inline c10::complex<double> angle_impl <c10::complex<double>> (c10::complex<double> z) {
return c10::complex<double>(std::arg(z), 0.0);
}
template<>
inline double angle_impl <c10::complex<double>, double> (c10::complex<double> z) {
return std::arg(z);
}
template <typename SCALAR_TYPE, typename VALUE_TYPE=SCALAR_TYPE>
constexpr VALUE_TYPE real_impl (SCALAR_TYPE z) {
return z; //No-Op
}
template<>
constexpr c10::complex<float> real_impl <c10::complex<float>> (c10::complex<float> z) {
return c10::complex<float>(z.real(), 0.0);
}
template<>
constexpr float real_impl <c10::complex<float>, float> (c10::complex<float> z) {
return z.real();
}
template<>
constexpr c10::complex<double> real_impl <c10::complex<double>> (c10::complex<double> z) {
return c10::complex<double>(z.real(), 0.0);
}
template<>
constexpr double real_impl <c10::complex<double>, double> (c10::complex<double> z) {
return z.real();
}
template <typename SCALAR_TYPE, typename VALUE_TYPE=SCALAR_TYPE>
constexpr VALUE_TYPE imag_impl (SCALAR_TYPE /*z*/) {
return 0;
}
template<>
constexpr c10::complex<float> imag_impl <c10::complex<float>> (c10::complex<float> z) {
return c10::complex<float>(z.imag(), 0.0);
}
template<>
constexpr float imag_impl <c10::complex<float>, float> (c10::complex<float> z) {
return z.imag();
}
template<>
constexpr c10::complex<double> imag_impl <c10::complex<double>> (c10::complex<double> z) {
return c10::complex<double>(z.imag(), 0.0);
}
template<>
constexpr double imag_impl <c10::complex<double>, double> (c10::complex<double> z) {
return z.imag();
}
template <typename TYPE>
inline TYPE conj_impl (TYPE z) {
return z; //No-Op
}
template<>
inline c10::complex<at::Half> conj_impl <c10::complex<at::Half>> (c10::complex<at::Half> z) {
return c10::complex<at::Half>{z.real(), -z.imag()};
}
template<>
inline c10::complex<float> conj_impl <c10::complex<float>> (c10::complex<float> z) {
return c10::complex<float>(z.real(), -z.imag());
}
template<>
inline c10::complex<double> conj_impl <c10::complex<double>> (c10::complex<double> z) {
return c10::complex<double>(z.real(), -z.imag());
}
template <typename TYPE>
inline TYPE ceil_impl (TYPE z) {
return std::ceil(z);
}
template <>
inline c10::complex<float> ceil_impl (c10::complex<float> z) {
return c10::complex<float>(std::ceil(z.real()), std::ceil(z.imag()));
}
template <>
inline c10::complex<double> ceil_impl (c10::complex<double> z) {
return c10::complex<double>(std::ceil(z.real()), std::ceil(z.imag()));
}
template<typename T>
inline c10::complex<T> sgn_impl (c10::complex<T> z) {
if (z == c10::complex<T>(0, 0)) {
return c10::complex<T>(0, 0);
} else {
return z / zabs(z);
}
}
template <typename TYPE>
inline TYPE floor_impl (TYPE z) {
return std::floor(z);
}
template <>
inline c10::complex<float> floor_impl (c10::complex<float> z) {
return c10::complex<float>(std::floor(z.real()), std::floor(z.imag()));
}
template <>
inline c10::complex<double> floor_impl (c10::complex<double> z) {
return c10::complex<double>(std::floor(z.real()), std::floor(z.imag()));
}
template <typename TYPE>
inline TYPE round_impl (TYPE z) {
return std::nearbyint(z);
}
template <>
inline c10::complex<float> round_impl (c10::complex<float> z) {
return c10::complex<float>(std::nearbyint(z.real()), std::nearbyint(z.imag()));
}
template <>
inline c10::complex<double> round_impl (c10::complex<double> z) {
return c10::complex<double>(std::nearbyint(z.real()), std::nearbyint(z.imag()));
}
template <typename TYPE>
inline TYPE trunc_impl (TYPE z) {
return std::trunc(z);
}
template <>
inline c10::complex<float> trunc_impl (c10::complex<float> z) {
return c10::complex<float>(std::trunc(z.real()), std::trunc(z.imag()));
}
template <>
inline c10::complex<double> trunc_impl (c10::complex<double> z) {
return c10::complex<double>(std::trunc(z.real()), std::trunc(z.imag()));
}
template <typename TYPE, std::enable_if_t<!c10::is_complex<TYPE>::value, int> = 0>
inline TYPE max_impl (TYPE a, TYPE b) {
if (_isnan<TYPE>(a) || _isnan<TYPE>(b)) {
return std::numeric_limits<TYPE>::quiet_NaN();
} else {
return std::max(a, b);
}
}
template <typename TYPE, std::enable_if_t<c10::is_complex<TYPE>::value, int> = 0>
inline TYPE max_impl (TYPE a, TYPE b) {
if (_isnan<TYPE>(a)) {
return a;
} else if (_isnan<TYPE>(b)) {
return b;
} else {
return std::abs(a) > std::abs(b) ? a : b;
}
}
template <typename TYPE, std::enable_if_t<!c10::is_complex<TYPE>::value, int> = 0>
inline TYPE min_impl (TYPE a, TYPE b) {
if (_isnan<TYPE>(a) || _isnan<TYPE>(b)) {
return std::numeric_limits<TYPE>::quiet_NaN();
} else {
return std::min(a, b);
}
}
template <typename TYPE, std::enable_if_t<c10::is_complex<TYPE>::value, int> = 0>
inline TYPE min_impl (TYPE a, TYPE b) {
if (_isnan<TYPE>(a)) {
return a;
} else if (_isnan<TYPE>(b)) {
return b;
} else {
return std::abs(a) < std::abs(b) ? a : b;
}
}
} // end namespace
}} //end at::native
| 6,668 | 25.464286 | 93 | h |
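The point of these helpers is that complex-specific behavior degenerates to a no-op (or a trivial value) for real dtypes, so the same kernel template can be instantiated for both. A few illustrative calls, written for this note rather than taken from the repo:

```cpp
// Illustration of the real-vs-complex behavior of the zmath helpers.
using namespace at::native;

double r  = zabs(-3.0);                                   // -3.0: no-op for real types
float  m  = zabs<c10::complex<float>, float>(
                c10::complex<float>(3.f, 4.f));           // 5.0: |3 + 4i|
double a  = angle_impl(-1.0);                             // pi for negative reals, 0 otherwise
auto   zc = conj_impl(c10::complex<double>(1.0, 2.0));    // (1.0, -2.0)
// Complex min/max order by magnitude and propagate NaN:
auto   zm = max_impl(c10::complex<float>(0.f, 2.f),
                     c10::complex<float>(1.f, 0.f));      // (0, 2i), since |2i| > |1|
```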
| null | pytorch-main/aten/src/ATen/native/cuda/Activation.h |
#pragma once
#include <ATen/native/Activation.h>
#include <cstdint>
namespace at {
struct TensorIteratorBase;
class TensorBase;
}
namespace at { namespace native {
void launch_glu_backward_kernel(const TensorIteratorBase& iter,
int64_t gI_stride, int64_t I_stride);
void launch_log_sigmoid_forward_kernel(TensorIteratorBase& iter);
void GeluCUDAKernelImpl(TensorIteratorBase& it, GeluType approximate);
void GeluBackwardCUDAKernelImpl(TensorIteratorBase& it, GeluType approximate);
}} // namespace at::native
| 548 | 25.142857 | 78 | h |
| null | pytorch-main/aten/src/ATen/native/cuda/BinaryInternal.h |
// DON'T include this except from Binary*.cu files. It should not leak into
// headers.
#pragma once
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <c10/cuda/CUDAGuard.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/TypeSafeSignMath.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <type_traits>
namespace at {
namespace native {
namespace binary_internal {
template <typename scalar_t>
struct DivFunctor {
__device__ scalar_t operator()(scalar_t a, scalar_t b) const {
return a / b;
}
};
template <typename T>
struct MulFunctor {
__device__ T operator()(T a, T b) const {
return a * b;
}
};
// Workaround for the error: '*' in boolean context, suggest '&&' instead
// [-Werror=int-in-bool-context]
template <>
struct MulFunctor<bool> {
__device__ bool operator()(bool a, bool b) const {
return a && b;
}
};
void div_true_kernel_cuda(TensorIteratorBase& iter);
void div_trunc_kernel_cuda(TensorIteratorBase& iter);
} // namespace binary_internal
} // namespace native
} // namespace at
| 1,237 | 24.265306 | 75 | h |
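The functors above are consumed by the elementwise CUDA loop machinery in Loops.cuh / JitLoops.cuh. A rough, hedged sketch of what a kernel like div_true_kernel_cuda can look like (simplified; the real implementation dispatches over more dtypes and handles the scalar-denominator fast path differently):

```cpp
// Rough sketch, not the actual BinaryDivTrueKernel.cu. Assumes it sits inside
// namespace at::native::binary_internal, so gpu_kernel_with_scalars and the
// dispatch macros are visible.
void div_true_kernel_cuda(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(),
      "div_true_cuda", [&]() {
    // Applies the functor to every element pair produced by the iterator,
    // handling Tensor-Scalar operand combinations as well.
    gpu_kernel_with_scalars(iter, DivFunctor<scalar_t>());
  });
}
```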
| null | pytorch-main/aten/src/ATen/native/cuda/CompositeRandomAccessor.h |
#pragma once
#include <ATen/native/CompositeRandomAccessorCommon.h>
#include <thrust/tuple.h>
namespace at { namespace native {
struct TupleInfoCPU {
template <typename ...Types>
using tuple = thrust::tuple<Types...>;
template <typename ...Types>
static constexpr auto tie(Types&... args) noexcept {
return thrust::tie(args...);
}
};
template <typename KeyAccessor, typename ValueAccessor>
using CompositeRandomAccessorCPU =
CompositeRandomAccessor<KeyAccessor, ValueAccessor, TupleInfoCPU>;
template <typename Values, typename References>
void swap(
references_holder<Values, References> rh1,
references_holder<Values, References> rh2
) {
return thrust::swap(rh1.data(), rh2.data());
}
template <int N, typename Values, typename References>
auto get(references_holder<Values, References> rh) -> decltype(thrust::get<N>(rh.data())) {
return thrust::get<N>(rh.data());
}
}} // namespace at::native
| 929 | 24.833333 | 91 | h |
| null | pytorch-main/aten/src/ATen/native/cuda/CuFFTPlanCache.h |
#include <ATen/Config.h>
#include <ATen/core/DimVector.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/cuda/CuFFTUtils.h>
#include <ATen/native/utils/ParamsHash.h>
#include <c10/util/accumulate.h>
#include <c10/util/irange.h>
#include <cufft.h>
#include <cufftXt.h>
#include <limits>
#include <list>
#include <sstream>
#include <stdexcept>
#include <string>
#include <unordered_map>
namespace at { namespace native { namespace detail {
// Enum representing the FFT type
enum class CuFFTTransformType : int8_t {
C2C, // Complex-to-complex
R2C, // Real-to-complex
C2R, // Complex-to-real
};
// This struct is used to let us easily compute hashes of the
// parameters.
// It will be the **key** to the plan cache.
struct CuFFTParams
{
int64_t signal_ndim_; // between 1 and max_rank, i.e., 1 <= signal_ndim <= 3
// These include additional batch dimension as well.
int64_t sizes_[max_rank + 1];
int64_t input_strides_[max_rank + 1];
int64_t output_strides_[max_rank + 1];
CuFFTTransformType fft_type_;
ScalarType value_type_;
CuFFTParams() = default;
CuFFTParams(IntArrayRef in_strides, IntArrayRef out_strides,
IntArrayRef signal_sizes, CuFFTTransformType fft_type, ScalarType value_type) {
// Padding bits must be zeroed for hashing
memset(this, 0, sizeof(*this));
signal_ndim_ = signal_sizes.size() - 1;
fft_type_ = fft_type;
value_type_ = value_type;
TORCH_INTERNAL_ASSERT(in_strides.size() == signal_sizes.size());
TORCH_INTERNAL_ASSERT(out_strides.size() == signal_sizes.size());
TORCH_INTERNAL_ASSERT(1 <= signal_ndim_ && signal_ndim_ <= max_rank);
std::copy(signal_sizes.cbegin(), signal_sizes.cend(), sizes_);
std::copy(in_strides.cbegin(), in_strides.cend(), input_strides_);
std::copy(out_strides.cbegin(), out_strides.cend(), output_strides_);
}
};
static_assert(std::is_trivial<CuFFTParams>::value, "");
// Returns true if the transform type has complex input
inline bool cufft_complex_input(CuFFTTransformType type) {
switch (type) {
case CuFFTTransformType::C2C:
case CuFFTTransformType::C2R:
return true;
case CuFFTTransformType::R2C:
return false;
}
TORCH_INTERNAL_ASSERT(false);
}
// Returns true if the transform type has complex output
inline bool cufft_complex_output(CuFFTTransformType type) {
switch (type) {
case CuFFTTransformType::C2C:
case CuFFTTransformType::R2C:
return true;
case CuFFTTransformType::C2R:
return false;
}
TORCH_INTERNAL_ASSERT(false);
}
// Create transform type enum from bools representing if input and output are complex
inline CuFFTTransformType GetCuFFTTransformType(bool complex_input, bool complex_output) {
if (complex_input && complex_output) {
return CuFFTTransformType::C2C;
} else if (complex_input && !complex_output) {
return CuFFTTransformType::C2R;
} else if (!complex_input && complex_output) {
return CuFFTTransformType::R2C;
}
TORCH_INTERNAL_ASSERT(false, "Real to real FFTs are not supported");
}
class CuFFTHandle {
::cufftHandle handle_;
public:
CuFFTHandle() {
CUFFT_CHECK(cufftCreate(&handle_));
}
::cufftHandle & get() { return handle_; }
const ::cufftHandle & get() const { return handle_; }
~CuFFTHandle() {
// Not using fftDestroy() for rocFFT to work around double freeing of handles
#if !defined(USE_ROCM)
cufftDestroy(handle_);
#endif
}
};
__forceinline__
static bool is_pow_of_two(int64_t x) {
return (x & (x - 1)) == 0;
}
#if defined(USE_ROCM)
using cufft_size_type = int;
#else
using cufft_size_type = long long int;
#endif
using CuFFTDimVector = c10::SmallVector<cufft_size_type, at::kDimVectorStaticSize>;
// Struct representing a tensor in CuFFT's data layout for planning transforms
// See NOTE [ cuFFT Embedded Strides ].
struct CuFFTDataLayout {
CuFFTDimVector embed;
cufft_size_type stride, dist;
bool must_clone, simple;
};
// Returns a cufft embedding for a contiguous signal of the given size.
// e.g. if the input is cloned, this will be the resulting data layout
// See NOTE [ cuFFT Embedded Strides ].
inline CuFFTDataLayout cufft_simple_embed(IntArrayRef sizes, bool onesided) {
CuFFTDataLayout layout;
layout.simple = true;
layout.must_clone = false;
layout.embed.assign(sizes.cbegin() + 1, sizes.cend());
if (onesided) {
layout.embed.back() = sizes.back() / 2 + 1;
}
layout.stride = 1;
layout.dist = 1;
for (const auto& len : layout.embed) {
layout.dist *= len;
}
return layout;
}
// Convert strides to a CuFFT embedded representation.
// If strides cannot be embedded, returns a simple layout and sets must_clone flag
// See NOTE [ cuFFT Embedded Strides ].
inline CuFFTDataLayout as_cufft_embed(IntArrayRef strides, IntArrayRef sizes, bool onesided) {
const auto signal_ndim = strides.size() - 1;
CuFFTDataLayout layout;
auto last_stride = strides[signal_ndim];
layout.must_clone = (last_stride <= 0);
const auto last_dim_size = onesided ?
sizes[signal_ndim] / 2 + 1 : sizes[signal_ndim];
const auto signal_numel = c10::multiply_integers(sizes.slice(1, sizes.size() - 2)) * last_dim_size;
// Zero strides are not allowed, even if the batch size is one.
// If that happens just set a dummy case
if (sizes[0] == 1) {
layout.dist = signal_numel;
} else if (strides[0] == 0) {
layout.must_clone = true;
} else {
layout.dist = strides[0];
}
// Calculate the embedding shape, or set must_clone if the strides cannot be embedded
layout.embed.resize(signal_ndim);
for (auto i = signal_ndim - 1; !layout.must_clone && i > 0; i--) {
auto stride = strides[i];
if (sizes[i] == 1) {
layout.embed[i] = 1;
} else if (stride > 0 && stride % last_stride == 0) {
layout.embed[i] = stride / last_stride;
last_stride = stride;
} else {
layout.must_clone = true;
}
}
if (layout.must_clone) {
// If the input needs to be cloned, assume it will be contiguous
layout = cufft_simple_embed(sizes, onesided);
layout.must_clone = true;
} else {
layout.embed[0] = sizes[1];
layout.stride = strides[signal_ndim];
// Determine if layout represents a simple embedding (contiguous data)
layout.simple = [&] {
for (const auto i : c10::irange(1, signal_ndim - 1)) {
if (layout.embed[i] != sizes[i + 1]) {
return false;
}
}
return (layout.stride == 1 && layout.dist == signal_numel &&
layout.embed.back() == last_dim_size);
}();
}
return layout;
}
// This class contains all the information needed to execute a cuFFT plan:
// 1. the plan
// 2. whether to clone input before executing the plan
// 3. the workspace size needed
//
// This class will be the **value** in the plan cache.
// It **owns** the raw plan via a unique_ptr.
class CuFFTConfig {
public:
// Only move semantics is enough for this class. Although we already use
// unique_ptr for the plan, still remove copy constructor and assignment op so
// we don't accidentally copy and take perf hit.
CuFFTConfig(const CuFFTConfig&) = delete;
CuFFTConfig& operator=(CuFFTConfig const&) = delete;
explicit CuFFTConfig(const CuFFTParams& params):
CuFFTConfig(
IntArrayRef(params.input_strides_, params.signal_ndim_ + 1),
IntArrayRef(params.output_strides_, params.signal_ndim_ + 1),
IntArrayRef(params.sizes_, params.signal_ndim_ + 1),
params.fft_type_,
params.value_type_) {}
// For complex types, strides are in units of 2 * element_size(dtype)
// sizes are for the full signal, including batch size and always two-sided
CuFFTConfig(IntArrayRef in_strides, IntArrayRef out_strides,
IntArrayRef sizes, CuFFTTransformType fft_type, ScalarType dtype):
fft_type_(fft_type), value_type_(dtype) {
// signal sizes (excluding batch dim)
CuFFTDimVector signal_sizes(sizes.begin() + 1, sizes.end());
// input batch size
const int64_t batch = sizes[0];
const int64_t signal_ndim = sizes.size() - 1;
// Since cuFFT has limited non-unit stride support and various constraints, we
// use a flag to keep track throughout this function to see if we need to
// input = input.clone();
#if defined(USE_ROCM)
// clone input to avoid issues with hipfft clobbering the input and failing tests
clone_input = true;
#else
clone_input = false;
#endif
// For half, base strides on the real part of real-to-complex and
// complex-to-real transforms are not supported. Since our output is always
// contiguous, only need to check real-to-complex case.
if (dtype == ScalarType::Half) {
// cuFFT on half requires compute capability of at least SM_53
auto dev_prop = at::cuda::getCurrentDeviceProperties();
TORCH_CHECK(dev_prop->major >= 5 && !(dev_prop->major == 5 && dev_prop->minor < 3),
"cuFFT doesn't support signals of half type with compute "
"capability less than SM_53, but the device containing input half "
"tensor only has SM_", dev_prop->major, dev_prop->minor);
for (const auto i : c10::irange(signal_ndim)) {
TORCH_CHECK(is_pow_of_two(sizes[i + 1]),
"cuFFT only supports dimensions whose sizes are powers of two when"
" computing in half precision, but got a signal size of",
sizes.slice(1));
}
clone_input |= in_strides.back() != 1;
}
CuFFTDataLayout in_layout;
if (clone_input) {
in_layout = cufft_simple_embed(sizes, fft_type == CuFFTTransformType::C2R);
} else {
in_layout = as_cufft_embed(in_strides, sizes, fft_type == CuFFTTransformType::C2R);
}
auto out_layout = as_cufft_embed(out_strides, sizes, fft_type == CuFFTTransformType::R2C);
TORCH_INTERNAL_ASSERT(!out_layout.must_clone, "Out strides cannot be represented as CuFFT embedding");
clone_input |= in_layout.must_clone;
// Check if we can take advantage of simple data layout.
//
// See NOTE [ cuFFT Embedded Strides ] in native/cuda/SpectralOps.cu.
const bool simple_layout = in_layout.simple && out_layout.simple;
#if defined(USE_ROCM)
hipfftType exec_type = [&]{
if (dtype == kFloat) {
switch (fft_type) {
case CuFFTTransformType::C2C: return HIPFFT_C2C;
case CuFFTTransformType::R2C: return HIPFFT_R2C;
case CuFFTTransformType::C2R: return HIPFFT_C2R;
}
} else if (dtype == kDouble) {
switch (fft_type) {
case CuFFTTransformType::C2C: return HIPFFT_Z2Z;
case CuFFTTransformType::R2C: return HIPFFT_D2Z;
case CuFFTTransformType::C2R: return HIPFFT_Z2D;
}
}
TORCH_CHECK(false, "hipFFT doesn't support transforms of type: ", dtype);
}();
#else
cudaDataType itype, otype, exec_type;
const auto complex_input = cufft_complex_input(fft_type);
const auto complex_output = cufft_complex_output(fft_type);
if (dtype == ScalarType::Float) {
itype = complex_input ? CUDA_C_32F : CUDA_R_32F;
otype = complex_output ? CUDA_C_32F : CUDA_R_32F;
exec_type = CUDA_C_32F;
} else if (dtype == ScalarType::Double) {
itype = complex_input ? CUDA_C_64F : CUDA_R_64F;
otype = complex_output ? CUDA_C_64F : CUDA_R_64F;
exec_type = CUDA_C_64F;
} else if (dtype == ScalarType::Half) {
itype = complex_input ? CUDA_C_16F : CUDA_R_16F;
otype = complex_output ? CUDA_C_16F : CUDA_R_16F;
exec_type = CUDA_C_16F;
} else {
TORCH_CHECK(false, "cuFFT doesn't support tensor of type: ", dtype);
}
#endif
// disable auto allocation of workspace to use THC allocator
CUFFT_CHECK(cufftSetAutoAllocation(plan(), /* autoAllocate */ 0));
size_t ws_size_t;
// make plan
if (simple_layout) {
// For unit-stride (simple) layouts, we tell cuFFT by setting inembed == onembed == NULL.
// In that case, cuFFT ignores istride, ostride, idist, and odist
// by assuming istride = ostride = 1.
//
// See NOTE [ cuFFT Embedded Strides ] in native/cuda/SpectralOps.cu.
#if defined(USE_ROCM)
CUFFT_CHECK(hipfftMakePlanMany(plan(), signal_ndim, signal_sizes.data(),
/* inembed */ nullptr, /* base_istride */ 1, /* idist */ 1,
/* onembed */ nullptr, /* base_ostride */ 1, /* odist */ 1,
exec_type, batch, &ws_size_t));
#else
CUFFT_CHECK(cufftXtMakePlanMany(plan(), signal_ndim, signal_sizes.data(),
/* inembed */ nullptr, /* base_istride */ 1, /* idist */ 1, itype,
/* onembed */ nullptr, /* base_ostride */ 1, /* odist */ 1, otype,
batch, &ws_size_t, exec_type));
#endif
} else {
#if defined(USE_ROCM)
CUFFT_CHECK(hipfftMakePlanMany(plan(), signal_ndim, signal_sizes.data(),
in_layout.embed.data(), in_layout.stride, in_layout.dist,
out_layout.embed.data(), out_layout.stride, out_layout.dist,
exec_type, batch, &ws_size_t));
#else
CUFFT_CHECK(cufftXtMakePlanMany(plan(), signal_ndim, signal_sizes.data(),
in_layout.embed.data(), in_layout.stride, in_layout.dist, itype,
out_layout.embed.data(), out_layout.stride, out_layout.dist, otype,
batch, &ws_size_t, exec_type));
#endif
}
ws_size = static_cast<int64_t>(ws_size_t);
}
const cufftHandle &plan() const { return plan_ptr.get(); }
CuFFTTransformType transform_type() const { return fft_type_; }
ScalarType data_type() const { return value_type_; }
bool should_clone_input() const { return clone_input; }
int64_t workspace_size() const { return ws_size; }
private:
CuFFTHandle plan_ptr;
bool clone_input;
int64_t ws_size;
CuFFTTransformType fft_type_;
ScalarType value_type_;
};
#if defined(USE_ROCM)
// Note that the max plan number for CUDA version < 10 has to be 1023
// due to a bug that fails on the 1024th plan
constexpr int64_t CUFFT_MAX_PLAN_NUM = 1023;
constexpr int64_t CUFFT_DEFAULT_CACHE_SIZE = CUFFT_MAX_PLAN_NUM;
#else
constexpr int64_t CUFFT_MAX_PLAN_NUM = std::numeric_limits<int64_t>::max();
// The default max cache size chosen for CUDA version > 10 is arbitrary.
// This number puts a limit on how big a plan cache we should maintain by
// default. Users can always configure it via cufft_set_plan_cache_max_size.
constexpr int64_t CUFFT_DEFAULT_CACHE_SIZE = 4096;
#endif
static_assert(0 <= CUFFT_MAX_PLAN_NUM && CUFFT_MAX_PLAN_NUM <= std::numeric_limits<int64_t>::max(),
"CUFFT_MAX_PLAN_NUM not in size_t range");
static_assert(CUFFT_DEFAULT_CACHE_SIZE >= 0 && CUFFT_DEFAULT_CACHE_SIZE <= CUFFT_MAX_PLAN_NUM,
"CUFFT_DEFAULT_CACHE_SIZE not in [0, CUFFT_MAX_PLAN_NUM] range");
// This cache assumes that the mapping from key to value never changes.
// This is **NOT** thread-safe. Please use a mutex when using it **AND** the
// value returned from try_emplace_value.
// The contract of using this cache is that try_emplace_value should only be
// used when the max_size is positive.
class CuFFTParamsLRUCache {
public:
using kv_t = typename std::pair<CuFFTParams, CuFFTConfig>;
using map_t = typename std::unordered_map<std::reference_wrapper<CuFFTParams>,
typename std::list<kv_t>::iterator,
ParamsHash<CuFFTParams>,
ParamsEqual<CuFFTParams>>;
using map_kkv_iter_t = typename map_t::iterator;
CuFFTParamsLRUCache() : CuFFTParamsLRUCache(CUFFT_DEFAULT_CACHE_SIZE) {}
CuFFTParamsLRUCache(int64_t max_size) {
_set_max_size(max_size);
}
CuFFTParamsLRUCache(CuFFTParamsLRUCache&& other) noexcept :
_usage_list(std::move(other._usage_list)),
_cache_map(std::move(other._cache_map)),
_max_size(other._max_size) {}
CuFFTParamsLRUCache& operator=(CuFFTParamsLRUCache&& other) noexcept {
_usage_list = std::move(other._usage_list);
_cache_map = std::move(other._cache_map);
_max_size = other._max_size;
return *this;
}
// If key is in this cache, return the cached config. Otherwise, emplace the
// config in this cache and return it.
// Return const reference because CuFFTConfig shouldn't be tampered with once
// created.
const CuFFTConfig &lookup(CuFFTParams params) {
AT_ASSERT(_max_size > 0);
map_kkv_iter_t map_it = _cache_map.find(params);
// Hit, put to list front
if (map_it != _cache_map.end()) {
_usage_list.splice(_usage_list.begin(), _usage_list, map_it->second);
return map_it->second->second;
}
// Miss
// remove if needed
if (_usage_list.size() >= _max_size) {
auto last = _usage_list.end();
last--;
_cache_map.erase(last->first);
_usage_list.pop_back();
}
// construct new plan at list front, then insert into _cache_map
_usage_list.emplace_front(std::piecewise_construct,
std::forward_as_tuple(params),
std::forward_as_tuple(params));
auto kv_it = _usage_list.begin();
_cache_map.emplace(std::piecewise_construct,
std::forward_as_tuple(kv_it->first),
std::forward_as_tuple(kv_it));
return kv_it->second;
}
void clear() {
_cache_map.clear();
_usage_list.clear();
}
void resize(int64_t new_size) {
_set_max_size(new_size);
auto cur_size = _usage_list.size();
if (cur_size > _max_size) {
auto delete_it = _usage_list.end();
for (size_t i = 0; i < cur_size - _max_size; i++) {
delete_it--;
_cache_map.erase(delete_it->first);
}
_usage_list.erase(delete_it, _usage_list.end());
}
}
size_t size() const { return _cache_map.size(); }
size_t max_size() const noexcept { return _max_size; }
std::mutex mutex;
private:
// Only sets size and does value check. Does not resize the data structures.
void _set_max_size(int64_t new_size) {
// We check that 0 <= new_size <= CUFFT_MAX_PLAN_NUM here. Since
// CUFFT_MAX_PLAN_NUM is of type size_t, we need to do non-negativity check
// first.
TORCH_CHECK(new_size >= 0,
"cuFFT plan cache size must be non-negative, but got ", new_size);
TORCH_CHECK(new_size <= CUFFT_MAX_PLAN_NUM,
"cuFFT plan cache size can not be larger than ", CUFFT_MAX_PLAN_NUM, ", but got ", new_size);
_max_size = static_cast<size_t>(new_size);
}
std::list<kv_t> _usage_list;
map_t _cache_map;
size_t _max_size;
};
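// Usage sketch (illustrative only; construction of the CuFFTParams key is
// elided). Per the contract above, callers hold the cache's mutex around
// lookup() and around any use of the returned config:
//
//   CuFFTParamsLRUCache cache;
//   {
//     std::lock_guard<std::mutex> guard(cache.mutex);
//     const CuFFTConfig& config = cache.lookup(params);
//     int64_t ws = config.workspace_size();
//     // ... execute the plan described by `config` ...
//   }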
// Since ATen is separated into CPU build and CUDA build, we need a way to call
// these functions only when CUDA is loaded. We use CUDA hooks for this purpose
// (at cuda/detail/CUDAHooks.cpp), and call the hooked functions from the actual
// native function counterparts (at native/SpectralOps.cpp), i.e.,
// _cufft_get_plan_cache_max_size, _cufft_set_plan_cache_max_size
// _cufft_get_plan_cache_size, and _cufft_clear_plan_cache.
int64_t cufft_get_plan_cache_max_size_impl(int64_t device_index);
void cufft_set_plan_cache_max_size_impl(int64_t device_index, int64_t max_size);
int64_t cufft_get_plan_cache_size_impl(int64_t device_index);
void cufft_clear_plan_cache_impl(int64_t device_index);
}}} // namespace at::native::detail
| 19,257
| 35.131332
| 106
|
h
|
null |
pytorch-main/aten/src/ATen/native/cuda/CuFFTUtils.h
|
#pragma once
#include <ATen/Config.h>
#include <string>
#include <stdexcept>
#include <sstream>
#include <cufft.h>
#include <cufftXt.h>
namespace at { namespace native {
// This means that max dim is 3 + 2 = 5 with batch dimension and possible
// complex dimension
constexpr int max_rank = 3;
static inline std::string _cudaGetErrorEnum(cufftResult error)
{
switch (error)
{
case CUFFT_SUCCESS:
return "CUFFT_SUCCESS";
case CUFFT_INVALID_PLAN:
return "CUFFT_INVALID_PLAN";
case CUFFT_ALLOC_FAILED:
return "CUFFT_ALLOC_FAILED";
case CUFFT_INVALID_TYPE:
return "CUFFT_INVALID_TYPE";
case CUFFT_INVALID_VALUE:
return "CUFFT_INVALID_VALUE";
case CUFFT_INTERNAL_ERROR:
return "CUFFT_INTERNAL_ERROR";
case CUFFT_EXEC_FAILED:
return "CUFFT_EXEC_FAILED";
case CUFFT_SETUP_FAILED:
return "CUFFT_SETUP_FAILED";
case CUFFT_INVALID_SIZE:
return "CUFFT_INVALID_SIZE";
case CUFFT_UNALIGNED_DATA:
return "CUFFT_UNALIGNED_DATA";
case CUFFT_INCOMPLETE_PARAMETER_LIST:
return "CUFFT_INCOMPLETE_PARAMETER_LIST";
case CUFFT_INVALID_DEVICE:
return "CUFFT_INVALID_DEVICE";
case CUFFT_PARSE_ERROR:
return "CUFFT_PARSE_ERROR";
case CUFFT_NO_WORKSPACE:
return "CUFFT_NO_WORKSPACE";
case CUFFT_NOT_IMPLEMENTED:
return "CUFFT_NOT_IMPLEMENTED";
#if !defined(USE_ROCM)
case CUFFT_LICENSE_ERROR:
return "CUFFT_LICENSE_ERROR";
#endif
case CUFFT_NOT_SUPPORTED:
return "CUFFT_NOT_SUPPORTED";
default:
std::ostringstream ss;
ss << "unknown error " << error;
return ss.str();
}
}
static inline void CUFFT_CHECK(cufftResult error)
{
if (error != CUFFT_SUCCESS) {
std::ostringstream ss;
ss << "cuFFT error: " << _cudaGetErrorEnum(error);
AT_ERROR(ss.str());
}
}
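// Usage sketch (illustrative): wrap raw cuFFT calls so failures surface as
// readable errors, e.g.
//   cufftHandle plan;
//   CUFFT_CHECK(cufftCreate(&plan));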
}} // at::native
| 1,863
| 24.189189
| 73
|
h
|
null |
pytorch-main/aten/src/ATen/native/cuda/Distributions.h
|
#pragma once
namespace at {
struct CUDAGeneratorImpl;
struct TensorIteratorBase;
class TensorBase;
namespace native {
void launch_poisson_cuda_kernel(
const TensorBase &ret, const TensorBase &lambda, CUDAGeneratorImpl *gen);
void launch_gamma_kernel(
const TensorBase &ret, const TensorBase &alpha, CUDAGeneratorImpl *gen);
void launch_binomial_cuda_kernel(
TensorIteratorBase &iter, CUDAGeneratorImpl *gen);
void launch_dirichlet_kernel(TensorIteratorBase &iter);
void launch_standard_gamma_grad_kernel(TensorIteratorBase &iter);
void launch_dirichlet_grad_kernel(TensorIteratorBase &iter);
}} // namespace at::native
| 641
| 23.692308
| 77
|
h
|
null |
pytorch-main/aten/src/ATen/native/cuda/GridSampler.h
|
#pragma once
#include <array>
#include <cstdint>
namespace at {
class TensorBase;
}
namespace at {
namespace native {
void launch_grid_sampler_2d_forward_kernel(
const TensorBase &output, const TensorBase &input, const TensorBase &grid,
int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
void launch_grid_sampler_3d_forward_kernel(
const TensorBase &output, const TensorBase &input, const TensorBase &grid,
int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
void launch_grid_sampler_2d_backward_kernel(
const TensorBase &grad_input, const TensorBase &grad_grid,
const TensorBase &grad_output, const TensorBase &input,
const TensorBase &grid, int64_t interpolation_mode, int64_t padding_mode,
bool align_corners, std::array<bool, 2> output_mask);
void launch_grid_sampler_3d_backward_kernel(
const TensorBase &grad_input, const TensorBase &grad_grid,
const TensorBase &grad_output, const TensorBase &input,
const TensorBase &grid, int64_t interpolation_mode, int64_t padding_mode,
bool align_corners, std::array<bool, 2> output_mask);
}} // namespace at::native
| 1,157
| 34.090909
| 78
|
h
|
null |
pytorch-main/aten/src/ATen/native/cuda/MiscUtils.h
|
#pragma once
#include <ATen/cuda/Exceptions.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAConfig.h>
#include <ATen/cuda/PinnedMemoryAllocator.h>
namespace at {
namespace native {
static inline int cuda_int_cast(int64_t value, const char* varname) {
auto result = static_cast<int>(value);
TORCH_CHECK(static_cast<int64_t>(result) == value,
"cuda_int_cast: The value of ", varname, "(", (long long)value,
") is too large to fit into a int (", sizeof(int), " bytes)");
return result;
}
// Creates an array of size elements of type T, backed by pinned memory
// wrapped in a Storage
template<class T>
static inline Storage pin_memory(int64_t size) {
auto* allocator = cuda::getPinnedMemoryAllocator();
int64_t adjusted_size = size * sizeof(T);
return Storage(
Storage::use_byte_size_t(),
adjusted_size,
allocator,
/*resizable=*/false);
}
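// Usage sketch (illustrative): allocate pinned host memory for `n` ints.
// The exact accessor used to reach the raw pointer is an assumption here.
//   Storage infos = pin_memory<int>(n);
//   int* infos_data = static_cast<int*>(infos.mutable_data());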
} // namespace native
} // namespace at
| 958
| 28.060606
| 77
|
h
|
null |
pytorch-main/aten/src/ATen/native/cuda/Resize.h
|
#pragma once
#include <ATen/EmptyTensor.h>
#include <ATen/native/ResizeCommon.h>
#include <c10/cuda/CUDAGuard.h>
namespace at { namespace native {
TORCH_CUDA_CPP_API void resize_bytes_cuda(StorageImpl* storage, size_t size_bytes);
static inline void maybe_resize_storage_cuda(TensorImpl* self, size_t new_size_bytes) {
// It does not make sense to try to resize a storage
// to hold 0 elements, and this can break
// if storage_offset is positive but
// new_size is 0, so just bail in that case
// (same comment is in Resize.h)
if (self->numel() == 0) {
return;
}
const Storage &storage = self->unsafe_storage();
TORCH_CHECK(storage, "Tensor: invalid null storage");
if (new_size_bytes > storage.nbytes()) {
resize_bytes_cuda(storage.unsafeGetStorageImpl(), new_size_bytes);
}
}
inline TensorImpl* resize_impl_cuda_(
TensorImpl* self,
IntArrayRef size,
at::OptionalIntArrayRef stride,
bool device_guard = true) {
if (self->sizes() == size && (!stride || self->strides() == stride)) {
return self;
}
// NB: We don't need to hold the device guard when calling from TH
cuda::OptionalCUDAGuard guard;
if (device_guard) {
guard.set_index(self->storage().device().index());
}
const auto itemsize = self->dtype().itemsize();
const auto storage_offset = self->storage_offset();
size_t storage_size = 1;
if (stride) {
self->set_sizes_and_strides(size, *stride);
storage_size = at::detail::computeStorageNbytes(
size, *stride, itemsize, storage_offset);
} else {
self->set_sizes_contiguous(size);
storage_size = at::detail::computeStorageNbytesContiguous(
size, itemsize, storage_offset);
}
maybe_resize_storage_cuda(self, storage_size);
return self;
}
}}
| 1,771
| 27.580645
| 87
|
h
|
null |
pytorch-main/aten/src/ATen/native/cuda/ScanKernels.h
|
#pragma once
#include <cstdint>
namespace at {
class TensorBase;
namespace native {
// NOTE: these functions require output tensors to be contiguous
void launch_cummax_cuda_kernel(const TensorBase& self, const TensorBase& values,
const TensorBase& indices, int64_t dim);
void launch_cummin_cuda_kernel(const TensorBase& self, const TensorBase& values,
const TensorBase& indices, int64_t dim);
void launch_logcumsumexp_cuda_kernel(const TensorBase& result, const TensorBase& self, int64_t dim);
void launch_cumsum_cuda_kernel(const TensorBase& result, const TensorBase& self, int64_t dim);
void launch_cumprod_cuda_kernel(const TensorBase& result, const TensorBase& self, int64_t dim);
}} // namespace at::native
| 779
| 40.052632
| 100
|
h
|
null |
pytorch-main/aten/src/ATen/native/cuda/jit_utils.h
|
#pragma once
#include <string>
#include <sstream>
#include <unordered_map>
#include <vector>
#include <c10/util/irange.h>
#include <ATen/jit_macros.h>
#include <ATen/cuda/detail/LazyNVRTC.h>
namespace at { namespace cuda { namespace jit {
enum class BinaryFuncVariant {NoScalar, RhsScalar, LhsScalar};
struct NvrtcFunction {
CUmodule module = CUmodule();
CUfunction function = nullptr;
};
struct KernelDescriptor {
std::string name;
std::string f;
c10::ScalarType f_inputs_type;
c10::ScalarType result_type;
c10::SmallVector<c10::ScalarType> extra_args_types;
int nInputs, nOutputs;
};
// Helper function to return a vector<string>
// corresponding to the type of the arguments in parameter pack.
template <typename... Args>
c10::SmallVector<at::ScalarType> get_extra_args_types() {
return {c10::CppTypeToScalarType<Args>::value ...};
}
template <
typename result_type,
typename f_inputs_type,
typename... ExtraArgs>
KernelDescriptor make_kernel_descriptor(
std::string name,
std::string f,
int nInputs,
int nOutputs) {
KernelDescriptor ret;
ret.name = std::move(name);
ret.f = std::move(f);
ret.f_inputs_type = c10::CppTypeToScalarType<f_inputs_type>::value;
ret.result_type = c10::CppTypeToScalarType<result_type>::value;
ret.extra_args_types = get_extra_args_types<ExtraArgs...>();
ret.nInputs = nInputs;
ret.nOutputs = nOutputs;
return ret;
}
inline int can_vectorize_up_to(size_t default_alignment, void *pointer) {
auto ip = reinterpret_cast<uintptr_t>(pointer);
if (ip % (4 * default_alignment) == 0) {
return 4;
}
if (ip % (2 * default_alignment) == 0) {
return 2;
}
return 1;
}
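// Worked example: for float elements (default_alignment == 4), a pointer whose
// address is divisible by 16 vectorizes up to 4 elements, one divisible only
// by 8 vectorizes up to 2, and anything else falls back to 1.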
inline int can_vectorize_up_to(const KernelDescriptor &desc, c10::ArrayRef<char*> pointers) {
TORCH_INTERNAL_ASSERT(desc.nOutputs == 1);
TORCH_INTERNAL_ASSERT(static_cast<int64_t>(pointers.size()) == 1 + desc.nInputs);
// Deals with output
auto result_size = c10::scalarTypeToTypeMeta(desc.result_type).itemsize();
int result = can_vectorize_up_to(result_size, pointers[0]);
// Incorporates input(s)
auto input_size = c10::scalarTypeToTypeMeta(desc.f_inputs_type).itemsize();
for (auto i : c10::irange(1, pointers.size())) {
result = std::min(result, can_vectorize_up_to(input_size, pointers[i]));
}
return result;
}
std::string generate_code(
int nInputs,
int nOutputs,
const std::string& func,
const std::string& name,
const std::string& f_input_type,
const std::string& compute_type,
const std::string& result_type,
bool contiguous,
bool dynamic_casting,
BinaryFuncVariant scalar_pos,
c10::SmallVector<std::string>& extra_args_typenames,
bool vectorized=false,
int vec_size=0,
bool return_by_ref=false);
std::string generate_code(
const KernelDescriptor &desc,
bool contiguous,
bool dynamic_casting,
BinaryFuncVariant scalar_pos,
bool vectorized=false,
int vec_size=0,
bool return_by_ref=false);
std::string generate_reduction_code(
int nOutputs,
const std::string& func,
const std::string& name,
const int vt0,
const std::string& f_inputs_type,
const std::string& reduction_accum_type,
const std::string& result_type,
bool contiguous,
bool vectorized,
int vec_size,
int max_threads_codegen);
std::string generate_reduction_code(
const KernelDescriptor &desc,
const int vt0,
bool contiguous,
bool vectorized,
int vec_size,
int max_threads_codegen);
NvrtcFunction jit_pwise_function(
const std::string& code,
const std::string& kernel_name);
void launch_jitted_pwise_function(
NvrtcFunction function,
void* args[],
const dim3 nBlocks,
const dim3 kBlockSize,
const int smem=0);
template <typename T>
struct delayed_false : std::false_type {
};
// Defines type names
// NOTE: General case is instantiated only for invalid types.
// All the valid types have specialization using the TYPE_NAME_FN
// macro below.
template <typename T>
inline std::string typeName() {
  // We can't use static_assert(false) directly, as the program would then
  // fail to compile even if the template is never instantiated, so we use
  // `delayed_false` to make sure the compiler doesn't eagerly fail this
  // assertion.
static_assert(delayed_false<T>::value, "invalid type for jiterator");
return "void";
}
#define TYPE_NAME_FN(ctype, name) \
template <> inline std::string typeName<ctype>(){ \
return std::string(#ctype); \
}
AT_FORALL_SCALAR_TYPES(TYPE_NAME_FN)
#undef TYPE_NAME_FN
// JIT uses std::complex directly, because nvRTC compiles programs
// with -default-device, so there is no issue like:
// "std::sin(complex) is __host__ only"
template <> inline std::string typeName<bool>(){
return "bool";
}
template <> inline std::string typeName<c10::complex<at::Half>>(){
return "std::complex<at::Half>";
}
template <> inline std::string typeName<c10::complex<float>>(){
return "std::complex<float>";
}
template <> inline std::string typeName<c10::complex<double>>(){
return "std::complex<double>";
}
template <> inline std::string typeName<at::Half>(){
return "at::Half";
}
template <> inline std::string typeName<at::BFloat16>(){
return "at::BFloat16";
}
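// Examples: typeName<float>() yields "float" via TYPE_NAME_FN, the
// specialization above makes typeName<at::Half>() yield "at::Half", and the
// runtime overload below maps ScalarType::Double to "double".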
#define TYPE_NAME_CASE(ctype, scalartype) \
case ScalarType::scalartype: return typeName<ctype>();
inline std::string typeName(ScalarType t) {
switch (t) {
AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(TYPE_NAME_CASE)
default:
TORCH_CHECK(false, "invalid type for jiterator");
}
}
#undef TYPE_NAME_CASE
TORCH_CUDA_CPP_API void initializeCudaContext();
}}} // namespace at::cuda::jit
| 5,705
| 26.970588
| 93
|
h
|
null |
pytorch-main/aten/src/ATen/native/cuda/thread_constants.h
|
#pragma once
#include <c10/macros/Macros.h>
// Marks a lambda as executable on both the host and device. The __host__
// attribute is important so that we can access static type information from
// the host, even if the function is typically only executed on the device.
#ifndef GPU_LAMBDA
#define GPU_LAMBDA __host__ __device__
#endif
#if defined(USE_ROCM)
constexpr int num_threads() {
return 256;
}
#else
constexpr uint32_t num_threads() {
return C10_WARP_SIZE * 4;
}
#endif
constexpr int thread_work_size() { return 4; }
constexpr int block_work_size() { return thread_work_size() * num_threads(); }
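// Worked example: on CUDA with C10_WARP_SIZE == 32, num_threads() == 128 and
// thread_work_size() == 4, so each block covers block_work_size() == 512
// elements; on ROCm, num_threads() == 256 gives 1024 elements per block.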
| 611
| 25.608696
| 78
|
h
|
null |
pytorch-main/aten/src/ATen/native/cuda/linalg/MagmaUtils.h
|
#pragma once
#include <ATen/cuda/CUDAConfig.h>
#if AT_MAGMA_ENABLED()
#include <magma_types.h>
#include <magma_v2.h>
#endif
namespace at {
namespace native {
#if AT_MAGMA_ENABLED()
// RAII for a MAGMA Queue
struct MAGMAQueue {
  // A default constructor without a device would lead to destroying a queue
  // that has never been initialized.
MAGMAQueue() = delete;
// Constructor
explicit MAGMAQueue(int64_t device_id) {
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
#if !defined(USE_ROCM)
    // MAGMA operations are numerically sensitive, so TF32 should be off
    // regardless of the global flag.
TORCH_CUDABLAS_CHECK(cublasGetMathMode(handle, &original_math_mode));
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
#endif
magma_queue_create_from_cuda(
device_id,
at::cuda::getCurrentCUDAStream(),
handle,
at::cuda::getCurrentCUDASparseHandle(),
&magma_queue_);
}
// Getter
magma_queue_t get_queue() const { return magma_queue_; }
// Destructor
~MAGMAQueue() {
#if !defined(USE_ROCM)
    // We've manually set the math mode to CUBLAS_DEFAULT_MATH; now we
    // should restore the original math mode.
cublasHandle_t handle = magma_queue_get_cublas_handle(magma_queue_);
cublasSetMathMode(handle, original_math_mode);
#endif
magma_queue_destroy(magma_queue_);
}
private:
magma_queue_t magma_queue_;
#if !defined(USE_ROCM)
cublasMath_t original_math_mode;
#endif
};
static inline magma_int_t magma_int_cast(int64_t value, const char* varname) {
auto result = static_cast<magma_int_t>(value);
if (static_cast<int64_t>(result) != value) {
AT_ERROR("magma: The value of ", varname, "(", (long long)value,
") is too large to fit into a magma_int_t (", sizeof(magma_int_t), " bytes)");
}
return result;
}
// MAGMA functions that don't take a magma_queue_t aren't stream safe
// Work around this by synchronizing with the default stream
struct MagmaStreamSyncGuard {
MagmaStreamSyncGuard() {
auto stream = at::cuda::getCurrentCUDAStream();
if (stream != at::cuda::getDefaultCUDAStream()) {
at::cuda::stream_synchronize(stream);
}
}
~MagmaStreamSyncGuard() noexcept(false) {
auto default_stream = at::cuda::getDefaultCUDAStream();
if (at::cuda::getCurrentCUDAStream() != default_stream) {
at::cuda::stream_synchronize(default_stream);
}
}
};
#endif
} // namespace native
} // namespace at
| 2,481
| 26.88764
| 91
|
h
|
null |
pytorch-main/aten/src/ATen/native/cudnn/ConvShared.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/cudnn/cudnn-wrapper.h>
#include <ATen/cudnn/Descriptors.h>
#include <ATen/cudnn/Types.h>
#include <ATen/native/ConvUtils.h>
#if CUDNN_VERSION < 8000
#define AT_CUDNN_CONV_BIAS_RELU_FALLBACK
#endif
namespace at { namespace native {
// ---------------------------------------------------------------------
//
// Helper classes
//
// ---------------------------------------------------------------------
// This POD struct is used to let us easily compute hashes of the
// parameters
struct ConvolutionParams
{
c10::DeviceIndex device_id;
cudnnDataType_t dataType;
int input_size[2 + max_dim];
uint8_t input_dim;
at::MemoryFormat memory_format;
int weight_size[2 + max_dim];
int padding[max_dim];
int stride[max_dim];
int dilation[max_dim];
int64_t groups;
bool deterministic;
bool allow_tf32;
// NB: transposed purposely omitted: transposed just swaps
  // forward and backward, so you can reuse the benchmark entry.
};
std::ostream& operator<<(std::ostream & out, const ConvolutionParams& params);
// NB: This can't be a constructor, because then ConvolutionParams
// would not be a POD anymore.
// TODO: Use TensorGeometry here instead of the entire Tensor, which we
// don't actually need. (OTOH: We can always pass in
// grad_input/grad_output, so this is not very pressing)
void setConvolutionParams(
ConvolutionParams* params,
const at::Tensor& input, const at::Tensor& weight,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation,
int64_t groups, bool deterministic, bool allow_tf32, at::MemoryFormat memory_format);
std::string repro_from_args(const ConvolutionParams& args);
// ---------------------------------------------------------------------
//
// Raw functions
//
// ---------------------------------------------------------------------
void raw_cudnn_convolution_forward_out(
const Tensor& output, const Tensor& input, const Tensor& weight,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic, bool allow_tf32);
void raw_cudnn_convolution_backward_input_out(
const at::Tensor& grad_input,
const at::Tensor& grad_output,
const at::Tensor& weight,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic, bool allow_tf32);
void raw_cudnn_convolution_backward_weight_out(
const Tensor& grad_weight, const Tensor& grad_output, const Tensor& input,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic, bool allow_tf32);
void raw_cudnn_convolution_add_relu_out(
const Tensor& output,
const Tensor& input,
const Tensor& weight,
const Tensor& z,
float alpha,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
int64_t groups,
bool benchmark,
bool deterministic,
bool allow_tf32);
void raw_cudnn_convolution_add_relu_fallback_out(
const Tensor& output,
const Tensor& input,
const Tensor& weight,
const Tensor& z,
float alpha,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
int64_t groups,
bool benchmark,
bool deterministic,
bool allow_tf32);
#if AT_CUDNN_ENABLED()
#include <ATen/native/cudnn/Macros.h>
#if HAS_CUDNN_V8()
// v7 functions are preserved here to allow for runtime switching to v7
// (e.g., TORCH_CUDNN_V8_API_DISABLED=1).
// Note that v7 forward/backward out can have different behavior from the v8
// versions, as v7 explicitly splits large tensors as a 32-bit indexing
// workaround whereas v8 expects cuDNN to handle large tensors.
void raw_cudnn_convolution_forward_out_v7(
const Tensor& output, const Tensor& input, const Tensor& weight,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic, bool allow_tf32);
void raw_cudnn_convolution_backward_input_out_v7(
const at::Tensor& grad_input,
const at::Tensor& grad_output,
const at::Tensor& weight,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic, bool allow_tf32);
void raw_cudnn_convolution_backward_weight_out_v7(
const Tensor& grad_weight, const Tensor& grad_output, const Tensor& input,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic, bool allow_tf32);
void raw_cudnn_convolution_add_relu_out_v7(
const Tensor& output,
const Tensor& input,
const Tensor& weight,
const Tensor& z,
float alpha,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
int64_t groups,
bool benchmark,
bool deterministic,
bool allow_tf32);
#endif
#endif
}}
| 4,960
| 31.214286
| 89
|
h
|
null |
pytorch-main/aten/src/ATen/native/cudnn/RNNUtils.h
|
#include <ATen/cudnn/cudnn-wrapper.h>
#include <ATen/cudnn/Descriptors.h>
#include <ATen/cudnn/Types.h>
#include <ATen/cudnn/Utils.h>
// Declares utilities used by RNN.cpp and also needed by external consumers
namespace at {
namespace native {
namespace cudnn_rnn {
TORCH_CUDA_CPP_API std::tuple<Tensor, std::vector<Tensor>>
copy_weights_to_flat_buf_views(
TensorList weight_arr,
int64_t weight_stride0,
int64_t input_size,
int64_t mode,
int64_t hidden_size,
int64_t proj_size,
int64_t num_layers,
bool batch_first,
bool bidirectional,
const cudnnDataType_t flat_buf_datatype,
const TensorOptions& flat_buf_options,
bool set_orig_weights_to_flat_buf,
bool allow_type_change = false,
bool include_bias = true);
} // namespace cudnn_rnn
} // namespace native
} // namespace at
| 834
| 25.935484
| 75
|
h
|
null |
pytorch-main/aten/src/ATen/native/metal/MetalCommandBuffer.h
|
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>
@protocol PTMetalCommandBuffer<NSObject>
@optional
- (void)beginSynchronization;
- (void)endSynchronization:(NSError*)error;
@end
@interface MetalCommandBuffer : NSObject
@property(nonatomic, strong, readonly) id<MTLCommandBuffer> buffer;
@property(nonatomic, assign, readonly) BOOL valid;
+ (MetalCommandBuffer*)newBuffer;
+ (MetalCommandBuffer*)currentBuffer;
- (void)addSubscriber:(id<PTMetalCommandBuffer>)subscriber;
- (void)removeSubscriber:(id<PTMetalCommandBuffer>)subscriber;
- (void)commit;
- (void)add:(MPSTemporaryImage*)image;
- (void)remove:(MPSTemporaryImage*)image;
@end
| 651
| 28.636364
| 67
|
h
|
null |
pytorch-main/aten/src/ATen/native/metal/MetalContext.h
|
#import <Foundation/Foundation.h>
#import <Metal/Metal.h>
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>
#include <string>
API_AVAILABLE(ios(11.0), macos(10.13))
@interface MetalContext : NSObject
@property(nonatomic, strong, readonly) id<MTLDevice> device;
@property(nonatomic, strong, readonly) id<MTLCommandQueue> commandQueue;
@property(nonatomic, strong, readonly) id<MTLLibrary> library;
+ (instancetype)sharedInstance;
- (BOOL)available;
- (id<MTLComputePipelineState>)pipelineState:(const std::string&)kernel;
- (id<MTLComputePipelineState>)specializedPipelineState:(const std::string&)kernel
Constants:(NSArray<NSNumber*>*)
constants;
- (id<MTLBuffer>)emptyMTLBuffer:(int64_t) size;
@end
| 819
| 38.047619
| 82
|
h
|
null |
pytorch-main/aten/src/ATen/native/metal/MetalConvParams.h
|
#ifndef MetalConvParams_h
#define MetalConvParams_h
#include <c10/util/ArrayRef.h>
namespace at {
namespace native {
namespace metal {
struct Conv2DParams final {
Conv2DParams() {}
Conv2DParams(
c10::IntArrayRef inputSizes,
c10::IntArrayRef weightSizes,
c10::IntArrayRef padding,
c10::IntArrayRef stride,
c10::IntArrayRef dilation,
int64_t groups);
std::vector<int64_t> output_sizes() const {
return {N, OC, OH, OW};
}
bool isDepthwise() const {
// Currently, only channel multiplier of 1 is supported
// i.e. inputFeatureChannels == outputFeatureChannels
return G > 1 && IC == 1 && OC == G && OC == C;
}
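  // Example: a depthwise conv with C == G == OC == 32 has IC == 1 (the
  // per-group input channel count taken from the weight shape), so
  // isDepthwise() returns true.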
int64_t N; // batch size
int64_t C; // channels
int64_t H; // input height
int64_t W; // input width
int64_t OC; // output channels
int64_t IC; // input channels
int64_t KH; // kernel height
int64_t KW; // kernel width
int64_t SY; // stride y (height)
int64_t SX; // stride x (width)
int64_t PY; // padding y (height)
int64_t PX; // padding x (width)
int64_t DY; // dilation y (height)
int64_t DX; // dilation x (width)
int64_t G; // groups
int64_t OW; // output width
int64_t OH; // output height
};
} // namespace metal
} // namespace native
} // namespace at
#endif /* MetalConvParams_h */
| 1,304
| 23.166667
| 59
|
h
|
null |
pytorch-main/aten/src/ATen/native/metal/MetalDevice.h
|
#ifndef PYTORCH_MOBILE_METAL_DEVICE_H_
#define PYTORCH_MOBILE_METAL_DEVICE_H_
#import <Metal/Metal.h>
#include <string>
namespace at {
namespace native {
namespace metal {
struct MetalDeviceInfo {
std::string name;
MTLLanguageVersion languageVersion;
};
static inline MetalDeviceInfo createDeviceInfo(id<MTLDevice> device) {
MetalDeviceInfo device_info;
if (device.name != nil) {
device_info.name = device.name.UTF8String;
}
if (@available(macOS 11.0, iOS 14.0, *)) {
device_info.languageVersion = MTLLanguageVersion2_3;
} else if (@available(macOS 10.15, iOS 13.0, *)) {
device_info.languageVersion = MTLLanguageVersion2_2;
} else if (@available(macOS 10.14, iOS 12.0, *)) {
device_info.languageVersion = MTLLanguageVersion2_1;
} else if (@available(macOS 10.13, iOS 11.0, *)) {
device_info.languageVersion = MTLLanguageVersion2_0;
} else if (@available(macOS 10.12, iOS 10.0, *)) {
device_info.languageVersion = MTLLanguageVersion1_2;
} else if (@available(macOS 10.11, iOS 9.0, *)) {
device_info.languageVersion = MTLLanguageVersion1_1;
}
#if ( \
defined(__IPHONE_9_0) && \
__IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_9_0) || \
(defined(__MAC_10_11) && __MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_11)
#else
#error "Metal is not available on the current platform."
#endif
return device_info;
}
}
}
}
#endif
| 1,462
| 28.26
| 76
|
h
|
null |
pytorch-main/aten/src/ATen/native/metal/MetalNeuronType.h
|
#ifndef MetalNeuronType_h
#define MetalNeuronType_h
#import <ATen/native/metal/mpscnn/MPSCNNNeuronOp.h>
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>
#include <ATen/ATen.h>
namespace at {
namespace native {
namespace metal {
enum class NeuronType {
None,
Clamp,
Relu,
Sigmoid,
HardSigmoid,
Tanh,
};
static inline NeuronType neuronType(
c10::optional<c10::Scalar> output_min,
c10::optional<c10::Scalar> output_max) {
float inf_max = std::numeric_limits<float>::infinity();
float inf_min = -std::numeric_limits<float>::infinity();
float output_max_ =
output_max.has_value() ? output_max.value().toFloat() : inf_max;
float output_min_ =
output_min.has_value() ? output_min.value().toFloat() : inf_min;
if (output_max_ == inf_max && output_min_ == 0) {
return NeuronType::Relu;
} else if (output_max_ < inf_max && output_min_ > inf_min) {
return NeuronType::Clamp;
} else {
return NeuronType::None;
}
}
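// Examples: (output_min=0, output_max=nullopt) maps to Relu,
// (output_min=0, output_max=6) maps to Clamp, and leaving both unset
// maps to None.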
static inline MPSCNNNeuron* neuron(NeuronType type) {
if (type == NeuronType::Relu) {
return [MPSCNNNeuronOp relu];
} else if (type == NeuronType::Sigmoid) {
return [MPSCNNNeuronOp sigmoid];
} else if (type == NeuronType::Tanh) {
return [MPSCNNNeuronOp tanh];
} else if (type == NeuronType::HardSigmoid) {
return [MPSCNNNeuronOp hardSigmoid];
} else {
return nil;
}
}
API_AVAILABLE(ios(11.3), macos(10.13), macCatalyst(13.0))
static inline MPSNNNeuronDescriptor* neuronDescriptor(NeuronType type) {
if (type == NeuronType::Relu) {
return [MPSCNNNeuronOpDescriptor reluDescriptor];
} else if (type == NeuronType::Sigmoid) {
return [MPSCNNNeuronOpDescriptor sigmoidDescriptor];
} else if (type == NeuronType::Tanh) {
return [MPSCNNNeuronOpDescriptor tanhDescriptor];
} else if (type == NeuronType::HardSigmoid) {
return [MPSCNNNeuronOpDescriptor hardSigmoidDescriptor];
} else {
return [MPSNNNeuronDescriptor cnnNeuronDescriptorWithType:MPSCNNNeuronTypeNone];
}
}
}
}
}
#endif /* MetalNeuronType_h */
| 2,045
| 26.648649
| 84
|
h
|
null |
pytorch-main/aten/src/ATen/native/metal/MetalPrepackOpContext.h
|
#pragma once
#include <ATen/Tensor.h>
#include <torch/custom_class.h>
namespace at {
namespace native {
namespace metal {
using SerializationTypeConv2dPrePack = std::tuple<
Tensor,
c10::optional<Tensor>,
std::vector<int64_t>,
std::vector<int64_t>,
std::vector<int64_t>,
int64_t,
c10::optional<Scalar>,
c10::optional<Scalar>>;
class Conv2dOpContext : public torch::jit::CustomClassHolder {
public:
SerializationTypeConv2dPrePack pack() {
return std::make_tuple(
weight_,
bias_,
stride_,
padding_,
dilation_,
groups_,
output_min_,
output_max_);
}
Conv2dOpContext() = delete;
Conv2dOpContext(
at::Tensor&& weight,
c10::optional<at::Tensor>&& bias,
std::vector<int64_t> stride,
std::vector<int64_t> padding,
std::vector<int64_t> dilation,
int64_t groups,
c10::optional<Scalar> output_min,
c10::optional<Scalar> output_max)
: weight_(std::move(weight)),
bias_(std::move(bias)),
stride_(std::move(stride)),
padding_(std::move(padding)),
dilation_(std::move(dilation)),
groups_(groups),
output_min_(std::move(output_min)),
output_max_(std::move(output_max)) {}
~Conv2dOpContext() override {
if (releaseCallback_) {
releaseCallback_(conv2dOp_);
}
}
void release_resources() override {
if (releaseCallback_) {
releaseCallback_(conv2dOp_);
}
}
const Tensor& get_weight() const {
return weight_;
}
const c10::optional<Tensor>& get_bias() const {
return bias_;
}
const std::vector<int64_t>& get_stride() const {
return stride_;
}
const std::vector<int64_t>& get_padding() const {
return padding_;
}
const std::vector<int64_t>& get_dilation() const {
return dilation_;
}
int64_t get_groups() const {
return groups_;
}
const c10::optional<Scalar>& get_output_min() const {
return output_min_;
}
const c10::optional<Scalar>& get_output_max() const {
return output_max_;
}
void set_conv2dOpPtr(void* ptr) {
conv2dOp_ = ptr;
}
void* get_conv2dOpPtr() const {
return conv2dOp_;
}
void set_releaseCallback(const std::function<void(void*)>& func) {
releaseCallback_ = func;
}
std::function<void(void*)>& get_releaseCallback() {
return releaseCallback_;
}
private:
Tensor weight_;
c10::optional<Tensor> bias_;
std::vector<int64_t> stride_;
std::vector<int64_t> padding_;
std::vector<int64_t> dilation_;
int64_t groups_;
c10::optional<Scalar> output_min_;
c10::optional<Scalar> output_max_;
std::function<void(void*)> releaseCallback_ = nullptr;
void* conv2dOp_ = nullptr; // reserved to hold MPSCNNConv2dOp objects
};
using SerializationTypeLinearPrePack = std::tuple<
Tensor,
c10::optional<Tensor>,
c10::optional<Scalar>,
c10::optional<Scalar>>;
class LinearOpContext : public torch::jit::CustomClassHolder {
public:
SerializationTypeLinearPrePack pack() {
return std::make_tuple(weight_, bias_, output_min_, output_max_);
}
LinearOpContext() = delete;
LinearOpContext(
at::Tensor&& weight,
c10::optional<at::Tensor>&& bias,
c10::optional<Scalar> output_min,
c10::optional<Scalar> output_max)
: weight_(std::move(weight)),
bias_(std::move(bias)),
output_min_(std::move(output_min)),
output_max_(std::move(output_max)) {}
~LinearOpContext() override {
if (releaseCallback_) {
releaseCallback_(opaqueOpPtr_);
}
}
void release_resources() override {
if (releaseCallback_) {
releaseCallback_(opaqueOpPtr_);
}
}
const Tensor& get_weight() const {
return weight_;
}
const c10::optional<Tensor>& get_bias() const {
return bias_;
}
const c10::optional<Scalar>& get_output_min() const {
return output_min_;
}
const c10::optional<Scalar>& get_output_max() const {
return output_max_;
}
void set_opaqueOpPtr(void* ptr) {
opaqueOpPtr_ = ptr;
}
void* get_opaqueOpPtr() const {
return opaqueOpPtr_;
}
void set_releaseCallback(const std::function<void(void*)>& func) {
releaseCallback_ = func;
}
std::function<void(void*)>& get_releaseCallback() {
return releaseCallback_;
}
private:
Tensor weight_;
c10::optional<Tensor> bias_;
c10::optional<Scalar> output_min_;
c10::optional<Scalar> output_max_;
void* opaqueOpPtr_ = nullptr; // reserved to hold MPSCNNFullyConnected objects
std::function<void(void*)> releaseCallback_ = nullptr;
};
} // namespace metal
} // namespace native
} // namespace at
| 4,687
| 22.093596
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/metal/MetalTensorImpl.h
|
#ifndef MetalTensorImpl_h
#define MetalTensorImpl_h
#include <ATen/OpaqueTensorImpl.h>
#include <ATen/WrapDimUtils.h>
#import <ATen/native/metal/MetalTensorImplStorage.h>
#import <ATen/native/metal/mpscnn/MPSImageWrapper.h>
namespace at {
template <typename OpaqueHandle>
struct TORCH_API MetalTensorImpl : public OpaqueTensorImpl<OpaqueHandle> {
MetalTensorImpl(
at::DispatchKeySet key_set,
const caffe2::TypeMeta& data_type,
c10::Device device,
OpaqueHandle opaque_handle,
c10::IntArrayRef sizes,
c10::IntArrayRef strides)
: OpaqueTensorImpl<OpaqueHandle>(
key_set,
data_type,
device,
opaque_handle,
sizes),
strides_(strides.vec()) {
}
// TODO: manually storing strides here is dumb
IntArrayRef strides_custom() const override {
return strides_;
}
c10::SymIntArrayRef sym_strides_custom() const override {
return c10::fromIntArrayRefKnownNonNegative(strides_);
}
bool is_contiguous_custom(c10::MemoryFormat memory_format) const override {
return true;
}
private:
const char* tensorimpl_type_name() const override {
return "MetalTensorImpl";
}
SmallVector<int64_t, 5> strides_;
};
} // namespace at
#endif /* MetalTensorImpl_h*/
| 1,290
| 23.826923
| 77
|
h
|
null |
pytorch-main/aten/src/ATen/native/metal/MetalTensorImplStorage.h
|
#include <ATen/Tensor.h>
#include <c10/util/ArrayRef.h>
namespace at {
namespace native {
namespace metal {
class MPSImageWrapper;
class MetalTensorImplStorage final {
class Impl;
public:
MetalTensorImplStorage(){};
MetalTensorImplStorage(const std::vector<int64_t>& sizes);
MetalTensorImplStorage(
const std::vector<int64_t>& sizes,
const std::vector<int64_t>& strides);
~MetalTensorImplStorage() = default;
MetalTensorImplStorage(MetalTensorImplStorage&&) = default;
MetalTensorImplStorage& operator=(MetalTensorImplStorage&&) = default;
MetalTensorImplStorage(const MetalTensorImplStorage&) = default;
MetalTensorImplStorage& operator=(const MetalTensorImplStorage&) = default;
friend std::ostream& operator<<(
std::ostream& output,
const MetalTensorImplStorage& mt);
bool defined() const;
IntArrayRef sizes() const;
IntArrayRef strides() const;
int64_t dim() const;
int64_t numel() const;
void set_data_from_host(const float* inputData);
void copy_data_to_host(float* host);
MPSImageWrapper* texture() const;
private:
std::shared_ptr<Impl> impl();
std::shared_ptr<const Impl> impl() const;
std::shared_ptr<Impl> _impl;
};
} // namespace metal
} // namespace native
} // namespace at
| 1,264
| 25.354167
| 77
|
h
|
null |
pytorch-main/aten/src/ATen/native/metal/MetalTensorUtils.h
|
#include <ATen/Tensor.h>
#include <ATen/native/metal/MetalContext.h>
#include <ATen/native/metal/MetalCommandBuffer.h>
#include <ATen/native/metal/MetalTensorImpl.h>
#include <ATen/native/metal/MetalTensorImplStorage.h>
#if (defined(__ARM_NEON__) || defined(__ARM_NEON))
typedef float16_t fp16_t;
#else
typedef uint16_t fp16_t;
#endif
namespace at {
namespace native {
namespace metal {
uint32_t batchSize(const Tensor& tensor);
uint32_t channelsSize(const Tensor& tensor);
uint32_t heightSize(const Tensor& tensor);
uint32_t widthSize(const Tensor& tensor);
// When copying the result back to a CPU tensor, the memory format becomes NCHW.
// Thus, we compute the strides based on the contiguous memory format.
static inline std::vector<int64_t> computeStrides(
const std::vector<int64_t>& sizes) {
const auto dim = sizes.size();
std::vector<int64_t> strides(dim, 0);
if (dim > 0) {
const auto last_idx = dim - 1;
strides[last_idx] = 1;
for (int64_t i = last_idx - 1; i >= 0; --i) {
strides[i] = strides[i + 1] * std::max<int64_t>(sizes[i + 1], 1);
}
}
return strides;
}
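// Worked example: computeStrides({2, 3, 4}) returns {12, 4, 1}, i.e. the
// strides of a contiguous (row-major) layout for those sizes.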
static inline MetalTensorImplStorage& getTensorImplStorage(
const at::Tensor& tensor) {
using MetalTensorImpl = at::MetalTensorImpl<MetalTensorImplStorage>;
TORCH_CHECK(tensor.is_metal());
MetalTensorImpl* impl =
static_cast<MetalTensorImpl*>(tensor.unsafeGetTensorImpl());
return impl->unsafe_opaque_handle();
}
static inline at::Tensor makeTensor(
MetalTensorImplStorage&& mt,
const TensorOptions& options) {
using MetalTensorImpl = at::MetalTensorImpl<MetalTensorImplStorage>;
auto sizes = mt.sizes(); // sizes is stored in TensorImpl
auto strides = mt.strides(); // strides is stored in MetalTensorImpl
return detail::make_tensor<MetalTensorImpl>(
DispatchKeySet(DispatchKey::Metal),
options.dtype(),
at::Device(at::kMetal),
std::move(mt),
std::vector<int64_t>(sizes.begin(), sizes.end()),
std::vector<int64_t>(strides.begin(), strides.end()));
}
static inline MetalCommandBuffer* getCommandBuffer(
const Tensor& tensor) {
TORCH_CHECK(tensor.is_metal());
auto implStorage = getTensorImplStorage(tensor);
MetalCommandBuffer* cmdBuffer = implStorage.texture()->commandBuffer();
if (!cmdBuffer || !cmdBuffer.valid) {
cmdBuffer = [MetalCommandBuffer currentBuffer];
}
return cmdBuffer;
}
} // namespace metal
} // namespace native
} // namespace at
| 2,449
| 31.236842
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/metal/mpscnn/MPSCNNConvOp.h
|
#import <ATen/native/metal/MetalConvParams.h>
#import <ATen/native/metal/MetalNeuronType.h>
#import <ATen/native/metal/mpscnn/MPSCNNOp.h>
#import <Foundation/Foundation.h>
API_AVAILABLE(ios(11.0), macos(10.13))
@interface MPSCNNConvDataSource : NSObject<MPSCNNConvolutionDataSource>
@property(nonatomic, assign) void* weights;
@property(nonatomic, assign) float* bias;
- (id)initWithWeights:(void*)weights
Bias:(float*)bias
Desc:(MPSCNNConvolutionDescriptor*)desc;
@end
using namespace at::native::metal;
API_AVAILABLE(ios(11.0), macos(10.13))
@interface MPSCNNConvOp : NSObject<MPSCNNOp>
+ (MPSCNNConvOp*)conv2d:(const Conv2DParams&)params
weights:(float*)w
bias:(float*)b
neuronFilter:(NeuronType)t;
@end
| 791
| 30.68
| 71
|
h
|
null |
pytorch-main/aten/src/ATen/native/metal/mpscnn/MPSCNNFullyConnectedOp.h
|
#import <ATen/native/metal/MetalConvParams.h>
#import <ATen/native/metal/MetalNeuronType.h>
#import <ATen/native/metal/mpscnn/MPSCNNConvOp.h>
#import <Foundation/Foundation.h>
using namespace at::native::metal;
API_AVAILABLE(ios(11.0), macos(10.13))
@interface MPSCNNFullyConnectedOp : NSObject<MPSCNNOp>
+ (MPSCNNFullyConnectedOp*)linear:(const Conv2DParams&)params
weights:(float*)w
bias:(float*)b
neuronFilter:(NeuronType)t;
@end
| 510
| 35.5
| 61
|
h
|
null |
pytorch-main/aten/src/ATen/native/metal/mpscnn/MPSCNNNeuronOp.h
|
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>
@interface MPSCNNNeuronOp : NSObject
+ (MPSCNNNeuronHardSigmoid*)hardSigmoid API_AVAILABLE(ios(11.0), macos(10.13));
+ (MPSCNNNeuronReLU*)relu;
+ (MPSCNNNeuronSigmoid*)sigmoid;
+ (MPSCNNNeuronTanH*)tanh;
@end
API_AVAILABLE(ios(11.3), macos(10.13), macCatalyst(13.0))
@interface MPSCNNNeuronOpDescriptor : NSObject
+ (MPSNNNeuronDescriptor*)hardSigmoidDescriptor;
+ (MPSNNNeuronDescriptor*)reluDescriptor;
+ (MPSNNNeuronDescriptor*)sigmoidDescriptor;
+ (MPSNNNeuronDescriptor*)tanhDescriptor;
@end
| 563
| 25.857143
| 79
|
h
|
null |
pytorch-main/aten/src/ATen/native/metal/mpscnn/MPSCNNOp.h
|
#import <Metal/Metal.h>
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>
@protocol MPSCNNOp<NSObject>
@property(nonatomic, strong) MPSCNNKernel* kernel;
- (void)encode:(id<MTLCommandBuffer>)cb
sourceImage:(MPSImage*)src
destinationImage:(MPSImage*)dst;
@end
@protocol MPSCNNShaderOp<NSObject>
+ (id<MPSCNNShaderOp>)newWithTextures:(NSArray<MPSImage*>*)textures
Args:(NSArray<NSNumber*>*)args;
- (void)encode:(id<MTLCommandBuffer>)cb;
@end
| 503
| 23
| 67
|
h
|
null |
pytorch-main/aten/src/ATen/native/metal/mpscnn/MPSCNNUtils.h
|
#import <Metal/Metal.h>
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>
#include <string>
// This is a utility macro that can be used to throw an exception when a Metal
// API function produces an NSError. The exception will contain a message with
// useful info extracted from the NSError.
#define METAL_THROW_IF_ERROR(error, preamble) \
do { \
if C10_LIKELY(error) { \
throw c10::Error( \
{__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, \
c10::str( \
preamble, \
" Error details: ", \
" Localized_description: ", error.localizedDescription.UTF8String, \
" Domain: ", error.domain.UTF8String, \
" Code: ", error.code, \
" User Info: ", error.userInfo.description.UTF8String)); \
} \
} while (false)
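// Usage sketch (illustrative; `device` and `src` are placeholders):
//   NSError* error = nil;
//   id<MTLLibrary> lib = [device newLibraryWithSource:src options:nil error:&error];
//   METAL_THROW_IF_ERROR(error, "Failed to compile Metal shader library.");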
namespace at {
namespace native {
namespace metal {
namespace mpscnn {
struct LaunchParams {
MTLSize threadsPerThreadgroup;
MTLSize threadgroupsPerGrid;
MTLSize threadsPerGrid; // iOS 11.0
};
API_AVAILABLE(ios(11.0), macos(10.13))
LaunchParams spatialPointwiseKernelLaunchParams(
id<MTLComputePipelineState> pipeline,
MPSImage* im);
API_AVAILABLE(ios(11.0), macos(10.13))
LaunchParams spatialPointwiseKernelLaunchParams(
id<MTLComputePipelineState> pipeline,
NSUInteger numberOfImages,
NSUInteger featureChannels,
NSUInteger height,
NSUInteger width);
API_AVAILABLE(ios(11.0), macos(10.13))
static inline std::string kernelFor(
MPSImage* image,
const std::string& arrayKernel,
const std::string& nonArrayKernel) {
if (image.featureChannels > 4 || image.numberOfImages > 1) {
return arrayKernel;
}
return nonArrayKernel;
}
static inline int computeMPSAlignOffset(int kernel, int pad) {
  // To set the offset, we can just match the top-left pixel (in the input
  // image, with negative values for padding) that we look at. For 3x3s1p1, we
  // look at the (-1, -1) pixel in the original impl. For 3x3s1p0, we look at
  // the (0, 0) pixel. For 3x3s1p2, we look at (-2, -2). MPSCNN always looks at
  // (-floor((kernel_size - 1) / 2), -floor((kernel_size - 1) / 2)). Thus, we
  // just need to match this up.
// For 3x3s1p1, offset should be (0, 0)
// For 3x3s1p0, offset should be (1, 1)
// For 3x3s1p2, offset should be (-1, -1)
const int mps_offset = kernel / 2;
const int pt_offset = pad;
return mps_offset - pt_offset;
}
}
} // namespace metal
} // namespace native
} // namespace at
| 3,051
| 38.128205
| 82
|
h
|
null |
pytorch-main/aten/src/ATen/native/metal/mpscnn/MPSImageUtils.h
|
#import <ATen/Tensor.h>
#import <ATen/native/metal/MetalCommandBuffer.h>
#import <ATen/native/metal/MetalTensorImpl.h>
#import <ATen/native/metal/MetalTensorUtils.h>
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>
namespace at {
namespace native {
namespace metal {
MPSImage* createStaticImage(IntArrayRef sizes);
MPSImage* createStaticImage(const float* src, const IntArrayRef sizes);
MPSImage* createStaticImage(
MPSTemporaryImage* image,
MetalCommandBuffer* buffer,
bool waitUntilCompleted);
MPSTemporaryImage* createTemporaryImage(
MetalCommandBuffer* buffer,
const IntArrayRef sizes);
MPSTemporaryImage* createTemporaryImage(
MetalCommandBuffer* buffer,
const IntArrayRef sizes,
const float* src);
MPSTemporaryImage* createTemporaryImage(
MetalCommandBuffer* buffer,
MPSImage* image);
void copyImageToFloatBuffer(float* dst, MPSImage* image);
void copyImageToMetalBuffer(
MetalCommandBuffer* buffer,
id<MTLBuffer> dst,
MPSImage* image);
static inline MPSImage* imageFromTensor(const Tensor& tensor) {
TORCH_CHECK(tensor.is_metal());
using MetalTensorImplStorage = at::native::metal::MetalTensorImplStorage;
using MetalTensorImpl = at::MetalTensorImpl<MetalTensorImplStorage>;
MetalTensorImpl* impl = (MetalTensorImpl*)tensor.unsafeGetTensorImpl();
MetalTensorImplStorage& implStorage = impl->unsafe_opaque_handle();
return implStorage.texture()->image();
}
/*
 MPSImage carries an IntList shape which is identical to the shape of the CPU
 tensor it’s converted from.
 1) 1D tensors (W,) are always stored as MPSImage(N=1, C=1, H=1, W=W).
 2) 2D tensors (H, W) are always stored as MPSImage(N=1, C=1, H=H, W=W).
 3) 3D tensors (C, H, W) are always stored as MPSImage(N=1, C=C, H=H, W=W).
 4) 4D tensors (N, C, H, W) are always stored as MPSImage(N=N, C=C, H=H, W=W).
 5) 5D tensors (T, N, C, H, W) are always stored as MPSImage(N=T*N, C=C, H=H, W=W).
 6) ...
*/
static inline std::vector<int64_t> computeImageSize(IntArrayRef sizes) {
std::vector<int64_t> imageSize(4, 1);
int64_t index = 3;
int64_t batch = 1;
for (int64_t i = sizes.size() - 1; i >= 0; i--) {
if (index != 0) {
imageSize[index] = sizes[i];
index--;
continue;
}
// For higher dimensional tensors,
// multiply rest of dims into imageSize[0]
batch *= sizes[i];
}
imageSize[0] = batch;
return imageSize;
}
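// Worked example: computeImageSize({2, 3, 4, 5, 6}) returns {6, 4, 5, 6},
// i.e. a 5D (T, N, C, H, W) tensor folds T*N into the batch slot, matching
// case 5) in the comment above.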
} // namespace metal
} // namespace native
} // namespace at
| 2,464
| 31.012987
| 77
|
h
|
null |
pytorch-main/aten/src/ATen/native/metal/mpscnn/MPSImageWrapper.h
|
#ifndef MPSImageWrapper_h
#define MPSImageWrapper_h
#import <ATen/native/metal/MetalCommandBuffer.h>
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>
#include <c10/util/ArrayRef.h>
namespace at {
namespace native {
namespace metal {
class API_AVAILABLE(ios(11.0), macos(10.13)) MPSImageWrapper {
public:
MPSImageWrapper(IntArrayRef sizes);
~MPSImageWrapper();
void copyDataFromHost(const float* inputData);
void copyDataToHost(float* hostData);
void allocateStorage(IntArrayRef sizes);
void allocateTemporaryStorage(
IntArrayRef sizes,
MetalCommandBuffer* commandBuffer);
void setCommandBuffer(MetalCommandBuffer* buffer);
MetalCommandBuffer* commandBuffer() const;
void setImage(MPSImage* image);
MPSImage* image() const;
id<MTLBuffer> buffer() const;
void synchronize();
void prepare();
void release();
private:
std::vector<int64_t> _imageSizes;
MPSImage* _image = nil;
id<MTLBuffer> _buffer = nil;
MetalCommandBuffer* _commandBuffer = nil;
id<PTMetalCommandBuffer> _delegate = nil;
};
} // namespace metal
} // namespace native
} // namespace at
#endif /* MPSImageWrapper_h */
| 1,149
| 25.136364
| 62
|
h
|
null |
pytorch-main/aten/src/ATen/native/metal/mpscnn/tests/MPSCNNTests.h
|
#ifndef MPSCNNTests_h
#define MPSCNNTests_h
bool test_synchronization();
bool test_copy_nchw_to_metal();
bool test_conv2d();
bool test_depthwiseConv();
bool test_max_pool2d();
bool test_max_pool2d_ceil();
bool test_max_pool2d_padding();
bool test_relu();
bool test_addmm();
bool test_add();
bool test_add_broadcast();
bool test_add_broadcast2();
bool test_sub();
bool test_sub_broadcast();
bool test_sub_broadcast2();
bool test_mul();
bool test_mul_broadcast();
bool test_mul_broadcast2();
bool test_div();
bool test_div_broadcast();
bool test_div_broadcast2();
bool test_t();
bool test_transpose();
bool test_transpose2();
bool test_transpose3();
bool test_view();
bool test_view2();
bool test_view3();
bool test_view4();
bool test_cat_dim0();
bool test_cat_dim0_nonarray();
bool test_cat_dim1_0();
bool test_cat_dim1_1();
bool test_cat_dim1_nonarray_0();
bool test_cat_dim1_nonarray_1();
bool test_log_softmax();
bool test_softmax();
bool test_sigmoid();
bool test_hardsigmoid();
bool test_hardswish_();
bool test_hardswish();
bool test_hardshrink_();
bool test_hardshrink();
bool test_leaky_relu_();
bool test_leaky_relu();
bool test_upsampling_nearest2d_vec();
bool test_upsampling_nearest2d_vec2();
bool test_adaptive_avg_pool2d();
bool test_hardtanh_();
bool test_hardtanh();
bool test_reshape();
bool test_mean_dim();
bool test_mean_dim2();
bool test_mean_dim3();
bool test_chunk();
bool test_chunk2();
bool test_chunk3();
bool test_reflection_pad2d();
#endif
| 1,469
| 21.96875
| 38
|
h
|
null |
pytorch-main/aten/src/ATen/native/metal/ops/MetalConvolution.h
|
#import <ATen/native/metal/MetalConvParams.h>
#import <ATen/native/metal/MetalPrepackOpContext.h>
#include <c10/util/ArrayRef.h>
namespace at {
namespace native {
namespace metal {
Tensor conv2d(
const Tensor& input,
const Tensor& weight,
const c10::optional<at::Tensor>& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
int64_t groups);
namespace prepack {
Tensor conv2d(const Tensor& input, Conv2dOpContext& context);
}
} // namespace metal
} // namespace native
} // namespace at
| 535
| 20.44
| 61
|
h
|
null |
pytorch-main/aten/src/ATen/native/mkl/LinearAlgebra.h
|
#pragma once
#include <ATen/native/TransposeType.h>
#include <c10/util/complex.h>
namespace at {
namespace native {
void mkl_gemm_batched(
TransposeType trans_A, TransposeType trans_B,
int batch_size, int M, int N, int K, float alpha,
const float** A, int lda, const float** B, int ldb, float beta,
float** C, int ldc);
void mkl_gemm_batched(
TransposeType trans_A, TransposeType trans_B,
int batch_size, int M, int N, int K, double alpha,
const double** A, int lda, const double** B, int ldb, double beta,
double** C, int ldc);
void mkl_gemm_batched(
TransposeType trans_A, TransposeType trans_B,
int batch_size, int M, int N, int K, c10::complex<float> alpha,
const c10::complex<float>** A, int lda, const c10::complex<float>** B, int ldb,
c10::complex<float> beta, c10::complex<float>** C, int ldc);
void mkl_gemm_batched(
TransposeType trans_A, TransposeType trans_B,
int batch_size, int M, int N, int K, c10::complex<double> alpha,
const c10::complex<double>** A, int lda, const c10::complex<double>** B, int ldb,
c10::complex<double> beta, c10::complex<double>** C, int ldc);
}} // namespace at::native
| 1,180
| 34.787879
| 85
|
h
|
null |
pytorch-main/aten/src/ATen/native/mkldnn/Common.h
|
#pragma once
#include <ATen/ATen.h>
#include <ATen/Config.h>
#if AT_MKLDNN_ENABLED()
#include <ideep/tensor.hpp>
namespace at {
namespace native {
namespace mkldnn {
struct ContextConv final {
ideep::tensor weight_packed_;
c10::optional<at::Tensor> at_bias_;
std::vector<int64_t> padding_;
std::vector<int64_t> stride_;
std::vector<int64_t> dilation_;
int64_t groups_;
ideep::attr_t attr_;
ContextConv() = delete;
ContextConv(
ideep::tensor&& weight_packed,
c10::optional<at::Tensor> at_bias,
std::vector<int64_t> padding,
std::vector<int64_t> stride,
std::vector<int64_t> dilation,
int64_t groups,
ideep::attr_t attr)
: weight_packed_(std::move(weight_packed)),
at_bias_(std::move(at_bias)),
padding_(padding),
stride_(stride),
dilation_(dilation),
groups_(groups),
attr_(attr) {}
};
} // namespace mkldnn
} // namespace native
} // namespace at
#endif // AT_MKLDNN_ENABLED()
| 1,000
| 20.297872
| 49
|
h
|
null |
pytorch-main/aten/src/ATen/native/mkldnn/ConvPrepack.h
|
#pragma once
#include <ATen/Tensor.h>
#include <ATen/native/mkldnn/Common.h>
#include <ATen/native/mkldnn/OpContext.h>
#if AT_MKLDNN_ENABLED()
namespace at {
namespace native {
namespace mkldnn {
namespace internal {
namespace convolution {
c10::intrusive_ptr<mkldnn::ConvOpContext> createConvPrePackOpContext(
Tensor weight,
c10::optional<Tensor> bias,
std::vector<int64_t> stride,
std::vector<int64_t> padding,
std::vector<int64_t> dilation,
int64_t groups,
std::vector<int64_t> input_size,
std::string attr);
Tensor conv_run(
const Tensor& input,
const c10::intrusive_ptr<mkldnn::ConvOpContext>& op_context);
ContextConv create(
const Tensor& weight,
const c10::optional<Tensor>& bias,
const IntArrayRef padding,
const IntArrayRef stride,
const IntArrayRef dilation,
const int64_t groups,
const IntArrayRef input_size,
const ideep::attr_t& attr);
Tensor run(ContextConv& context, const Tensor& input);
void run(ContextConv& context, const Tensor& input, void* output);
} // namespace convolution
} // namespace internal
} // namespace mkldnn
} // namespace native
} // namespace at
#endif // AT_MKLDNN_ENABLED()
| 1,197
| 22.96
| 69
|
h
|
null |
pytorch-main/aten/src/ATen/native/mkldnn/MKLDNNCommon.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/Config.h>
#if AT_MKLDNN_ENABLED()
#include <ideep.hpp>
namespace at { namespace native {
// Mapping ScalarType to ideep tensor data_type
TORCH_API ideep::tensor::data_type get_mkldnn_dtype(ScalarType type);
static inline ideep::tensor::data_type get_mkldnn_dtype(const Tensor& t) {
return get_mkldnn_dtype(t.scalar_type());
}
// Construct aten MKL-DNN tensor given an ideep tensor
TORCH_API Tensor new_with_itensor_mkldnn(ideep::tensor&& it, c10::optional<ScalarType> dtype, c10::optional<Device> device);
// Retrieve `ideep::tensor` from MKL-DNN tensor
TORCH_API ideep::tensor& itensor_from_mkldnn(const Tensor& mkldnn_tensor);
// Construct an `ideep::tensor` "view" from a dense tensor; note the
// ideep::tensor will share the underlying buffer
TORCH_API ideep::tensor itensor_view_from_dense(const Tensor& tensor);
// Helper function for getting an ideep tensor out of an aten Tensor or MKL-DNN tensor.
TORCH_API ideep::tensor itensor_from_tensor(const Tensor& tensor);
// Set MKLDNN verbose level
TORCH_API int set_verbose(int level);
}}
#endif // AT_MKLDNN_ENABLED
| 1,140
| 30.694444
| 124
|
h
|
null |
pytorch-main/aten/src/ATen/native/mkldnn/Matmul.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/Config.h>
#include <ATen/native/LinearAlgebraUtils.h> // For TransposeType
namespace at { namespace native {
// result = beta * result + alpha * gemm(mat1, mat2)
TORCH_API void mkldnn_matmul(
const Tensor &mat1,
const Tensor &mat2,
const Tensor &result,
float beta=1,
float alpha=1);
bool use_mkldnn_bf16_matmul(
const Tensor& mat1,
const Tensor& mat2,
const Tensor& result_opt);
// Tries running the MKL-DNN optimized gemm; returns false if a naive gemm would be faster
bool mkldnn_bf16_gemm(
TransposeType transa, TransposeType transb,
int64_t m, int64_t n, int64_t k,
float alpha,
const c10::BFloat16 *a, int64_t lda,
const c10::BFloat16 *b, int64_t ldb,
float beta,
c10::BFloat16 *c, int64_t ldc);
}
}
| 846
| 23.2
| 84
|
h
|
null |
pytorch-main/aten/src/ATen/native/mkldnn/OpContext.h
|
#pragma once
#include <ATen/Tensor.h>
#include <ATen/core/ivalue.h>
#include <ATen/native/mkldnn/Common.h>
#if AT_MKLDNN_ENABLED()
namespace at {
namespace native {
namespace mkldnn {
const static std::map<std::string, ideep::attr_t> fusion_attr_map = {
{"none", ideep::attr_t()},
{"relu", ideep::attr_t::fuse_relu()},
};
using SerializationTypeConvPrePack = std::tuple<
Tensor,
c10::optional<Tensor>,
std::vector<int64_t>,
std::vector<int64_t>,
std::vector<int64_t>,
int64_t,
std::vector<int64_t>,
std::string>;
class ConvOpContext : public torch::jit::CustomClassHolder {
protected:
Tensor orig_weight_;
c10::optional<Tensor> orig_bias_;
std::vector<int64_t> stride_;
std::vector<int64_t> padding_;
std::vector<int64_t> dilation_;
int64_t groups_;
std::vector<int64_t> input_size_;
std::string attr_;
public:
SerializationTypeConvPrePack unpack() {
return std::make_tuple(
orig_weight_,
orig_bias_,
stride_,
padding_,
dilation_,
groups_,
input_size_,
attr_);
}
virtual Tensor run(const Tensor& input) = 0;
virtual void run(const Tensor& input, void* output) = 0;
};
class MkldnnConvOpContext final : public ConvOpContext {
private:
ContextConv op_context_;
public:
MkldnnConvOpContext(
Tensor&& weight,
c10::optional<Tensor>&& bias,
std::vector<int64_t>&& padding,
std::vector<int64_t>&& stride,
std::vector<int64_t>&& dilation,
uint64_t groups,
std::vector<int64_t>&& input_size,
ContextConv&& op_context)
: op_context_(std::move(op_context)) {
orig_weight_ = std::move(weight);
orig_bias_ = std::move(bias);
padding_ = std::move(padding);
stride_ = std::move(stride);
dilation_ = std::move(dilation);
groups_ = groups;
input_size_ = std::move(input_size);
}
Tensor run(const Tensor& input) override;
void run(const Tensor& input, void* output) override;
static c10::intrusive_ptr<ConvOpContext> create_context(
Tensor&& weight,
c10::optional<Tensor>&& bias,
std::vector<int64_t>&& padding,
std::vector<int64_t>&& stride,
std::vector<int64_t>&& dilation,
int64_t groups,
std::vector<int64_t>&& input_size,
const ideep::attr_t& attr);
};
} // namespace mkldnn
} // namespace native
} // namespace at
#endif // AT_MKLDNN_ENABLED()
| 2,425
| 23.26
| 69
|
h
|
null |
pytorch-main/aten/src/ATen/native/mkldnn/Utils.h
|
#pragma once
#include <ATen/Config.h>
#include <ATen/core/List.h>
#include <ATen/core/Tensor.h>
#include <c10/util/ArrayRef.h>
#include <cpuinfo.h>
#include <vector>
#if AT_MKLDNN_ENABLED()
#include <ideep/tensor.hpp>
#endif // AT_MKLDNN_ENABLED()
namespace at { namespace native {
std::tuple<Tensor, Tensor, Tensor> mkldnn_layer_norm_last_index_weight_bias_f32(
const Tensor& input,
IntArrayRef normalized_shape, const Tensor& weight, const Tensor& bias,
double eps, bool inplace = false);
std::vector<int64_t> pool_output_sizes(
IntArrayRef input_size,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding_l,
IntArrayRef padding_r,
IntArrayRef dilation,
bool ceil_mode);
void check_mkldnn_binary_fusion_inputs(
const Tensor& input,
const Tensor& other,
const Tensor& weight,
const Tensor& bias);
static inline std::vector<int64_t> padding_r(
IntArrayRef padding, IntArrayRef output_padding)
{
  // ConvTranspose padding adjustment
//
// PyTorch uses padding/output_padding:
// osize = (isize - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + output_padding + 1
//
// MKLDNN uses padding_l/padding_r:
// osize = (isize - 1) * stride - padding_l - padding_r + dilation * (kernel_size - 1) + 1
//
// So: padding_l = padding, padding_r = padding - output_padding
//
auto dim = padding.size();
std::vector<int64_t> pad_r(dim);
for (const auto d : c10::irange(dim)) {
pad_r[d] = padding[d] - output_padding[d];
}
return pad_r;
}
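// Worked example (illustrative): padding = {1, 2} and output_padding = {0, 1}
// give padding_l = {1, 2} and padding_r = {1, 1}, per the formulas above.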
#if AT_MKLDNN_ENABLED()
using AttrFunction = std::function<ideep::attr_t(
torch::List<c10::optional<at::Scalar>>,
c10::optional<c10::string_view>)>;
const std::map<c10::string_view, AttrFunction>& fusion_unary_attr_map();
const std::map<c10::string_view, ideep::algorithm>& fusion_unary_alg_map();
const std::map<c10::string_view, ideep::algorithm>& fusion_binary_alg_map();
#endif // AT_MKLDNN_ENABLED()
};
inline bool mkldnn_bf16_device_check() {
return cpuinfo_initialize() && ((cpuinfo_has_x86_avx512bw()
&& cpuinfo_has_x86_avx512vl() && cpuinfo_has_x86_avx512dq()) || (cpuinfo_has_arm_bf16()));
}
#if defined(__aarch64__)
inline bool mkldnn_bf16_device_check_arm() {
return (cpuinfo_initialize() && cpuinfo_has_arm_bf16());
}
#else
constexpr bool mkldnn_bf16_device_check_arm() {
return false;
}
#endif
}
| 2,388
| 26.147727
| 101
|
h
|
null |
pytorch-main/aten/src/ATen/native/mps/MPSGraphVenturaOps.h
|
#pragma once
#include <MetalPerformanceShadersGraph/MetalPerformanceShadersGraph.h>
// TODO: Remove me when moved to MacOS 13
@interface MPSGraph (VenturaOps)
#if !defined(__MAC_13_0) && \
(!defined(MAC_OS_X_VERSION_13_0) || (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_13_0))
typedef NS_ENUM(NSUInteger, MPSGraphResizeNearestRoundingMode)
{
MPSGraphResizeNearestRoundingModeRoundPreferCeil = 0L,
MPSGraphResizeNearestRoundingModeRoundPreferFloor = 1L,
MPSGraphResizeNearestRoundingModeCeil = 2L,
MPSGraphResizeNearestRoundingModeFloor = 3L,
MPSGraphResizeNearestRoundingModeRoundToEven = 4L,
MPSGraphResizeNearestRoundingModeRoundToOdd = 5L,
};
#endif
- (MPSGraphTensor * _Nonnull)cumulativeSumWithTensor:(MPSGraphTensor * _Nonnull)tensor
axis:(NSInteger)axis
name:(NSString * _Nullable)name;
- (MPSGraphTensor * _Nonnull)sortWithTensor:(MPSGraphTensor * _Nonnull)tensor
axis:(NSInteger)axis
name:(NSString * _Nullable)name;
- (MPSGraphTensor * _Nonnull) sortWithTensor:(MPSGraphTensor * _Nonnull) tensor
axis:(NSInteger) axis
descending:(BOOL) descending
name:(NSString * _Nullable) name;
- (MPSGraphTensor * _Nonnull) sortWithTensor:(MPSGraphTensor * _Nonnull) tensor
axisTensor:(MPSGraphTensor * _Nonnull) axisTensor
descending:(BOOL) descending
name:(NSString * _Nullable) name;
- (MPSGraphTensor * _Nonnull) sortWithTensor:(MPSGraphTensor * _Nonnull) tensor
axisTensor:(MPSGraphTensor * _Nonnull) axisTensor
name:(NSString * _Nullable) name;
- (MPSGraphTensor * _Nonnull)argSortWithTensor:(MPSGraphTensor * _Nonnull)tensor
axis:(NSInteger)axis
name:(NSString * _Nullable)name;
- (MPSGraphTensor * _Nonnull) argSortWithTensor:(MPSGraphTensor * _Nonnull) tensor
axis:(NSInteger) axis
descending:(BOOL) descending
name:(NSString * _Nullable) name;
- (MPSGraphTensor * _Nonnull) argSortWithTensor:(MPSGraphTensor * _Nonnull) tensor
axisTensor:(MPSGraphTensor * _Nonnull) axisTensor
descending:(BOOL) descending
name:(NSString * _Nullable) name;
- (MPSGraphTensor * _Nonnull) argSortWithTensor:(MPSGraphTensor * _Nonnull) tensor
axisTensor:(MPSGraphTensor * _Nonnull) axisTensor
name:(NSString * _Nullable) name;
- (MPSGraphTensor * _Nonnull)inverseOfTensor:(MPSGraphTensor * _Nonnull) inputTensor
name:(NSString * _Nullable)name;
- (MPSGraphTensor * _Nonnull) resizeNearestWithTensor:(MPSGraphTensor * _Nonnull) imagesTensor
sizeTensor:(MPSGraphTensor * _Nonnull) size
nearestRoundingMode:(MPSGraphResizeNearestRoundingMode) nearestRoundingMode
centerResult:(BOOL) centerResult
alignCorners:(BOOL) alignCorners
layout:(MPSGraphTensorNamedDataLayout) layout
name:(NSString * _Nullable) name;
- (MPSGraphTensor * _Nonnull) resizeNearestWithTensor:(MPSGraphTensor * _Nonnull) imagesTensor
sizeTensor:(MPSGraphTensor * _Nonnull) size
scaleOffsetTensor:(MPSGraphTensor * _Nonnull) scaleOffset
nearestRoundingMode:(MPSGraphResizeNearestRoundingMode) nearestRoundingMode
layout:(MPSGraphTensorNamedDataLayout) layout
name:(NSString * _Nullable) name;
- (MPSGraphTensor * _Nonnull) resizeBilinearWithTensor:(MPSGraphTensor * _Nonnull) imagesTensor
sizeTensor:(MPSGraphTensor * _Nonnull) size
centerResult:(BOOL) centerResult
alignCorners:(BOOL) alignCorners
layout:(MPSGraphTensorNamedDataLayout) layout
name:(NSString * _Nullable) name;
- (MPSGraphTensor * _Nonnull) resizeBilinearWithTensor:(MPSGraphTensor * _Nonnull) imagesTensor
sizeTensor:(MPSGraphTensor * _Nonnull) size
scaleOffsetTensor:(MPSGraphTensor * _Nonnull) scaleOffset
layout:(MPSGraphTensorNamedDataLayout) layout
name:(NSString * _Nullable) name;
- (MPSGraphTensor * _Nonnull) resizeNearestWithGradientTensor:(MPSGraphTensor * _Nonnull) gradient
input:(MPSGraphTensor * _Nonnull) input
nearestRoundingMode:(MPSGraphResizeNearestRoundingMode) nearestRoundingMode
centerResult:(BOOL) centerResult
alignCorners:(BOOL) alignCorners
layout:(MPSGraphTensorNamedDataLayout) layout
name:(NSString * _Nullable) name;
- (MPSGraphTensor * _Nonnull) resizeNearestWithGradientTensor:(MPSGraphTensor * _Nonnull) gradient
input:(MPSGraphTensor * _Nonnull) input
scaleOffsetTensor:(MPSGraphTensor * _Nonnull) scaleOffset
nearestRoundingMode:(MPSGraphResizeNearestRoundingMode) nearestRoundingMode
layout:(MPSGraphTensorNamedDataLayout) layout
name:(NSString * _Nullable) name;
- (MPSGraphTensor * _Nonnull) resizeBilinearWithGradientTensor:(MPSGraphTensor * _Nonnull) gradient
input:(MPSGraphTensor * _Nonnull) input
centerResult:(BOOL) centerResult
alignCorners:(BOOL) alignCorners
layout:(MPSGraphTensorNamedDataLayout) layout
name:(NSString * _Nullable) name;
- (MPSGraphTensor * _Nonnull) resizeBilinearWithGradientTensor:(MPSGraphTensor * _Nonnull) gradient
input:(MPSGraphTensor * _Nonnull) input
scaleOffsetTensor:(MPSGraphTensor * _Nonnull) scaleOffset
layout:(MPSGraphTensorNamedDataLayout) layout
name:(NSString * _Nullable) name;
- (MPSGraphTensor * _Nonnull) sampleGridWithSourceTensor:(MPSGraphTensor * _Nonnull) source
coordinateTensor:(MPSGraphTensor * _Nonnull) coordinates
layout:(MPSGraphTensorNamedDataLayout) layout
normalizeCoordinates:(BOOL) normalizeCoordinates
relativeCoordinates:(BOOL) relativeCoordinates
alignCorners:(BOOL) alignCorners
paddingMode:(MPSGraphPaddingMode) paddingMode
samplingMode:(MPSGraphResizeMode) samplingMode
constantValue:(double) constantValue
name:(NSString * _Nullable) name;
- (MPSGraphTensor * _Nonnull) sampleGridWithSourceTensor:(MPSGraphTensor * _Nonnull) source
coordinateTensor:(MPSGraphTensor * _Nonnull) coordinates
layout:(MPSGraphTensorNamedDataLayout) layout
normalizeCoordinates:(BOOL) normalizeCoordinates
relativeCoordinates:(BOOL) relativeCoordinates
alignCorners:(BOOL) alignCorners
paddingMode:(MPSGraphPaddingMode) paddingMode
nearestRoundingMode:(MPSGraphResizeNearestRoundingMode) nearestRoundingMode
constantValue:(double) constantValue
name:(NSString * _Nullable) name;
- (MPSGraphTensor * _Nonnull) truncateWithTensor:(MPSGraphTensor * _Nonnull) tensor
name:(NSString * _Nullable) name;
@end
| 9,522
| 64.675862
| 117
|
h
|
null |
pytorch-main/aten/src/ATen/native/mps/OperationUtils.h
|
// Copyright © 2022 Apple Inc.
#pragma once
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/Tensor.h>
#include <ATen/Utils.h>
#include <ATen/mps/MPSStream.h>
#include <ATen/native/mps/TensorFactory.h>
#include <c10/util/Optional.h>
#include <c10/core/ScalarType.h>
#include <torch/library.h>
#include <unordered_map>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/zeros.h>
#include <ATen/ops/zeros_like.h>
#endif
#ifdef __OBJC__
#include <MetalPerformanceShaders/MetalPerformanceShaders.h>
#endif
using namespace at::mps;
namespace at {
namespace native {
namespace mps {
struct MPSScalar {
id<MTLBuffer> getMTLBuffer() const { return __builtin_bit_cast(id<MTLBuffer>, buffer.get()); }
size_t size = 0;
ScalarType type = ScalarType::Undefined;
c10::DataPtr buffer; // stores MTLBuffer (frees buffer if MPSScalar instance goes out of scope)
union {
float f; // MPS doesn't support 'double'
at::Half h;
int64_t i;
bool b;
} value {};
};
void runMPSGraph(
MPSStream* mpsStream,
MPSGraph* mpsGraph,
NSDictionary* feeds,
NSDictionary* results);
MPSDataType getMPSDataType(ScalarType scalar_type);
static inline MPSDataType getMPSDataType(const Tensor& t) {
return getMPSDataType(t.scalar_type());
}
MPSDataType getMPSScalarType(ScalarType scalar_type);
static inline MPSDataType getMPSScalarType(const Tensor& t) {
return getMPSScalarType(t.scalar_type());
}
MPSScalar getMPSScalar(const Scalar& scalar, ScalarType type);
std::string getMPSTypeString(ScalarType scalar_type, bool short_name = false);
static inline std::string getMPSTypeString(const Tensor& t, bool short_name = false) {
return getMPSTypeString(t.scalar_type(), short_name);
}
std::string scalarToMetalTypeString(const c10::ScalarType& scalar_type);
NSArray<NSNumber*>* getTensorAxes(const Tensor& t);
NSArray<NSNumber*>* getTensorAxes(const IntArrayRef& sizes, at::OptionalIntArrayRef dim);
std::string getMPSShapeString(MPSShape* shape);
std::string getTensorsStringKey(const TensorList& tensors, bool short_dtype = true);
std::string getArrayRefString(const IntArrayRef s);
// use has_storage() on the returned tensor to determine if src actually is a view
Tensor gatherViewTensor(const at::Tensor& src, at::Tensor& dst);
Tensor& scatterViewTensor(const at::Tensor& src, at::Tensor& output);
bool canSliceViewTensor(const Tensor& src, MPSShape *mpsShape);
MPSGraphTensorData* getMPSGraphTensorDataForView(const Tensor& src, MPSShape *mpsShape, const MPSDataType mpsDataType);
MPSGraphTensor* castToIHFTypes(MPSGraph* mpsGraph, MPSGraphTensor* inputTensor, const Tensor& input, bool includesInt64 = false);
MPSGraphTensor* castFromIHFTypes(MPSGraph* mpsGraph, MPSGraphTensor* inputTensor, const Tensor& input, bool includesInt64 = false);
// The MPSShape could vary based on memory format
MPSShape* getMPSShape(const Tensor& t, c10::MemoryFormat memory_format = MemoryFormat::Contiguous);
MPSShape* getMPSShape(IntArrayRef sizes, c10::MemoryFormat memory_format = MemoryFormat::Contiguous);
static inline id<MTLBuffer> getMTLBufferStorage(const at::Tensor& tensor) {
return __builtin_bit_cast(id<MTLBuffer>, tensor.storage().data());
}
class Placeholder {
public:
Placeholder() : _placeholder(nullptr), _value(nullptr), _tensor(Tensor()) {}
Placeholder(MPSGraphTensor* mpsGraphTensor) : _placeholder(mpsGraphTensor), _value(nullptr), _tensor(Tensor()) {}
Placeholder(MPSGraphTensor* mpsGraphTensor, const Tensor& self, MPSShape *mpsShape = nullptr,
bool gatherTensorData = true, MPSDataType dataType = MPSDataTypeInvalid);
MPSGraphTensor* getMPSGraphTensor() {
return _placeholder;
}
MPSGraphTensorData* getMPSGraphTensorData() {
return _value;
}
bool isIntermediate() {
return _value == nullptr;
}
private:
MPSGraphTensor* _placeholder;
MPSGraphTensorData* _value;
Tensor _tensor;
};
void resize_tensor(Tensor* output);
Tensor wrapped_scalar_tensor_mps(const Scalar& scalar, const Device device);
MPSGraphTensor* trunc_tensor(MPSGraph* mpsGraph, MPSGraphTensor* inputTensor);
MPSGraphTensor* convertNHWCtoNCHW(MPSGraph *mpsGraph, MPSGraphTensor* tensor);
MPSGraphTensor* castMPSTensor(MPSGraph *mpsGraph, MPSGraphTensor* tensor, ScalarType toType);
MPSGraphTensor* castMPSTensor(MPSGraph *mpsGraph, MPSGraphTensor* tensor, MPSDataType toType);
MPSGraphTensorData *getMPSGraphTensorData(MPSGraph* mpsGraph, MPSStream* mpsStream, const Tensor& tensor);
MPSGraphTensorData* getMPSGraphTensorFromScalar(MPSStream* mpsStream, MPSScalar& scalar);
MPSGraph* make_mps_graph();
void printTensorNDArray(const Tensor& t);
MPSNDArray* ndArrayFromTensor(const Tensor& tensor, MPSShape *shape, MPSDataType mpsType);
MPSGraphTensor* mpsGraphUnrankedPlaceHolder(MPSGraph *mpsGraph, MPSDataType dataType);
MPSGraphTensor* mpsGraphRankedPlaceHolder(MPSGraph *mpsGraph, MPSDataType dataType, MPSShape* mpsShape);
MPSGraphTensor* mpsGraphRankedPlaceHolder(MPSGraph *mpsGraph, const Tensor& tensor);
MPSGraphTensor* mpsGraphScalarPlaceHolder(MPSGraph *mpsGraph, MPSDataType dataType);
MPSGraphTensor* mpsGraphScalarPlaceHolder(MPSGraph *mpsGraph, const Scalar& scalar);
string get_mem_format_string(c10::MemoryFormat memory_format);
using MPSCacheKey = uint64_t;
// derive this class to cache a graph and its inputs/outputs
// can be used to store any NSObject
struct MPSCachedGraph
{
MPSCachedGraph(NSObject *object) : _object([object retain]) {}
virtual ~MPSCachedGraph() {
[_object release];
_object = nullptr;
}
template<typename T>
inline T* as() {
return static_cast<T*>(this);
}
MPSGraph *graph() const { return (MPSGraph *)_object; }
NSObject *object() const { return _object; }
private:
NSObject *_object = nullptr;
};
struct MPSUnaryCachedGraph : public MPSCachedGraph
{
MPSUnaryCachedGraph(MPSGraph *graph) : MPSCachedGraph(graph) {}
MPSGraphTensor *inputTensor_ = nil;
MPSGraphTensor *outputTensor_ = nil;
};
struct MPSUnaryGradCachedGraph : public MPSCachedGraph
{
MPSUnaryGradCachedGraph(MPSGraph *graph) : MPSCachedGraph(graph) {}
MPSGraphTensor *gradOutputTensor_ = nil;
MPSGraphTensor *inputTensor_ = nil;
MPSGraphTensor *outputTensor_ = nil; // some backward input is actually the forward's output
MPSGraphTensor *gradInputTensor_ = nil;
};
struct MPSBinaryCachedGraph : public MPSCachedGraph
{
MPSBinaryCachedGraph(MPSGraph *graph) : MPSCachedGraph(graph) {}
MPSGraphTensor *inputTensor_ = nil;
MPSGraphTensor *otherTensor_ = nil;
MPSGraphTensor *outputTensor_ = nil;
};
struct MPSBinaryGradCachedGraph : public MPSCachedGraph
{
MPSBinaryGradCachedGraph(MPSGraph *graph) : MPSCachedGraph(graph) {}
MPSGraphTensor *gradOutputTensor_ = nil;
MPSGraphTensor *inputTensor_ = nil;
MPSGraphTensor *otherTensor_ = nil;
MPSGraphTensor *gradInputTensor_ = nil;
};
// TODO: Improve the overall design of MPSGraphCache.
// https://github.com/pytorch/pytorch/issues/77176
// Cache holding various keys mapped to graphs
struct MPSGraphCache
{
typedef MPSCachedGraph * (^CreateCachedGraphBlock)();
struct CacheEntry {
CacheEntry(const std::string& key, MPSCachedGraph *cachedGraph) : cachedGraph_(cachedGraph), key_(key) {}
MPSCachedGraph* cachedGraph_ = nullptr;
std::string key_;
};
public:
static MPSGraphCache* getInstance() {
if(_instance_cache == nullptr) {
_instance_cache = new MPSGraphCache();
}
return _instance_cache;
}
~MPSGraphCache() {
dispatch_release(serialQueue_);
for (const auto& i : cache_) {
delete i.second.cachedGraph_;
}
}
// Disallow the copy constructor and operator= functions
MPSGraphCache(const MPSGraphCache&) = delete;
void operator=(const MPSGraphCache&) = delete;
MPSCachedGraph* CreateCachedGraph(const std::string& key, CreateCachedGraphBlock createCacheBlock) {
__block MPSCachedGraph* cachedGraph = nil;
MPSCacheKey hash = std::hash<std::string>{}(key);
dispatch_sync(serialQueue_, ^() {
      // check whether an entry for this key already exists
if (cache_.count(hash) != 0) {
auto& entry = cache_.at(hash);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(key == entry.key_, "Key collision in the MPS cached graph!\n");
cachedGraph = entry.cachedGraph_;
} else {
cachedGraph = createCacheBlock();
CacheEntry entry(key, cachedGraph);
cache_.emplace(hash, entry);
profileCachedGraph(entry);
}
});
return cachedGraph;
}
template<typename T>
inline T* CreateCachedGraphAs(const std::string& key, CreateCachedGraphBlock createCacheBlock) {
return static_cast<T *>(CreateCachedGraph(key, createCacheBlock));
}
MPSCachedGraph* LookUp(const std::string& key) const {
__block MPSCachedGraph* cachedGraph = nullptr;
MPSCacheKey hash = std::hash<std::string>{}(key);
dispatch_sync(serialQueue_, ^() {
if (cache_.count(hash) != 0) {
auto& entry = cache_.at(hash);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(key == entry.key_, "Key collision in the MPS cached graph!\n");
cachedGraph = entry.cachedGraph_;
profileCachedGraph(entry);
}
});
return cachedGraph;
}
template<typename T>
inline T* LookUpAs(const std::string& key) const {
return static_cast<T *>(LookUp(key));
}
private:
MPSGraphCache() {
serialQueue_ = dispatch_queue_create("cache queue", DISPATCH_QUEUE_SERIAL);
}
// this is defined in OperationUtils.mm to not include
// MPSProfiler.h in header OperationUtils.h
void profileCachedGraph(const CacheEntry& cacheEntry) const;
static MPSGraphCache* _instance_cache;
std::unordered_map<MPSCacheKey, CacheEntry> cache_;
dispatch_queue_t serialQueue_ = nullptr;
};
// Common template for creating graph with a specified cache if missing
template<typename T>
inline T* LookUpOrCreateCachedGraph(const std::string& key, std::function<void(MPSGraph*, T*)> instantiate) {
auto cache_ = MPSGraphCache::getInstance();
if (auto rc = cache_->LookUpAs<T>(key)) {
return rc;
}
return cache_->CreateCachedGraphAs<T>(key, ^mps::MPSCachedGraph*() {
T* newCachedGraph = nil;
@autoreleasepool {
// Initialize graph
auto mpsGraph = mps::make_mps_graph();
newCachedGraph = new T(mpsGraph);
instantiate(mpsGraph, newCachedGraph);
}
return newCachedGraph;
});
}
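// Usage sketch (illustrative; graph wiring elided). The cached-graph type and
// placeholder helper are the ones declared above in this header:
//   auto* cachedGraph = LookUpOrCreateCachedGraph<MPSUnaryCachedGraph>(
//       key, [&](MPSGraph* mpsGraph, MPSUnaryCachedGraph* newCachedGraph) {
//         newCachedGraph->inputTensor_ = mpsGraphRankedPlaceHolder(mpsGraph, self);
//         // ... build newCachedGraph->outputTensor_ from inputTensor_ ...
//       });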
// Common math operations
MPSGraphTensor* log1p(MPSGraph* mpsGraph, MPSGraphTensor* inputTensor);
#define MPS_CHECK_INT64_OP_SUPPORTED(input_tensor, mac_os_13_3_plus, op_name) \
if (!mac_os_13_3_plus && input_tensor.scalar_type() == kLong) { \
TORCH_WARN_ONCE("MPS: no support for int64 for ", op_name, \
", downcasting to a smaller data type (int32/float32). Native support for int64 has been added in macOS 13.3."); \
}
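// Illustrative use (sketch; `macOS13_3_plus` is a hypothetical bool the caller
// derives from the runtime OS version):
//   MPS_CHECK_INT64_OP_SUPPORTED(self, macOS13_3_plus, "cumsum");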
/**
* Returns distance from lowest to highest element offset in given tensor.
*/
size_t compute_storage_numel_distance(const at::Tensor& t);
/**
* Checks whether tensor is mapped to a contiguous area in the storage.
*/
inline bool is_dense_in_storage(const at::Tensor& t) {
return compute_storage_numel_distance(t) == static_cast<size_t>(t.numel());
}
} // namespace mps
} // namespace native
} // namespace at
| 11,499
| 33.743202
| 131
|
h
|
null |
pytorch-main/aten/src/ATen/native/mps/TensorFactory.h
|
// Copyright © 2022 Apple Inc.
#define AT_DISPATCH_MPS_TYPES(TYPE, NAME, ...) \
AT_DISPATCH_SWITCH( \
TYPE, NAME, \
AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \
AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) \
AT_DISPATCH_CASE(at::ScalarType::Long, __VA_ARGS__) \
AT_DISPATCH_CASE(at::ScalarType::Int, __VA_ARGS__) \
AT_DISPATCH_CASE(at::ScalarType::Short, __VA_ARGS__) \
AT_DISPATCH_CASE(at::ScalarType::Char, __VA_ARGS__) \
AT_DISPATCH_CASE(at::ScalarType::Byte, __VA_ARGS__))
| 758
| 57.384615
| 73
|
h
|
null |
pytorch-main/aten/src/ATen/native/nested/NestedTensorMath.h
|
#pragma once
#include <ATen/core/ATen_fwd.h>
#include <ATen/NestedTensorImpl.h>
#include <c10/macros/Macros.h>
namespace at {
namespace native {
TORCH_API Tensor NestedTensor_to_padded_tensor_generic(
const Tensor& t,
double padding,
OptionalIntArrayRef output_size);
template <typename Func>
Tensor map_nt(const Tensor& nt, Func f) {
auto* nt_impl = get_nested_tensor_impl(nt);
const auto& sizes = nt_impl->get_nested_sizes();
return at::detail::make_tensor<NestedTensorImpl>(f(nt_impl->get_buffer()), sizes);
}
template <typename Func>
Tensor map_nt_binary(const Tensor& nt_1, const Tensor& nt_2, Func f){
auto* nt_impl_1 = get_nested_tensor_impl(nt_1);
auto* nt_impl_2 = get_nested_tensor_impl(nt_2);
const auto& sizes = nt_impl_1->get_nested_sizes();
return at::detail::make_tensor<NestedTensorImpl>(f(nt_impl_1->get_buffer(), nt_impl_2->get_buffer()), sizes);
}
C10_ALWAYS_INLINE std::pair<int64_t, int64_t> _check_nested_layer_norm_inputs(
const NestedTensorImpl& input,
IntArrayRef normalized_shape,
const Tensor& weight /* optional */,
const Tensor& bias /* optional */) {
const size_t normalized_ndim = normalized_shape.size();
TORCH_CHECK(
normalized_ndim >= 1,
"Expected normalized_shape to be at least 1-dimensional, i.e., ",
"containing at least one element, but got normalized_shape = ",
normalized_shape);
TORCH_CHECK(
!weight.defined() || weight.sizes().equals(normalized_shape),
"Expected weight to be of same shape as normalized_shape, but got ",
"weight of shape ",
weight.sizes(),
" and normalized_shape = ",
normalized_shape);
TORCH_CHECK(
!bias.defined() || bias.sizes().equals(normalized_shape),
"Expected bias to be of same shape as normalized_shape, but got ",
"bias of shape ",
bias.sizes(),
" and normalized_shape = ",
normalized_shape);
// Check that the normalized_shape has the exact same sizes as the last dimensions from the NestedTensor input
  // Also, compute M and N considering the idiosyncrasies of NestedTensors
int64_t N = 1;
for (const auto i: c10::irange(normalized_ndim)) {
TORCH_CHECK(
input.opt_size(-normalized_ndim + i) != c10::nullopt,
"normalized_shape extends into irregular dimensions for the nested tensor"
);
TORCH_CHECK(
normalized_shape[i] == *input.opt_size(-normalized_ndim + i),
"The shape at dimension ",
i,
"of normalized_shape doesn't match the input"
);
N *= normalized_shape[i];
}
const int64_t M = input.numel() / N;
return std::make_pair(M, N);
}
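// Illustrative: for a nested input whose constituents all end in a regular
// dimension of size D and normalized_shape = {D}, the returned pair is
// (M, N) = (input.numel() / D, D).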
} // namespace native
} // namespace at
| 2,676
| 32.4625
| 112
|
h
|
null |
pytorch-main/aten/src/ATen/native/nested/NestedTensorTransformerFunctions.h
|
/**
* Transformer-specific NestedTensor utility functions.
*
* Not co-located with NestedTensor core code yet because they only
* support specific cases needed in transformers.
*/
#pragma once
#include <vector>
#include <c10/macros/Macros.h>
#include <c10/util/Optional.h>
namespace c10 {
class Scalar;
} // namespace c10
namespace at {
class Tensor;
namespace native {
struct NestedTensorImpl;
// Requires that self is a contiguous NestedTensor, other is not a
// NestedTensor, self.dim() == 3, and other.dim() == 2. Also, self
// must have a consistent last dimension across its included Tensors
// and that dimension must match other.size(0).
Tensor NestedTensor_matmul(const Tensor& self, const Tensor& other);
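// Illustrative shapes: `self` holds B matrices of shape [*_i, K] (a consistent
// last dimension K equal to other.size(0)), `other` is a regular [K, N] tensor,
// and the result is a nested tensor of B matrices of shape [*_i, N].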
// Requires that mat1 is a contiguous NestedTensor, self & mat2 are
// not NestedTensors, mat1.dim() == 3, mat2.dim() == 2, and that mat1
// has a consistent last dimension across its included Tensors that
// matches mat2.size(0).
Tensor NestedTensor_times_Tensor_plus_Tensor_addmm(
const Tensor& self,
const Tensor& mat1,
const Tensor& mat2,
const c10::Scalar& beta,
const c10::Scalar& alpha,
c10::optional<bool> use_gelu = c10::nullopt);
Tensor NestedTensor_add_NestedTensor_in_place(
const Tensor& self,
const Tensor& other);
TORCH_API Tensor NestedTensor_batch_offsets_from_size_tensor(
const Tensor& sizes,
int64_t extra_elements);
Tensor NestedTensor_from_padded_tensor_cpu(
const Tensor& padded,
const NestedTensorImpl& nt);
Tensor NestedTensor_to_mask(const Tensor& nt, c10::optional<int64_t> mask_dim, c10::optional<int64_t> mask_dim_length);
template <typename T>
void remove_padding_kernelLauncher(
const T* input,
T* output,
const int* offsets,
const int* input_sizes,
const int* output_sizes,
int output_dim,
const int batch_size);
template <typename T>
void remove_padding_transform0213_kernelLauncher(
const T* input,
T* output,
const int* offsets,
const int* input_sizes,
const int* output_sizes,
int output_dim,
const int batch_size);
template <typename T>
void add_padding_kernelLauncher(
T* input,
T* output,
T padding_value,
const int* offsets,
const int* input_sizes,
int input_dim,
const std::vector<int64_t>& output_sizes,
const int batch_size,
const int output_batch_size);
TORCH_API Tensor flash_attention_helper(
const Tensor& query,
const Tensor& key,
const Tensor& value,
double dropout_p,
bool need_attn_weights,
bool is_causal);
TORCH_API std::tuple<Tensor, Tensor> mem_efficient_helper_nested_unpacked(
const Tensor& query,
const Tensor& key,
const Tensor& value,
double dropout_p,
bool need_attn_weights,
bool is_causal);
} // namespace native
} // namespace at
| 2,817
| 26.096154
| 119
|
h
|
null |
pytorch-main/aten/src/ATen/native/nested/NestedTensorUtils.h
|
#pragma once
#include <ATen/Dispatch.h>
#include <ATen/NestedTensorImpl.h>
#include <ATen/Parallel.h>
#include <ATen/core/Tensor.h>
#include <c10/core/DispatchKeySet.h>
#include <c10/core/TensorImpl.h>
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/cat.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/ones_native.h>
#include <ATen/ops/prod.h>
#include <ATen/ops/stack_native.h>
#include <ATen/ops/tensor.h>
#endif
#include <utility>
#include <vector>
namespace at {
namespace native {
struct NestedTensorImpl;
// The following functions are used to construct nested tensors from buffers and
// metadata.
inline at::Tensor wrap_buffer(at::Tensor buffer, at::Tensor nested_sizes) {
TORCH_CHECK(
buffer.dim() == 1,
"Expected given buffer to be 1dim, but got ",
buffer.dim(),
" instead.");
TORCH_CHECK(
buffer.is_contiguous(), "Expected given buffer to be contiguous.");
return at::detail::make_tensor<NestedTensorImpl>(
std::move(buffer), std::move(nested_sizes));
}
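// Illustrative (sketch): for two constituents of shapes {2, 3} and {4, 3},
// `buffer` is a contiguous 1-D tensor of 6 + 12 = 18 elements and
// `nested_sizes` is a 2x2 int64 tensor whose rows are {2, 3} and {4, 3}.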
// TODO: Figure out if we need a non-moving wrap_buffer()
inline at::Tensor wrap_buffer(
at::Tensor buffer,
at::Tensor nested_sizes,
at::Tensor nested_strides,
at::Tensor storage_offsets) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
buffer.is_contiguous(), "Given buffer must be contiguous.");
return at::detail::make_tensor<NestedTensorImpl>(
std::move(buffer),
std::move(nested_sizes),
std::move(nested_strides),
std::move(storage_offsets));
}
inline at::Tensor get_buffer(const at::Tensor& tensor) {
return get_nested_tensor_impl(tensor)->get_buffer();
}
/**
* Create a new nested tensor that is a view of a base nested tensor
*
 * create_nested_view_tensor calls a specialized constructor that copies
 * the keys from base onto the new view tensor being created.
* The storage is shared between the base and the returned view tensor
*
* All callers of this helper must:
* - Only return a view of the input
 * - Be explicit and define a derivative
*
* @param base Base tensor to construct view from.
* @param nested_sizes View tensors' sizes.
* @param nested_strides View tensors' strides.
* @param storage_offsets View tensors' offsets.
* @return A newly constructed view tensor
*/
inline at::Tensor create_nested_view_tensor(
const at::Tensor& base,
at::Tensor nested_sizes,
at::Tensor nested_strides,
at::Tensor storage_offsets) {
TORCH_INTERNAL_ASSERT(
base.is_nested(),
"This function can only be used to create nested tensor views");
TORCH_INTERNAL_ASSERT(
c10::impl::tls_local_dispatch_key_set().excluded_.has(
c10::DispatchKey::AutogradFunctionality),
"Creating a non differentiable nested tensor view in a CompositeImplicit function is not allowed.");
return at::detail::make_tensor<NestedTensorImpl>(
c10::TensorImpl::VIEW,
base,
nested_sizes,
nested_strides,
storage_offsets);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Helper functions for getting information about a nested tensor's shape.
int64_t get_consistent_last_dim_of_nested_tensor(const NestedTensorImpl& nt);
// The sizes of the underlying tensors
inline std::vector<IntArrayRef> NestedTensor_get_sizes(
const NestedTensorImpl* self_ptr) {
int64_t ntensors = self_ptr->size(0);
std::vector<IntArrayRef> sizes(ntensors);
if (ntensors == 0) {
return sizes;
}
const Tensor& sizemat = self_ptr->get_nested_sizes();
int64_t orig_dim = sizemat.size(1);
  // nesting scalars have empty sizes
if (orig_dim == 0) {
return sizes;
}
const int64_t* sizemat_ptr = sizemat.data_ptr<int64_t>();
for (const auto i : c10::irange(ntensors)) {
sizes[i] = IntArrayRef(sizemat_ptr, sizemat_ptr + orig_dim);
sizemat_ptr += orig_dim;
}
return sizes;
}
TORCH_API std::vector<int64_t> NestedTensor_get_max_size(
const NestedTensorImpl& nt);
std::vector<int64_t> NestedTensor_get_max_size_from_size_tensor(
const Tensor& sizes);
inline std::vector<IntArrayRef> NestedTensor_get_sizes(const at::Tensor& self) {
const NestedTensorImpl* self_ptr = get_nested_tensor_impl(self);
return NestedTensor_get_sizes(self_ptr);
}
// The strides of the underlying tensors
inline std::vector<IntArrayRef> NestedTensor_get_strides(
const NestedTensorImpl* self_ptr) {
int64_t ntensors = self_ptr->size(0);
std::vector<IntArrayRef> strides(ntensors);
if (ntensors == 0) {
return strides;
}
const Tensor& stridemat = self_ptr->get_nested_strides();
int64_t orig_dim = stridemat.size(1);
  // nesting scalars have empty strides
if (orig_dim == 0) {
return strides;
}
const int64_t* stridemat_ptr = stridemat.data_ptr<int64_t>();
for (const auto i : c10::irange(ntensors)) {
strides[i] = IntArrayRef(stridemat_ptr, stridemat_ptr + orig_dim);
stridemat_ptr += orig_dim;
}
return strides;
}
inline std::vector<IntArrayRef> NestedTensor_get_strides(
const at::Tensor& self) {
const NestedTensorImpl* self_ptr = get_nested_tensor_impl(self);
return NestedTensor_get_strides(self_ptr);
}
inline void check_numel_equals_buffer_size(const at::Tensor& self) {
auto self_impl = get_nested_tensor_impl(self);
TORCH_CHECK(
self.numel() == self_impl->get_buffer_size(),
"Number of elements in nested tensor must match number of elements in buffer.");
}
inline void check_numel_equals_buffer_size(const NestedTensorImpl* self_ptr) {
TORCH_CHECK(
self_ptr->numel() == self_ptr->get_buffer_size(),
"Number of elements in nested tensor must match number of elements in buffer.");
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Data structures and functions for generically applying a function on a nested
// tensor.
namespace impl {
template <typename T>
struct NestedNode {
NestedNode() = delete;
explicit NestedNode(std::vector<T>&& children)
: _is_leaf(false), _children(children) {}
explicit NestedNode(TensorList children)
: _is_leaf(false), _children(children.vec()) {}
// NestedNode(NestedNode&) = delete;
// NestedNode(const NestedNode&) = delete;
// NestedNode& operator=(NestedNode) = delete;
explicit NestedNode(T payload) : _is_leaf(true), _payload(std::move(payload)) {}
inline bool is_leaf() const {
return _is_leaf;
}
inline size_t degree() const {
return _children.size();
}
inline const std::vector<T> unbind() const {
return _children;
}
inline T children(size_t i) const {
return _children[i];
}
inline const T& payload() const {
return _payload;
}
inline T& payload() {
return _payload;
}
private:
bool _is_leaf;
std::vector<T> _children;
T _payload;
};
using TensorNode = NestedNode<at::Tensor>;
template <class F, class A, class TypeList>
class _map;
template <class F, class A, class... Args>
class _map<F, A, c10::guts::typelist::typelist<Args...>> {
public:
static A function_one(F&& fn, const Args&... nested_node) {
return std::forward<F>(fn)(nested_node...);
}
// NOTE: We must move F to avoid copying objects if it is a lambda with
// captures.
static NestedNode<A> function(
F&& fn,
const NestedNode<Args>&... nested_node) {
size_t degree = 0;
bool all_leaf = true;
c10::guts::tuple_map(
        std::forward_as_tuple(nested_node...), [&all_leaf, &degree](auto n) {
all_leaf = all_leaf && (n.is_leaf());
if (degree > 1 && n.degree() > 1) {
TORCH_CHECK(
degree == n.degree(), "NestedNodes must match in degree.");
}
if (n.degree() > degree) {
degree = n.degree();
}
return nullptr;
});
// All NestedNodes just wrap regular objects.
if (all_leaf) {
return NestedNode<A>(std::forward<F>(fn)(nested_node.payload()...));
}
// Some NestedNodes wrap regular Tensors, some NestedTensors and some other
// types.
std::vector<A> result;
for (size_t i = 0; i < degree; i++) {
std::tuple<Args...> children = c10::guts::tuple_map(
std::forward_as_tuple(nested_node...), [&i](auto a) {
static_assert(
c10::guts::is_instantiation_of<NestedNode, decltype(a)>::value,
"Internal error.");
// Broadcast regular arguments across NestedTensor constituents.
// This could be a Tensor, integer or anything else really.
if (a.is_leaf()) {
return a.payload();
}
// Broadcast NestedTensors with one constituent.
if (a.degree() == 1 && !a.is_leaf()) {
return a.children(0);
}
TORCH_CHECK(a.degree() > 0, "Internal assert.");
return a.children(i);
});
c10::guts::apply(
[&result, &fn](Args... filtered) {
result.emplace_back(function_one(std::forward<F>(fn), filtered...));
},
std::move(children));
}
return NestedNode<A>(std::move(result));
}
};
// TODO: Add static assert to verify lambda arguments match nested_node types
template <class F, class... B>
static inline NestedNode<
typename c10::guts::infer_function_traits<F>::type::return_type>
map(F&& fn, const NestedNode<B>&... nested_node) {
return _map<
F,
typename c10::guts::infer_function_traits<F>::type::return_type,
typename c10::guts::infer_function_traits<F>::type::parameter_types>::
function(std::forward<F>(fn), nested_node...);
}
inline TensorNode get_nested_tensor_structure(at::Tensor tensor) {
if (get_nested_tensor_impl_or_null(tensor) == nullptr) {
return TensorNode(std::move(tensor));
}
return TensorNode(tensor.unbind());
}
inline Tensor wrap_tensor_node(
TensorNode tensor_node,
c10::optional<ScalarType> dtype,
c10::optional<Layout> layout,
c10::optional<Device> device,
c10::optional<bool> pin_memory) {
TORCH_CHECK(
!tensor_node.is_leaf(), "Expected TensorNode to wrap a list of Tensors.");
TensorOptions options_ =
TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(
pin_memory);
if (tensor_node.degree() == 0) {
return wrap_buffer(ones({0}, dtype, layout, device), ones({}));
}
// Fast path: if all tensors are on CPU, have contiguous memory, and the same
// dtype, copying can be done much faster.
bool all_tensors_cpu = true;
bool all_tensors_contiguous = true;
bool all_tensors_same_dtype = true;
auto first_dtype = tensor_node.children(0).dtype();
std::vector<long> start_offsets(tensor_node.degree());
start_offsets[0] = 0;
long total_size = 0;
for (const auto i : c10::irange(tensor_node.degree())) {
all_tensors_cpu = all_tensors_cpu && tensor_node.children(i).is_cpu();
all_tensors_contiguous =
all_tensors_contiguous && tensor_node.children(i).is_contiguous();
all_tensors_same_dtype = all_tensors_same_dtype &&
(first_dtype == tensor_node.children(i).dtype());
if (!(all_tensors_cpu && all_tensors_contiguous &&
all_tensors_same_dtype)) {
break;
}
if (i > 0) {
start_offsets[i] =
start_offsets[i - 1] + tensor_node.children(i - 1).numel();
}
total_size += tensor_node.children(i).numel();
}
TensorOptions options;
Tensor nt_buffer, nt_sizes;
if (all_tensors_cpu && all_tensors_contiguous && all_tensors_same_dtype) {
nt_buffer = at::empty({total_size}, tensor_node.children(0).options());
nt_sizes = at::empty(
{static_cast<long>(tensor_node.degree()),
static_cast<long>(tensor_node.children(0).sizes().size())},
TensorOptions().dtype(kLong));
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half,
at::ScalarType::Bool,
at::ScalarType::BFloat16,
c10::typeMetaToScalarType(first_dtype),
"create_nt_buffer",
[&]() {
at::parallel_for(
0, tensor_node.degree(), 1, [&](int64_t begin, int64_t end) {
for (int64_t i = begin; i < end; ++i) {
                  // Only try copying memory if there are more than 0 elements
                  // for a certain tensor
if (tensor_node.children(i).numel() > 0) {
memcpy(
nt_buffer.mutable_data_ptr<scalar_t>() + start_offsets[i],
tensor_node.children(i).data_ptr<scalar_t>(),
tensor_node.children(i).numel() * sizeof(scalar_t));
}
}
});
});
long sizes_offset = 0;
for (size_t i = 0; i < tensor_node.degree(); ++i) {
auto tensor_sizes = tensor_node.children(i).sizes();
for (int64_t tensor_size : tensor_sizes) {
nt_sizes.mutable_data_ptr<int64_t>()[sizes_offset++] = tensor_size;
}
}
options = nt_buffer.options().merge_in(options_);
} else { // Slow path
std::vector<Tensor> flat_tensors;
std::vector<Tensor> sizes;
for (const auto i : c10::irange(tensor_node.degree())) {
flat_tensors.push_back(tensor_node.children(i).reshape(-1).contiguous());
sizes.push_back(
tensor(c10::IntArrayRef(tensor_node.children(i).sizes())));
}
options = flat_tensors[0].options().merge_in(options_);
nt_buffer = at::cat(flat_tensors);
nt_sizes = at::native::stack(sizes);
}
return wrap_buffer(nt_buffer.to(options), nt_sizes);
}
} // namespace impl
// This function is meant to ease rapid operator coverage for
// NestedTensor kernels. It is not meant to be efficient. Use it judiciously.
template <class F, class... A>
inline at::Tensor map_nested_tensor(F&& fn, A... a) {
return wrap_tensor_node(
impl::map(std::forward<F>(fn), impl::get_nested_tensor_structure(a)...),
c10::nullopt,
c10::nullopt,
c10::nullopt,
c10::nullopt);
}
} // namespace native
} // namespace at
| 14,128
| 32.963942
| 106
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/AffineQuantizer.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/quantized/AffineQuantizerBase.h>
namespace at {
namespace native {
Tensor& quantize_tensor_per_tensor_affine(
const Tensor& rtensor,
Tensor& qtensor,
double scale,
int64_t zero_point);
Tensor& quantize_tensor_per_channel_affine(
const Tensor& rtensor,
Tensor& qtensor,
Tensor scales,
Tensor zero_points,
int64_t axis);
Tensor& quantize_tensor_per_channel_float_qparams(
const Tensor& rtensor,
Tensor& qtensor,
Tensor scales,
Tensor zero_points,
int64_t axis);
Tensor& dequantize_tensor_per_tensor_affine(
const Tensor& qtensor,
Tensor& rtensor,
double scale,
int64_t zero_point);
Tensor& dequantize_tensor_per_channel_affine(
const Tensor& qtensor,
Tensor& rtensor,
Tensor scales,
Tensor zero_points,
int64_t axis);
Tensor& dequantize_tensor_per_channel_float_qparams(
const Tensor& qtensor,
Tensor& rtensor,
Tensor scales,
Tensor zero_points,
int64_t axis);
using quantize_tensor_per_tensor_affine_fn =
void (*)(const Tensor& rtensor, Tensor& qtensor, double scale, int64_t zero_point);
using quantize_tensor_per_channel_affine_fn = void (*)(
const Tensor& rtensor,
Tensor& qtensor,
const Tensor& scales,
const Tensor& zero_points,
int64_t axis);
using quantize_tensor_per_channel_float_qparams_fn = void (*)(
const Tensor& rtensor,
Tensor& qtensor,
const Tensor& scales,
const Tensor& zero_points,
int64_t axis);
using dequantize_tensor_per_tensor_affine_fn =
void (*)(const Tensor& qtensor, Tensor& rtensor, double scale, int64_t zero_point);
using dequantize_tensor_per_channel_affine_fn = void (*)(
const Tensor& qtensor,
Tensor& rtensor,
const Tensor& scales,
const Tensor& zero_points,
int64_t axis);
using dequantize_tensor_per_channel_float_qparams_fn = void (*)(
const Tensor& qtensor,
Tensor& rtensor,
const Tensor& scales,
const Tensor& zero_points,
int64_t axis);
using quantize_tensor_per_tensor_affine_sub_byte_fn =
void (*)(const Tensor& rtensor, Tensor& qtensor, float scale, float zero_point);
using dequantize_tensor_per_tensor_affine_sub_byte_fn =
void (*)(const Tensor& qtensor, Tensor& rtensor, float scale, float zero_point);
DECLARE_DISPATCH(
quantize_tensor_per_tensor_affine_fn,
quantize_tensor_per_tensor_affine_stub);
DECLARE_DISPATCH(
quantize_tensor_per_channel_affine_fn,
quantize_tensor_per_channel_affine_stub);
DECLARE_DISPATCH(
quantize_tensor_per_channel_float_qparams_fn,
quantize_tensor_per_channel_float_qparams_stub);
DECLARE_DISPATCH(
dequantize_tensor_per_tensor_affine_fn,
dequantize_tensor_per_tensor_affine_stub);
DECLARE_DISPATCH(
dequantize_tensor_per_channel_affine_fn,
dequantize_tensor_per_channel_affine_stub);
DECLARE_DISPATCH(
dequantize_tensor_per_channel_float_qparams_fn,
dequantize_tensor_per_channel_float_qparams_stub);
DECLARE_DISPATCH(
quantize_tensor_per_tensor_affine_sub_byte_fn,
quantize_tensor_per_tensor_affine_sub_byte_stub);
DECLARE_DISPATCH(
dequantize_tensor_per_tensor_affine_sub_byte_fn,
dequantize_tensor_per_tensor_affine_sub_byte_stub);
template <typename T>
TORCH_API Tensor quantize_tensor(
Tensor rtensor,
Tensor qtensor,
double scale,
int64_t zero_point);
template <typename T>
TORCH_API Tensor dequantize_tensor(
Tensor qtensor,
Tensor rtensor,
double scale,
int64_t zero_point);
} // namespace native
} // namespace at
| 3,654
| 26.900763
| 87
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/AffineQuantizerBase.h
|
#pragma once
#include <c10/macros/Export.h>
#include <c10/core/ScalarType.h>
namespace at {
namespace native {
// Quantize a float value into a uint value given scale and zero_point
template <typename T>
TORCH_API T quantize_val(double scale, int64_t zero_point, float value);
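// Worked example (illustrative of the usual affine mapping): with scale = 0.1
// and zero_point = 128, a real value of 2.5 quantizes to
// round(2.5 / 0.1) + 128 = 153, then clamps to T's representable range.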
// TODO combine this with quantize_val once the numerics for ARM are aligned
// with it
template <typename T>
T quantize_val_arm(
const float scale,
const int32_t zero_point,
const float value);
template <typename T, int precision = 8>
void quantize_vec(
double scale,
int64_t zero_point,
const float* src,
T* dst,
size_t count = 8);
template <typename T>
TORCH_API float dequantize_val(double scale, int64_t zero_point, T value);
template <typename T>
TORCH_API float dequantize_vec(
double scale,
int64_t zero_point,
const T* src,
float* dst,
size_t count = 8);
template <typename SRC_T, typename DST_T>
TORCH_API DST_T requantize_val(double, int64_t, double, int64_t, SRC_T src);
// Given a multiplier and a zero_point, requantize int32_t computed values back
// to quantized values. See comment above
// make_per_tensor_affine_quantizer function for the usage of int64_t
template <typename DST_T>
TORCH_API DST_T
requantize_from_int(double multiplier, int64_t zero_point, int64_t src);
int quantize_val_float_qparams(float scale, float zero_point, float value, int qmin, int qmax);
} // namespace native
} // namespace at
| 1,460
| 29.4375
| 95
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/ConvUtils.h
|
#pragma once
#include <ATen/core/List.h>
#include <ATen/native/ConvUtils.h>
namespace at::native::quantized {
namespace {
// MakeConvOutputShape is used from both the CPU and CUDA libraries,
// and exporting the symbol from torch_cpu would probably take more storage
// than duplicating the implementation, which will likely be inlined away
template <int kSpatialDim>
at::SmallVector<int64_t, kSpatialDim + 2> MakeConvOutputShape(
int N, // mini-batch
int M, // output channels
const std::array<int64_t, kSpatialDim>& input_image_shape,
const std::vector<int64_t>& kernel,
const torch::List<int64_t>& stride,
const torch::List<int64_t>& padding,
const torch::List<int64_t>& dilation);
#if defined(USE_CUDA) || defined(USE_PYTORCH_QNNPACK)
template <>
at::SmallVector<int64_t, 4> MakeConvOutputShape<2>(
int N, // mini-batch
int M, // output channels
const std::array<int64_t, 2>& input_image_shape,
const std::vector<int64_t>& kernel,
const at::List<int64_t>& stride,
const at::List<int64_t>& padding,
const at::List<int64_t>& dilation) {
const int H = input_image_shape[0];
const int W = input_image_shape[1];
const int64_t Y_H =
(H + 2 * padding[0] - dilation[0] * (kernel[0] - 1) - 1) / stride[0] + 1;
const int64_t Y_W =
(W + 2 * padding[1] - dilation[1] * (kernel[1] - 1) - 1) / stride[1] + 1;
return {N, M, Y_H, Y_W};
}
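// Worked example (illustrative): H = W = 32, kernel = {3, 3}, stride = {1, 1},
// padding = {1, 1}, dilation = {1, 1} yields
// Y_H = Y_W = (32 + 2*1 - 1*(3 - 1) - 1) / 1 + 1 = 32.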
template <>
at::SmallVector<int64_t, 5> MakeConvOutputShape<3>(
int N, // mini-batch
int M, // output channels
const std::array<int64_t, 3>& input_image_shape,
const std::vector<int64_t>& kernel,
const at::List<int64_t>& stride,
const at::List<int64_t>& padding,
const torch::List<int64_t>& dilation) {
const int D = input_image_shape[0];
const int H = input_image_shape[1];
const int W = input_image_shape[2];
const int64_t Y_D =
(D + 2 * padding[0] - dilation[0] * (kernel[0] - 1) - 1) / stride[0] + 1;
const int64_t Y_H =
(H + 2 * padding[1] - dilation[1] * (kernel[1] - 1) - 1) / stride[1] + 1;
const int64_t Y_W =
(W + 2 * padding[2] - dilation[2] * (kernel[2] - 1) - 1) / stride[2] + 1;
return {N, M, Y_D, Y_H, Y_W};
}
#endif
} // anonymous namespace
} // namespace at::native::quantized
| 2,239
| 34.555556
| 79
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/FakeQuantAffine.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
namespace at {
struct TensorIterator;
namespace native {
using fake_quant_tensor_cachemask_fn = void (*)(
Tensor& output,
Tensor& mask,
const Tensor& input,
float sc,
int64_t z_point,
int64_t quant_min,
int64_t quant_max);
using fake_quant_tensor_cachemask_tensor_qparams_fn = void (*)(
Tensor& output,
Tensor& mask,
const Tensor& input,
const Tensor& sc,
const Tensor& z_point,
const Tensor& fake_quant_enabled,
int64_t quant_min,
int64_t quant_max);
using fake_quant_learnable_grad_tensor_fn = void (*)(
TensorIterator& iter,
float scale,
float inv_scale,
int64_t zero_point,
int64_t quant_min,
int64_t quant_max,
float grad_factor);
DECLARE_DISPATCH(fake_quant_tensor_cachemask_fn, fake_quant_tensor_cachemask_stub);
DECLARE_DISPATCH(fake_quant_tensor_cachemask_tensor_qparams_fn, fake_quant_tensor_cachemask_tensor_qparams_stub);
DECLARE_DISPATCH(fake_quant_learnable_grad_tensor_fn, fake_quant_grad_learnable_tensor_stub);
using fake_quant_per_channel_fn = void (*)(
TensorIterator &iter,
int64_t quant_min,
int64_t quant_max);
using fake_quant_per_channel_cachemask_fn = void (*)(
TensorIterator &iter,
TensorIterator &iter_mask,
int64_t quant_min,
int64_t quant_max);
DECLARE_DISPATCH(fake_quant_per_channel_cachemask_fn, fake_quant_per_channel_cachemask_stub);
using fake_quant_learnable_per_channel_fn = void (*)(
TensorIterator &iter,
int64_t quant_min,
int64_t quant_max,
float grad_factor);
DECLARE_DISPATCH(fake_quant_learnable_per_channel_fn, fake_quant_grad_learnable_channel_stub);
} // namespace native
} // namespace at
| 1,792
| 25.367647
| 113
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/PackedParams.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/core/ivalue.h>
struct LinearPackedParamsBase : public torch::jit::CustomClassHolder {
virtual at::Tensor apply(
at::Tensor input,
double output_scale,
int64_t output_zero_point) = 0;
virtual at::Tensor apply_relu(
at::Tensor input,
double output_scale,
int64_t output_zero_point) = 0;
// out variant of LinearPackedParamsBase::apply
virtual at::Tensor& apply_out(
const at::Tensor& /*input*/,
double /*output_scale*/,
int64_t /*output_zero_point*/,
at::Tensor& output) {
throw std::runtime_error(
"apply_out is not implemented for this packed "
"parameter type");
return output;
}
virtual at::Tensor& apply_relu_out(
const at::Tensor& /*input*/,
double /*output_scale*/,
int64_t /*output_zero_point*/,
at::Tensor& output) {
throw std::runtime_error(
"apply_relu_out is not implemented for this packed "
"parameter type");
return output;
}
// Corresponding pattern (the ops with `*` are part of the pattern that
// represents the computation of quantized::linear_with_input_q_dq_qweight_dq_output_fp32):
// input -> q* -> dq* -> linear* ->
// qweight -> dq* /
//
// After fusion:
// input -> quantized::linear_with_input_q_dq_qweight_dq_output_fp32* ->
// qweight /
//
// Additional Note: the weight is packed as well
// Params:
// X: float32 Tensor, will be quantized to quint8 in the op
// W_prepack: packed qint8 quantized weight and bias
// Returns:
// Y: float32 Tensor
virtual at::Tensor apply_with_input_q_dq_qweight_dq_output_fp32(
at::Tensor input,
double input_scale,
int64_t input_zero_point) {
throw std::runtime_error(
"apply_with_input_q_dq_qweight_dq_output_fp32 is not implemented for this packed "
"parameter type");
return {};
}
// Corresponding pattern (the ops with `*` are part of the pattern that
// represents the computation of quantized::linear_with_input_q_dq_qweight_dq_relu_output_fp32):
// input -> q* -> dq* -> linear* -> relu* ->
// qweight -> dq* /
//
// After fusion:
// input -> quantized::linear_with_input_q_dq_qweight_dq_relu_output_fp32* ->
// qweight /
//
// Additional Note: the weight is packed as well
// Params:
// input: float32 Tensor, will be quantized to quint8 in the op
// Returns:
// float32 Tensor
virtual at::Tensor apply_with_input_q_dq_qweight_dq_relu_output_fp32(
at::Tensor input,
double input_scale,
int64_t input_zero_point) {
throw std::runtime_error(
"apply_with_input_q_dq_qweight_dq_relu_output_fp32 is not implemented for this packed "
"parameter type");
return {};
}
virtual at::Tensor apply_dynamic(
at::Tensor input,
bool reduce_range = false) = 0;
virtual at::Tensor apply_dynamic_relu(
at::Tensor input,
bool reduce_range = false) = 0;
virtual at::Tensor& apply_dynamic_out(
const at::Tensor& /* input */,
at::Tensor& output,
bool /* reduce_range */) {
throw std::runtime_error(
"apply_dynamic_out is not implemented for this packed "
"parameter type");
return output;
}
virtual at::Tensor& apply_dynamic_relu_out(
const at::Tensor& /* input */,
at::Tensor& output,
bool /* reduce_range */) {
throw std::runtime_error(
"apply_dynamic_relu_out is not implemented for this packed "
"parameter type");
return output;
}
virtual std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() = 0;
virtual c10::optional<at::Tensor> bias() = 0;
virtual void set_bias(c10::optional<at::Tensor> /*bias*/) {
throw std::runtime_error(
"set_bias is not implemented for this packed "
"parameter type");
}
};
template <int kSpatialDim = 2>
struct ConvPackedParamsBase : public torch::jit::CustomClassHolder {
virtual at::Tensor apply(
const at::Tensor& input,
double output_scale,
int64_t output_zero_point) = 0;
virtual at::Tensor apply_relu(
const at::Tensor& input,
double output_scale,
int64_t output_zero_point) = 0;
virtual at::Tensor apply_dynamic(
const at::Tensor& input,
bool reduce_range) = 0;
virtual std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() = 0;
virtual torch::List<int64_t> stride() const = 0;
virtual torch::List<int64_t> padding() const = 0;
virtual torch::List<int64_t> output_padding() const = 0;
virtual torch::List<int64_t> dilation() const = 0;
virtual int64_t groups() const = 0;
virtual bool transpose() const = 0;
};
| 4,739
| 31.027027
| 98
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/EmbeddingPackedParams.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/core/ivalue.h>
struct EmbeddingPackedParamsBase : public torch::jit::CustomClassHolder {
virtual at::Tensor embeddingbag_byte(
const at::Tensor& indices,
const c10::optional<at::Tensor>& offsets,
bool pruned_weights,
const c10::optional<at::Tensor>& per_sample_weights_,
const c10::optional<at::Tensor>& compressed_indices_mapping,
bool include_last_offset,
bool is_embedding_op) = 0;
virtual at::Tensor embeddingbag_4bit(
const at::Tensor& indices,
const c10::optional<at::Tensor>& offsets,
bool pruned_weights,
const c10::optional<at::Tensor>& per_sample_weights_,
const c10::optional<at::Tensor>& compressed_indices_mapping,
bool include_last_offset,
bool is_embedding_op) = 0;
virtual at::Tensor unpack() = 0;
virtual int64_t bit_rate() const = 0;
virtual int64_t version() const = 0;
};
| 921
| 29.733333
| 73
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/OnednnUtils.h
|
#pragma once
#include <ATen/Config.h>
#if AT_MKLDNN_ENABLED()
#include <ATen/Tensor.h>
#include <ATen/native/quantized/PackedParams.h>
#include <ideep.hpp>
#include <cpuinfo.h>
#include <c10/util/CallOnce.h>
using PrimitiveCacheKey = std::tuple<
double, // input_scale
int64_t, // input_zero_point
std::vector<int64_t>, // input_shape
double, // output_scale
int64_t, // output_zero_point
int64_t, // OMP_number_of_threads
double, // accum_scale
int64_t>; // accum_zero_point
enum CacheKeyIndex {
InputScale,
InputZeroPoint,
InputShape,
OutputScale,
OutputZeroPoint,
NumOfThreads,
};
// Base class of primitive cache
struct PrimitiveCache {
PrimitiveCacheKey key;
bool hit(const PrimitiveCacheKey& key) {
return this->key == key;
}
};
using LinearParams = ideep::matmul_forward_params;
using Conv = dnnl::convolution_forward;
using ConvDesc = dnnl::convolution_forward::primitive_desc;
using ConvParams = ideep::convolution_forward_params;
using Deconv = dnnl::deconvolution_forward;
using DeconvDesc = dnnl::deconvolution_forward::primitive_desc;
using DeconvParams = ideep::deconv_forward_params;
struct LinearPrimitiveCache : PrimitiveCache {
LinearPrimitiveCache() {}
LinearPrimitiveCache(
const PrimitiveCacheKey& key,
const LinearParams& param) {
this->key = key;
this->param = param;
}
LinearPrimitiveCache(
const PrimitiveCacheKey& key,
const LinearParams& param,
const ideep::tensor& bias) {
this->key = key;
this->param = param;
if (!bias.is_empty()) {
expected_bias =
bias.reorder_if_differ_in(param.pd.bias_desc(), param.bias_attr);
}
}
LinearParams param;
ideep::tensor expected_bias;
  // For dynamic qlinear, scale and zero point
  // are set at execution time, so we only need to compare
  // the rest of the key.
bool hit_dynamic(const PrimitiveCacheKey& new_key) {
auto cached_input_shape = std::get<InputShape>(this->key);
auto new_input_shape = std::get<InputShape>(new_key);
return (
cached_input_shape == new_input_shape &&
std::get<NumOfThreads>(this->key) == std::get<NumOfThreads>(new_key));
}
LinearParams& get_param() {
return param;
}
ideep::tensor& get_expected_bias() {
return expected_bias;
}
};
struct ConvPrimitiveCache : PrimitiveCache {
ConvPrimitiveCache() {}
ConvPrimitiveCache(
const PrimitiveCacheKey& key,
const ConvParams& params,
const ideep::tensor& bias) {
this->key = key;
this->params = params;
if (!bias.is_empty()) {
this->expected_bias =
bias.reorder_if_differ_in(params.pd.bias_desc(), params.bias_attr);
}
}
ideep::tensor expected_bias;
ConvParams params;
ConvParams& get_params() {
return params;
}
ideep::tensor& get_bias() {
return expected_bias;
}
};
struct DeconvPrimitiveCache : PrimitiveCache {
DeconvPrimitiveCache() {}
DeconvPrimitiveCache(
const PrimitiveCacheKey& key,
const DeconvParams& params,
const ideep::tensor& bias) {
this->key = key;
this->params = params;
if (!bias.is_empty()) {
this->expected_bias =
bias.reorder_if_differ_in(params.pd.bias_desc(), params.bias_attr);
}
}
DeconvParams params;
ideep::tensor expected_bias;
DeconvParams& get_params() {
return params;
}
ideep::tensor& get_bias() {
return expected_bias;
}
};
enum PostOps {
NoPostOp,
Relu,
LeakyRelu,
Tanh,
};
struct PackedLinearWeightsOnednn : public LinearPackedParamsBase {
PackedLinearWeightsOnednn(
std::unique_ptr<ideep::tensor> weight,
c10::optional<ideep::tensor> bias,
at::Tensor orig_weight,
c10::optional<at::Tensor> orig_bias)
: weight_(std::move(weight)),
bias_(std::move(bias)),
orig_weight_(std::move(orig_weight)),
orig_bias_(std::move(orig_bias)) {
cache_initialized_flag = std::make_unique<c10::once_flag>();
}
std::unique_ptr<ideep::tensor> weight_;
c10::optional<ideep::tensor> bias_;
at::Tensor orig_weight_;
c10::optional<at::Tensor> orig_bias_;
at::Tensor apply(
at::Tensor input,
double output_scale,
int64_t output_zero_point) override;
at::Tensor apply_relu(
at::Tensor input,
double output_scale,
int64_t output_zero_point) override;
at::Tensor apply_dynamic(at::Tensor input, bool reduce_range=false) override;
at::Tensor apply_dynamic_relu(at::Tensor input, bool reduce_range=false) override;
at::Tensor apply_leaky_relu(
at::Tensor input,
double output_scale,
int64_t output_zero_point,
double negative_slope);
at::Tensor apply_tanh(
at::Tensor input,
double output_scale,
int64_t output_zero_point);
std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() override;
c10::optional<at::Tensor> bias() override {
return orig_bias_;
}
static c10::intrusive_ptr<LinearPackedParamsBase> prepack(
at::Tensor weight,
c10::optional<at::Tensor> bias);
private:
LinearPrimitiveCache prim_cache;
std::unique_ptr<c10::once_flag> cache_initialized_flag;
template <PostOps post_op>
at::Tensor apply_impl(
at::Tensor input,
double output_scale,
int64_t output_zero_point,
torch::List<at::Scalar> post_op_args = torch::List<at::Scalar>());
template <bool ReluFused>
at::Tensor apply_dynamic_impl(at::Tensor input, bool reduce_range=false);
LinearPrimitiveCache& get_cache() {
return prim_cache;
}
};
template <int kSpatialDim = 2>
struct PackedConvWeightsOnednn : public ConvPackedParamsBase<kSpatialDim> {
PackedConvWeightsOnednn(
std::unique_ptr<ideep::tensor> weight,
c10::optional<ideep::tensor> bias,
at::Tensor orig_weight,
c10::optional<at::Tensor> orig_bias,
torch::List<int64_t> stride,
torch::List<int64_t> padding,
torch::List<int64_t> output_padding,
torch::List<int64_t> dilation,
int64_t groups,
uint8_t transpose)
: weight_(std::move(weight)),
bias_(std::move(bias)),
orig_weight_(std::move(orig_weight)),
orig_bias_(std::move(orig_bias)),
stride_(std::move(stride)),
padding_(std::move(padding)),
output_padding_(std::move(output_padding)),
dilation_(std::move(dilation)),
groups_(groups),
transpose_(transpose) {
cache_initialized_flag = std::make_unique<c10::once_flag>();
}
std::unique_ptr<ideep::tensor> weight_;
c10::optional<ideep::tensor> bias_;
at::Tensor orig_weight_;
c10::optional<at::Tensor> orig_bias_;
torch::List<int64_t> stride_;
torch::List<int64_t> padding_;
torch::List<int64_t> output_padding_;
torch::List<int64_t> dilation_;
int64_t groups_;
uint8_t transpose_;
at::Tensor apply(
const at::Tensor& input,
double output_scale,
int64_t output_zero_point) override;
at::Tensor apply_relu(
const at::Tensor& input,
double output_scale,
int64_t output_zero_point) override;
at::Tensor apply_dynamic(
const at::Tensor& input,
bool reduce_range) override;
at::Tensor apply_add(
const at::Tensor& input,
const at::Tensor& accum,
double output_scale,
int64_t output_zero_point);
at::Tensor apply_add_relu(
const at::Tensor& input,
const at::Tensor& accum,
double output_scale,
int64_t output_zero_point);
std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() override;
static c10::intrusive_ptr<ConvPackedParamsBase<kSpatialDim>> prepack(
at::Tensor weight,
c10::optional<at::Tensor> bias,
torch::List<int64_t> stride,
torch::List<int64_t> padding,
torch::List<int64_t> output_padding,
torch::List<int64_t> dilation,
int64_t groups,
bool transpose);
torch::List<int64_t> stride() const override {
return stride_;
}
torch::List<int64_t> padding() const override {
return padding_;
}
torch::List<int64_t> output_padding() const override {
return output_padding_;
}
torch::List<int64_t> dilation() const override {
return dilation_;
}
int64_t groups() const override {
return groups_;
}
bool transpose() const override {
return (bool)transpose_;
}
private:
ConvPrimitiveCache conv_prim_cache;
DeconvPrimitiveCache deconv_prim_cache;
std::unique_ptr<c10::once_flag> cache_initialized_flag;
template <bool ReluFused>
at::Tensor apply_impl(
const at::Tensor& input,
const c10::optional<at::Tensor>& accum,
double output_scale,
int64_t output_zero_point);
ConvPrimitiveCache& get_conv_cache() {
assert(!transpose());
return conv_prim_cache;
}
DeconvPrimitiveCache& get_deconv_cache() {
assert(transpose());
return deconv_prim_cache;
}
};
namespace onednn_utils {
// Try to reorder the tensor to the expected desc at runtime.
// Do it in a `try...catch...` manner so that a failed direct reorder can
// fall back to reordering from a plain (public) layout instead of raising.
// TODO: Move it to third_party/ideep
static void try_reorder(
ideep::tensor& t,
const ideep::tensor::desc&& desc,
ideep::scale_t scales) {
if (t.get_desc() != desc) {
try {
t = t.reorder_if_differ_in(desc);
} catch (...) {
ideep::tensor&& plain = t.to_public(nullptr, t.get_data_type());
t = plain.reorder_if_differ_in(desc);
}
t.set_scale(scales);
}
}
// oneDNN requires symmetric quantization of weights.
// Use this util function to check.
static bool is_weight_symmetric_quant(
const at::Tensor& weight,
bool is_transposed_conv) {
bool is_symmetric = true;
const auto qtype = weight.qscheme();
if (qtype == c10::kPerTensorAffine) {
is_symmetric &= (weight.q_zero_point() == 0);
} else if (qtype == c10::kPerChannelAffine) {
if (is_transposed_conv) {
// This case is currently not supported in PyTorch
// but we do not want to raise an error in this util function.
is_symmetric = false;
} else {
auto output_channels = weight.size(0);
for (int i = 0; i < output_channels; ++i) {
auto zp = weight.q_per_channel_zero_points()[i].item<int32_t>();
is_symmetric &= (zp == 0);
}
}
} else {
// This case is currently not supported in PyTorch
// but we do not want to raise an error in this util function.
is_symmetric = false;
}
return is_symmetric;
}
// When the qengine is x86, use this util function to check whether the onednn
// kernel is preferred over fbgemm's for better performance.
static bool should_use_onednn_quant(
const at::Tensor& weight,
bool is_transposed_conv,
int groups,
torch::List<int64_t> output_padding) {
// Performance of onednn is only validated on Linux right now.
// Also, the heuristics for dispatching are based on perf data on Linux.
// So, for x86 qengine, we always use fbgemm kernels if OS is not Linux.
// TODO Support more OSs.
#if !defined(__linux__)
return false;
#else
bool vnni_available = cpuinfo_has_x86_avx512vnni();
bool w_sym_quant =
is_weight_symmetric_quant(weight, is_transposed_conv);
bool opad_all_zero =
std::all_of(output_padding.begin(), output_padding.end(), [](int i) { return i==0; });
return vnni_available && (groups <= 100) && w_sym_quant && opad_all_zero;
#endif
}
} // onednn_utils
#endif // #if AT_MKLDNN_ENABLED()
| 11,394 | 26.130952 | 92 | h | null | pytorch-main/aten/src/ATen/native/quantized/cpu/QnnpackUtils.h |
#pragma once
#ifdef USE_PYTORCH_QNNPACK
#include <ATen/core/Tensor.h>
#include <c10/util/irange.h>
#include <pytorch_qnnpack.h>
#include <qnnpack_func.h>
#include <ATen/native/quantized/cpu/XnnpackUtils.h>
#include <ATen/native/quantized/PackedParams.h>
#include <ATen/native/utils/Factory.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/empty.h>
#endif
#include <utility>
inline int kPaddingChannels = 8;
struct QnnpackOperatorDeleter {
void operator()(pytorch_qnnp_operator_t op) {
pytorch_qnnp_delete_operator(op);
}
};
// The PackedWeight struct for QNNPACK stores the original weight and bias,
// since QNNPACK currently does not support an unpack function.
// For PyTorch Mobile, once the model is scripted and serialized we don't need
// to call unpack, so we can save some memory by checking for this case and
// freeing the original weights after packing.
// Input scale is set to null in pre-pack step. QNNPACK needs bias quantized
// with input scale which is available at runtime in pytorch. During runtime if
// input scale value changes then we requantize bias with the updated scale. For
// inference we expect the graph to be static so the input scale should not
// change across consecutive inference calls.
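//
// As a rough sketch of that requantization step (the actual implementation
// lives in the corresponding .cpp files): with the bias zero point fixed at 0,
//   q_bias[i] = round(fp32_bias[i] / (input_scale * weight_scale[i]))
// so whenever input_scale changes, q_bias must be recomputed from the retained
// fp32 bias. See also quant_utils::QuantizeBias in QuantUtils.h further below.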
struct PackedLinearWeightsQnnp : public LinearPackedParamsBase {
PackedLinearWeightsQnnp(
std::unique_ptr<qnnpack::PackBMatrix> w,
at::Tensor orig_weight,
at::Tensor bias,
c10::optional<double> input_scale,
at::Tensor w_scales,
std::vector<uint8_t>&& w_zps)
: w(std::move(w)),
orig_weight(std::move(orig_weight)),
bias_(at::native::mobile::allocate_padded_contiguous_if_needed(
bias, bias.suggest_memory_format())),
per_channel_(this->orig_weight.qscheme() == at::kPerChannelAffine),
input_scale(std::move(input_scale)),
w_scales(std::move(w_scales)),
w_zero_points(std::move(w_zps)),
q_scheme(this->orig_weight.qscheme()) {
weight_sizes = this->orig_weight.sizes().vec();
}
std::unique_ptr<qnnpack::PackBMatrix> w;
at::Tensor orig_weight;
at::Tensor bias_;
bool per_channel_;
c10::optional<double> input_scale;
at::Tensor w_scales;
std::vector<uint8_t> w_zero_points;
std::vector<float> requantization_scales;
std::vector<int64_t> weight_sizes;
c10::QScheme q_scheme;
at::Tensor apply(
at::Tensor input,
double output_scale,
int64_t output_zero_point) override;
at::Tensor apply_relu(
at::Tensor input,
double output_scale,
int64_t output_zero_point) override;
at::Tensor apply_dynamic(at::Tensor input, bool reduce_range=false) override;
at::Tensor apply_dynamic_relu(at::Tensor input, bool reduce_range=false) override;
std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() override;
c10::optional<at::Tensor> bias() override {
return bias_;
}
static c10::intrusive_ptr<LinearPackedParamsBase> prepack(
at::Tensor weight,
c10::optional<at::Tensor> bias);
bool per_channel() const {
return per_channel_;
}
private:
std::mutex qnnp_mutex_;
#ifdef USE_XNNPACK
xnnpack_operator xnnp_linear_op;
template <typename scalar_t, bool kReluFused>
at::Tensor apply_impl_xnnp(
const at::Tensor& input,
double output_scale,
int64_t output_zero_point);
#endif // USE_XNNPACK
template <bool ReluFused>
at::Tensor apply_impl(
at::Tensor input,
double output_scale,
int64_t output_zero_point);
template <bool ReluFused>
at::Tensor apply_dynamic_impl(at::Tensor input, bool reduce_range);
};
template <int kSpatialDim = 2>
struct PackedConvWeightsQnnp : public ConvPackedParamsBase<kSpatialDim> {
PackedConvWeightsQnnp(
std::unique_ptr<qnnpack::PrePackConvWeights> w,
at::Tensor orig_weight,
at::Tensor bias,
torch::List<int64_t> stride,
torch::List<int64_t> padding,
torch::List<int64_t> output_padding,
torch::List<int64_t> dilation,
int64_t groups,
bool transpose,
c10::optional<double> input_scale,
std::vector<int64_t> kernel,
at::Tensor w_scale,
std::vector<uint8_t>&& w_zps,
bool is_per_channel)
: w(std::move(w)),
orig_weight(std::move(orig_weight)),
bias(std::move(bias)),
stride_(std::move(stride)),
padding_(std::move(padding)),
output_padding_(std::move(output_padding)),
dilation_(std::move(dilation)),
groups_(groups),
transpose_(transpose),
is_per_channel_(is_per_channel),
input_scale(input_scale),
kernel_(std::move(kernel)),
w_scales(std::move(w_scale)),
w_zero_points(std::move(w_zps)) {
const bool any_padding = std::any_of(
padding_.begin(), padding_.end(), [](const auto& e) { return e != 0; });
const size_t kernel_size =
std::accumulate(kernel_.begin(), kernel_.end(), 1, std::multiplies<>());
const size_t group_input_channels = transpose
? this->orig_weight.size(0) / groups
: this->orig_weight.size(1);
const size_t group_output_channels = transpose
? this->orig_weight.size(1)
: this->orig_weight.size(0) / groups;
const size_t kernel_depth = kSpatialDim == 3 ? kernel_[0] : 1;
const size_t kernel_height = kernel_[kSpatialDim - 2];
const size_t kernel_width = kernel_[kSpatialDim - 1];
pytorch_qnnp_ukernel_type ukernel_type;
if (transpose_) {
ukernel_type = pytorch_qnnp_ukernel_type_conv;
} else {
ukernel_type = pytorch_qnnp_ukernel_type_none;
const bool has_depthwise_dimensions =
(kSpatialDim == 2 &&
((kernel_height == 3 && kernel_width == 3) ||
(kernel_height == 5 && kernel_width == 5))) ||
(kSpatialDim == 3 && kernel_height == 3 && kernel_width == 3 &&
kernel_depth == 3);
const bool has_depthwise_grouping =
group_input_channels == 1 && group_output_channels == 1 && groups > 1;
if (has_depthwise_dimensions && has_depthwise_grouping) {
ukernel_type = pytorch_qnnp_ukernel_type_dwconv;
} else if (
kernel_size == 1 &&
std::all_of(
stride_.begin(),
stride_.end(),
[](const auto& e) { return e == 1; }) &&
!any_padding) {
ukernel_type = group_input_channels >= SIZE_MAX
? pytorch_qnnp_ukernel_type_xzp_gemm
: pytorch_qnnp_ukernel_type_gemm;
} else {
ukernel_type = pytorch_qnnp_ukernel_type_conv;
}
}
if (is_per_channel && ukernel_type == pytorch_qnnp_ukernel_type_xzp_gemm) {
TORCH_INTERNAL_ASSERT(
false, "Per channel quantized weights are not supported for XZP kernels");
}
pytorch_qnnp_operator_t convolution{nullptr};
// Initially all the params are set to zero.
convolution = static_cast<pytorch_qnnp_operator_t>(
calloc(1, sizeof(struct pytorch_qnnp_operator)));
if (convolution == nullptr) {
TORCH_INTERNAL_ASSERT(
false, "failed to allocate %zu bytes for pytorch_qnnp_operator structure",
sizeof(struct pytorch_qnnp_operator));
}
convolution_op =
std::unique_ptr<pytorch_qnnp_operator, QnnpackOperatorDeleter>(
convolution);
// NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
convolution->ukernel_type = ukernel_type;
convolution->groups = groups;
convolution->group_input_channels = group_input_channels;
convolution->group_output_channels = group_output_channels;
convolution->kernel_depth = kernel_depth;
convolution->kernel_height = kernel_height;
convolution->kernel_width = kernel_width;
convolution->stride_depth = kSpatialDim == 3 ? stride_[0] : 1;
convolution->stride_height = stride_[kSpatialDim - 2];
convolution->stride_width = stride_[kSpatialDim - 1];
convolution->dilation_depth = kSpatialDim == 3 ? dilation_[0] : 1;
convolution->dilation_height = dilation_[kSpatialDim - 2];
convolution->dilation_width = dilation_[kSpatialDim - 1];
convolution->input_padding_height = padding_[kSpatialDim - 2];
convolution->input_padding_width = padding_[kSpatialDim - 1];
convolution->input_padding_depth = kSpatialDim == 3 ? padding_[0] : 0;
convolution->per_channel = is_per_channel_;
convolution->transpose = transpose_;
const uint32_t kr = pytorch_qnnp_params.q8conv.kr;
const size_t k_stride = (group_input_channels + (kr - 1)) & -kr;
size_t zero_size = sizeof(uint8_t) * k_stride;
size_t zero_offset = 0;
if (transpose_) {
convolution->adjustment_width = output_padding_[1];
convolution->adjustment_height = output_padding_[0];
if (group_input_channels < 8) {
zero_size += 8;
zero_offset = 8;
}
} else {
zero_buffer_size = 0;
if (any_padding) {
zero_size = 0;
zero_offset = 0;
if (ukernel_type == pytorch_qnnp_ukernel_type_dwconv) {
const uint32_t cr = pytorch_qnnp_params.q8dw9.cr;
const size_t group_stride = (groups + (cr - 1)) & -cr;
if (groups >= 8) {
zero_size = sizeof(uint8_t) * group_stride;
zero_offset = 0;
} else {
zero_size = sizeof(uint8_t) * group_stride + 8;
zero_offset = sizeof(uint8_t) * 8;
}
} else if (
ukernel_type == pytorch_qnnp_ukernel_type_conv ||
ukernel_type == pytorch_qnnp_ukernel_type_gemm) {
if (group_input_channels >= 8) {
zero_size = sizeof(uint8_t) * k_stride;
zero_offset = 0;
} else {
zero_size = sizeof(uint8_t) * k_stride + 8;
zero_offset = 8;
}
}
}
}
// NOLINTNEXTLINE(clang-analyzer-optin.portability.UnixAPI)
void* zero_buffer = malloc(zero_size);
if (zero_buffer == nullptr) {
pytorch_qnnp_delete_operator(convolution);
TORCH_INTERNAL_ASSERT(
false, "failed to allocate %zu bytes for zero padding",
zero_size);
}
    // This needs to be set to the input zero point:
// memset(zero_buffer, input_zero_point, zero_size);
zero_buffer_size = zero_size;
convolution->zero_buffer = zero_buffer;
convolution->zero_pointer = (void*)((uintptr_t)zero_buffer + zero_offset);
}
std::unique_ptr<pytorch_qnnp_operator, QnnpackOperatorDeleter> convolution_op;
#ifdef USE_XNNPACK
xnnpack_operator xnnp_convolution_op;
#endif // USE_XNNPACK
std::unique_ptr<qnnpack::PrePackConvWeights> w;
at::Tensor orig_weight;
at::Tensor bias;
torch::List<int64_t> stride_;
torch::List<int64_t> padding_;
torch::List<int64_t> output_padding_;
torch::List<int64_t> dilation_;
int64_t groups_;
bool transpose_;
bool is_per_channel_;
c10::optional<double> input_scale;
std::vector<int64_t> kernel_;
at::Tensor w_scales;
std::vector<uint8_t> w_zero_points;
std::vector<float> requantization_scales;
size_t zero_buffer_size;
at::Tensor apply(
const at::Tensor& input,
double output_scale,
int64_t output_zero_point) override;
at::Tensor apply_relu(
const at::Tensor& input,
double output_scale,
int64_t output_zero_point) override;
at::Tensor apply_dynamic(
const at::Tensor& input,
bool reduce_range=false) override;
std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() override;
static c10::intrusive_ptr<ConvPackedParamsBase<kSpatialDim>> prepack(
at::Tensor weight,
c10::optional<at::Tensor> bias,
torch::List<int64_t> stride,
torch::List<int64_t> padding,
torch::List<int64_t> output_padding,
torch::List<int64_t> dilation,
int64_t groups,
bool transpose);
torch::List<int64_t> stride() const override {
return stride_;
}
torch::List<int64_t> padding() const override {
return padding_;
}
torch::List<int64_t> output_padding() const override {
return output_padding_;
}
torch::List<int64_t> dilation() const override {
return dilation_;
}
int64_t groups() const override {
return groups_;
}
bool transpose() const override {
return transpose_;
}
bool per_channel() const {
return is_per_channel_;
}
private:
std::mutex qnnp_mutex_;
template <bool ReluFused>
at::Tensor apply_impl(
const at::Tensor& input,
double output_scale,
int64_t output_zero_point);
#ifdef USE_XNNPACK
template <typename scalar_t, bool ReluFused>
at::Tensor apply_impl_xnnp(
const at::Tensor& input,
double output_scale,
int64_t output_zero_point);
#endif // USE_XNNPACK
};
enum class Activation : uint8_t { NONE = 0, RELU = 1 };
#if defined(__ANDROID__) && !defined(__NDK_MAJOR__)
template <class T>
inline float Round(const float x) {
return ::nearbyintf(x);
}
inline double Round(const double x) {
return ::nearbyint(x);
}
#else
template <class T>
inline T Round(const T x) {
return std::nearbyint(x);
}
#endif
template<typename T>
inline T QuantizeValue(float scale, int32_t zero_point, float value) {
const int32_t qmin = std::numeric_limits<T>::min();
const int32_t qmax = std::numeric_limits<T>::max();
auto r = zero_point + static_cast<int32_t>(Round(value / scale));
r = std::max(r, qmin);
r = std::min(r, qmax);
return static_cast<T>(r);
}
template<typename T>
inline std::pair<T, T> activationLimits(
float scale,
int32_t zero_point,
Activation Ac) {
switch (Ac) {
case Activation::NONE:
return {std::numeric_limits<T>::min(),
std::numeric_limits<T>::max()};
case Activation::RELU:
return {QuantizeValue<T>(scale, zero_point, 0.0),
std::numeric_limits<T>::max()};
default:
#ifdef _MSC_VER
__assume(0);
#else
__builtin_unreachable();
#endif
}
}
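// Worked example for activationLimits above: for uint8 with scale = 0.1 and
// zero_point = 10,
//   activationLimits<uint8_t>(0.1f, 10, Activation::RELU)
// returns {10, 255}, since QuantizeValue<uint8_t>(0.1f, 10, 0.f)
// = 10 + round(0.f / 0.1f) = 10, i.e. the quantized representation of 0.0
// becomes the lower clamp bound of the fused ReLU.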
namespace at {
namespace native {
namespace qnnp_avgpool_helper {
Tensor qnnpack_avg_pool2d(
Tensor input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override);
} // qnnp_avgpool_helper
} // namespace native
} // namespace at
namespace {
C10_UNUSED std::vector<float> generate_requantization_scales(
const at::Tensor& weight_scales,
const float input_scale,
const float output_scale,
std::vector<float>& requant_scales) {
  // Since the weight scales tensor is allocated with padding,
  // weight_scales.numel() gives us the padded number of elements.
const auto num_output_channels_padded = weight_scales.numel();
float *const weight_scales_data = weight_scales.data_ptr<float>();
if (static_cast<int64_t>(requant_scales.size()) < num_output_channels_padded) {
requant_scales.resize(num_output_channels_padded);
}
for (const auto i : c10::irange(num_output_channels_padded)) {
const auto inverse_output_scale = 1.f /output_scale;
requant_scales[i] = (weight_scales_data[i] * input_scale) * inverse_output_scale;
TORCH_CHECK(
(requant_scales[i] > 0.0f && std::isnormal(requant_scales[i])),
"failed to create op with requantization scale: ",
requant_scales[i],
": requantization scale must be finite and positive");
}
return requant_scales;
}
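// Worked example for generate_requantization_scales above: with
// weight_scale = 0.02, input_scale = 0.1 and output_scale = 0.05, the
// requantization scale of that output channel is (0.02 * 0.1) / 0.05 = 0.04.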
C10_UNUSED std::pair<std::vector<uint8_t>, at::Tensor> make_zero_points_and_scales_tensor(
const at::Tensor& weight_contig,
bool transpose = false,
uint32_t groups = 1
) {
const int out_ch_idx = transpose ? 1 : 0;
const auto num_output_channels = weight_contig.size(out_ch_idx) * (transpose ? groups : 1);
  // Add 8 to account for buffering needed by QNNPACK.
const auto num_output_channels_padded = num_output_channels + kPaddingChannels;
const auto qtype = weight_contig.qscheme();
std::vector<uint8_t> weight_zp(num_output_channels_padded, 0);
// Adjust weight zero point, similar to weight data.
if (qtype == at::kPerTensorAffine) {
for (const auto i : c10::irange(num_output_channels)) {
weight_zp[i] = (uint8_t)(weight_contig.q_zero_point() + 128);
}
} else if (qtype == at::kPerChannelAffine) {
TORCH_CHECK(
weight_contig.q_per_channel_zero_points().scalar_type() == at::kLong,
"Per channel zero points dtype must be long int.");
const int64_t* per_channel_zero_points =
weight_contig.q_per_channel_zero_points().data_ptr<int64_t>();
for (const auto i : c10::irange(num_output_channels)) {
weight_zp[i] = (uint8_t)(per_channel_zero_points[i] + 128);
}
} else {
TORCH_INTERNAL_ASSERT(false, "Unsupported quantization scheme.");
}
at:: Tensor weight_scales =
at::empty(
{num_output_channels_padded},
at::device(at::kCPU).dtype(at::kFloat));
float *const weight_scales_data = weight_scales.data_ptr<float>();
if (qtype == at::kPerTensorAffine) {
for (const auto i : c10::irange(num_output_channels)) {
weight_scales_data[i] = weight_contig.q_scale();
}
} else if (qtype == at::kPerChannelAffine) {
TORCH_CHECK(
weight_contig.q_per_channel_scales().scalar_type() == at::kDouble,
"Per channel scales dtype must be double.");
const double *const per_channel_scales =
weight_contig.q_per_channel_scales().data_ptr<double>();
for (const auto i : c10::irange(num_output_channels)) {
weight_scales_data[i] = static_cast<float>(per_channel_scales[i]);
}
} else {
TORCH_INTERNAL_ASSERT(false, "Unsupported quantization scheme.");
}
for (const auto i : c10::irange(num_output_channels, num_output_channels_padded)) {
weight_scales_data[i] = 1.f;
}
return {weight_zp, weight_scales};
}
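// Example of the zero point / scale handling above: a qint8 weight with
// q_zero_point() == 0 is recorded here as uint8 zero point 0 + 128 = 128,
// while the kPaddingChannels extra entries keep zero point 0 and scale 1.f.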
} // namespace
#endif
| 17,723 | 32.568182 | 93 | h | null | pytorch-main/aten/src/ATen/native/quantized/cpu/QuantUtils.h |
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/core/List.h>
#include <ATen/TensorOperators.h>
#include <c10/util/irange.h>
#include <algorithm>
#include <cmath>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/quantize_per_tensor_native.h>
#include <ATen/ops/quantize_per_channel_native.h>
#include <ATen/ops/zeros.h>
#endif
namespace quant_utils {
namespace {
float RawUint16ToFp16(unsigned short value) {
  // Convert a raw 16-bit half-precision floating point number
  // to a single-precision floating point number.
const unsigned short sign_bits = value >> 15;
const unsigned short exponent_bits = value >> 10 & 0x1f;
const unsigned short significand_bits = value & 0x3ff;
const float sign = sign_bits ? -1 : 1;
const float significand =
1 + significand_bits * 0.0009765625f; // 0.0009765625f = 0x1p-10 = 2^-10;
const float exponent = exponent_bits - 0xf;
return sign * std::ldexp(significand, exponent);
}
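// Worked example for RawUint16ToFp16 above: 0x7BFF (the largest finite fp16
// value) has sign = +1, exponent_bits = 0x1e and significand_bits = 0x3ff, so
// significand = 1 + 1023 / 1024 = 1.9990234375, exponent = 30 - 15 = 15,
// and the result is 1.9990234375 * 2^15 = 65504.f.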
template <typename T>
bool CheckAndSaturate(T max_val, T* element) {
if (*element > max_val) {
*element = max_val;
return true;
}
if (*element < -max_val) {
*element = -max_val;
return true;
}
return false;
}
}
using namespace std;
// A structure to hold quantization parameters 'scale' and 'zero_point'.
// The meaning of these values is as the constants in the quantization equation
//
// real_value = scale * (quantized_value - zero_point)
//
// In other words, 'zero_point' is the quantized value that corresponds
// to the real value 0, and 'scale' is the difference of real values
// corresponding to consecutive quantized values.
struct TensorQuantizationParams {
double scale;
std::int32_t zero_point;
int precision;
};
// Use fp16_min as the small scale cutoff because we don't want to use scales in
// fp16 subnormal range. This is to be consistent with Glow and FakeLowP
// implementation for NNPI.
constexpr float SMALL_SCALE_THRESHOLD = 6.1e-5f;
// The following implementation should be identical to fbgemm::ChooseQuantizationParams
inline TensorQuantizationParams ChooseQuantizationParams(
float min,
float max,
int32_t qmin,
int32_t qmax,
bool preserve_sparsity = false,
bool force_scale_power_of_two = false,
bool reduce_range = false) {
TORCH_CHECK(
min <= max,
"In ChooseQuantizationParams, min should be less than or equal to max");
if (reduce_range) {
qmin = qmin/2;
qmax = qmax/2;
}
if (min < 0 && max > 0 && preserve_sparsity) {
int symmetric_qmin = -((qmax - qmin) / 2 + 1);
int symmetric_qmax = (qmax - qmin) / 2;
double max_scale =
std::max(fabs(min / symmetric_qmin), fabs(max / symmetric_qmax));
min = max_scale * symmetric_qmin;
max = max_scale * symmetric_qmax;
}
// We extend the [min, max] interval to ensure that it contains 0.
// Otherwise, we would not meet the requirement that 0 be an exactly
// representable value.
min = std::min(min, 0.f);
max = std::max(max, 0.f);
TORCH_CHECK(
qmin < qmax,
"In ChooseQuantizationParams, qmin should be less than qmax");
// Use double precision for intermediate computation but use single precision
// in final number to reflect the actual number used during quantization.
double scale = (static_cast<double>(max) - min) / (qmax - qmin);
  // If scale is 0 or so small that its reciprocal is infinity, we
  // arbitrarily adjust the scale to 0.1. We want to avoid scale's reciprocal being
// infinity because some of fbgemm code pre-computes scale's reciprocal to do
// multiplication instead of division in the time critical part of code.
if (float(scale) == 0.0f || std::isinf(1.0f / float(scale))) {
scale = 0.1;
}
TORCH_CHECK(scale > 0, "quantization scale should be > 0");
if (force_scale_power_of_two) {
if (scale < 1) {
scale = 1.0 / (1 << static_cast<int>(floor(log(1.0 / scale) / log(2))));
} else {
scale = 1 << static_cast<int>(ceil(log(scale) / log(2)));
}
}
// Cut off small scale
if (scale < SMALL_SCALE_THRESHOLD) {
float org_scale = scale;
scale = SMALL_SCALE_THRESHOLD;
// Adjust the min and max based on the new scale
if (min == 0.0f) {
max = SMALL_SCALE_THRESHOLD * (qmax - qmin);
} else if (max == 0.0f) {
min = -SMALL_SCALE_THRESHOLD * (qmax - qmin);
} else {
float amplifier = SMALL_SCALE_THRESHOLD / org_scale;
min *= amplifier;
max *= amplifier;
}
}
// Zero-point computation.
// First the initial floating-point computation. The zero-point can be
// determined from solving an affine equation for any known pair
// (real value, corresponding quantized value).
// We know two such pairs: (rmin, qmin) and (rmax, qmax).
// The arithmetic error on the zero point computed from either pair
// will be roughly machine_epsilon * (sum of absolute values of terms)
// so we want to use the variant that adds the smaller terms.
double zero_point_from_min = qmin - min / static_cast<double>(scale);
double zero_point_from_max = qmax - max / static_cast<double>(scale);
  double zero_point_from_min_error =
      std::abs(qmin) + std::abs(min / static_cast<double>(scale));
  double zero_point_from_max_error =
      std::abs(qmax) + std::abs(max / static_cast<double>(scale));
double initial_zero_point =
zero_point_from_min_error < zero_point_from_max_error
? zero_point_from_min
: zero_point_from_max;
// for symmetric quantization (preserve_sparsity == true), we force zero_point
// to be a middle value between qmin and qmax.
// If either min or max is 0, then we just use 0 as zero_point.
if (min < 0 && max > 0 && preserve_sparsity) {
initial_zero_point = static_cast<double>(qmin + qmax) / 2;
}
// Now we need to nudge the zero point to be an integer
// (our zero points are integer, and this is motivated by the requirement
// to be able to represent the real value "0" exactly as a quantized value,
// which is required in multiple places, for example in Im2col with zero
// padding).
int32_t nudged_zero_point = 0;
if (initial_zero_point < qmin) {
nudged_zero_point = qmin;
} else if (initial_zero_point > qmax) {
nudged_zero_point = qmax;
} else {
nudged_zero_point = nearbyint(initial_zero_point);
}
TensorQuantizationParams result;
result.scale = scale;
result.zero_point = nudged_zero_point;
return result;
}
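// Worked example for ChooseQuantizationParams above: min = -1.f, max = 1.f,
// qmin = 0, qmax = 255 gives scale = 2 / 255 ~ 0.00784; both zero point
// candidates evaluate to 127.5, which nearbyint() nudges to 128, so the
// result is {scale ~ 0.00784, zero_point = 128}.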
// This function helps convert the Conv1d dimensions into a form usable by the Conv2d op.
constexpr int64_t kConv1dSqueezeDim = 0;
static C10_UNUSED torch::List<int64_t> MakeArgForConv1d(const torch::List<int64_t>& arg,
int64_t base_value) {
TORCH_CHECK(!arg.empty(), "Argument must have elements.");
torch::List<int64_t> result({arg.get(0), base_value});
if (arg.size() == 1) {
result[1] = arg.get(0);
} else {
result[1] = arg.get(1);
}
result[kConv1dSqueezeDim] = base_value;
return result;
}
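// Worked example for MakeArgForConv1d above: MakeArgForConv1d({3}, 1)
// returns {1, 3}, i.e. the single Conv1d value is kept in the width position
// and the squeezed (height) dimension gets the neutral base_value.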
// FP16 quantization of weights requires the elements to be within the range
// [5.96e-8, 65504]. If an element is out of range, it is saturated to the
// maximum or minimum value representable by FP16.
inline void HandleWeightsSaturation(int64_t N, float* weight) {
const float kFp16Max = RawUint16ToFp16(0x7BFF);
bool found_out_of_range = false;
for (const auto i : c10::irange(N)) {
bool saturate = CheckAndSaturate<float>(kFp16Max, weight + i);
if (saturate) {
found_out_of_range = true;
}
}
if (found_out_of_range) {
TORCH_WARN("FOUND weight out of range ");
}
}
// Util function for quantizing bias.
inline at::Tensor QuantizeBias(
bool is_per_channel,
const at::Tensor& bias,
const at::Tensor& weight_contig,
double input_scale) {
at::Tensor qbias;
if (is_per_channel) {
auto bias_quant_scales =
weight_contig.q_per_channel_scales() * input_scale;
auto bias_zp = at::zeros(bias_quant_scales.sizes(), c10::kInt);
qbias = at::native::quantize_per_channel(
bias, bias_quant_scales, bias_zp, 0, c10::kQInt32);
} else {
qbias = at::native::quantize_per_tensor(
bias, weight_contig.q_scale() * input_scale, 0, c10::kQInt32);
}
return qbias;
}
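// Example (hypothetical call, assuming a per-tensor quantized `weight_contig`
// with q_scale() == 0.02 and a runtime input_scale of 0.1):
//   at::Tensor qbias = QuantizeBias(/*is_per_channel=*/false, bias,
//                                   weight_contig, /*input_scale=*/0.1);
// yields a kQInt32 tensor with scale 0.02 * 0.1 = 0.002 and zero point 0.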
} // namespace quant_utils
| 8,361 | 33.841667 | 88 | h | null | pytorch-main/aten/src/ATen/native/quantized/cpu/QuantizedOps.h |
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/core/IListRef.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorIterator.h>
#include <ATen/native/Activation.h>
#include <ATen/native/DispatchStub.h>
namespace at {
namespace native {
using qrelu_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/);
using qrelu_leaky_fn = void (*)(Tensor& /*out*/, const Tensor& /*qx*/,
const Scalar& /*negval_*/);
using qgelu_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/, GeluType /* approximate */);
using qsigmoid_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/, double output_scale, int64_t output_zero_point);
using qhardsigmoid_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/);
using qclamp_fn = void (*)(
const at::Tensor& /*qx*/,
const Scalar& min,
const Scalar& max,
at::Tensor& /*qy*/);
using qclamp_minmax_fn = void (*)(
const at::Tensor& /*qx*/,
const Scalar& /*min or max*/,
at::Tensor& /*qy*/);
using qthreshold_fn = void (*)(
const at::Tensor& /*qx*/,
const Scalar& threshold,
const Scalar& value,
at::Tensor& /*qy*/);
using qtanh_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/);
using qelu_fn = void(*)(
const at::Tensor& /*qx*/,
const Scalar& /*alpha*/,
const Scalar& /*scale*/,
const Scalar& /*input_scale*/,
at::Tensor& /*qy*/);
using qbinary_fn =
void (*)(Tensor& /*out*/, const Tensor& /*self*/, const Tensor& /*other*/);
using qadd_scalar_fn =
void (*)(Tensor& /*out*/, const Tensor& /*self*/, const Scalar& other /*other*/);
using qhardswish_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/);
using qdropout_fn = void(*)(
const at::Tensor& /*qx*/,
const Scalar& /*p*/,
bool training /*training*/,
at::Tensor& /*qy*/);
using qmaxpool_2d_fn = void (*)(
const Tensor& qx,
int64_t iC, // input/output channels
int64_t iH,
int64_t iW, // input sizes
int64_t oH,
int64_t oW, // output sizes
int64_t kH,
int64_t kW, // kernel size
int64_t sH,
int64_t sW, // strides
int64_t pH,
int64_t pW, // padding
int64_t dH,
int64_t dW, // dilation
Tensor& qy);
using qmaxpool_3d_fn = void (*)(
const Tensor& qx,
int64_t iC, // input/output channels
int64_t iT,
int64_t iH,
int64_t iW, // input sizes
int64_t oT,
int64_t oH,
int64_t oW, // output sizes
int64_t kT,
int64_t kH,
int64_t kW, // kernel size
int64_t sT,
int64_t sH,
int64_t sW, // strides
int64_t pT,
int64_t pH,
int64_t pW, // padding
int64_t dT,
int64_t dH,
int64_t dW, // dilation
Tensor& qy);
using qadaptive_avg_pool2d_fn = void (*)(
const Tensor& qx,
Tensor& qy,
int64_t sizeB,
int64_t sizeC,
int64_t isizeH,
int64_t isizeW,
int64_t osizeH,
int64_t osizeW,
int64_t istrideB,
int64_t istrideC,
int64_t istrideH,
int64_t istrideW);
using qadaptive_avg_pool3d_fn = void (*)(
const Tensor& qx,
Tensor& qy,
int64_t sizeB,
int64_t sizeC,
int64_t isizeD,
int64_t isizeH,
int64_t isizeW,
int64_t osizeD,
int64_t osizeH,
int64_t osizeW,
int64_t istrideB,
int64_t istrideC,
int64_t istrideD,
int64_t istrideH,
int64_t istrideW);
using qavg_pool2d_fn = void (*)(
const Tensor& qx,
Tensor& qy,
int64_t nBatch,
int64_t nInputPlane,
int64_t inputWidth,
int64_t inputHeight,
int64_t outputWidth,
int64_t outputHeight,
int kW,
int kH,
int dW,
int dH,
int padW,
int padH,
bool count_include_pad,
c10::optional<int64_t> divisor_override);
using qavg_pool3d_fn = void (*)(
const Tensor& qx,
Tensor& qy,
int64_t nBatch,
int64_t nInputPlane,
int64_t inputWidth,
int64_t inputHeight,
int64_t inputDepth,
int64_t outputWidth,
int64_t outputHeight,
int64_t outputDepth,
int kW,
int kH,
int kD,
int dW,
int dH,
int dD,
int padW,
int padH,
int padD,
bool count_include_pad,
c10::optional<int64_t> divisor_override);
using qupsample_bilinear2d_fn = void (*)(
Tensor& output,
const Tensor& input,
int64_t input_height,
int64_t input_width,
int64_t output_height,
int64_t output_width,
int64_t nbatch,
int64_t channels,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w);
using qcat_nhwc_fn = Tensor (*)(
const MaterializedITensorListRef& qxs,
int64_t dim,
double scale,
int64_t zero_point);
using qtopk_fn = void(*)(Tensor&, Tensor&, const Tensor&, int64_t, int64_t, bool, bool);
using qbatch_norm_fn = void(*)(int64_t, int64_t, int64_t, int64_t, int64_t, const Tensor&, const Tensor&, const Tensor&, Tensor&);
using qnormalize_fn = void (*)(
const Tensor& /* X */,
const Tensor& /* gamma */,
const Tensor& /* beta */,
bool /* affine_per_channel */,
int /* num_channels */,
int /* num_groups */,
int64_t /* M */,
int64_t /* N */,
double /* eps */,
Tensor* /* Y */);
using qmean_inner_dim_fn = void (*)(
const Tensor& /* X */,
OptionalIntArrayRef /* opt_dim */,
bool /* keepdim */,
c10::optional<ScalarType> /* opt_dtype */,
Tensor& /* Y */);
using qstd_inner_dim_fn = void (*)(
const Tensor& /* X */,
OptionalIntArrayRef /* dim */,
const c10::optional<Scalar>& /* correction */,
bool /* keepdim */,
Tensor& /* Y */);
using qnormalize_nhwc_fn = void (*)(
const Tensor& /* X */,
const Tensor& /* gamma */,
const Tensor& /* beta */,
bool /* affine_per_channel */,
int /* num_channels */,
int /* num_groups */,
int64_t /* M */,
int64_t /* N */,
double /* eps */,
Tensor* /* Y */);
using qprelu_fn = void (*)(Tensor& /*out*/, const Tensor& /*qx*/,
const Tensor& /*qw*/);
DECLARE_DISPATCH(qadaptive_avg_pool2d_fn, qadaptive_avg_pool2d_nhwc_stub);
DECLARE_DISPATCH(qadaptive_avg_pool3d_fn, qadaptive_avg_pool3d_ndhwc_stub);
DECLARE_DISPATCH(qadd_scalar_fn, qadd_scalar_relu_stub);
DECLARE_DISPATCH(qadd_scalar_fn, qadd_scalar_stub);
DECLARE_DISPATCH(qavg_pool2d_fn, qavg_pool2d_nhwc_stub);
DECLARE_DISPATCH(qavg_pool3d_fn, qavg_pool3d_nhwc_stub);
DECLARE_DISPATCH(qbatch_norm_fn, qbatch_norm_relu_stub);
DECLARE_DISPATCH(qbatch_norm_fn, qbatch_norm_stub);
DECLARE_DISPATCH(qbinary_fn, qadd_relu_stub);
DECLARE_DISPATCH(qbinary_fn, qadd_stub);
DECLARE_DISPATCH(qbinary_fn, qmul_relu_stub);
DECLARE_DISPATCH(qbinary_fn, qmul_stub);
DECLARE_DISPATCH(qcat_nhwc_fn, qcat_nhwc_stub);
DECLARE_DISPATCH(qcat_nhwc_fn, qcat_relu_nhwc_stub);
DECLARE_DISPATCH(qclamp_fn, qclamp_stub);
DECLARE_DISPATCH(qclamp_minmax_fn, qclamp_min_stub);
DECLARE_DISPATCH(qclamp_minmax_fn, qclamp_max_stub);
DECLARE_DISPATCH(qelu_fn, qelu_stub);
DECLARE_DISPATCH(qhardsigmoid_fn, qhardsigmoid_stub);
DECLARE_DISPATCH(qhardswish_fn, qhardswish_stub);
DECLARE_DISPATCH(qdropout_fn, qdropout_stub);
DECLARE_DISPATCH(qmaxpool_2d_fn, qmaxpool_2d_nhwc_stub);
DECLARE_DISPATCH(qmaxpool_3d_fn, qmaxpool_3d_nthwc_stub);
DECLARE_DISPATCH(qnormalize_fn, quantized_normalize_stub);
DECLARE_DISPATCH(qnormalize_nhwc_fn, quantized_groupnorm_nhwc_stub);
DECLARE_DISPATCH(qrelu_fn, qrelu_stub);
DECLARE_DISPATCH(qrelu_leaky_fn, qrelu_leaky_stub);
DECLARE_DISPATCH(qgelu_fn, qgelu_stub);
DECLARE_DISPATCH(qsigmoid_fn, qsigmoid_stub);
DECLARE_DISPATCH(qtanh_fn, qtanh_stub);
DECLARE_DISPATCH(qthreshold_fn, qthreshold_stub);
DECLARE_DISPATCH(qtopk_fn, qtopk_stub);
DECLARE_DISPATCH(qupsample_bilinear2d_fn, qupsample_bilinear2d_nhwc_stub);
DECLARE_DISPATCH(qmean_inner_dim_fn, qmean_inner_dim_stub);
DECLARE_DISPATCH(qstd_inner_dim_fn, qstd_inner_dim_stub);
DECLARE_DISPATCH(qprelu_fn, qprelu_stub);
} // namespace native
} // namespace at
| 7,861 | 29.355212 | 130 | h | null | pytorch-main/aten/src/ATen/native/quantized/cpu/XnnpackUtils.h |
#pragma once
#ifdef USE_XNNPACK
#include <cstdint>
#include <ATen/core/Tensor.h>
#include <ATen/native/xnnpack/Common.h>
using xnnpack_operator = at::native::xnnpack::Operator;
namespace at {
namespace native {
namespace xnnp_utils {
/*
 * Return the shape in the same order as the memory format,
 * e.g. channels_last will return NHWC instead of NCHW.
*/
std::vector<size_t> get_mem_format_aware_shape(const at::Tensor& in);
/*
* Input is always int8_t, output can be [int8_t, uint8_t].
* input + offset = output
* int8_t + 128 = uint8_t
* int8_t + 0 = int8_t
*/
template <typename PT>
void q8_copy_int8_weight_and_add_offset(const at::Tensor& in, at::Tensor& out);
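// Example (illustrative only; the offset is selected by the output type):
// an int8 weight value of -128 shifted by +128 becomes uint8 0 and +127
// becomes uint8 255, while with an offset of 0 the value is copied unchanged.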
template <int kSpatialDim>
Tensor convert_conv_weights_to_channel_last_tensor(
const at::Tensor& src,
int groups,
bool transpose);
/*
* Series of create wrapper functions to call xnn_create_[de]conv* functions.
*/
C10_ALWAYS_INLINE
enum xnn_status xnnp_create_convolution2d_nhwc(
uint32_t pad_top,
uint32_t pad_right,
uint32_t pad_bottom,
uint32_t pad_left,
uint32_t kernel_h,
uint32_t kernel_w,
uint32_t stride_h,
uint32_t stride_w,
uint32_t dilation_h,
uint32_t dilation_w,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t ip_chan_stride,
size_t op_chan_stride,
int8_t izp,
float ip_scale,
int8_t kzp,
const float* k_scales,
const int8_t* kernel,
const int32_t* bias,
int8_t ozp,
float op_scale,
int8_t op_min,
int8_t op_max,
uint32_t flags,
xnn_operator_t* op,
bool per_channel,
bool transpose) {
/* Symmetric quantization forces kzp = 0 */
  TORCH_CHECK(!kzp, "XNNPACK Q[SC]8 conv kernels expect kernel zero point to be zero. "
                    "But got: ", kzp);
if (transpose) {
TORCH_CHECK(!per_channel, "XNNPACK Q[SC]8 does not have a per channel deconvolution!");
return xnn_create_deconvolution2d_nhwc_qs8(
pad_top, /* uint32_t output_padding_top */
pad_right, /* uint32_t output_padding_right */
pad_bottom, /* uint32_t output_padding_bottom */
pad_left, /* uint32_t output_padding_left */
kernel_h, /* uint32_t kernel_height */
kernel_w, /* uint32_t kernel_width */
stride_h, /* uint32_t stride_height */
stride_w, /* uint32_t stride_width */
dilation_h, /* uint32_t dilation_height */
dilation_w, /* uint32_t dilation_width */
groups, /* uint32_t groups */
group_input_channels, /* size_t group_input_channels */
group_output_channels, /* size_t group_output_channels */
ip_chan_stride, /* size_t input_pixel_stride */
op_chan_stride, /* size_t output_pixel_stride */
izp, /* int8_t input_zero_point */
ip_scale, /* float input_scale */
k_scales[0], /* float kernel_scale */
kernel, /* const int8_t* kernel */
bias, /* const int32_t* bias */
ozp, /* int8_t output_zero_point */
op_scale, /* float output_scale */
op_min, /* int8_t output_min */
op_max, /* int8_t output_max */
flags, /* uint32_t flags */
nullptr, /* xnn_caches_t caches */
op); /* xnn_operator_t* deconvolution_op_out */
}
if (!per_channel) {
return xnn_create_convolution2d_nhwc_qs8(
pad_top, /* uint32_t input_padding_top */
pad_right, /* uint32_t input_padding_right */
pad_bottom, /* uint32_t input_padding_bottom */
pad_left, /* uint32_t input_padding_left */
kernel_h, /* uint32_t kernel_height */
kernel_w, /* uint32_t kernel_width */
stride_h, /* uint32_t subsampling_height */
stride_w, /* uint32_t subsampling_width */
dilation_h, /* uint32_t dilation_height */
dilation_w, /* uint32_t dilation_width */
groups, /* uint32_t groups */
group_input_channels, /* size_t group_input_channels */
group_output_channels, /* size_t group_output_channels*/
ip_chan_stride, /* size_t input_channel_stride */
op_chan_stride, /* size_t output_channel_stride */
izp, /* int8_t input_zero_point */
ip_scale, /* float input_scale */
k_scales[0], /* float kernel_scale */
kernel, /* const int8_t* kernel */
bias, /* const int32_t* bias */
ozp, /* int8_t output_zero_point */
op_scale, /* float output_scale */
op_min, /* int8_t output_min */
op_max, /* int8_t output_max */
flags, /* uint32_t flags */
nullptr, /* xnn_caches_t caches */
op); /* xnn_operator_t* convolution_op_out */
} else { /* per_channel */
return xnn_create_convolution2d_nhwc_qc8(
pad_top, /* uint32_t input_padding_top */
pad_right, /* uint32_t input_padding_right */
pad_bottom, /* uint32_t input_padding_bottom */
pad_left, /* uint32_t input_padding_left */
kernel_h, /* uint32_t kernel_height */
kernel_w, /* uint32_t kernel_width */
stride_h, /* uint32_t subsampling_height */
stride_w, /* uint32_t subsampling_width */
dilation_h, /* uint32_t dilation_height */
dilation_w, /* uint32_t dilation_width */
groups, /* uint32_t groups */
group_input_channels, /* size_t group_input_channels */
group_output_channels, /* size_t group_output_channels*/
ip_chan_stride, /* size_t input_channel_stride */
op_chan_stride, /* size_t output_channel_stride */
izp, /* int8_t input_zero_point */
ip_scale, /* float input_scale */
k_scales, /* const float* kernel_scale */
kernel, /* const int8_t* kernel */
bias, /* const int32_t* bias */
ozp, /* int8_t output_zero_point */
op_scale, /* float output_scale */
op_min, /* int8_t output_min */
op_max, /* int8_t output_max */
flags, /* uint32_t flags */
nullptr, /* xnn_caches_t caches */
op); /* xnn_operator_t* convolution_op_out */
}
}
/*
* Series of setup wrapper functions to call xnn_setup_[de]conv* functions.
*/
C10_ALWAYS_INLINE
enum xnn_status xnnp_setup_convolution2d_nhwc(
xnn_operator_t op,
size_t batch,
size_t in_h,
size_t in_w,
const int8_t* inp,
int8_t* outp,
pthreadpool_t pt_pool,
bool per_channel = false,
bool transpose = false,
uint32_t adj_h = 0,
uint32_t adj_w = 0) {
if(transpose) {
TORCH_CHECK(!per_channel, "XNNPACK Q[SC]8 does not have a per channel deconvolution!");
return xnn_setup_deconvolution2d_nhwc_qs8(
op, /* xnn_operator_t deconvolution_op */
batch, /* size_t batch_size */
in_h, /* size_t input_height */
in_w, /* size_t input_width */
adj_h, /* uint32_t adjustment_height */
adj_w, /* uint32_t adjustment_width */
inp, /* const int8_t* input */
outp, /* int8_t* output */
pt_pool); /* pthreadpool_t threadpool */
}
if (!per_channel) {
return xnn_setup_convolution2d_nhwc_qs8(
op, /* xnn_operator_t convolution_op */
batch, /* size_t batch_size */
in_h, /* size_t input_height */
in_w, /* size_t input_width */
inp, /* const int8_t* input */
outp, /* int8_t* output */
pt_pool); /* pthreadpool_t threadpool */
} else { /* per_channel */
return xnn_setup_convolution2d_nhwc_qc8(
op, /* xnn_operator_t convolution_op */
batch, /* size_t batch_size */
in_h, /* size_t input_height */
in_w, /* size_t input_width */
inp, /* const int8_t* input */
outp, /* int8_t* output */
pt_pool); /* pthreadpool_t threadpool */
}
}
/*
* Series of wrapper functions to call xnn_create* and xnn_setup*
* functions for linear
*/
C10_ALWAYS_INLINE
enum xnn_status xnnp_create_fully_connected_nc(
size_t input_channels,
size_t output_channels,
size_t input_stride,
size_t output_stride,
int8_t input_zero_point,
float input_scale,
int8_t kernel_zero_point,
float kernel_scale,
const int8_t* kernel,
const int32_t* bias,
int8_t output_zero_point,
float output_scale,
int8_t output_min,
int8_t output_max,
uint32_t flags,
xnn_operator_t* fully_connected_op_out) {
/* Symmetric quantization forces kzp = 0 */
  TORCH_CHECK(!kernel_zero_point, "XNNPACK QS8 linear kernel expects kernel zero point to be zero. "
                                  "But got: ", kernel_zero_point);
return xnn_create_fully_connected_nc_qs8(
input_channels, /* size_t input_channels */
output_channels, /* size_t output_channels */
input_stride, /* size_t input_stride */
output_stride, /* size_t output_stride */
input_zero_point, /* int8_t input_zero_point */
input_scale, /* float input_scale */
kernel_scale, /* float kernel_scale */
kernel, /* const int8_t* kernel */
bias, /* const int32_t* bias */
output_zero_point, /* int8_t output_zero_point */
output_scale, /* float output_scale */
output_min, /* int8_t output_min */
output_max, /* int8_t output_max */
flags, /* uint32_t flags */
nullptr, /* xnn_caches_t caches */
fully_connected_op_out); /* xnn_operator_t* fully_connected_op_out */
}
C10_ALWAYS_INLINE
enum xnn_status xnnp_setup_fully_connected_nc(
xnn_operator_t fully_connected_op,
size_t batch_size,
const int8_t* input,
int8_t* output,
pthreadpool_t threadpool) {
return xnn_setup_fully_connected_nc_qs8(
fully_connected_op, /* xnn_operator_t fully_connected_op */
batch_size, /* size_t batch_size */
input, /* const int8_t* input */
output, /* int8_t* output */
threadpool); /* pthreadpool_t threadpool */
}
} // namespace xnnp_utils
} // namespace native
} // namespace at
#endif // USE_XNNPACK
| 12,149 | 41.78169 | 99 | h | null | pytorch-main/aten/src/ATen/native/quantized/cpu/conv_serialization.h |
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/core/List.h>
#include <ATen/native/quantized/cpu/fbgemm_utils.h>
#include <ATen/native/quantized/cpu/QnnpackUtils.h>
#include <ATen/native/quantized/cpu/OnednnUtils.h>
#include <c10/util/irange.h>
#include <cpuinfo.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/from_blob.h>
#endif
#include <tuple>
/* Convolution prepacked parameters serialization.
*
* Version 1
*
* - Fields:
* 1. weight
* 2. bias
* 3. stride x kSpatialDim
* 4. padding x kSpatialDim
* 5. dilation x kSpatialDim
* 6. groups
*
* Version 2
*
* - Fields:
* 0. version (string)
* 1. list of non-optional tensors
* 0: packed parameters (int16_t)
* - kSpatialDim
* - stride x kSpatialDim
* - padding x kSpatialDim
* - dilation x kSpatialDim
* - output_padding x kSpatialDim
* - groups
* - transpose (0 or 1)
* 1: weight
* 2. list of optional tensors
* 0: bias
*
* Version 3
*
* - Fields:
* 0. version (int64_t)
* 1. list of int64_t configuration values
* - kSpatialDim
* - stride x kSpatialDim
* - padding x kSpatialDim
* - dilation x kSpatialDim
* - output_padding x kSpatialDim
* - groups
* - flags (bitmask)
* - (1 << 0) transpose (1 = yes)
* 2. list of optional tensors
* 0: None (helps with type inference)
* 1: weight (this must be present)
* 2: bias
*/
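// Example (version 3): a non-transposed 2d conv with stride {1, 1},
// padding {0, 0}, dilation {1, 1}, output_padding {0, 0} and groups = 1
// stores its configuration values as
//   {2, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0}
// (kSpatialDim, stride, padding, dilation, output_padding, groups, flags),
// alongside the optional-tensor list {None, weight, bias}.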
using ConvParamsSerializationTypeV2 = std::tuple<
// version, for versions 2 and up
std::string,
// non-optional tensors
std::vector<at::Tensor>,
// optional tensors
std::vector<c10::optional<at::Tensor>>>;
using ConvParamsSerializationTypeV3 = std::tuple<
// version, int for versions 3 and up
int64_t,
// configuration values
std::vector<int64_t>,
// optional tensors
std::vector<c10::optional<at::Tensor>>>;
// Parses any historical conv packed params format into
// the current format.
template <uint32_t kSpatialDim>
ConvParamsSerializationTypeV3 parse_conv_serialized_state(c10::IValue v) {
// determine the version based on IValue contents
int version = -1;
if (v.isTuple()) {
const auto& elements = v.toTupleRef().elements();
if (!elements.empty()) {
auto firstElement = elements[0];
if (firstElement.isTensor()) {
version = 1;
} else if (firstElement.isString()) {
const std::string& version_str = firstElement.toStringRef();
// note: not parsing the string to automatically handle bad
// inputs
if (version_str == "2") {
version = 2;
}
} else if (firstElement.isInt()) {
auto raw_version = firstElement.toInt();
if (raw_version == 3) {
version = 3;
}
}
}
}
TORCH_INTERNAL_ASSERT(version != -1, "Unable to parse serialization version");
if (version == 1) {
// version 1 - convert to version 3 manually
const auto& elements = v.toTupleRef().elements();
at::Tensor weight = elements[0].toTensor();
c10::optional<at::Tensor> bias = elements[1].toOptional<at::Tensor>();
torch::List<at::Tensor> stride_x_kSpatialDim = elements[2].toTensorList();
torch::List<at::Tensor> padding_x_kSpatialDim = elements[3].toTensorList();
torch::List<at::Tensor> dilation_x_kSpatialDim = elements[4].toTensorList();
at::Tensor groups = elements[5].toTensor();
std::vector<int64_t> config_vals;
config_vals.reserve(
stride_x_kSpatialDim.size() + padding_x_kSpatialDim.size() +
dilation_x_kSpatialDim.size() + kSpatialDim + 3);
config_vals.push_back(kSpatialDim);
for (const auto i : c10::irange(stride_x_kSpatialDim.size())) {
auto stride = stride_x_kSpatialDim.get(i);
config_vals.push_back(stride[0].item<int16_t>());
}
for (const auto i : c10::irange(padding_x_kSpatialDim.size())) {
auto padding = padding_x_kSpatialDim.get(i);
config_vals.push_back(padding[0].item<int16_t>());
}
for (const auto i : c10::irange(dilation_x_kSpatialDim.size())) {
auto dilation = dilation_x_kSpatialDim.get(i);
config_vals.push_back(dilation[0].item<int16_t>());
}
// output_padding does not exist in v1, so we fill in a default value
for (const auto i : c10::irange(kSpatialDim)) {
(void)i; // Suppress unused variable
config_vals.push_back(0);
}
config_vals.push_back(groups[0].item<int16_t>());
// transpose does not exist in v1, so we fill in a default value
config_vals.push_back(0);
std::vector<c10::optional<at::Tensor>> tensors;
tensors.emplace_back();
tensors.emplace_back(weight);
tensors.emplace_back(bias);
int64_t version = 3;
return std::tie(version, config_vals, tensors);
} else if (version == 2) {
// version 2
const auto& elements = v.toTupleRef().elements();
std::vector<at::Tensor> non_optional = elements[1].toTensorList().vec();
std::vector<c10::optional<at::Tensor>> optional;
if (elements[2].isTensorList()) {
for (const auto& elem : elements[2].toTensorList()) {
optional.emplace_back(static_cast<at::Tensor>(elem));
}
} else {
for (const auto& elem : elements[2].toList()) {
optional.emplace_back(static_cast<c10::IValue>(elem).toOptional<at::Tensor>());
}
}
auto config_a = non_optional[0].accessor<int16_t, 1>();
std::vector<int64_t> config_vals;
config_vals.reserve(config_a.size(0));
for (const auto i : c10::irange(config_a.size(0))) {
config_vals.emplace_back(config_a[i]);
}
auto weight = non_optional[1];
auto bias = optional[0];
std::vector<c10::optional<at::Tensor>> tensors;
tensors.emplace_back();
tensors.emplace_back(weight);
tensors.emplace_back(bias);
int64_t version = 3;
return std::tie(version, config_vals, tensors);
} else if (version == 3) {
return v.to<ConvParamsSerializationTypeV3>();
} else {
TORCH_INTERNAL_ASSERT(false, "Unexpected serialized qconv version: ",
version);
}
}
#define QCONV_SERIALIZATION_VERSION 2
#if QCONV_SERIALIZATION_VERSION == 2
using ConvParamsSerializationType = ConvParamsSerializationTypeV2;
template <uint32_t kSpatialDim>
ConvParamsSerializationTypeV2 serialize_conv(
const c10::intrusive_ptr<ConvPackedParamsBase<kSpatialDim>>& params) {
std::string version = "2";
std::vector<at::Tensor> non_optional;
std::vector<c10::optional<at::Tensor>> optional;
  // create a packed int16_t tensor for conv params
std::vector<int16_t> params_vec;
params_vec.push_back(kSpatialDim);
auto stride = params->stride().vec();
params_vec.insert(params_vec.end(), stride.begin(), stride.end());
auto padding = params->padding().vec();
params_vec.insert(params_vec.end(), padding.begin(), padding.end());
auto dilation = params->dilation().vec();
params_vec.insert(params_vec.end(), dilation.begin(), dilation.end());
auto output_padding = params->output_padding().vec();
params_vec.insert(params_vec.end(), output_padding.begin(),
output_padding.end());
params_vec.push_back(params->groups());
params_vec.push_back(params->transpose());
int64_t vec_size = params_vec.size();
at::Tensor params_tensor = at::from_blob(
params_vec.data(), {vec_size},
at::TensorOptions().dtype(at::kShort))
// clone to retain ownership of the data
.clone();
at::Tensor weight;
c10::optional<at::Tensor> bias;
std::tie(weight, bias) = params->unpack();
non_optional.emplace_back(std::move(params_tensor));
non_optional.emplace_back(std::move(weight));
optional.emplace_back(std::move(bias));
return std::tie(version, non_optional, optional);
}
#elif QCONV_SERIALIZATION_VERSION == 3
using ConvParamsSerializationType = ConvParamsSerializationTypeV3;
template <uint32_t kSpatialDim>
ConvParamsSerializationTypeV3 serialize_conv(
const c10::intrusive_ptr<ConvPackedParamsBase<kSpatialDim>>& params) {
std::vector<int64_t> config_vals;
config_vals.push_back(kSpatialDim);
auto stride = params->stride().vec();
config_vals.insert(config_vals.end(), stride.begin(), stride.end());
auto padding = params->padding().vec();
config_vals.insert(config_vals.end(), padding.begin(), padding.end());
auto dilation = params->dilation().vec();
config_vals.insert(config_vals.end(), dilation.begin(), dilation.end());
auto output_padding = params->output_padding().vec();
config_vals.insert(config_vals.end(), output_padding.begin(),
output_padding.end());
config_vals.push_back(params->groups());
config_vals.push_back(params->transpose());
at::Tensor weight;
c10::optional<at::Tensor> bias;
std::tie(weight, bias) = params->unpack();
std::vector<c10::optional<at::Tensor>> tensors;
tensors.emplace_back();
tensors.emplace_back(weight);
tensors.emplace_back(bias);
int64_t version = 3;
return std::tie(version, config_vals, tensors);
}
#else
#error "Invalid qconv serialization version."
#endif
template <uint32_t kSpatialDim>
c10::intrusive_ptr<ConvPackedParamsBase<kSpatialDim>> deserialize_conv(
ConvParamsSerializationTypeV3 state) {
int64_t version;
std::vector<int64_t> config_vals;
std::vector<c10::optional<at::Tensor>> tensors;
std::tie(version, config_vals, tensors) = state;
TORCH_INTERNAL_ASSERT(version == 3, "Unexpected serialized qconv version: ", version);
TORCH_CHECK(tensors.size() == 3, "Wrong number of tensors", tensors.size());
c10::optional<at::Tensor> weight = tensors[1];
c10::optional<at::Tensor> bias = tensors[2];
TORCH_INTERNAL_ASSERT(weight, "Weight should always be present in serialized qconv.");
torch::List<int64_t> stride, padding, output_padding, dilation;
// skip kSpatialDim
int idx = 1;
for (const auto i : c10::irange(kSpatialDim)) {
(void)i; // Suppress unused variable
stride.emplace_back(config_vals.at(idx));
idx++;
}
for (const auto i : c10::irange(kSpatialDim)) {
(void)i; // Suppress unused variable
padding.emplace_back(config_vals.at(idx));
idx++;
}
for (const auto i : c10::irange(kSpatialDim)) {
(void)i; // Suppress unused variable
dilation.emplace_back(config_vals.at(idx));
idx++;
}
for (const auto i : c10::irange(kSpatialDim)) {
(void)i; // Suppress unused variable
TORCH_INTERNAL_ASSERT(idx < static_cast<int64_t>(config_vals.size()),
"Unexpected index = ", idx, " for config_vals of size ",
config_vals.size());
output_padding.emplace_back(config_vals.at(idx));
idx++;
}
int64_t groups = config_vals.at(idx);
idx++;
int64_t flags = config_vals.at(idx);
idx++;
TORCH_INTERNAL_ASSERT(idx == static_cast<int64_t>(config_vals.size()),
"Unexpected length of config_vals, expected ",
idx,
" got ",
config_vals.size());
bool transpose = flags & (1 << 0);
int64_t other_flags = flags & ~(1 << 0);
TORCH_INTERNAL_ASSERT(other_flags == 0, "Unexpected flags set in ", flags, ".");
auto& ctx = at::globalContext();
#ifdef USE_FBGEMM
if (ctx.qEngine() == at::QEngine::X86) {
#if AT_MKLDNN_ENABLED()
bool use_onednn = onednn_utils::should_use_onednn_quant(
weight.value(), transpose, groups, output_padding);
if (use_onednn) {
return PackedConvWeightsOnednn<kSpatialDim>::prepack(
weight.value(),
bias,
stride,
padding,
output_padding,
dilation,
groups,
transpose
);
}
#endif
return PackedConvWeight<kSpatialDim>::prepack(
weight.value(),
bias,
stride,
padding,
output_padding,
dilation,
groups,
transpose
);
} // x86
#endif
#ifdef USE_FBGEMM
if (ctx.qEngine() == at::QEngine::FBGEMM) {
return PackedConvWeight<kSpatialDim>::prepack(
weight.value(),
bias,
stride,
padding,
output_padding,
dilation,
groups,
transpose
);
}
#endif // USE_FBGEMM
#ifdef USE_PYTORCH_QNNPACK
if (ctx.qEngine() == at::QEngine::QNNPACK) {
TORCH_CHECK(
kSpatialDim == 2,
"prepack/__setstate__: QNNPACK only supports Conv2d "
"now.");
return PackedConvWeightsQnnp<kSpatialDim>::prepack(
weight.value(),
bias,
stride,
padding,
output_padding,
dilation,
groups,
transpose
);
}
#endif // USE_PYTORCH_QNNPACK
#if AT_MKLDNN_ENABLED()
if (ctx.qEngine() == at::QEngine::ONEDNN) {
return PackedConvWeightsOnednn<kSpatialDim>::prepack(
weight.value(),
bias,
stride,
padding,
output_padding,
dilation,
groups,
transpose
);
}
#endif // AT_MKLDNN_ENABLED()
TORCH_CHECK(
false,
"Didn't find engine for when deserializing ConvPackedParams: ",
toString(ctx.qEngine()));
}
| 12,861 | 29.406619 | 88 | h | null | pytorch-main/aten/src/ATen/native/quantized/cpu/fbgemm_utils.h |
#pragma once
#include <ATen/Tensor.h>
#include <ATen/native/quantized/PackedParams.h>
#include <ATen/native/quantized/cpu/EmbeddingPackedParams.h>
#include <c10/core/QScheme.h>
#include <c10/util/irange.h>
#ifdef USE_FBGEMM
#include <fbgemm/Fbgemm.h>
#include <fbgemm/FbgemmFP16.h>
#include <fbgemm/QuantUtils.h>
// The struct for the packed weight matrix (PackBMatrix) and the corresponding
// column offsets used for the fully connected layer, which are both prepared in
// the prepacking step to save computation during inference. Note the
// column offsets include the sum of the B columns as well as the scalar term
// B_zero_point * K, whereas the row offsets created by
// PackAWithQuantRowOffset/PackAWithIm2Col/PackAWithRowOffset are only the sum
// of the A rows. The column offsets are needed for the asymmetric quantization
// (affine quantization) of input matrix.
// Note that in JIT mode we can think of a way to fuse col_offsets with bias.
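// As a sketch of where both terms come from (a_zp/b_zp denote the activation
// and weight zero points, K the reduction dimension; the names below are
// illustrative, not FBGEMM identifiers):
//   sum_k (A[i,k] - a_zp) * (B[k,j] - b_zp)
//     = sum_k A[i,k] * B[k,j]
//       - b_zp * row_offset[i]   // row_offset[i] = sum_k A[i,k]
//       - a_zp * col_offset[j]   // col_offset[j] = sum_k B[k,j] - B_zero_point * K
// so the column offsets fold in both the per-column sums of B and the
// B_zero_point * K term, while the row offsets are plain sums of the A rows.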
struct TORCH_API PackedLinearWeight : public LinearPackedParamsBase {
PackedLinearWeight(
std::unique_ptr<fbgemm::PackBMatrix<int8_t>> w,
c10::optional<at::Tensor> bias,
std::vector<int32_t> col_offsets,
std::vector<float> w_scale,
std::vector<int32_t> w_zp,
c10::QScheme q_scheme)
: w(std::move(w)),
bias_(std::move(bias)),
col_offsets(std::move(col_offsets)),
w_scale(std::move(w_scale)),
w_zp(std::move(w_zp)),
q_scheme(std::move(q_scheme)) {}
std::unique_ptr<fbgemm::PackBMatrix<int8_t>> w;
c10::optional<at::Tensor> bias_;
std::vector<int32_t> col_offsets;
std::vector<float> w_scale;
std::vector<int32_t> w_zp;
c10::QScheme q_scheme;
at::Tensor apply(
at::Tensor input,
double output_scale,
int64_t output_zero_point) override;
at::Tensor apply_relu(
at::Tensor input,
double output_scale,
int64_t output_zero_point) override;
at::Tensor& apply_out(
const at::Tensor& input,
double output_scale,
int64_t output_zero_point,
at::Tensor& output) override;
at::Tensor& apply_relu_out(
const at::Tensor& input,
double output_scale,
int64_t output_zero_point,
at::Tensor& output) override;
at::Tensor apply_with_input_q_dq_qweight_dq_output_fp32(
at::Tensor input,
double input_scale,
int64_t input_zero_point) override;
at::Tensor apply_with_input_q_dq_qweight_dq_relu_output_fp32(
at::Tensor input,
double input_scale,
int64_t input_zero_point) override;
at::Tensor apply_dynamic(at::Tensor input, bool reduce_range = false)
override;
at::Tensor apply_dynamic_relu(at::Tensor input, bool reduce_range = false)
override;
std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() override;
c10::optional<at::Tensor> bias() override {
return bias_;
}
static c10::intrusive_ptr<LinearPackedParamsBase> prepack(
at::Tensor weight,
c10::optional<at::Tensor> bias);
private:
template <bool ReluFused>
at::Tensor& apply_impl(
const at::Tensor& input,
double output_scale,
int64_t output_zero_point,
at::Tensor& output);
template <bool ReluFused>
at::Tensor apply_with_input_q_dq_qweight_dq_output_fp32_impl(
const at::Tensor& input,
double input_scale,
int64_t input_zero_point);
template <bool ReluFused>
at::Tensor apply_dynamic_impl(at::Tensor input, bool reduce_range = false);
};
struct TORCH_API PackedLinearWeightFp16 : public LinearPackedParamsBase {
PackedLinearWeightFp16(
std::unique_ptr<fbgemm::PackedGemmMatrixFP16> w,
c10::optional<at::Tensor> bias)
: w(std::move(w)), bias_(std::move(bias)) {}
std::unique_ptr<fbgemm::PackedGemmMatrixFP16> w;
c10::optional<at::Tensor> bias_;
at::Tensor apply(
at::Tensor /*input*/,
double /*output_scale*/,
int64_t /*output_zero_point*/) override {
TORCH_INTERNAL_ASSERT(false);
}
at::Tensor apply_relu(
at::Tensor /*input*/,
double /*output_scale*/,
int64_t /*output_zero_point*/) override {
TORCH_INTERNAL_ASSERT(false);
}
at::Tensor apply_dynamic(at::Tensor input, bool reduce_range = false)
override;
at::Tensor apply_dynamic_relu(at::Tensor input, bool reduce_range = false)
override;
at::Tensor& apply_dynamic_out(
const at::Tensor& input,
at::Tensor& output,
bool reduce_range = false) override;
at::Tensor& apply_dynamic_relu_out(
const at::Tensor& input,
at::Tensor& output,
bool reduce_range = false) override;
std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() override;
c10::optional<at::Tensor> bias() override {
return bias_;
}
static c10::intrusive_ptr<LinearPackedParamsBase> prepack(
at::Tensor weight,
c10::optional<at::Tensor> bias);
void set_bias(c10::optional<at::Tensor> bias) override;
private:
template <bool ReluFused>
at::Tensor& apply_dynamic_impl(const at::Tensor& input, at::Tensor& output);
};
template <int kSpatialDim = 2>
struct TORCH_API PackedConvWeight : public ConvPackedParamsBase<kSpatialDim> {
PackedConvWeight(
std::unique_ptr<fbgemm::PackWeightsForConv<kSpatialDim>> w,
c10::optional<at::Tensor> bias,
torch::List<int64_t> stride,
torch::List<int64_t> padding,
torch::List<int64_t> output_padding,
torch::List<int64_t> dilation,
int64_t groups,
uint8_t transpose,
std::vector<int32_t> col_offsets,
std::vector<int64_t> kernel,
std::vector<float> w_scale,
std::vector<int32_t> w_zp,
c10::QScheme q_scheme)
: w(std::move(w)),
bias(std::move(bias)),
stride_(std::move(stride)),
padding_(std::move(padding)),
output_padding_(std::move(output_padding)),
dilation_(std::move(dilation)),
groups_(groups),
transpose_(transpose),
col_offsets(std::move(col_offsets)),
kernel(std::move(kernel)),
w_scale(std::move(w_scale)),
w_zp(std::move(w_zp)),
q_scheme(q_scheme) {}
std::unique_ptr<fbgemm::PackWeightsForConv<kSpatialDim>> w;
c10::optional<at::Tensor> bias;
torch::List<int64_t> stride_;
torch::List<int64_t> padding_;
torch::List<int64_t> output_padding_;
torch::List<int64_t> dilation_;
int64_t groups_;
uint8_t transpose_;
std::vector<int32_t> col_offsets;
std::vector<int64_t> kernel;
std::vector<float> w_scale;
std::vector<int32_t> w_zp;
c10::QScheme q_scheme;
at::Tensor apply(
const at::Tensor& input,
double output_scale,
int64_t output_zero_point) override;
at::Tensor apply_relu(
const at::Tensor& input,
double output_scale,
int64_t output_zero_point) override;
at::Tensor apply_dynamic(
const at::Tensor& input,
bool reduce_range) override;
std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() override;
static c10::intrusive_ptr<ConvPackedParamsBase<kSpatialDim>> prepack(
at::Tensor weight,
c10::optional<at::Tensor> bias,
torch::List<int64_t> stride,
torch::List<int64_t> padding,
torch::List<int64_t> output_padding,
torch::List<int64_t> dilation,
int64_t groups,
bool transpose);
const float* GetBiasData(at::Tensor* bias);
void GetQuantizationParams(
float act_scale,
float out_scale,
std::vector<float>* output_multiplier_float,
std::vector<float>* act_times_w_scale);
torch::List<int64_t> stride() const override {
return stride_;
}
torch::List<int64_t> padding() const override {
return padding_;
}
torch::List<int64_t> output_padding() const override {
return output_padding_;
}
torch::List<int64_t> dilation() const override {
return dilation_;
}
int64_t groups() const override {
return groups_;
}
bool transpose() const override {
return (bool)transpose_;
}
private:
template <bool ReluFused>
at::Tensor apply_impl(
const at::Tensor& input,
double output_scale,
int64_t output_zero_point);
};
// PackWeight: Convert the weight from uint8 to int8.
inline void convert_uint8_int8(
int len,
const uint8_t* src_uint8,
int8_t* dst_int8) {
for (const auto i : c10::irange(len)) {
dst_int8[i] = static_cast<int8_t>(static_cast<int32_t>(src_uint8[i]) - 128);
}
}
// UnpackWeight: Convert the weight from int8 to uint8.
inline void convert_int8_uint8(
int len,
const int8_t* src_int8,
uint8_t* dst_uint8) {
for (const auto i : c10::irange(len)) {
dst_uint8[i] =
static_cast<uint8_t>(static_cast<int32_t>(src_int8[i]) + 128);
}
}
namespace at {
namespace native {
namespace fbgemm_utils {
template <int kSpatialDim = 2>
fbgemm::conv_param_t<kSpatialDim> MakeFbgemmConvParam(
int N,
int C,
int M,
const std::vector<int>& image_shape,
int groups,
const std::vector<int>& kernels,
const std::vector<int>& strides,
const std::vector<int>& pads,
const std::vector<int>& dilations,
const std::vector<int>& output_padding = std::vector<int>(kSpatialDim, 0),
bool transposed = false);
// TODO: Remove functions below when ChannelsLast3d is ready.
Tensor MakeStridedQTensorCPU(
const IntArrayRef& sizes,
const IntArrayRef& strides,
const TensorOptions& options,
QuantizerPtr quantizer);
Tensor MakeEmptyAffineQuantizedChannelsLast3dTensor(
int64_t N,
int64_t C,
int64_t D,
int64_t H,
int64_t W,
const TensorOptions& options,
double scale,
int64_t zero_point);
Tensor MakeEmptyPerChannelAffineQuantizedChannelsLast3dTensor(
int64_t N,
int64_t C,
int64_t D,
int64_t H,
int64_t W,
const TensorOptions& options,
const Tensor& scales,
const Tensor& zero_points);
Tensor ConvertToChannelsLast3dTensor(const Tensor& src);
template <int kSpatialDim = 2>
Tensor TransposeConvTensorUnpackConversion(const Tensor& src, int groups);
template <int kSpatialDim>
Tensor ConvertConvWeightsToChannelLastTensor(
const at::Tensor& src,
int groups,
bool transpose);
} // namespace fbgemm_utils
} // namespace native
} // namespace at
#endif // USE_FBGEMM
struct TORCH_API PackedEmbeddingBagWeight : public EmbeddingPackedParamsBase {
PackedEmbeddingBagWeight(
at::Tensor packed_w,
std::vector<float> w_scale,
std::vector<float> w_zp,
int64_t bit_rate,
c10::QScheme q_scheme,
int64_t version)
: packed_w(std::move(packed_w)),
w_scale(std::move(w_scale)),
w_zp(std::move(w_zp)),
bit_rate_(bit_rate),
q_scheme(q_scheme),
version_(version) {
// NOLINTNEXTLINE(clang-analyzer-cplusplus.Move)
if (!packed_w.is_contiguous()) {
packed_w = packed_w.contiguous();
}
}
at::Tensor packed_w;
std::vector<float> w_scale;
std::vector<float> w_zp;
int64_t bit_rate_;
c10::QScheme q_scheme;
int64_t version_;
at::Tensor unpack() override;
static c10::intrusive_ptr<EmbeddingPackedParamsBase> prepack(
at::Tensor weight);
int64_t bit_rate() const override {
return bit_rate_;
}
int64_t version() const override {
return version_;
}
at::Tensor embeddingbag_byte(
const at::Tensor& indices,
const c10::optional<at::Tensor>& offsets,
bool pruned_weights,
const c10::optional<at::Tensor>& per_sample_weights_,
const c10::optional<at::Tensor>& compressed_indices_mapping,
bool include_last_offset,
bool is_embedding_op) override;
at::Tensor embeddingbag_4bit(
const at::Tensor& indices,
const c10::optional<at::Tensor>& offsets,
bool pruned_weights,
const c10::optional<at::Tensor>& per_sample_weights_,
const c10::optional<at::Tensor>& compressed_indices_mapping,
bool include_last_offset,
bool is_embedding_op) override;
};
pytorch-main/aten/src/ATen/native/quantized/cpu/qembeddingbag.h
#pragma once
#include <ATen/core/Tensor.h>
#include <cstdint>
namespace at {
namespace native {
Tensor& embedding_bag_byte_rowwise_offsets_out(
Tensor& output,
const Tensor& weight,
const Tensor& indices,
const c10::optional<Tensor>& offsets_in,
const bool /* scale_grad_by_freq */,
const int64_t /* mode */,
bool pruned_weights,
const c10::optional<Tensor>& per_sample_weights_,
const c10::optional<Tensor>& compressed_indices_mapping,
bool include_last_offset);
Tensor& embedding_bag_4bit_rowwise_offsets_out(
Tensor& output,
const Tensor& weight,
const Tensor& indices,
const c10::optional<Tensor>& offsets_in,
const bool /* scale_grad_by_freq */,
const int64_t /* mode */,
bool pruned_weights,
const c10::optional<Tensor>& per_sample_weights_,
const c10::optional<Tensor>& compressed_indices_mapping,
bool include_last_offset);
Tensor& qembeddingbag_byte_unpack_out(Tensor& output, const Tensor& packed_weight);
} // native
} // at
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/deps/clog/include/clog.h
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <inttypes.h>
#include <stdarg.h>
#include <stdlib.h>
#define CLOG_NONE 0
#define CLOG_FATAL 1
#define CLOG_ERROR 2
#define CLOG_WARNING 3
#define CLOG_INFO 4
#define CLOG_DEBUG 5
#ifndef CLOG_VISIBILITY
#if defined(__ELF__)
#define CLOG_VISIBILITY __attribute__((__visibility__("internal")))
#elif defined(__MACH__)
#define CLOG_VISIBILITY __attribute__((__visibility__("hidden")))
#else
#define CLOG_VISIBILITY
#endif
#endif
#ifndef CLOG_ARGUMENTS_FORMAT
#if defined(__GNUC__)
#define CLOG_ARGUMENTS_FORMAT __attribute__((__format__(__printf__, 1, 2)))
#else
#define CLOG_ARGUMENTS_FORMAT
#endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
CLOG_VISIBILITY void clog_vlog_debug(
const char* module,
const char* format,
va_list args);
CLOG_VISIBILITY void clog_vlog_info(
const char* module,
const char* format,
va_list args);
CLOG_VISIBILITY void clog_vlog_warning(
const char* module,
const char* format,
va_list args);
CLOG_VISIBILITY void clog_vlog_error(
const char* module,
const char* format,
va_list args);
CLOG_VISIBILITY void clog_vlog_fatal(
const char* module,
const char* format,
va_list args);
#define CLOG_DEFINE_LOG_DEBUG(log_debug_function_name, module, level) \
CLOG_ARGUMENTS_FORMAT \
inline static void log_debug_function_name(const char* format, ...) { \
if (level >= CLOG_DEBUG) { \
va_list args; \
va_start(args, format); \
clog_vlog_debug(module, format, args); \
va_end(args); \
} \
}
#define CLOG_DEFINE_LOG_INFO(log_info_function_name, module, level) \
CLOG_ARGUMENTS_FORMAT \
inline static void log_info_function_name(const char* format, ...) { \
if (level >= CLOG_INFO) { \
va_list args; \
va_start(args, format); \
clog_vlog_info(module, format, args); \
va_end(args); \
} \
}
#define CLOG_DEFINE_LOG_WARNING(log_warning_function_name, module, level) \
CLOG_ARGUMENTS_FORMAT \
inline static void log_warning_function_name(const char* format, ...) { \
if (level >= CLOG_WARNING) { \
va_list args; \
va_start(args, format); \
clog_vlog_warning(module, format, args); \
va_end(args); \
} \
}
#define CLOG_DEFINE_LOG_ERROR(log_error_function_name, module, level) \
CLOG_ARGUMENTS_FORMAT \
inline static void log_error_function_name(const char* format, ...) { \
if (level >= CLOG_ERROR) { \
va_list args; \
va_start(args, format); \
clog_vlog_error(module, format, args); \
va_end(args); \
} \
}
#define CLOG_DEFINE_LOG_FATAL(log_fatal_function_name, module, level) \
CLOG_ARGUMENTS_FORMAT \
inline static void log_fatal_function_name(const char* format, ...) { \
if (level >= CLOG_FATAL) { \
va_list args; \
va_start(args, format); \
clog_vlog_fatal(module, format, args); \
va_end(args); \
} \
abort(); \
}
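/*
 * Usage sketch (the names below are illustrative, not part of clog itself):
 * a client picks a compile-time level, defines module-level loggers with the
 * macros above, and calls them like printf. Calls whose severity is more
 * verbose than the chosen level are skipped by the "if (level >= ...)" check.
 *
 *   #define MYLIB_LOG_LEVEL CLOG_INFO
 *   CLOG_DEFINE_LOG_ERROR(mylib_log_error, "mylib", MYLIB_LOG_LEVEL)
 *   CLOG_DEFINE_LOG_INFO(mylib_log_info, "mylib", MYLIB_LOG_LEVEL)
 *
 *   mylib_log_info("initialized %d kernels", num_kernels);
 */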
#ifdef __cplusplus
} /* extern "C" */
#endif
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/deps/clog/src/clog.c
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <unistd.h>
#endif
#ifdef __ANDROID__
#include <android/log.h>
#endif
#ifndef CLOG_LOG_TO_STDIO
#ifdef __ANDROID__
#define CLOG_LOG_TO_STDIO 0
#else
#define CLOG_LOG_TO_STDIO 1
#endif
#endif
#include <clog.h>
/* Messages up to this size are formatted entirely on-stack, and don't allocate
* heap memory */
#define CLOG_STACK_BUFFER_SIZE 1024
#define CLOG_FATAL_PREFIX "Fatal error: "
#define CLOG_FATAL_PREFIX_LENGTH 13
#define CLOG_FATAL_PREFIX_FORMAT "Fatal error in %s: "
#define CLOG_ERROR_PREFIX "Error: "
#define CLOG_ERROR_PREFIX_LENGTH 7
#define CLOG_ERROR_PREFIX_FORMAT "Error in %s: "
#define CLOG_WARNING_PREFIX "Warning: "
#define CLOG_WARNING_PREFIX_LENGTH 9
#define CLOG_WARNING_PREFIX_FORMAT "Warning in %s: "
#define CLOG_INFO_PREFIX "Note: "
#define CLOG_INFO_PREFIX_LENGTH 6
#define CLOG_INFO_PREFIX_FORMAT "Note (%s): "
#define CLOG_DEBUG_PREFIX "Debug: "
#define CLOG_DEBUG_PREFIX_LENGTH 7
#define CLOG_DEBUG_PREFIX_FORMAT "Debug (%s): "
#define CLOG_SUFFIX_LENGTH 1
void clog_vlog_fatal(const char* module, const char* format, va_list args) {
#if defined(__ANDROID__) && !CLOG_LOG_TO_STDIO
__android_log_vprint(ANDROID_LOG_FATAL, module, format, args);
#else
char stack_buffer[CLOG_STACK_BUFFER_SIZE];
char* heap_buffer = NULL;
char* out_buffer = &stack_buffer[0];
/* The first call to vsnprintf will clobber args, thus need a copy in case a
* second vsnprintf call is needed */
va_list args_copy;
va_copy(args_copy, args);
int prefix_chars = CLOG_FATAL_PREFIX_LENGTH;
if (module == NULL) {
memcpy(stack_buffer, CLOG_FATAL_PREFIX, CLOG_FATAL_PREFIX_LENGTH);
} else {
prefix_chars = snprintf(
stack_buffer, CLOG_STACK_BUFFER_SIZE, CLOG_FATAL_PREFIX_FORMAT, module);
if (prefix_chars < 0) {
/* Format error in prefix (possible if prefix is modified): skip prefix
* and continue as if nothing happened. */
prefix_chars = 0;
}
}
int format_chars;
if (prefix_chars + CLOG_SUFFIX_LENGTH >= CLOG_STACK_BUFFER_SIZE) {
/*
* Prefix + suffix alone would overflow the on-stack buffer, thus need to
* use on-heap buffer. Do not even try to format the string into on-stack
* buffer.
*/
format_chars = vsnprintf(NULL, 0, format, args);
} else {
format_chars = vsnprintf(
&stack_buffer[prefix_chars],
CLOG_STACK_BUFFER_SIZE - prefix_chars - CLOG_SUFFIX_LENGTH,
format,
args);
}
if (format_chars < 0) {
/* Format error in the message: silently ignore this particular message. */
goto cleanup;
}
if (prefix_chars + format_chars + CLOG_SUFFIX_LENGTH >
CLOG_STACK_BUFFER_SIZE) {
/* Allocate a buffer on heap, and vsnprintf to this buffer */
heap_buffer = malloc(prefix_chars + format_chars + CLOG_SUFFIX_LENGTH);
if (heap_buffer == NULL) {
goto cleanup;
}
if (prefix_chars > CLOG_STACK_BUFFER_SIZE) {
/* Prefix didn't fit into on-stack buffer, re-format it again to on-heap
* buffer */
snprintf(
heap_buffer,
prefix_chars + 1 /* for '\0'-terminator */,
CLOG_FATAL_PREFIX_FORMAT,
module);
} else {
/* Copy pre-formatted prefix from on-stack buffer to on-heap buffer */
memcpy(heap_buffer, stack_buffer, prefix_chars);
}
vsnprintf(
heap_buffer + prefix_chars,
format_chars + CLOG_SUFFIX_LENGTH,
format,
args_copy);
out_buffer = heap_buffer;
}
out_buffer[prefix_chars + format_chars] = '\n';
#ifdef _WIN32
DWORD bytes_written;
WriteFile(
GetStdHandle(STD_ERROR_HANDLE),
out_buffer,
prefix_chars + format_chars + CLOG_SUFFIX_LENGTH,
&bytes_written,
NULL);
#else
write(
STDERR_FILENO,
out_buffer,
prefix_chars + format_chars + CLOG_SUFFIX_LENGTH);
#endif
cleanup:
free(heap_buffer);
va_end(args_copy);
#endif
}
void clog_vlog_error(const char* module, const char* format, va_list args) {
#if defined(__ANDROID__) && !CLOG_LOG_TO_STDIO
__android_log_vprint(ANDROID_LOG_ERROR, module, format, args);
#else
char stack_buffer[CLOG_STACK_BUFFER_SIZE];
char* heap_buffer = NULL;
char* out_buffer = &stack_buffer[0];
/* The first call to vsnprintf will clobber args, thus need a copy in case a
* second vsnprintf call is needed */
va_list args_copy;
va_copy(args_copy, args);
int prefix_chars = CLOG_ERROR_PREFIX_LENGTH;
if (module == NULL) {
memcpy(stack_buffer, CLOG_ERROR_PREFIX, CLOG_ERROR_PREFIX_LENGTH);
} else {
prefix_chars = snprintf(
stack_buffer, CLOG_STACK_BUFFER_SIZE, CLOG_ERROR_PREFIX_FORMAT, module);
if (prefix_chars < 0) {
/* Format error in prefix (possible if prefix is modified): skip prefix
* and continue as if nothing happened. */
prefix_chars = 0;
}
}
int format_chars;
if (prefix_chars + CLOG_SUFFIX_LENGTH >= CLOG_STACK_BUFFER_SIZE) {
/*
* Prefix + suffix alone would overflow the on-stack buffer, thus need to
* use on-heap buffer. Do not even try to format the string into on-stack
* buffer.
*/
format_chars = vsnprintf(NULL, 0, format, args);
} else {
format_chars = vsnprintf(
&stack_buffer[prefix_chars],
CLOG_STACK_BUFFER_SIZE - prefix_chars - CLOG_SUFFIX_LENGTH,
format,
args);
}
if (format_chars < 0) {
/* Format error in the message: silently ignore this particular message. */
goto cleanup;
}
if (prefix_chars + format_chars + CLOG_SUFFIX_LENGTH >
CLOG_STACK_BUFFER_SIZE) {
/* Allocate a buffer on heap, and vsnprintf to this buffer */
heap_buffer = malloc(prefix_chars + format_chars + CLOG_SUFFIX_LENGTH);
if (heap_buffer == NULL) {
goto cleanup;
}
if (prefix_chars > CLOG_STACK_BUFFER_SIZE) {
/* Prefix didn't fit into on-stack buffer, re-format it again to on-heap
* buffer */
snprintf(
heap_buffer,
prefix_chars + 1 /* for '\0'-terminator */,
CLOG_ERROR_PREFIX_FORMAT,
module);
} else {
/* Copy pre-formatted prefix from on-stack buffer to on-heap buffer */
memcpy(heap_buffer, stack_buffer, prefix_chars);
}
vsnprintf(
heap_buffer + prefix_chars,
format_chars + CLOG_SUFFIX_LENGTH,
format,
args_copy);
out_buffer = heap_buffer;
}
out_buffer[prefix_chars + format_chars] = '\n';
#ifdef _WIN32
DWORD bytes_written;
WriteFile(
GetStdHandle(STD_ERROR_HANDLE),
out_buffer,
prefix_chars + format_chars + CLOG_SUFFIX_LENGTH,
&bytes_written,
NULL);
#else
write(
STDERR_FILENO,
out_buffer,
prefix_chars + format_chars + CLOG_SUFFIX_LENGTH);
#endif
cleanup:
free(heap_buffer);
va_end(args_copy);
#endif
}
void clog_vlog_warning(const char* module, const char* format, va_list args) {
#if defined(__ANDROID__) && !CLOG_LOG_TO_STDIO
__android_log_vprint(ANDROID_LOG_WARN, module, format, args);
#else
char stack_buffer[CLOG_STACK_BUFFER_SIZE];
char* heap_buffer = NULL;
char* out_buffer = &stack_buffer[0];
/* The first call to vsnprintf will clobber args, thus need a copy in case a
* second vsnprintf call is needed */
va_list args_copy;
va_copy(args_copy, args);
int prefix_chars = CLOG_WARNING_PREFIX_LENGTH;
if (module == NULL) {
memcpy(stack_buffer, CLOG_WARNING_PREFIX, CLOG_WARNING_PREFIX_LENGTH);
} else {
prefix_chars = snprintf(
stack_buffer,
CLOG_STACK_BUFFER_SIZE,
CLOG_WARNING_PREFIX_FORMAT,
module);
if (prefix_chars < 0) {
/* Format error in prefix (possible if prefix is modified): skip prefix
* and continue as if nothing happened. */
prefix_chars = 0;
}
}
int format_chars;
if (prefix_chars + CLOG_SUFFIX_LENGTH >= CLOG_STACK_BUFFER_SIZE) {
/*
* Prefix + suffix alone would overflow the on-stack buffer, thus need to
* use on-heap buffer. Do not even try to format the string into on-stack
* buffer.
*/
format_chars = vsnprintf(NULL, 0, format, args);
} else {
format_chars = vsnprintf(
&stack_buffer[prefix_chars],
CLOG_STACK_BUFFER_SIZE - prefix_chars - CLOG_SUFFIX_LENGTH,
format,
args);
}
if (format_chars < 0) {
/* Format error in the message: silently ignore this particular message. */
goto cleanup;
}
if (prefix_chars + format_chars + CLOG_SUFFIX_LENGTH >
CLOG_STACK_BUFFER_SIZE) {
/* Allocate a buffer on heap, and vsnprintf to this buffer */
heap_buffer = malloc(prefix_chars + format_chars + CLOG_SUFFIX_LENGTH);
if (heap_buffer == NULL) {
goto cleanup;
}
if (prefix_chars > CLOG_STACK_BUFFER_SIZE) {
/* Prefix didn't fit into on-stack buffer, re-format it again to on-heap
* buffer */
snprintf(
heap_buffer,
prefix_chars + 1 /* for '\0'-terminator */,
CLOG_WARNING_PREFIX_FORMAT,
module);
} else {
/* Copy pre-formatted prefix from on-stack buffer to on-heap buffer */
memcpy(heap_buffer, stack_buffer, prefix_chars);
}
vsnprintf(
heap_buffer + prefix_chars,
format_chars + CLOG_SUFFIX_LENGTH,
format,
args_copy);
out_buffer = heap_buffer;
}
out_buffer[prefix_chars + format_chars] = '\n';
#ifdef _WIN32
DWORD bytes_written;
WriteFile(
GetStdHandle(STD_ERROR_HANDLE),
out_buffer,
prefix_chars + format_chars + CLOG_SUFFIX_LENGTH,
&bytes_written,
NULL);
#else
write(
STDERR_FILENO,
out_buffer,
prefix_chars + format_chars + CLOG_SUFFIX_LENGTH);
#endif
cleanup:
free(heap_buffer);
va_end(args_copy);
#endif
}
void clog_vlog_info(const char* module, const char* format, va_list args) {
#if defined(__ANDROID__) && !CLOG_LOG_TO_STDIO
__android_log_vprint(ANDROID_LOG_INFO, module, format, args);
#else
char stack_buffer[CLOG_STACK_BUFFER_SIZE];
char* heap_buffer = NULL;
char* out_buffer = &stack_buffer[0];
/* The first call to vsnprintf will clobber args, thus need a copy in case a
* second vsnprintf call is needed */
va_list args_copy;
va_copy(args_copy, args);
int prefix_chars = CLOG_INFO_PREFIX_LENGTH;
if (module == NULL) {
memcpy(stack_buffer, CLOG_INFO_PREFIX, CLOG_INFO_PREFIX_LENGTH);
} else {
prefix_chars = snprintf(
stack_buffer, CLOG_STACK_BUFFER_SIZE, CLOG_INFO_PREFIX_FORMAT, module);
if (prefix_chars < 0) {
/* Format error in prefix (possible if prefix is modified): skip prefix
* and continue as if nothing happened. */
prefix_chars = 0;
}
}
int format_chars;
if (prefix_chars + CLOG_SUFFIX_LENGTH >= CLOG_STACK_BUFFER_SIZE) {
/*
* Prefix + suffix alone would overflow the on-stack buffer, thus need to
* use on-heap buffer. Do not even try to format the string into on-stack
* buffer.
*/
format_chars = vsnprintf(NULL, 0, format, args);
} else {
format_chars = vsnprintf(
&stack_buffer[prefix_chars],
CLOG_STACK_BUFFER_SIZE - prefix_chars - CLOG_SUFFIX_LENGTH,
format,
args);
}
if (format_chars < 0) {
/* Format error in the message: silently ignore this particular message. */
goto cleanup;
}
if (prefix_chars + format_chars + CLOG_SUFFIX_LENGTH >
CLOG_STACK_BUFFER_SIZE) {
/* Allocate a buffer on heap, and vsnprintf to this buffer */
heap_buffer = malloc(prefix_chars + format_chars + CLOG_SUFFIX_LENGTH);
if (heap_buffer == NULL) {
goto cleanup;
}
if (prefix_chars > CLOG_STACK_BUFFER_SIZE) {
/* Prefix didn't fit into on-stack buffer, re-format it again to on-heap
* buffer */
snprintf(
heap_buffer,
prefix_chars + 1 /* for '\0'-terminator */,
CLOG_INFO_PREFIX_FORMAT,
module);
} else {
/* Copy pre-formatted prefix from on-stack buffer to on-heap buffer */
memcpy(heap_buffer, stack_buffer, prefix_chars);
}
vsnprintf(
heap_buffer + prefix_chars,
format_chars + CLOG_SUFFIX_LENGTH,
format,
args_copy);
out_buffer = heap_buffer;
}
out_buffer[prefix_chars + format_chars] = '\n';
#ifdef _WIN32
DWORD bytes_written;
WriteFile(
GetStdHandle(STD_OUTPUT_HANDLE),
out_buffer,
prefix_chars + format_chars + CLOG_SUFFIX_LENGTH,
&bytes_written,
NULL);
#else
write(
STDOUT_FILENO,
out_buffer,
prefix_chars + format_chars + CLOG_SUFFIX_LENGTH);
#endif
cleanup:
free(heap_buffer);
va_end(args_copy);
#endif
}
void clog_vlog_debug(const char* module, const char* format, va_list args) {
#if defined(__ANDROID__) && !CLOG_LOG_TO_STDIO
__android_log_vprint(ANDROID_LOG_DEBUG, module, format, args);
#else
char stack_buffer[CLOG_STACK_BUFFER_SIZE];
char* heap_buffer = NULL;
char* out_buffer = &stack_buffer[0];
/* The first call to vsnprintf will clobber args, thus need a copy in case a
* second vsnprintf call is needed */
va_list args_copy;
va_copy(args_copy, args);
int prefix_chars = CLOG_DEBUG_PREFIX_LENGTH;
if (module == NULL) {
memcpy(stack_buffer, CLOG_DEBUG_PREFIX, CLOG_DEBUG_PREFIX_LENGTH);
} else {
prefix_chars = snprintf(
stack_buffer, CLOG_STACK_BUFFER_SIZE, CLOG_DEBUG_PREFIX_FORMAT, module);
if (prefix_chars < 0) {
/* Format error in prefix (possible if prefix is modified): skip prefix
* and continue as if nothing happened. */
prefix_chars = 0;
}
}
int format_chars;
if (prefix_chars + CLOG_SUFFIX_LENGTH >= CLOG_STACK_BUFFER_SIZE) {
/*
* Prefix + suffix alone would overflow the on-stack buffer, thus need to
* use on-heap buffer. Do not even try to format the string into on-stack
* buffer.
*/
format_chars = vsnprintf(NULL, 0, format, args);
} else {
format_chars = vsnprintf(
&stack_buffer[prefix_chars],
CLOG_STACK_BUFFER_SIZE - prefix_chars - CLOG_SUFFIX_LENGTH,
format,
args);
}
if (format_chars < 0) {
/* Format error in the message: silently ignore this particular message. */
goto cleanup;
}
if (prefix_chars + format_chars + CLOG_SUFFIX_LENGTH >
CLOG_STACK_BUFFER_SIZE) {
/* Allocate a buffer on heap, and vsnprintf to this buffer */
heap_buffer = malloc(prefix_chars + format_chars + CLOG_SUFFIX_LENGTH);
if (heap_buffer == NULL) {
goto cleanup;
}
if (prefix_chars > CLOG_STACK_BUFFER_SIZE) {
/* Prefix didn't fit into on-stack buffer, re-format it again to on-heap
* buffer */
snprintf(
heap_buffer,
prefix_chars + 1 /* for '\0'-terminator */,
CLOG_DEBUG_PREFIX_FORMAT,
module);
} else {
/* Copy pre-formatted prefix from on-stack buffer to on-heap buffer */
memcpy(heap_buffer, stack_buffer, prefix_chars);
}
vsnprintf(
heap_buffer + prefix_chars,
format_chars + CLOG_SUFFIX_LENGTH,
format,
args_copy);
out_buffer = heap_buffer;
}
out_buffer[prefix_chars + format_chars] = '\n';
#ifdef _WIN32
DWORD bytes_written;
WriteFile(
GetStdHandle(STD_OUTPUT_HANDLE),
out_buffer,
prefix_chars + format_chars + CLOG_SUFFIX_LENGTH,
&bytes_written,
NULL);
#else
write(
STDOUT_FILENO,
out_buffer,
prefix_chars + format_chars + CLOG_SUFFIX_LENGTH);
#endif
cleanup:
free(heap_buffer);
va_end(args_copy);
#endif
}
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/include/pack_block_sparse.h
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <memory>
#include <tuple>
#include <vector>
#ifndef _WIN32
#include <qnnpack/AlignedAllocator.h>
#endif
#include <pytorch_qnnpack.h>
#include <qnnpack/common.h>
#include <qnnpack/math.h>
#ifdef QNNPACK_BCSRMATRIX_DEBUG
#include <iostream>
#endif // QNNPACK_BCSRMATRIX_DEBUG
namespace qnnpack {
template <typename T>
struct OwnedOrBorrowedVector {
using VECTOR_T =
#ifndef _WIN32
std::vector<T, AlignedAllocator<T, 16>>;
#else
std::vector<T>;
#endif
// Only one of owned_vec_data_ or borrowed_tuple_data_ will be meaningfully
// populated.
// A union could potentially be used here to reduce memory usage.
// std::variant is not used here because it causes internal build errors
// due to incompatibility.
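  // In practice (see the generateBlockCSRMatrix overloads below): packing a
  // dense weight tensor builds owned vectors, while wrapping already-packed
  // BCSR buffers borrows the caller's pointers without copying.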
VECTOR_T owned_vec_data_;
std::tuple<T*, uint32_t> borrowed_tuple_data_;
bool owned;
VECTOR_T& vector() {
assert(owned);
return owned_vec_data_;
}
uint32_t size() const {
if (owned) {
return owned_vec_data_.size();
} else {
return std::get<1>(borrowed_tuple_data_);
}
}
const T* data() const {
if (owned) {
return owned_vec_data_.data();
} else {
return std::get<0>(borrowed_tuple_data_);
}
}
const T& operator[](int i) const {
return data()[i];
}
OwnedOrBorrowedVector() : owned(true) {}
OwnedOrBorrowedVector(T* data_ptr, const uint32_t size)
: borrowed_tuple_data_(std::tuple<T*, uint32_t>(data_ptr, size)),
owned(false) {}
};
struct BCSRMatrix {
OwnedOrBorrowedVector<uint8_t> values;
uint32_t col_block_size; // input features block size
uint32_t row_block_size; // output features block size
enum pytorch_qnnp_sparse_matrix_indices_dtype indices_dtype;
virtual ~BCSRMatrix() = default;
  // Return void* for the data pointers so callers don't need to know the
  // underlying TypedBCSRMatrix indices dtype; this is also the form in which
  // they are passed into the qnnpack fully-connected sparse op.
virtual const void* col_indices_data_ptr() const = 0;
virtual const void* row_values_data_ptr() const = 0;
#ifdef QNNPACK_BCSRMATRIX_DEBUG
virtual void print() const = 0;
#endif // QNNPACK_BCSRMATRIX_DEBUG
/*
* Unpack from BCSR to Dense
* - Each value and zero point converted to int8_t by subtracting 128
* - num_rows and num_cols are dimensions of dense weight tensor
* - dst should be able to hold num_rows * num_cols elements
* - zero_points should hold num_rows zero points
*/
virtual void unpack(
int8_t* dst,
const int64_t num_rows,
const int64_t num_cols,
const uint8_t* zero_points) const = 0;
virtual uint32_t max_index() const = 0;
};
template <typename INDICES_DTYPE>
struct TypedBCSRMatrix : BCSRMatrix {
OwnedOrBorrowedVector<INDICES_DTYPE> col_indices;
OwnedOrBorrowedVector<INDICES_DTYPE> row_values;
TypedBCSRMatrix();
const void* col_indices_data_ptr() const override;
const void* row_values_data_ptr() const override;
#ifdef QNNPACK_BCSRMATRIX_DEBUG
void print() const override;
#endif // QNNPACK_BCSRMATRIX_DEBUG
void unpack(
int8_t* dst,
const int64_t num_rows,
const int64_t num_cols,
const uint8_t* zero_points) const override;
uint32_t max_index() const override;
~TypedBCSRMatrix() override = default;
};
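/*
 * Layout sketch of the BCSR format produced below (the numbers are
 * illustrative). With row_block_size = col_block_size = 2 and a 4x4 weight
 * where only the top-left and bottom-right 2x2 blocks differ from the row
 * zero points:
 *
 *   row_values  = {0, 1, 2}   // running count of non-zero blocks per block row
 *   col_indices = {0, 1}      // block-column index of each non-zero block
 *   values      = 8 uint8s    // the two 2x2 blocks, stored row-major per block
 */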
template <typename INDICES_DTYPE>
std::unique_ptr<BCSRMatrix> generateBlockCSRMatrix(
const uint8_t* a,
const size_t N,
const size_t K,
const uint32_t row_block_size,
const uint32_t col_block_size,
const uint8_t* zero_points) {
assert(K > 0);
std::unique_ptr<TypedBCSRMatrix<INDICES_DTYPE>> bcsr_mat =
std::make_unique<TypedBCSRMatrix<INDICES_DTYPE>>();
auto& row_values = bcsr_mat->row_values.vector();
auto& col_indices = bcsr_mat->col_indices.vector();
auto& values = bcsr_mat->values.vector();
const uint32_t num_row_blocks = (N + row_block_size - 1) / row_block_size;
// K must be > 0
const uint32_t num_col_blocks = (K + col_block_size - 1) / col_block_size;
row_values.reserve(num_row_blocks);
uint32_t num_nnz_blocks{0};
row_values.push_back(num_nnz_blocks);
for (uint32_t i = 0; i < num_row_blocks; ++i) {
for (uint32_t j = 0; j < num_col_blocks; ++j) {
bool block_zero{true};
for (uint32_t ib = 0; ib < row_block_size; ++ib) {
uint32_t row_index = i * row_block_size + ib;
if PYTORCH_QNNP_UNLIKELY(row_index >= N) {
break;
}
for (uint32_t jb = 0; jb < col_block_size; ++jb) {
uint32_t col_index = j * col_block_size + jb;
if PYTORCH_QNNP_UNLIKELY(col_index >= K) {
goto block_scanned;
}
if (*(a + row_index * K + col_index) != zero_points[row_index]) {
block_zero = false;
goto block_scanned;
}
}
}
block_scanned:
if (!block_zero) {
col_indices.push_back(j);
num_nnz_blocks++;
for (uint32_t ib = 0; ib < row_block_size; ++ib) {
uint32_t row_index = i * row_block_size + ib;
if PYTORCH_QNNP_UNLIKELY(row_index >= N) {
for (; row_index < (num_row_blocks * row_block_size); row_index++) {
for (uint32_t jb = 0; jb < col_block_size; ++jb) {
values.push_back(zero_points[N-1]);
}
}
break;
}
for (uint32_t jb = 0; jb < col_block_size; ++jb) {
uint32_t col_index = j * col_block_size + jb;
if PYTORCH_QNNP_UNLIKELY(col_index >= K) {
values.push_back(zero_points[row_index]);
} else {
uint8_t val = *(a + row_index * K + col_index);
values.push_back(val);
}
}
}
}
}
row_values.push_back(num_nnz_blocks);
}
bcsr_mat->row_block_size = row_block_size;
bcsr_mat->col_block_size = col_block_size;
return bcsr_mat;
}
template <typename INDICES_DTYPE>
std::unique_ptr<BCSRMatrix> generateBlockCSRMatrix(
INDICES_DTYPE* col_indices,
INDICES_DTYPE* row_values,
uint8_t* values,
const int64_t col_indices_size,
const int64_t row_values_size,
const int64_t values_size,
const int64_t row_block_size,
const int64_t col_block_size) {
std::unique_ptr<TypedBCSRMatrix<INDICES_DTYPE>> bcsr_mat =
std::make_unique<TypedBCSRMatrix<INDICES_DTYPE>>();
bcsr_mat->col_indices =
OwnedOrBorrowedVector<INDICES_DTYPE>(col_indices, col_indices_size);
bcsr_mat->row_values =
OwnedOrBorrowedVector<INDICES_DTYPE>(row_values, row_values_size);
bcsr_mat->values = OwnedOrBorrowedVector<uint8_t>(values, values_size);
bcsr_mat->row_block_size = row_block_size;
bcsr_mat->col_block_size = col_block_size;
return bcsr_mat;
}
template <typename INDICES_DTYPE>
struct IndicesDtypeEnumTrait {
static_assert(
sizeof(INDICES_DTYPE) == 0,
"Invalid dtype for IndicesDtypeEnumTrait");
};
template <>
struct IndicesDtypeEnumTrait<uint32_t> {
const static pytorch_qnnp_sparse_matrix_indices_dtype dtype =
pytorch_qnnp_sparse_matrix_indices_dtype_uint32_t;
};
template <>
struct IndicesDtypeEnumTrait<uint16_t> {
const static pytorch_qnnp_sparse_matrix_indices_dtype dtype =
pytorch_qnnp_sparse_matrix_indices_dtype_uint16_t;
};
template <>
struct IndicesDtypeEnumTrait<uint8_t> {
const static pytorch_qnnp_sparse_matrix_indices_dtype dtype =
pytorch_qnnp_sparse_matrix_indices_dtype_uint8_t;
};
template <typename INDICES_DTYPE>
TypedBCSRMatrix<INDICES_DTYPE>::TypedBCSRMatrix() {
indices_dtype = IndicesDtypeEnumTrait<INDICES_DTYPE>::dtype;
}
template <typename INDICES_DTYPE>
const void* TypedBCSRMatrix<INDICES_DTYPE>::col_indices_data_ptr() const {
return static_cast<const void*>(col_indices.data());
}
template <typename INDICES_DTYPE>
const void* TypedBCSRMatrix<INDICES_DTYPE>::row_values_data_ptr() const {
return static_cast<const void*>(row_values.data());
}
#ifdef QNNPACK_BCSRMATRIX_DEBUG
template <typename INDICES_DTYPE>
void TypedBCSRMatrix<INDICES_DTYPE>::print() const {
std::cout << "row block size:" << row_block_size << std::endl;
std::cout << "col block size:" << col_block_size << std::endl;
std::cout << "row ptr\n";
std::cout
<< "indices dtype: uint"
<< static_cast<
std::underlying_type_t<pytorch_qnnp_sparse_matrix_indices_dtype>>(
indices_dtype)
<< "_t" << std::endl;
for (uint32_t i = 0; i < row_values.size(); i++) {
std::cout << (uint32_t)row_values[i] << ", ";
}
std::cout << std::endl;
std::cout << "col indices\n";
for (uint32_t i = 0; i < col_indices.size(); i++) {
std::cout << (uint32_t)col_indices[i] << ", ";
}
std::cout << std::endl;
std::cout << "Actual values\n";
for (uint32_t i = 0; i < values.size(); i++) {
std::cout << (uint32_t)values[i] << ", ";
}
std::cout << std::endl;
}
#endif // QNNPACK_BCSRMATRIX_DEBUG
template <typename INDICES_DTYPE>
void TypedBCSRMatrix<INDICES_DTYPE>::unpack(
int8_t* dst,
const int64_t num_rows,
const int64_t num_cols,
const uint8_t* zero_points) const {
for (int64_t i = 0; i < num_rows; i++) {
memset(
dst + i * num_cols,
static_cast<int8_t>(static_cast<int16_t>(zero_points[i]) - 128),
num_cols * sizeof(int8_t));
}
const int64_t num_block_rows = static_cast<int64_t>(row_values.size()) - 1;
const int64_t block_size = (int64_t)row_block_size * col_block_size;
int64_t weight_values_num = 0;
for (int64_t block_row_num = 0; block_row_num < num_block_rows;
block_row_num++) {
const int64_t num_blocks_in_current_block_row =
row_values[block_row_num + 1] - row_values[block_row_num];
for (int64_t k = 0; k < num_blocks_in_current_block_row;
k++) { // iterate over each block in the row
const int64_t block_start_row_num = block_row_num * row_block_size;
const int64_t block_start_col_num =
(int64_t)(col_indices[weight_values_num / block_size]) *
col_block_size;
for (int64_t l = 0; l < block_size;
l++) { // iterate over each value in the block
const int64_t row_num = block_start_row_num + l / col_block_size;
const int64_t col_num = block_start_col_num + l % col_block_size;
if (row_num < num_rows && col_num < num_cols) {
dst[row_num * num_cols + col_num] = static_cast<int8_t>(
static_cast<int16_t>(values[weight_values_num]) - 128);
}
weight_values_num++;
}
}
}
}
template <typename INDICES_DTYPE>
uint32_t TypedBCSRMatrix<INDICES_DTYPE>::max_index() const {
return static_cast<uint32_t>(std::max(
*std::max_element(
row_values.data(), row_values.data() + row_values.size()),
*std::max_element(
col_indices.data(), col_indices.data() + col_indices.size())));
}
/**
* Given a BCSRMatrix (bcsr_) and a block of code enclosed in { }
* (dispatch_body), run the block of code with the following in scope
* 1) The BCSRMatrix's underlying TypedBCSRMatrix, called typed_bcsr
* 2) The TypedBCSRMatrix's indices data type, called INDICES_DTYPE
*/
#define QNNPACK_BCSRMATRIX_DISPATCH_INDICES_DTYPE(bcsr_, dispatch_body) \
[&bcsr = bcsr_]() { \
switch (bcsr->indices_dtype) { \
case pytorch_qnnp_sparse_matrix_indices_dtype_uint32_t: { \
using INDICES_DTYPE = uint32_t; \
const qnnpack::TypedBCSRMatrix<INDICES_DTYPE>* typed_bcsr = \
static_cast<const qnnpack::TypedBCSRMatrix<INDICES_DTYPE>*>( \
bcsr.get()); \
return [&typed_bcsr]() dispatch_body(); \
} \
case pytorch_qnnp_sparse_matrix_indices_dtype_uint16_t: { \
using INDICES_DTYPE = uint16_t; \
const qnnpack::TypedBCSRMatrix<INDICES_DTYPE>* typed_bcsr = \
static_cast<const qnnpack::TypedBCSRMatrix<INDICES_DTYPE>*>( \
bcsr.get()); \
return [&typed_bcsr]() dispatch_body(); \
} \
case pytorch_qnnp_sparse_matrix_indices_dtype_uint8_t: { \
using INDICES_DTYPE = uint8_t; \
const qnnpack::TypedBCSRMatrix<INDICES_DTYPE>* typed_bcsr = \
static_cast<const qnnpack::TypedBCSRMatrix<INDICES_DTYPE>*>( \
bcsr.get()); \
return [&typed_bcsr]() dispatch_body(); \
} \
case pytorch_qnnp_sparse_matrix_indices_dtype_invalid: { \
assert(false); \
} \
} \
/* Throw exception to avoid the following errors: */ \
/* - "non-void lambda does not return a value in all control paths" */ \
/* - "control reaches end of non-void function" */ \
/* Throwing exception from within invalid case alone does not fix these */ \
throw std::invalid_argument( \
"Invalid indices dtype in QNNPACK_BCSRMATRIX_DISPATCH_INDICES_DTYPE"); \
}()
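/*
 * Example usage (a sketch; `bcsr` is a std::unique_ptr<BCSRMatrix>):
 *
 *   uint32_t max_idx = QNNPACK_BCSRMATRIX_DISPATCH_INDICES_DTYPE(bcsr, {
 *     return typed_bcsr->max_index();
 *   });
 */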
} // namespace qnnpack
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/include/pytorch_qnnpack.h
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <pthreadpool.h>
#include <qnnpack/log.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Status code for any QNNPACK function call.
*/
enum pytorch_qnnp_status {
/** The call succeeded, and all output arguments now contain valid data. */
pytorch_qnnp_status_success = 0,
pytorch_qnnp_status_uninitialized = 1,
pytorch_qnnp_status_invalid_parameter = 2,
pytorch_qnnp_status_unsupported_parameter = 3,
pytorch_qnnp_status_unsupported_hardware = 4,
pytorch_qnnp_status_out_of_memory = 5,
};
enum pytorch_qnnp_sparse_matrix_indices_dtype {
pytorch_qnnp_sparse_matrix_indices_dtype_invalid = 0,
pytorch_qnnp_sparse_matrix_indices_dtype_uint8_t = 8,
pytorch_qnnp_sparse_matrix_indices_dtype_uint16_t = 16,
pytorch_qnnp_sparse_matrix_indices_dtype_uint32_t = 32,
};
enum pytorch_qnnp_status pytorch_qnnp_initialize(void);
enum pytorch_qnnp_status pytorch_qnnp_deinitialize(void);
typedef struct pytorch_qnnp_operator* pytorch_qnnp_operator_t;
enum pytorch_qnnp_status pytorch_qnnp_create_convolution2d_nhwc_q8(
uint32_t input_padding_height,
uint32_t input_padding_width,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t subsampling_height,
uint32_t subsampling_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
uint8_t input_zero_point,
const uint8_t* kernel_zero_points,
const uint8_t* kernel,
const int32_t* bias,
uint8_t output_zero_point,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
const float* requantization_scales,
bool per_channel,
pytorch_qnnp_operator_t* convolution);
enum pytorch_qnnp_status pytorch_qnnp_create_convolution3d_ndhwc_q8(
uint32_t input_padding_depth,
uint32_t input_padding_height,
uint32_t input_padding_width,
uint32_t kernel_depth,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t subsampling_depth,
uint32_t subsampling_height,
uint32_t subsampling_width,
uint32_t dilation_depth,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
uint8_t input_zero_point,
const uint8_t* kernel_zero_points,
const uint8_t* kernel,
const int32_t* bias,
uint8_t output_zero_point,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
const float* requantization_scales,
bool per_channel,
pytorch_qnnp_operator_t* convolution);
enum pytorch_qnnp_status pytorch_qnnp_setup_convolution2d_nhwc_q8(
pytorch_qnnp_operator_t convolution,
size_t batch_size,
size_t input_height,
size_t input_width,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride,
pthreadpool_t threadpool);
enum pytorch_qnnp_status pytorch_qnnp_setup_convolution_ndhwc_q8(
pytorch_qnnp_operator_t convolution,
size_t batch_size,
size_t input_depth,
size_t input_height,
size_t input_width,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride,
pthreadpool_t threadpool);
enum pytorch_qnnp_status pytorch_qnnp_create_deconvolution2d_nhwc_q8(
uint32_t input_padding_height,
uint32_t input_padding_width,
uint32_t adjustment_height,
uint32_t adjustment_width,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t stride_height,
uint32_t stride_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
uint8_t input_zero_point,
const uint8_t* kernel_zero_points,
const uint8_t* kernel,
const int32_t* bias,
uint8_t output_zero_point,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
const float* requantization_scales,
pytorch_qnnp_operator_t* deconvolution);
enum pytorch_qnnp_status pytorch_qnnp_setup_deconvolution2d_nhwc_q8(
pytorch_qnnp_operator_t deconvolution,
size_t batch_size,
size_t input_height,
size_t input_width,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride,
pthreadpool_t threadpool);
enum pytorch_qnnp_status pytorch_qnnp_create_fully_connected_nc_q8(
size_t input_channels,
size_t output_channels,
uint8_t input_zero_point,
const uint8_t* kernel_zero_points,
const uint8_t* kernel,
const int32_t* bias,
uint8_t output_zero_point,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
const float* requantization_scales,
pytorch_qnnp_operator_t* fully_connected);
enum pytorch_qnnp_status pytorch_qnnp_create_fully_connected_sparse_dq_nc_q8(
size_t input_channels,
size_t output_channels,
uint8_t input_zero_point,
const uint8_t* kernel_zero_points,
const void* kernel_col_indices,
const void* kernel_row_values,
const uint8_t* kernel_values,
const uint32_t kernel_row_block_size,
const uint32_t kernel_col_block_size,
enum pytorch_qnnp_sparse_matrix_indices_dtype kernel_indices_dtype,
uint8_t output_zero_point,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
const float* requantization_scales,
bool use_prepack_kernel,
pytorch_qnnp_operator_t* fully_connected);
enum pytorch_qnnp_status pytorch_qnnp_setup_fully_connected_nc_q8(
pytorch_qnnp_operator_t fully_connected,
size_t batch_size,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride);
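/*
 * Typical operator lifecycle (a sketch; argument lists are abbreviated and
 * error handling is omitted):
 *
 *   pytorch_qnnp_initialize();
 *   pytorch_qnnp_operator_t fc = NULL;
 *   pytorch_qnnp_create_fully_connected_nc_q8(..., &fc);
 *   pytorch_qnnp_setup_fully_connected_nc_q8(
 *       fc, batch_size, input, input_stride, output, output_stride);
 *   pytorch_qnnp_run_operator(fc, threadpool);
 *   pytorch_qnnp_delete_operator(fc);
 */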
enum pytorch_qnnp_status pytorch_qnnp_setup_fully_connected_sparse_dq_nc_q8(
pytorch_qnnp_operator_t fully_connected,
size_t batch_size,
const uint8_t* input,
size_t input_stride,
const float* bias,
float* output,
size_t output_stride);
enum pytorch_qnnp_status pytorch_qnnp_create_global_average_pooling_nwc_q8(
size_t channels,
uint8_t input_zero_point,
float input_scale,
uint8_t output_zero_point,
float output_scale,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
pytorch_qnnp_operator_t* global_average_pooling);
enum pytorch_qnnp_status pytorch_qnnp_setup_global_average_pooling_nwc_q8(
pytorch_qnnp_operator_t global_average_pooling,
size_t batch_size,
size_t width,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride);
enum pytorch_qnnp_status pytorch_qnnp_create_average_pooling2d_nhwc_q8(
uint32_t input_padding_height,
uint32_t input_padding_width,
uint32_t pooling_height,
uint32_t pooling_width,
uint32_t stride_height,
uint32_t stride_width,
size_t channels,
uint8_t input_zero_point,
float input_scale,
uint8_t output_zero_point,
float output_scale,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
pytorch_qnnp_operator_t* average_pooling);
enum pytorch_qnnp_status pytorch_qnnp_setup_average_pooling2d_nhwc_q8(
pytorch_qnnp_operator_t average_pooling,
size_t batch_size,
size_t input_height,
size_t input_width,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride,
pthreadpool_t threadpool);
enum pytorch_qnnp_status pytorch_qnnp_create_max_pooling2d_nhwc_u8(
uint32_t input_padding_height,
uint32_t input_padding_width,
uint32_t pooling_height,
uint32_t pooling_width,
uint32_t stride_height,
uint32_t stride_width,
uint32_t dilation_height,
uint32_t dilation_width,
size_t channels,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
pytorch_qnnp_operator_t* max_pooling);
enum pytorch_qnnp_status pytorch_qnnp_setup_max_pooling2d_nhwc_u8(
pytorch_qnnp_operator_t max_pooling,
size_t batch_size,
size_t input_height,
size_t input_width,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride,
pthreadpool_t threadpool);
enum pytorch_qnnp_status pytorch_qnnp_create_channel_shuffle_nc_x8(
size_t groups,
size_t group_channels,
uint32_t flags,
pytorch_qnnp_operator_t* channel_shuffle);
enum pytorch_qnnp_status pytorch_qnnp_setup_channel_shuffle_nc_x8(
pytorch_qnnp_operator_t channel_shuffle,
size_t batch_size,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride);
enum pytorch_qnnp_status pytorch_qnnp_create_add_nc_q8(
size_t channels,
uint8_t a_zero_point,
float a_scale,
uint8_t b_zero_point,
float b_scale,
uint8_t sum_zero_point,
float sum_scale,
uint8_t sum_min,
uint8_t sum_max,
uint32_t flags,
pytorch_qnnp_operator_t* add);
enum pytorch_qnnp_status pytorch_qnnp_setup_add_nc_q8(
pytorch_qnnp_operator_t add,
size_t batch_size,
const uint8_t* a,
size_t a_stride,
const uint8_t* b,
size_t b_stride,
uint8_t* sum,
size_t sum_stride);
enum pytorch_qnnp_status pytorch_qnnp_create_clamp_nc_u8(
size_t channels,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
pytorch_qnnp_operator_t* clamp);
enum pytorch_qnnp_status pytorch_qnnp_setup_clamp_nc_u8(
pytorch_qnnp_operator_t clamp,
size_t batch_size,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride);
enum pytorch_qnnp_status pytorch_qnnp_create_sigmoid_nc_q8(
size_t channels,
uint8_t input_zero_point,
float input_scale,
uint8_t output_zero_point,
float output_scale,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
pytorch_qnnp_operator_t* sigmoid);
enum pytorch_qnnp_status pytorch_qnnp_setup_sigmoid_nc_q8(
pytorch_qnnp_operator_t sigmoid,
size_t batch_size,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride);
enum pytorch_qnnp_status pytorch_qnnp_create_leaky_relu_nc_q8(
size_t channels,
float negative_slope,
uint8_t input_zero_point,
float input_scale,
uint8_t output_zero_point,
float output_scale,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
pytorch_qnnp_operator_t* leaky_relu);
enum pytorch_qnnp_status pytorch_qnnp_setup_leaky_relu_nc_q8(
pytorch_qnnp_operator_t leaky_relu,
size_t batch_size,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride);
enum pytorch_qnnp_status pytorch_qnnp_create_softargmax_nc_q8(
size_t channels,
float input_scale,
uint8_t output_zero_point,
float output_scale,
uint32_t flags,
pytorch_qnnp_operator_t* softargmax);
enum pytorch_qnnp_status pytorch_qnnp_setup_softargmax_nc_q8(
pytorch_qnnp_operator_t softargmax,
size_t batch_size,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride);
enum pytorch_qnnp_status pytorch_qnnp_create_tanh_nc_q8(
size_t channels,
uint8_t input_zero_point,
float input_scale,
uint8_t output_zero_point,
float output_scale,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
pytorch_qnnp_operator_t* tanh);
enum pytorch_qnnp_status pytorch_qnnp_setup_tanh_nc_q8(
pytorch_qnnp_operator_t tanh,
size_t batch_size,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride);
enum pytorch_qnnp_status pytorch_qnnp_create_hardsigmoid_nc_q8(
size_t channels,
uint8_t input_zero_point,
float input_scale,
uint8_t output_zero_point,
float output_scale,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
pytorch_qnnp_operator_t* hardsigmoid);
enum pytorch_qnnp_status pytorch_qnnp_setup_hardsigmoid_nc_q8(
pytorch_qnnp_operator_t hardsigmoid,
size_t batch_size,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride);
enum pytorch_qnnp_status pytorch_qnnp_create_hardswish_nc_q8(
size_t channels,
uint8_t input_zero_point,
float input_scale,
uint8_t output_zero_point,
float output_scale,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
pytorch_qnnp_operator_t* hardswish);
enum pytorch_qnnp_status pytorch_qnnp_setup_hardswish_nc_q8(
pytorch_qnnp_operator_t hardswish,
size_t batch_size,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride);
enum pytorch_qnnp_status pytorch_qnnp_run_operator(
pytorch_qnnp_operator_t op,
pthreadpool_t threadpool);
enum pytorch_qnnp_status pytorch_qnnp_delete_operator(
pytorch_qnnp_operator_t op);
#ifdef __cplusplus
} /* extern "C" */
#endif
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/include/qnnpack_func.h
#pragma once
#include <cstdlib>
#include <qnnpack/operator.h>
namespace qnnpack {
class PrePackConvWeights final {
public:
PrePackConvWeights(
const pytorch_qnnp_operator_t convolution,
const uint8_t* kernel_zero_points,
const uint8_t* kernel,
const int32_t* bias);
void* getPackedWeights() const
{
return packed_weights_;
}
int64_t getOutputChannels() const
{
return output_channels_;
}
~PrePackConvWeights()
{
if (packed_weights_ != nullptr) {
free(packed_weights_);
}
}
PrePackConvWeights() = delete;
PrePackConvWeights(const PrePackConvWeights&) = delete;
PrePackConvWeights& operator=(const PrePackConvWeights&) = delete;
private:
void* packed_weights_ = nullptr;
int64_t output_channels_;
};
class PackBMatrix final {
public:
PackBMatrix(
size_t input_channels,
size_t output_channels,
const uint8_t* kernel_zero_points,
const float* requantization_scale,
const uint8_t* kernel,
const int32_t* bias);
  // This constructor is to be used for dynamic-mode quantization. In dynamic
  // mode we don't yet support per-channel quantization, and paying the cost
  // of allocating memory for per-channel zero points and requantization
  // scales would hurt performance.
PackBMatrix(
size_t input_channels,
size_t output_channels,
const uint8_t kernel_zero_point,
const float requantization_scale,
const uint8_t* kernel,
const int32_t* bias);
void* getPackedWeights() const
{
return packed_weights_;
}
void unpackWeights(
const uint8_t* kernel_zero_points,
int8_t* kernel
) const;
size_t getInputChannels() const
{
return input_channels_;
}
size_t getOutputChannels() const
{
return output_channels_;
}
~PackBMatrix()
{
if (packed_weights_ != nullptr) {
free(packed_weights_);
}
}
PackBMatrix() = delete;
PackBMatrix(const PackBMatrix&) = delete;
PackBMatrix& operator=(const PackBMatrix&) = delete;
private:
void* packed_weights_ = nullptr;
size_t input_channels_;
size_t output_channels_;
};
enum pytorch_qnnp_status qnnpackLinear(
const size_t batch_size,
const size_t input_channels,
const size_t output_channels,
const uint8_t input_zero_point,
const uint8_t* kernel_zero_points,
const float* requantization_scales,
const uint8_t output_zero_point,
const uint8_t output_min,
const uint8_t output_max,
const uint8_t* input,
const size_t input_stride,
void* packed_weights,
uint8_t* output,
const size_t output_stride,
pthreadpool_t threadpool);
enum pytorch_qnnp_status qnnpackConv(
const pytorch_qnnp_operator_t convolution,
void* packed_weights,
const size_t batch_size,
const size_t input_depth,
const size_t input_height,
const size_t input_width,
const uint8_t input_zero_point,
const uint8_t* input,
const uint8_t* kernel_zero_points,
const float* requantization_scales,
const uint8_t output_zero_point,
const uint8_t output_min,
const uint8_t output_max,
uint8_t* output,
pthreadpool_t threadpool);
enum pytorch_qnnp_status qnnpackDeConv(
const pytorch_qnnp_operator_t deconvolution,
void* packed_weights,
const size_t batch_size,
const size_t input_height,
const size_t input_width,
const uint8_t input_zero_point,
const uint8_t* input,
const uint8_t* kernel_zero_points,
const float* requantization_scales,
const uint8_t output_zero_point,
const uint8_t output_min,
const uint8_t output_max,
uint8_t* output,
pthreadpool_t threadpool);
enum pytorch_qnnp_status qnnpackLinearDynamic(
const size_t batch_size,
const size_t input_channels,
const size_t output_channels,
const uint8_t input_zero_point,
const uint8_t* kernel_zero_points,
const float* dequantization_scales,
const uint8_t* input,
const size_t input_stride,
void* packed_weights,
const float* bias,
float* output,
const size_t output_stride,
pthreadpool_t threadpool);
} // namespace qnnpack
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/add.c
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <inttypes.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <pytorch_qnnpack.h>
#include <qnnpack/log.h>
#include <qnnpack/operator.h>
#include <qnnpack/params.h>
#include <qnnpack/requantization.h>
enum pytorch_qnnp_status pytorch_qnnp_create_add_nc_q8(
size_t channels,
uint8_t a_zero_point,
float a_scale,
uint8_t b_zero_point,
float b_scale,
uint8_t sum_zero_point,
float sum_scale,
uint8_t sum_min,
uint8_t sum_max,
uint32_t flags,
pytorch_qnnp_operator_t* add_out) {
pytorch_qnnp_operator_t add_op = NULL;
enum pytorch_qnnp_status status = pytorch_qnnp_status_uninitialized;
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_create_add_nc_q8 failed because QNNPACK is not properly initialized");
goto error;
}
status = pytorch_qnnp_status_invalid_parameter;
if (channels == 0) {
pytorch_qnnp_log_error(
"failed to create add operator with %zu channels: number of channels must be non-zero",
channels);
goto error;
}
if (a_scale <= 0.0f || !isnormal(a_scale)) {
pytorch_qnnp_log_error(
"failed to create add operator with %.7g A scale: scale must be finite and positive",
a_scale);
goto error;
}
if (b_scale <= 0.0f || !isnormal(b_scale)) {
pytorch_qnnp_log_error(
"failed to create add operator with %.7g B scale: scale must be finite and positive",
b_scale);
goto error;
}
if (sum_scale <= 0.0f || !isnormal(sum_scale)) {
pytorch_qnnp_log_error(
"failed to create add operator with %.7g output scale: scale must be finite and positive",
sum_scale);
goto error;
}
if (sum_min >= sum_max) {
pytorch_qnnp_log_error(
"failed to create add operator with [%" PRIu8 ", %" PRIu8
"] output range: range min must be below range max",
sum_min,
sum_max);
goto error;
}
status = pytorch_qnnp_status_unsupported_parameter;
const float a_output_scale = a_scale / sum_scale;
if (a_output_scale < 0x1.0p-14f || a_output_scale >= 0x1.0p+8f) {
pytorch_qnnp_log_error(
"failed to create add operator with %.7g A-to-output scale ratio: scale ratio must be in [2**-14, 2**8) range",
a_output_scale);
goto error;
}
const float b_output_scale = b_scale / sum_scale;
if (b_output_scale < 0x1.0p-14f || b_output_scale >= 0x1.0p+8f) {
pytorch_qnnp_log_error(
"failed to create add operator with %.7g A-to-output scale ratio: scale ratio must be in [2**-14, 2**8) range",
b_output_scale);
goto error;
}
status = pytorch_qnnp_status_out_of_memory;
add_op = calloc(1, sizeof(struct pytorch_qnnp_operator));
if (add_op == NULL) {
pytorch_qnnp_log_error(
"failed to allocate %zu bytes for pytorch_qnnp_operator structure",
sizeof(struct pytorch_qnnp_operator));
goto error;
}
add_op->channels = channels;
add_op->add_quantization_params =
pytorch_qnnp_compute_add_quantization_params(
a_zero_point,
b_zero_point,
sum_zero_point,
a_scale / sum_scale,
b_scale / sum_scale,
sum_min,
sum_max);
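  /*
   * The parameters computed above encode the usual quantized addition; a
   * sketch of the dequantized math (not the exact fixed-point kernel) is:
   *   sum_q ~= clamp(sum_zero_point
   *                  + (a_scale / sum_scale) * (a_q - a_zero_point)
   *                  + (b_scale / sum_scale) * (b_q - b_zero_point),
   *                  sum_min, sum_max)
   * which is why both scale ratios were range-checked against [2**-14, 2**8).
   */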
add_op->ukernel_type = pytorch_qnnp_ukernel_type_add;
add_op->format = pytorch_qnnp_format_quint8;
*add_out = add_op;
return pytorch_qnnp_status_success;
error:
pytorch_qnnp_delete_operator(add_op);
return status;
}
enum pytorch_qnnp_status pytorch_qnnp_setup_add_nc_q8(
pytorch_qnnp_operator_t add_op,
size_t batch_size,
const uint8_t* a,
size_t a_stride,
const uint8_t* b,
size_t b_stride,
uint8_t* sum,
size_t sum_stride) {
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_setup_add_nc_q8 failed because QNNPACK is not properly initialized");
return pytorch_qnnp_status_uninitialized;
}
if (batch_size == 0) {
add_op->batch_size = 0;
return pytorch_qnnp_status_success;
}
add_op->batch_size = batch_size;
add_op->input = a;
add_op->input_pixel_stride = a_stride;
add_op->input2 = b;
add_op->input2_pixel_stride = b_stride;
add_op->output = sum;
add_op->output_pixel_stride = sum_stride;
return pytorch_qnnp_status_success;
}
| 4,530 | 27.142857 | 119 | c |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/average-pooling.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <math.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <pytorch_qnnpack.h>
#include <qnnpack/common.h>
#include <qnnpack/indirection.h>
#include <qnnpack/log.h>
#include <qnnpack/math.h>
#include <qnnpack/operator.h>
#include <qnnpack/params.h>
static inline size_t compute_output_dimension(
size_t padded_input_dimension,
size_t pooling_dimension,
size_t stride_dimension) {
return (padded_input_dimension - pooling_dimension) / stride_dimension + 1;
}
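/*
 * Worked example of the formula above (illustrative numbers, not taken from
 * any particular call site): a 7-wide input padded by 1 pixel on each side,
 * pooled with a 3-wide window at stride 2, gives
 *   (7 + 2 * 1 - 3) / 2 + 1 = 4
 * output columns; without padding the same input gives (7 - 3) / 2 + 1 = 3.
 */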
enum pytorch_qnnp_status pytorch_qnnp_create_average_pooling2d_nhwc_q8(
uint32_t input_padding_height,
uint32_t input_padding_width,
uint32_t pooling_height,
uint32_t pooling_width,
uint32_t stride_height,
uint32_t stride_width,
size_t channels,
uint8_t input_zero_point,
float input_scale,
uint8_t output_zero_point,
float output_scale,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
pytorch_qnnp_operator_t* average_pooling_out) {
pytorch_qnnp_operator_t average_pooling = NULL;
enum pytorch_qnnp_status status = pytorch_qnnp_status_uninitialized;
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_create_average_pooling2d_nhwc_q8 failed because QNNPACK is not properly initialized");
goto error;
}
status = pytorch_qnnp_status_invalid_parameter;
const uint32_t pooling_size = pooling_height * pooling_width;
if (pooling_size == 0) {
pytorch_qnnp_log_error(
"failed to create average pooling with %" PRIu32 "x%" PRIu32
" pooling size: "
"pooling size dimensions must be non-zero",
pooling_width,
pooling_height);
goto error;
}
if (pooling_size == 1) {
pytorch_qnnp_log_error(
"failed to create average pooling with 1 pooling element: "
"1x1 pooling is meaningless");
goto error;
}
if (stride_height == 0 || stride_width == 0) {
pytorch_qnnp_log_error(
"failed to create average pooling with %" PRIu32 "x%" PRIu32
" stride: "
"stride dimensions must be non-zero",
stride_width,
stride_height);
goto error;
}
if (channels == 0) {
pytorch_qnnp_log_error(
"failed to create average pooling with %zu channels: "
"number of channels must be non-zero",
channels);
goto error;
}
if (input_scale <= 0.0f || !isnormal(input_scale)) {
pytorch_qnnp_log_error(
"failed to create average pooling with %.7g input scale: "
"scale must be finite and positive",
input_scale);
goto error;
}
if (output_scale <= 0.0f || !isnormal(output_scale)) {
pytorch_qnnp_log_error(
"failed to create average pooling with %.7g output scale: "
"scale must be finite and positive",
output_scale);
goto error;
}
status = pytorch_qnnp_status_unsupported_parameter;
const float input_output_scale = input_scale / output_scale;
if (input_output_scale < 0x1.0p-8f || input_output_scale >= 0x1.0p+8f) {
pytorch_qnnp_log_error(
"failed to create average pooling with %.7g input scale and %.7g output scale: "
"input-to-output scale ratio (%.7f) must be in [2**-8, 2**8) range",
input_scale,
output_scale,
input_output_scale);
goto error;
}
if (pooling_size >= 16777216) {
pytorch_qnnp_log_error(
"failed to create average pooling with %" PRIu32 " (%" PRIu32
"x%" PRIu32
") pooling elements: "
"the number of elements in the pooling area must be below 2**24",
pooling_size,
pooling_width,
pooling_height);
goto error;
}
status = pytorch_qnnp_status_out_of_memory;
average_pooling = calloc(1, sizeof(struct pytorch_qnnp_operator));
if (average_pooling == NULL) {
pytorch_qnnp_log_error(
"failed to allocate %zu bytes for pytorch_qnnp_operator structure",
sizeof(struct pytorch_qnnp_operator));
goto error;
}
const bool any_padding = (input_padding_width | input_padding_height) != 0;
const uint32_t kr = pytorch_qnnp_params.q8avgpool.kr;
const uint32_t mr = pytorch_qnnp_params.q8avgpool.mr;
const uint32_t qr = pytorch_qnnp_params.q8avgpool.qr;
if (any_padding || (channels >= kr || (pooling_size - mr) % qr != 0)) {
void* zero_buffer = malloc(channels);
if (zero_buffer == NULL) {
pytorch_qnnp_log_error(
"failed to allocate %zu bytes for zero padding", channels);
goto error;
}
memset(zero_buffer, input_zero_point, channels);
average_pooling->zero_buffer = zero_buffer;
average_pooling->zero_pointer = zero_buffer;
}
average_pooling->input_padding_depth = 0;
average_pooling->input_padding_height = input_padding_height;
average_pooling->input_padding_width = input_padding_width;
average_pooling->kernel_depth = 1;
average_pooling->kernel_height = pooling_height;
average_pooling->kernel_width = pooling_width;
average_pooling->stride_depth = 1;
average_pooling->stride_height = stride_height;
average_pooling->stride_width = stride_width;
average_pooling->dilation_depth = 1;
average_pooling->dilation_height = 1;
average_pooling->dilation_width = 1;
average_pooling->channels = channels;
size_t nrows = pooling_height * pooling_width;
if (channels >= pytorch_qnnp_params.q8avgpool.kr) {
if (nrows <= mr) {
nrows = mr;
} else {
nrows = round_up(nrows - mr, qr) + mr;
}
}
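  /*
   * Example of the rounding above, with hypothetical micro-kernel parameters
   * (mr = 9, qr = 8 are illustrative, not necessarily the configured values):
   * a 4x4 pooling window has nrows = 16, so round_up(16 - 9, 8) + 9 = 17;
   * a 5x5 window has nrows = 25, so round_up(25 - 9, 8) + 9 = 25. The result
   * is the value used below to pre-bias the accumulator with
   * -input_zero_point * nrows.
   */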
average_pooling->avgpool_quantization_params =
pytorch_qnnp_compute_avgpool_quantization_params(
(int32_t) - ((uint32_t)input_zero_point * (uint32_t)nrows),
input_scale / (output_scale * (float)pooling_size),
output_zero_point,
output_min,
output_max);
average_pooling->ukernel_type = pytorch_qnnp_ukernel_type_average_pooling;
average_pooling->format = pytorch_qnnp_format_quint8;
*average_pooling_out = average_pooling;
return pytorch_qnnp_status_success;
error:
pytorch_qnnp_delete_operator(average_pooling);
return status;
}
enum pytorch_qnnp_status pytorch_qnnp_setup_average_pooling2d_nhwc_q8(
pytorch_qnnp_operator_t average_pooling,
size_t batch_size,
size_t input_height,
size_t input_width,
const uint8_t* input,
size_t input_pixel_stride,
uint8_t* output,
size_t output_pixel_stride,
pthreadpool_t threadpool) {
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_setup_average_pooling2d_nhwc_q8 failed because QNNPACK is not properly initialized");
return pytorch_qnnp_status_uninitialized;
}
if (batch_size == 0) {
average_pooling->batch_size = 0;
return pytorch_qnnp_status_success;
}
if (input_width == 0 || input_height == 0) {
pytorch_qnnp_log_error(
"failed to setup average pooling with %zux%zu input: input dimensions must be non-zero",
input_width,
input_height);
return pytorch_qnnp_status_invalid_parameter;
}
average_pooling->batch_size = batch_size;
average_pooling->input_depth = 1;
average_pooling->input_height = input_height;
average_pooling->input_width = input_width;
average_pooling->input = input;
average_pooling->input_pixel_stride = input_pixel_stride;
average_pooling->output_height = compute_output_dimension(
input_height + average_pooling->input_padding_height * 2,
average_pooling->kernel_height,
average_pooling->stride_height);
average_pooling->output_width = compute_output_dimension(
input_width + average_pooling->input_padding_width * 2,
average_pooling->kernel_width,
average_pooling->stride_width);
average_pooling->output_depth = 1;
average_pooling->output = output;
average_pooling->output_pixel_stride = output_pixel_stride;
size_t valid_batch_size = 0;
if (input == average_pooling->last_input &&
input_height == average_pooling->last_input_height &&
input_width == average_pooling->last_input_width) {
valid_batch_size = average_pooling->valid_batch_size;
if (batch_size <= valid_batch_size) {
return pytorch_qnnp_status_success;
}
}
/* Micro-kernel may read up to (mr - 1) elements after the end of indirection
* buffer */
const uint32_t mr = pytorch_qnnp_params.q8avgpool.mr;
pytorch_qnnp_indirection_set_step_dimensions(average_pooling);
const size_t indirection_buffer_size = sizeof(void*) *
((mr - 1) +
batch_size * average_pooling->output_height *
average_pooling->step_height);
const void** indirection_buffer = (const void**)realloc(
average_pooling->indirection_buffer, indirection_buffer_size);
if (indirection_buffer == NULL) {
pytorch_qnnp_log_error(
"failed to allocate %zu bytes for indirection buffer",
indirection_buffer_size);
return pytorch_qnnp_status_out_of_memory;
}
average_pooling->indirection_buffer = indirection_buffer;
pytorch_qnnp_indirection_init_dwconv(average_pooling, valid_batch_size);
average_pooling->last_input = input;
average_pooling->last_input_height = input_height;
average_pooling->last_input_width = input_width;
average_pooling->valid_batch_size = max(valid_batch_size, batch_size);
return pytorch_qnnp_status_success;
}
| 9,584 | 31.938144 | 108 | c |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/channel-shuffle.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <pytorch_qnnpack.h>
#include <qnnpack/log.h>
#include <qnnpack/operator.h>
#include <qnnpack/params.h>
enum pytorch_qnnp_status pytorch_qnnp_create_channel_shuffle_nc_x8(
size_t groups,
size_t group_channels,
uint32_t flags,
pytorch_qnnp_operator_t* channel_shuffle_out) {
pytorch_qnnp_operator_t channel_shuffle_op = NULL;
enum pytorch_qnnp_status status = pytorch_qnnp_status_uninitialized;
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_create_channel_shuffle_nc_x8 failed because QNNPACK is not properly initialized");
goto error;
}
status = pytorch_qnnp_status_invalid_parameter;
if (groups <= 1) {
pytorch_qnnp_log_error(
"failed to create channel shuffle operator with %zu groups: "
"at least two groups required",
groups);
goto error;
}
if (group_channels == 0) {
pytorch_qnnp_log_error(
"failed to create channel shuffle operator with %zu group channels: "
"number of group channels must be non-zero",
group_channels);
goto error;
}
status = pytorch_qnnp_status_out_of_memory;
channel_shuffle_op = calloc(1, sizeof(struct pytorch_qnnp_operator));
if (channel_shuffle_op == NULL) {
pytorch_qnnp_log_error(
"failed to allocate %zu bytes for pytorch_qnnp_operator structure",
sizeof(struct pytorch_qnnp_operator));
goto error;
}
channel_shuffle_op->groups = groups;
channel_shuffle_op->group_channels = group_channels;
channel_shuffle_op->ukernel_type = pytorch_qnnp_ukernel_type_channel_shuffle;
channel_shuffle_op->format = pytorch_qnnp_format_quint8;
*channel_shuffle_out = channel_shuffle_op;
return pytorch_qnnp_status_success;
error:
pytorch_qnnp_delete_operator(channel_shuffle_op);
return status;
}
enum pytorch_qnnp_status pytorch_qnnp_setup_channel_shuffle_nc_x8(
pytorch_qnnp_operator_t channel_shuffle_op,
size_t batch_size,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride) {
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_setup_channel_shuffle_nc_x8 failed because QNNPACK is not properly initialized");
return pytorch_qnnp_status_uninitialized;
}
if (batch_size == 0) {
channel_shuffle_op->batch_size = 0;
return pytorch_qnnp_status_success;
}
channel_shuffle_op->batch_size = batch_size;
channel_shuffle_op->input = input;
channel_shuffle_op->input_pixel_stride = input_stride;
channel_shuffle_op->output = output;
channel_shuffle_op->output_pixel_stride = output_stride;
return pytorch_qnnp_status_success;
}
| 2,998 | 28.401961 | 104 | c |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/clamp.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <pytorch_qnnpack.h>
#include <qnnpack/log.h>
#include <qnnpack/operator.h>
enum pytorch_qnnp_status pytorch_qnnp_create_clamp_nc_u8(
size_t channels,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
pytorch_qnnp_operator_t* clamp_out) {
pytorch_qnnp_operator_t clamp_op = NULL;
enum pytorch_qnnp_status status = pytorch_qnnp_status_uninitialized;
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_create_clamp_nc_u8 failed because QNNPACK is not properly initialized");
goto error;
}
status = pytorch_qnnp_status_invalid_parameter;
if (channels == 0) {
pytorch_qnnp_log_error(
"failed to create Clamp operator with %zu channels: number of channels must be non-zero",
channels);
goto error;
}
if (output_min > output_max) {
pytorch_qnnp_log_error(
"failed to create Clamp operator with [%" PRIu8 ", %" PRIu8
"] output range: range min must be below range max",
output_min,
output_max);
goto error;
}
status = pytorch_qnnp_status_out_of_memory;
clamp_op = calloc(1, sizeof(struct pytorch_qnnp_operator));
if (clamp_op == NULL) {
pytorch_qnnp_log_error(
"failed to allocate %zu bytes for pytorch_qnnp_operator structure",
sizeof(struct pytorch_qnnp_operator));
goto error;
}
clamp_op->channels = channels;
clamp_op->u8_clamping_params =
pytorch_qnnp_compute_u8_clamping_params(output_min, output_max);
clamp_op->ukernel_type = pytorch_qnnp_ukernel_type_clamp;
clamp_op->format = pytorch_qnnp_format_quint8;
*clamp_out = clamp_op;
return pytorch_qnnp_status_success;
error:
pytorch_qnnp_delete_operator(clamp_op);
return status;
}
enum pytorch_qnnp_status pytorch_qnnp_setup_clamp_nc_u8(
pytorch_qnnp_operator_t clamp,
size_t batch_size,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride) {
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_setup_clamp_nc_u8 failed because QNNPACK is not properly initialized");
return pytorch_qnnp_status_uninitialized;
}
if (batch_size == 0) {
clamp->batch_size = 0;
return pytorch_qnnp_status_success;
}
clamp->batch_size = batch_size;
clamp->input = input;
clamp->input_pixel_stride = input_stride;
clamp->output = output;
clamp->output_pixel_stride = output_stride;
return pytorch_qnnp_status_success;
}
| 2,815 | 26.339806 | 97 | c |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/deconvolution.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <math.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <pytorch_qnnpack.h>
#include <qnnpack/indirection.h>
#include <qnnpack/log.h>
#include <qnnpack/math.h>
#include <qnnpack/operator.h>
#include <qnnpack/pack.h>
#include <qnnpack/params.h>
#include <qnnpack/requantization.h>
static inline size_t compute_output_dimension(
size_t input_dimension,
size_t input_padding_dimension,
size_t adjustment_dimension,
size_t kernel_dimension,
size_t dilation_dimension,
size_t stride_dimension) {
const size_t effective_kernel_dimension =
(kernel_dimension - 1) * dilation_dimension + 1;
return stride_dimension * (input_dimension - 1) + adjustment_dimension +
effective_kernel_dimension - input_padding_dimension;
}
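/*
 * Worked example of the transposed-convolution size formula above
 * (illustrative numbers only): input_dimension = 4, stride = 2, kernel = 3,
 * dilation = 1, adjustment = 0 and total padding 2 give
 *   2 * (4 - 1) + 0 + ((3 - 1) * 1 + 1) - 2 = 7,
 * i.e. this deconvolution maps 4 inputs back to the 7-element signal that a
 * stride-2, kernel-3, padding-1 forward convolution would reduce to 4.
 */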
enum pytorch_qnnp_status pytorch_qnnp_create_deconvolution2d_nhwc_q8(
uint32_t input_padding_height,
uint32_t input_padding_width,
uint32_t adjustment_height,
uint32_t adjustment_width,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t stride_height,
uint32_t stride_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
uint8_t input_zero_point,
const uint8_t* kernel_zero_points,
const uint8_t* kernel,
const int32_t* bias,
uint8_t output_zero_point,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
const float* requantization_scales,
pytorch_qnnp_operator_t* deconvolution_out) {
pytorch_qnnp_operator_t deconvolution = NULL;
enum pytorch_qnnp_status status = pytorch_qnnp_status_uninitialized;
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_create_deconvolution2d_nhwc_q8 failed because QNNPACK is not properly initialized");
goto error;
}
status = pytorch_qnnp_status_invalid_parameter;
if (kernel_width == 0 || kernel_height == 0) {
pytorch_qnnp_log_error(
"failed to create deconvolution with %" PRIu32 "x%" PRIu32
" kernel: kernel dimensions must be non-zero",
kernel_width,
kernel_height);
goto error;
}
if (stride_width == 0 || stride_height == 0) {
pytorch_qnnp_log_error(
"failed to create deconvolution with %" PRIu32 "x%" PRIu32
" stride: "
"stride dimensions must be non-zero",
stride_width,
stride_height);
goto error;
}
if (dilation_width == 0 || dilation_height == 0) {
pytorch_qnnp_log_error(
"failed to create deconvolution with %" PRIu32 "x%" PRIu32
" dilation: "
"dilation dimensions must be non-zero",
dilation_width,
dilation_height);
goto error;
}
status = pytorch_qnnp_status_unsupported_parameter;
for (int i = 0; i < groups * group_output_channels; i++) {
if (requantization_scales[i] <= 0.0f ||
!isnormal(requantization_scales[i])) {
pytorch_qnnp_log_error(
"failed to create deconvolution operator with %.7g requantization scale for "
"channel %d scale must be finite and positive",
requantization_scales[i], i);
goto error;
}
}
status = pytorch_qnnp_status_out_of_memory;
deconvolution = calloc(1, sizeof(struct pytorch_qnnp_operator));
if (deconvolution == NULL) {
pytorch_qnnp_log_error(
"failed to allocate %zu bytes for pytorch_qnnp_operator structure",
sizeof(struct pytorch_qnnp_operator));
goto error;
}
const uint32_t nr = pytorch_qnnp_params.q8conv.nr;
const uint32_t kr = pytorch_qnnp_params.q8conv.kr;
const uint32_t n_stride = (group_output_channels + (nr - 1)) & -nr;
const uint32_t k_stride = (group_input_channels + (kr - 1)) & -kr;
const uint32_t kernel_size = kernel_height * kernel_width;
const size_t packed_group_weights_size =
(sizeof(uint8_t) * kernel_size * k_stride + sizeof(int32_t)) * n_stride;
deconvolution->packed_weights = malloc(packed_group_weights_size * groups);
if (deconvolution->packed_weights == NULL) {
pytorch_qnnp_log_error(
"failed to allocate %zu bytes for packed weights",
packed_group_weights_size * groups);
goto error;
}
memset(
deconvolution->packed_weights,
kernel_zero_points[0],
packed_group_weights_size * groups);
for (uint32_t group = 0; group < groups; group++) {
pytorch_pack_q8deconv_w(
group_output_channels,
kernel_size,
group_input_channels,
nr,
kr,
#if !PYTORCH_QNNPACK_RUNTIME_QUANTIZATION
input_zero_point,
kernel_zero_points[0],
#endif
kernel +
group * group_output_channels * kernel_size * group_input_channels,
bias + group * group_output_channels,
#if PYTORCH_QNNPACK_RUNTIME_QUANTIZATION
kernel_zero_points + group * group_output_channels,
#endif
(void*)((uintptr_t)deconvolution->packed_weights + group * packed_group_weights_size));
}
size_t zero_size = sizeof(uint8_t) * k_stride;
size_t zero_offset = 0;
if (group_input_channels < 8) {
zero_size += 8;
zero_offset = 8;
}
void* zero_buffer = malloc(zero_size);
if (zero_buffer == NULL) {
pytorch_qnnp_log_error(
"failed to allocate %zu bytes for zero padding", zero_size);
goto error;
}
memset(zero_buffer, input_zero_point, zero_size);
deconvolution->zero_buffer = zero_buffer;
deconvolution->zero_pointer = (void*)((uintptr_t)zero_buffer + zero_offset);
deconvolution->input_padding_height = input_padding_height;
deconvolution->input_padding_width = input_padding_width;
deconvolution->adjustment_height = adjustment_height;
deconvolution->adjustment_width = adjustment_width;
deconvolution->kernel_height = kernel_height;
deconvolution->kernel_width = kernel_width;
deconvolution->stride_height = stride_height;
deconvolution->stride_width = stride_width;
deconvolution->dilation_height = dilation_height;
deconvolution->dilation_width = dilation_width;
deconvolution->groups = groups;
deconvolution->group_input_channels = group_input_channels;
deconvolution->group_output_channels = group_output_channels;
deconvolution->kernel_zero_point = kernel_zero_points[0];
deconvolution->conv_quantization_params =
pytorch_qnnp_compute_conv_quantization_params(
input_zero_point,
kernel_zero_points,
requantization_scales,
output_zero_point,
output_min,
output_max);
deconvolution->ukernel_type = pytorch_qnnp_ukernel_type_conv;
deconvolution->format = pytorch_qnnp_format_quint8;
deconvolution->transpose = true;
*deconvolution_out = deconvolution;
return pytorch_qnnp_status_success;
error:
pytorch_qnnp_delete_operator(deconvolution);
return status;
}
enum pytorch_qnnp_status pytorch_qnnp_setup_deconvolution2d_nhwc_q8(
pytorch_qnnp_operator_t deconvolution,
size_t batch_size,
size_t input_height,
size_t input_width,
const uint8_t* input,
size_t input_pixel_stride,
uint8_t* output,
size_t output_pixel_stride,
pthreadpool_t threadpool) {
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_setup_deconvolution2d_nhwc_q8 failed because QNNPACK is not properly initialized");
return pytorch_qnnp_status_uninitialized;
}
if (batch_size == 0) {
deconvolution->batch_size = 0;
return pytorch_qnnp_status_success;
}
if (input_width == 0 || input_height == 0) {
pytorch_qnnp_log_error(
"failed to setup deconvolution with %zux%zu input: input dimensions must be non-zero",
input_width,
input_height);
return pytorch_qnnp_status_invalid_parameter;
}
deconvolution->batch_size = batch_size;
deconvolution->input_height = input_height;
deconvolution->input_width = input_width;
deconvolution->input = input;
deconvolution->input_pixel_stride = input_pixel_stride;
deconvolution->output = output;
deconvolution->output_pixel_stride = output_pixel_stride;
const size_t kernel_height = deconvolution->kernel_height;
const size_t kernel_width = deconvolution->kernel_width;
const size_t kernel_size = kernel_height * kernel_width;
const size_t stride_height = deconvolution->stride_height;
const size_t stride_width = deconvolution->stride_width;
const size_t output_height = deconvolution->output_height =
compute_output_dimension(
input_height,
deconvolution->input_padding_height * 2,
deconvolution->adjustment_height,
kernel_height,
deconvolution->dilation_height,
stride_height);
const size_t output_width = deconvolution->output_width =
compute_output_dimension(
input_width,
deconvolution->input_padding_width * 2,
deconvolution->adjustment_width,
kernel_width,
deconvolution->dilation_width,
stride_width);
const size_t groups = deconvolution->groups;
const size_t output_size = output_height * output_width;
const size_t output_tile_size = pytorch_qnnp_params.q8conv.mr;
const size_t tiled_output_size = round_up(output_size, output_tile_size);
const size_t indirection_buffer_size =
sizeof(void*) * batch_size * groups * tiled_output_size * kernel_size;
const void** indirection_buffer = (const void**)realloc(
deconvolution->indirection_buffer, indirection_buffer_size);
if (indirection_buffer == NULL) {
pytorch_qnnp_log_error(
"failed to allocate %zu bytes for indirection buffer",
indirection_buffer_size);
return pytorch_qnnp_status_out_of_memory;
}
deconvolution->indirection_buffer = indirection_buffer;
pytorch_qnnp_indirection_init_deconv2d(
deconvolution, output_tile_size, tiled_output_size);
return pytorch_qnnp_status_success;
}
| 10,140 | 32.69103 | 106 | c |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/fully-connected-sparse.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <math.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <pytorch_qnnpack.h>
#include <qnnpack/log.h>
#include <qnnpack/math.h>
#include <qnnpack/operator.h>
#include <qnnpack/pack.h>
#include <qnnpack/params.h>
#include <qnnpack/requantization.h>
enum pytorch_qnnp_status pytorch_qnnp_create_fully_connected_sparse_dq_nc_q8(
size_t input_channels,
size_t output_channels,
uint8_t input_zero_point,
const uint8_t* kernel_zero_points,
const void* kernel_col_indices,
const void* kernel_row_values,
const uint8_t* kernel_values,
const uint32_t kernel_row_block_size,
const uint32_t kernel_col_block_size,
enum pytorch_qnnp_sparse_matrix_indices_dtype kernel_indices_dtype,
uint8_t output_zero_point,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
const float* requantization_scales,
bool use_prepack_kernel,
pytorch_qnnp_operator_t* fully_connected_out) {
pytorch_qnnp_operator_t fully_connected = NULL;
enum pytorch_qnnp_status status = pytorch_qnnp_status_uninitialized;
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_create_fully_connected_sparse_dq_nc_q8 failed because QNNPACK is not properly initialized");
goto error;
}
status = pytorch_qnnp_status_unsupported_parameter;
for (int i = 0; i < output_channels; ++i) {
if (requantization_scales[i] <= 0.0f ||
!isnormal(requantization_scales[i])) {
pytorch_qnnp_log_error(
"failed to create fully connected operator with %.7g requantization scale: scale must be finite and positive",
requantization_scales[i]);
goto error;
}
}
status = pytorch_qnnp_status_out_of_memory;
fully_connected = calloc(1, sizeof(struct pytorch_qnnp_operator));
if (fully_connected == NULL) {
pytorch_qnnp_log_error(
"failed to allocate %zu bytes for pytorch_qnnp_operator structure",
sizeof(struct pytorch_qnnp_operator));
goto error;
}
if (kernel_row_block_size == 8 && kernel_col_block_size == 1) {
    // This is to gate 8x1 on SSE2, since we have not implemented an SSE2
    // kernel that supports the 8x1 sparsity pattern.
if (pytorch_qnnp_params.q8gemm_sparse_c8x1.packA == NULL) {
status = pytorch_qnnp_status_invalid_parameter;
goto error;
}
}
fully_connected->sparse_matrix.indices_dtype = kernel_indices_dtype;
switch (kernel_indices_dtype) {
case pytorch_qnnp_sparse_matrix_indices_dtype_uint32_t:
fully_connected->sparse_matrix.col_indices_w32 =
(const uint32_t*)kernel_col_indices;
fully_connected->sparse_matrix.row_values_w32 =
(const uint32_t*)kernel_row_values;
break;
case pytorch_qnnp_sparse_matrix_indices_dtype_uint16_t:
fully_connected->sparse_matrix.col_indices_w16 =
(const uint16_t*)kernel_col_indices;
fully_connected->sparse_matrix.row_values_w16 =
(const uint16_t*)kernel_row_values;
break;
case pytorch_qnnp_sparse_matrix_indices_dtype_uint8_t:
fully_connected->sparse_matrix.col_indices_w8 =
(const uint8_t*)kernel_col_indices;
fully_connected->sparse_matrix.row_values_w8 =
(const uint8_t*)kernel_row_values;
break;
case pytorch_qnnp_sparse_matrix_indices_dtype_invalid:
status = pytorch_qnnp_status_invalid_parameter;
pytorch_qnnp_log_error(
"Invalid indices dtype specified for qnnpack fully connected sparse");
goto error;
}
fully_connected->sparse_matrix.values = kernel_values;
fully_connected->sparse_matrix.row_block_size = kernel_row_block_size;
fully_connected->sparse_matrix.col_block_size = kernel_col_block_size;
fully_connected->groups = 1;
fully_connected->group_input_channels = input_channels;
fully_connected->group_output_channels = output_channels;
fully_connected->kernel_zero_point = kernel_zero_points[0];
fully_connected->dynamic_conv_quantization_params.input_zero_point =
input_zero_point;
fully_connected->dynamic_conv_quantization_params.kernel_zero_points =
kernel_zero_points;
fully_connected->dynamic_conv_quantization_params.multipliers =
requantization_scales;
// Always use prepacking based kernel
fully_connected->ukernel_type = pytorch_qnnp_ukernel_type_gemm_prepackA_sparse_dq;
fully_connected->format = pytorch_qnnp_format_quint8;
*fully_connected_out = fully_connected;
return pytorch_qnnp_status_success;
error:
pytorch_qnnp_delete_operator(fully_connected);
return status;
}
enum pytorch_qnnp_status pytorch_qnnp_setup_fully_connected_sparse_dq_nc_q8(
pytorch_qnnp_operator_t fully_connected,
size_t batch_size,
const uint8_t* input,
size_t input_stride,
const float* bias,
float* output,
size_t output_stride) {
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_setup_fully_connected_nc_q8 failed because QNNPACK is not properly initialized");
return pytorch_qnnp_status_uninitialized;
}
if (batch_size == 0) {
fully_connected->batch_size = 0;
return pytorch_qnnp_status_success;
}
fully_connected->batch_size = 1;
fully_connected->input_height = batch_size;
fully_connected->input_width = 1;
fully_connected->input = input;
fully_connected->input_pixel_stride = input_stride;
fully_connected->bias = bias;
fully_connected->output_height = batch_size;
fully_connected->output_width = 1;
fully_connected->output = output;
fully_connected->output_pixel_stride = output_stride;
return pytorch_qnnp_status_success;
}
| 5,879 | 33.186047 | 120 | c |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/fully-connected.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <math.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <pytorch_qnnpack.h>
#include <qnnpack/log.h>
#include <qnnpack/math.h>
#include <qnnpack/operator.h>
#include <qnnpack/pack.h>
#include <qnnpack/params.h>
#include <qnnpack/requantization.h>
enum pytorch_qnnp_status pytorch_qnnp_create_fully_connected_nc_q8(
size_t input_channels,
size_t output_channels,
uint8_t input_zero_point,
const uint8_t* kernel_zero_points,
const uint8_t* kernel,
const int32_t* bias,
uint8_t output_zero_point,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
const float* requantization_scales,
pytorch_qnnp_operator_t* fully_connected_out) {
pytorch_qnnp_operator_t fully_connected = NULL;
enum pytorch_qnnp_status status = pytorch_qnnp_status_uninitialized;
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_create_fully_connected_nc_q8 failed because QNNPACK is not properly initialized");
goto error;
}
status = pytorch_qnnp_status_unsupported_parameter;
for (int i = 0; i < output_channels; ++i) {
if (requantization_scales[i] <= 0.0f ||
!isnormal(requantization_scales[i])) {
pytorch_qnnp_log_error(
"failed to create fully connected operator with %.7g requantization scale: scale must be finite and positive",
requantization_scales[i]);
goto error;
}
}
status = pytorch_qnnp_status_out_of_memory;
fully_connected = calloc(1, sizeof(struct pytorch_qnnp_operator));
if (fully_connected == NULL) {
pytorch_qnnp_log_error(
"failed to allocate %zu bytes for pytorch_qnnp_operator structure",
sizeof(struct pytorch_qnnp_operator));
goto error;
}
const uint32_t nr = pytorch_qnnp_params.q8conv.nr;
const uint32_t kr = pytorch_qnnp_params.q8conv.kr;
const uint32_t n_stride = (output_channels + (nr - 1)) & -nr;
const uint32_t k_stride = (input_channels + (kr - 1)) & -kr;
fully_connected->packed_weights =
malloc(n_stride * (k_stride * sizeof(uint8_t) + sizeof(int32_t)));
if (fully_connected->packed_weights == NULL) {
pytorch_qnnp_log_error(
"failed to allocate %zu bytes for packed weights",
n_stride * (k_stride * sizeof(uint8_t) + sizeof(int32_t)));
goto error;
}
memset(
fully_connected->packed_weights,
kernel_zero_points[0],
n_stride * (k_stride * sizeof(uint8_t) + sizeof(int32_t)));
pytorch_pack_q8gemm_w(
output_channels,
input_channels,
nr,
nr,
kr,
#if !PYTORCH_QNNPACK_RUNTIME_QUANTIZATION
input_zero_point,
kernel_zero_points[0],
#endif
kernel,
bias,
#if PYTORCH_QNNPACK_RUNTIME_QUANTIZATION
kernel_zero_points,
#endif
fully_connected->packed_weights);
fully_connected->groups = 1;
fully_connected->group_input_channels = input_channels;
fully_connected->group_output_channels = output_channels;
fully_connected->kernel_zero_point = kernel_zero_points[0];
fully_connected->conv_quantization_params =
pytorch_qnnp_compute_conv_quantization_params(
input_zero_point,
kernel_zero_points,
requantization_scales,
output_zero_point,
output_min,
output_max);
fully_connected->ukernel_type = pytorch_qnnp_ukernel_type_gemm;
fully_connected->format = pytorch_qnnp_format_quint8;
*fully_connected_out = fully_connected;
return pytorch_qnnp_status_success;
error:
pytorch_qnnp_delete_operator(fully_connected);
return status;
}
enum pytorch_qnnp_status pytorch_qnnp_setup_fully_connected_nc_q8(
pytorch_qnnp_operator_t fully_connected,
size_t batch_size,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride) {
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_setup_fully_connected_nc_q8 failed because QNNPACK is not properly initialized");
return pytorch_qnnp_status_uninitialized;
}
if (batch_size == 0) {
fully_connected->batch_size = 0;
return pytorch_qnnp_status_success;
}
fully_connected->batch_size = 1;
fully_connected->input_height = batch_size;
fully_connected->input_width = 1;
fully_connected->input = input;
fully_connected->input_pixel_stride = input_stride;
fully_connected->output_height = batch_size;
fully_connected->output_width = 1;
fully_connected->output = output;
fully_connected->output_pixel_stride = output_stride;
return pytorch_qnnp_status_success;
}
| 4,827 | 28.987578 | 120 | c |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/global-average-pooling.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <pytorch_qnnpack.h>
#include <qnnpack/log.h>
#include <qnnpack/operator.h>
#include <qnnpack/params.h>
#include <qnnpack/requantization.h>
enum pytorch_qnnp_status pytorch_qnnp_create_global_average_pooling_nwc_q8(
size_t channels,
uint8_t input_zero_point,
float input_scale,
uint8_t output_zero_point,
float output_scale,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
pytorch_qnnp_operator_t* global_average_pooling_out) {
pytorch_qnnp_operator_t global_average_pooling_op = NULL;
enum pytorch_qnnp_status status = pytorch_qnnp_status_uninitialized;
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_create_global_average_pooling_nwc_q8 failed because QNNPACK is not properly initialized");
goto error;
}
status = pytorch_qnnp_status_invalid_parameter;
if (channels == 0) {
pytorch_qnnp_log_error(
"failed to create global average pooling operator with %zu channels: number of channels must be non-zero",
channels);
goto error;
}
if (input_scale <= 0.0f || !isnormal(input_scale)) {
pytorch_qnnp_log_error(
"failed to create global average pooling operator with %.7g input scale: scale must be finite and positive",
input_scale);
goto error;
}
if (output_scale <= 0.0f || !isnormal(output_scale)) {
pytorch_qnnp_log_error(
"failed to create global average pooling operator with %.7g output scale: scale must be finite and positive",
output_scale);
goto error;
}
status = pytorch_qnnp_status_unsupported_parameter;
const float input_output_scale = input_scale / output_scale;
if (input_output_scale < 0x1.0p-8f || input_output_scale >= 0x1.0p+8f) {
pytorch_qnnp_log_error(
"failed to create global average pooling operator with %.7g input-to-output scale ratio: "
"scale ratio must be in [2**-8, 2**8) range",
input_output_scale);
goto error;
}
status = pytorch_qnnp_status_out_of_memory;
global_average_pooling_op = calloc(1, sizeof(struct pytorch_qnnp_operator));
if (global_average_pooling_op == NULL) {
pytorch_qnnp_log_error(
"failed to allocate %zu bytes for pytorch_qnnp_operator structure",
sizeof(struct pytorch_qnnp_operator));
goto error;
}
void* zero_buffer = calloc(channels, sizeof(uint8_t));
if (zero_buffer == NULL) {
pytorch_qnnp_log_error(
"failed to allocate %zu bytes for zero padding",
channels * sizeof(uint8_t));
goto error;
}
global_average_pooling_op->zero_buffer = zero_buffer;
global_average_pooling_op->zero_pointer = zero_buffer;
global_average_pooling_op->channels = channels;
global_average_pooling_op->input_zero_point = input_zero_point;
global_average_pooling_op->output_zero_point = output_zero_point;
global_average_pooling_op->input_scale = input_scale;
global_average_pooling_op->output_scale = output_scale;
global_average_pooling_op->output_min = output_min;
global_average_pooling_op->output_max = output_max;
global_average_pooling_op->ukernel_type =
pytorch_qnnp_ukernel_type_global_average_pooling;
global_average_pooling_op->format = pytorch_qnnp_format_quint8;
*global_average_pooling_out = global_average_pooling_op;
return pytorch_qnnp_status_success;
error:
pytorch_qnnp_delete_operator(global_average_pooling_op);
return status;
}
enum pytorch_qnnp_status pytorch_qnnp_setup_global_average_pooling_nwc_q8(
pytorch_qnnp_operator_t global_average_pooling_op,
size_t batch_size,
size_t width,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride) {
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_setup_global_average_pooling_nwc_q8 failed because QNNPACK is not properly initialized");
return pytorch_qnnp_status_uninitialized;
}
if (batch_size == 0) {
global_average_pooling_op->batch_size = 0;
return pytorch_qnnp_status_success;
}
if (width == 0) {
pytorch_qnnp_log_error(
"failed to setup global average pooling operator with width %zu: width must be non-zero",
width);
return pytorch_qnnp_status_invalid_parameter;
}
global_average_pooling_op->batch_size = batch_size;
global_average_pooling_op->input_width = width;
global_average_pooling_op->input = input;
global_average_pooling_op->input_pixel_stride = input_stride;
global_average_pooling_op->output = output;
global_average_pooling_op->output_pixel_stride = output_stride;
global_average_pooling_op->avgpool_quantization_params =
pytorch_qnnp_compute_avgpool_quantization_params(
-(int32_t)width *
(int32_t)(uint32_t)global_average_pooling_op->input_zero_point,
global_average_pooling_op->input_scale /
(global_average_pooling_op->output_scale * (float)width),
global_average_pooling_op->output_zero_point,
global_average_pooling_op->output_min,
global_average_pooling_op->output_max);
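  /*
   * Sketch of the dequantized math behind the parameters above (the actual
   * kernel works in fixed point): with bias = -width * input_zero_point and
   * multiplier = input_scale / (output_scale * width), the pooled output is
   *   out_q ~= clamp(output_zero_point
   *                  + multiplier * (sum_over_width(in_q) + bias),
   *                  output_min, output_max)
   * i.e. the average of the dequantized inputs, requantized to the output
   * scale and zero point.
   */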
return pytorch_qnnp_status_success;
}
| 5,450 | 33.283019 | 117 | c |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/hardsigmoid.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <pytorch_qnnpack.h>
#include <qnnpack/log.h>
#include <qnnpack/operator.h>
enum pytorch_qnnp_status pytorch_qnnp_create_hardsigmoid_nc_q8(
size_t channels,
uint8_t input_zero_point,
float input_scale,
uint8_t output_zero_point,
float output_scale,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
pytorch_qnnp_operator_t* hardsigmoid_out) {
pytorch_qnnp_operator_t hardsigmoid_op = NULL;
enum pytorch_qnnp_status status = pytorch_qnnp_status_uninitialized;
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_create_hardsigmoid_nc_q8 failed because QNNPACK is not properly initialized");
goto error;
}
status = pytorch_qnnp_status_invalid_parameter;
if (channels == 0) {
pytorch_qnnp_log_error(
"failed to create Hardsigmoid operator with %zu channels: number of channels must be non-zero",
channels);
goto error;
}
if (input_scale <= 0.0f || !isnormal(input_scale)) {
pytorch_qnnp_log_error(
"failed to create Hardsigmoid operator with %.7g input scale: scale must be finite and positive",
input_scale);
goto error;
}
if (output_scale <= 0.0f || !isnormal(output_scale)) {
pytorch_qnnp_log_error(
"failed to create Hardsigmoid operator with %.7g output scale: scale must be finite and positive",
output_scale);
goto error;
}
if (output_min >= output_max) {
pytorch_qnnp_log_error(
"failed to create Hardsigmoid operator with [%" PRIu8 ", %" PRIu8
"] output range: range min must be below range max",
output_min,
output_max);
goto error;
}
status = pytorch_qnnp_status_unsupported_parameter;
if (output_scale != 0x1.0p-8f) {
pytorch_qnnp_log_error(
"failed to create Hardsigmoid operator with %.7g output scale: only output scale of 1/256 is supported",
output_scale);
goto error;
}
if (output_zero_point != 0) {
pytorch_qnnp_log_error(
"failed to create Hardsigmoid operator with %" PRIu8
" output zero point: only output zero point of 0 is supported",
output_zero_point);
goto error;
}
status = pytorch_qnnp_status_out_of_memory;
hardsigmoid_op = calloc(1, sizeof(struct pytorch_qnnp_operator));
if (hardsigmoid_op == NULL) {
pytorch_qnnp_log_error(
"failed to allocate %zu bytes for pytorch_qnnp_operator structure",
sizeof(struct pytorch_qnnp_operator));
goto error;
}
hardsigmoid_op->lookup_table = malloc(256 * sizeof(uint8_t));
if (hardsigmoid_op->lookup_table == NULL) {
pytorch_qnnp_log_error(
"failed to allocate 256 bytes for Hardsigmoid lookup table");
goto error;
}
uint8_t* lookup_table = hardsigmoid_op->lookup_table;
const float scaled_min = (float)(int32_t)output_min;
const float scaled_max = (float)(int32_t)output_max;
const float inv_output_scale = 1.0f / output_scale;
for (int32_t i = 0; i < 256; i++) {
float x =
input_scale * (float)(i - (int32_t)(uint32_t)input_zero_point);
// hardsigmoid, no min/max functions in C
float x2 = x + 3.0f;
x2 = x2 > 0.0f ? x2 : 0.0f;
x2 = x2 < 6.0f ? x2 : 6.0f;
x2 = x2 / 6.0f;
float scaled_hardsigmoid_x = inv_output_scale * x2 + output_zero_point;
if (scaled_hardsigmoid_x < scaled_min) {
scaled_hardsigmoid_x = scaled_min;
}
if (scaled_hardsigmoid_x > scaled_max) {
scaled_hardsigmoid_x = scaled_max;
}
lookup_table[(uint32_t)i] = (uint8_t)lrintf(scaled_hardsigmoid_x);
}
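  /*
   * One illustrative table entry (example input quantization only; the real
   * scale and zero point come from the caller): with input_scale = 0.1 and
   * input_zero_point = 128, the quantized input i = 158 dequantizes to
   * x = 0.1 * (158 - 128) = 3.0, hardsigmoid gives (3.0 + 3.0) / 6.0 = 1.0,
   * and with the required output scale of 1/256 and zero point 0 this maps to
   * 256.0, which the clamp against output_max (at most 255) brings back into
   * uint8 range.
   */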
hardsigmoid_op->channels = channels;
hardsigmoid_op->ukernel_type = pytorch_qnnp_ukernel_type_lut;
hardsigmoid_op->format = pytorch_qnnp_format_quint8;
*hardsigmoid_out = hardsigmoid_op;
return pytorch_qnnp_status_success;
error:
pytorch_qnnp_delete_operator(hardsigmoid_op);
return status;
}
enum pytorch_qnnp_status pytorch_qnnp_setup_hardsigmoid_nc_q8(
pytorch_qnnp_operator_t hardsigmoid,
size_t batch_size,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride) {
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_setup_hardsigmoid_nc_q8 failed because QNNPACK is not properly initialized");
return pytorch_qnnp_status_uninitialized;
}
if (batch_size == 0) {
hardsigmoid->batch_size = 0;
return pytorch_qnnp_status_success;
}
hardsigmoid->batch_size = batch_size;
hardsigmoid->input = input;
hardsigmoid->input_pixel_stride = input_stride;
hardsigmoid->output = output;
hardsigmoid->output_pixel_stride = output_stride;
return pytorch_qnnp_status_success;
}
| 5,005 | 29.339394 | 112 | c |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/hardswish.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <pytorch_qnnpack.h>
#include <qnnpack/log.h>
#include <qnnpack/operator.h>
enum pytorch_qnnp_status pytorch_qnnp_create_hardswish_nc_q8(
size_t channels,
uint8_t input_zero_point,
float input_scale,
uint8_t output_zero_point,
float output_scale,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
pytorch_qnnp_operator_t* hardswish_out) {
pytorch_qnnp_operator_t hardswish_op = NULL;
enum pytorch_qnnp_status status = pytorch_qnnp_status_uninitialized;
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_create_hardswish_nc_q8 failed because QNNPACK is not properly initialized");
goto error;
}
status = pytorch_qnnp_status_invalid_parameter;
if (channels == 0) {
pytorch_qnnp_log_error(
"failed to create Hardswish operator with %zu channels: number of channels must be non-zero",
channels);
goto error;
}
if (input_scale <= 0.0f || !isnormal(input_scale)) {
pytorch_qnnp_log_error(
"failed to create Hardswish operator with %.7g input scale: scale must be finite and positive",
input_scale);
goto error;
}
if (output_scale <= 0.0f || !isnormal(output_scale)) {
pytorch_qnnp_log_error(
"failed to create Hardswish operator with %.7g output scale: scale must be finite and positive",
output_scale);
goto error;
}
if (output_min >= output_max) {
pytorch_qnnp_log_error(
"failed to create Hardswish operator with [%" PRIu8 ", %" PRIu8
"] output range: range min must be below range max",
output_min,
output_max);
goto error;
}
status = pytorch_qnnp_status_out_of_memory;
hardswish_op = calloc(1, sizeof(struct pytorch_qnnp_operator));
if (hardswish_op == NULL) {
pytorch_qnnp_log_error(
"failed to allocate %zu bytes for pytorch_qnnp_operator structure",
sizeof(struct pytorch_qnnp_operator));
goto error;
}
hardswish_op->lookup_table = malloc(256 * sizeof(uint8_t));
if (hardswish_op->lookup_table == NULL) {
pytorch_qnnp_log_error(
"failed to allocate 256 bytes for Hardswish lookup table");
goto error;
}
uint8_t* lookup_table = hardswish_op->lookup_table;
const float scaled_min = (float)(int32_t)output_min;
const float scaled_max = (float)(int32_t)output_max;
const float inv_output_scale = 1.0f / output_scale;
for (int32_t i = 0; i < 256; i++) {
float x =
input_scale * (float)(i - (int32_t)(uint32_t)input_zero_point);
// hardswish, no min/max functions in C
float x2 = x + 3.0f;
x2 = x2 > 0.0f ? x2 : 0.0f;
x2 = x2 < 6.0f ? x2 : 6.0f;
x2 = x * x2 / 6.0f;
float scaled_hardswish_x = inv_output_scale * x2 + output_zero_point;
if (scaled_hardswish_x < scaled_min) {
scaled_hardswish_x = scaled_min;
}
if (scaled_hardswish_x > scaled_max) {
scaled_hardswish_x = scaled_max;
}
lookup_table[(uint32_t)i] = (uint8_t)lrintf(scaled_hardswish_x);
}
hardswish_op->channels = channels;
hardswish_op->ukernel_type = pytorch_qnnp_ukernel_type_lut;
hardswish_op->format = pytorch_qnnp_format_quint8;
*hardswish_out = hardswish_op;
return pytorch_qnnp_status_success;
error:
pytorch_qnnp_delete_operator(hardswish_op);
return status;
}
enum pytorch_qnnp_status pytorch_qnnp_setup_hardswish_nc_q8(
pytorch_qnnp_operator_t hardswish,
size_t batch_size,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride) {
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_setup_hardswish_nc_q8 failed because QNNPACK is not properly initialized");
return pytorch_qnnp_status_uninitialized;
}
if (batch_size == 0) {
hardswish->batch_size = 0;
return pytorch_qnnp_status_success;
}
hardswish->batch_size = batch_size;
hardswish->input = input;
hardswish->input_pixel_stride = input_stride;
hardswish->output = output;
hardswish->output_pixel_stride = output_stride;
return pytorch_qnnp_status_success;
}
| 4,420 | 28.871622 | 104 | c |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/indirection.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <stddef.h>
#include <fxdiv.h>
#include <qnnpack/indirection.h>
#include <qnnpack/math.h>
#include <qnnpack/operator.h>
void pytorch_qnnp_indirection_init_conv3d(
pytorch_qnnp_operator_t op,
size_t output_tile_size,
size_t tiled_output_size) {
const void** indirection_buffer = op->indirection_buffer;
const void* input = op->input;
const size_t input_pixel_stride = op->input_pixel_stride;
const void* zero = op->zero_pointer;
const size_t groups = op->groups;
const size_t group_input_channels = op->group_input_channels;
const size_t batch_size = op->batch_size;
const size_t input_depth = op->input_depth;
const size_t input_height = op->input_height;
const size_t input_width = op->input_width;
const size_t output_depth = op->output_depth;
const size_t output_height = op->output_height;
const size_t output_width = op->output_width;
const size_t kernel_depth = op->kernel_depth;
const size_t kernel_height = op->kernel_height;
const size_t kernel_width = op->kernel_width;
const size_t stride_depth = op->stride_depth;
const size_t stride_height = op->stride_height;
const size_t stride_width = op->stride_width;
const size_t dilation_depth = op->dilation_depth;
const size_t dilation_height = op->dilation_height;
const size_t dilation_width = op->dilation_width;
const size_t input_padding_depth = op->input_padding_depth;
const size_t input_padding_height = op->input_padding_height;
const size_t input_padding_width = op->input_padding_width;
const size_t output_size = output_depth * output_height * output_width;
const size_t kernel_size = kernel_depth * kernel_height * kernel_width;
const struct fxdiv_divisor_size_t output_yx_divisor =
fxdiv_init_size_t(output_height * output_width);
const struct fxdiv_divisor_size_t output_x_divisor =
fxdiv_init_size_t(output_width);
for (size_t group = 0; group < groups; group++) {
for (size_t image = 0; image < batch_size; image++) {
for (size_t output_tile_start = 0; output_tile_start < tiled_output_size;
output_tile_start += output_tile_size) {
for (size_t output_tile_offset = 0;
output_tile_offset < output_tile_size;
output_tile_offset++) {
const size_t tiled_output_index =
output_tile_start + output_tile_offset;
const size_t output_index = min(tiled_output_index, output_size - 1);
const struct fxdiv_result_size_t z_yx =
fxdiv_divide_size_t(output_index, output_yx_divisor);
const struct fxdiv_result_size_t y_x =
fxdiv_divide_size_t(z_yx.remainder, output_x_divisor);
const size_t output_z = z_yx.quotient;
const size_t output_y = y_x.quotient;
const size_t output_x = y_x.remainder;
for (size_t kernel_z = 0; kernel_z < kernel_depth; kernel_z++) {
const size_t input_z = output_z * stride_depth +
kernel_z * dilation_depth - input_padding_depth;
if (input_z < input_depth) {
for (size_t kernel_y = 0; kernel_y < kernel_height; kernel_y++) {
const size_t input_y = output_y * stride_height +
kernel_y * dilation_height - input_padding_height;
if (input_y < input_height) {
for (size_t kernel_x = 0; kernel_x < kernel_width;
kernel_x++) {
const size_t input_x = output_x * stride_width +
kernel_x * dilation_width - input_padding_width;
const size_t index = (group * batch_size + image) *
tiled_output_size * kernel_size +
output_tile_start * kernel_size +
((kernel_height * kernel_z + kernel_y) * kernel_width +
kernel_x) *
output_tile_size +
output_tile_offset;
if (input_x < input_width) {
indirection_buffer[index] = (char*)input +
(((image * input_depth + input_z) * input_height +
input_y) *
input_width +
input_x) *
input_pixel_stride +
group * group_input_channels;
} else {
indirection_buffer[index] = zero;
}
}
} else {
for (size_t kernel_x = 0; kernel_x < kernel_width;
kernel_x++) {
const size_t index = (group * batch_size + image) *
tiled_output_size * kernel_size +
output_tile_start * kernel_size +
((kernel_height * kernel_z + kernel_y) * kernel_width +
kernel_x) *
output_tile_size +
output_tile_offset;
indirection_buffer[index] = zero;
}
}
}
} else {
for (size_t kernel_y = 0; kernel_y < kernel_height; kernel_y++) {
for (size_t kernel_x = 0; kernel_x < kernel_width; kernel_x++) {
const size_t index = (group * batch_size + image) *
tiled_output_size * kernel_size +
output_tile_start * kernel_size +
((kernel_height * kernel_z + kernel_y) * kernel_width +
kernel_x) *
output_tile_size +
output_tile_offset;
indirection_buffer[index] = zero;
}
}
}
}
}
}
}
}
}
/**
 * Imagine we want to do a depthwise conv or average pooling with these parameters:
* kernel_width/height=3 stride=2
* Input is:
* ---------------
* |0|1|2|3|4|5|6|
* --------------- -------
* | | | | | | | | to |0|1|2|
* --------------- -------
* | | | | | | | | | | | |
* --------------- -------
* | | | | | | | |
* ---------------
* | | | | | | | |
* ---------------
*
* Thus we are going from width=7 height=5 input to width=3 height=2
* Convince yourself that input 5x7 with pooling params of 3x3 kernel
* with 2x2 stride gets you to 2x3 output.
* Now for each output place (0,0), (0,1), (0,2), (1,0), (1,1), (1,2)
* we have 3x3 input.
* For just the first row of output this will look like as follows:
* pixel:0 pixel:1 pixel:2
* ------- ------- -------
* |0|1|2| |2|3|4| |4|5|6|
* ------- ------- -------
* | | | | | | | | | | | |
* ------- ------- -------
* | | | | | | | | | | | |
* ------- ------- -------
* As you can see there is some overlap in the input needed for each
* output pixel.
* What is indirection buffer:
* Indirection buffer just stores the pointer to the underlying data.
* In this case pointer for a particular input position will point to
* all the input channels of that position in NHWC format.
 * So one option for the aforementioned storage would be:
 * For each output position: store a 3x3 array of pointers. Thus we
 * would have 3x3 * 3 (3 output pixels in the first row) = 27 pointers
 * stored.
 * Now instead we store the pointers in this format:
* ---------------
* |0|1|2|3|4|5|6|
* ---------------
* | | | | | | | |
* ---------------
* | | | | | | | |
* ---------------
* Then we have all the pointers needed as before, but with less duplication.
* So instead of 27 pointers now we have:
 * (3 (# of output pixels) - 1) * (stride) * 3 (kernel height) + 3 * 3 (kernel h*w)
* = 4 * 3 + 9
* = 21 pointers.
* which is the equation below.
 * Now in order for this to work the kernel has to be adjusted.
 * Here the kernel produces output for the entire width. Thus as you move from
 * one output pixel to the next, the jump in the indirection buffer has to be
 * not 3*3 = 9 but kernel height (3) * stride (2) = 6.
 * You will see this in operator-run.c.
*
* step_width: The number of yz slices of the kernel to traverse to move from
* the starting input index of an output pixel in the indirection buffer to
* that of the output pixel directly after it in the same row.
* i.e. if indirection_buffer[j] points to the first input pixel used to
* compute the i'th output pixel, then
* indirection_buffer[j + (kernel_depth * kernel_height * step_width)]
* points to the first input pixel used to compute the (i + 1)'th output
* pixel if in the same row
* When dilation is 1 (for convolution): if neighboring output pixels use
* overlapping regions of the input, this overlap is not included in the
* indirection buffer (saving some space), hence step width is set to stride
* width
*
* step_height: The number of pointers to traverse to move from an output
 * pixel's first input's index in the indirection buffer to that of the
* output pixel one ROW (one output y) after it.
* i.e. if indirection_buffer[j] points to the first input pixel used to
* compute the i'th output pixel, then
* indirection_buffer[j + step_height] points to the first
 *      input pixel used to compute the output pixel one row below, i.e.
 *      the (i + output_width)'th output pixel.
*
* step_depth: Same as step height but for an xy slice rather than a row
*
* The input operator's step dimensions must have been set up before calling
* this function.
*/
void pytorch_qnnp_indirection_init_dwconv(
pytorch_qnnp_operator_t op,
size_t batch_start) {
const void** indirection_buffer = op->indirection_buffer;
const void* input = op->input;
const size_t input_pixel_stride = op->input_pixel_stride;
const void* zero = op->zero_pointer;
const size_t batch_size = op->batch_size;
const size_t input_depth = op->input_depth;
const size_t input_height = op->input_height;
const size_t input_width = op->input_width;
const size_t output_depth = op->output_depth;
const size_t output_height = op->output_height;
const size_t output_width = op->output_width;
const size_t kernel_depth = op->kernel_depth;
const size_t kernel_height = op->kernel_height;
const size_t kernel_width = op->kernel_width;
const size_t stride_depth = op->stride_depth;
const size_t stride_height = op->stride_height;
const size_t stride_width = op->stride_width;
const size_t dilation_depth = op->dilation_depth;
const size_t dilation_height = op->dilation_height;
const size_t dilation_width = op->dilation_width;
const size_t input_padding_depth = op->input_padding_depth;
const size_t input_padding_height = op->input_padding_height;
const size_t input_padding_width = op->input_padding_width;
const size_t step_depth = op->step_depth;
const size_t step_height = op->step_height;
const size_t step_width = op->step_width;
#define DW_CONV_3D_INDEX(oz, oy, ox, kz, ky, kx) \
/* Output Pixel */ \
(image * output_depth + oz) * step_depth + /* slice */ \
oy * step_height + /* row */ \
ox * step_width * kernel_height * kernel_depth + /* column */ \
/* Kernel */ \
kx * kernel_depth * kernel_height + /* column */ \
ky * kernel_depth + /* row */ \
kz /* slice */
for (size_t image = batch_start; image < batch_size; image++) {
for (size_t output_z = 0; output_z < output_depth; output_z++) {
for (size_t kernel_z = 0; kernel_z < kernel_depth; kernel_z++) {
const size_t input_z = output_z * stride_depth +
kernel_z * dilation_depth - input_padding_depth;
if (input_z < input_depth) {
for (size_t output_y = 0; output_y < output_height; output_y++) {
for (size_t kernel_y = 0; kernel_y < kernel_height; kernel_y++) {
const size_t input_y = output_y * stride_height +
kernel_y * dilation_height - input_padding_height;
if (input_y < input_height) {
for (size_t output_x = 0; output_x < output_width; output_x++) {
for (size_t kernel_x = 0; kernel_x < kernel_width;
kernel_x++) {
const size_t input_x = output_x * stride_width +
kernel_x * dilation_width - input_padding_width;
const size_t index = DW_CONV_3D_INDEX(
output_z,
output_y,
output_x,
kernel_z,
kernel_y,
kernel_x);
if (input_x < input_width) {
indirection_buffer[index] = (char*)input +
((image * input_depth + input_z) * input_height *
input_width + // slice
input_y * input_width + // row
input_x // column
) * input_pixel_stride;
} else {
indirection_buffer[index] = zero;
}
}
}
} else {
for (size_t output_x = 0; output_x < output_width; output_x++) {
for (size_t kernel_x = 0; kernel_x < kernel_width;
kernel_x++) {
const size_t index = DW_CONV_3D_INDEX(
output_z,
output_y,
output_x,
kernel_z,
kernel_y,
kernel_x);
indirection_buffer[index] = zero;
}
}
}
}
}
} else {
for (size_t output_y = 0; output_y < output_height; output_y++) {
for (size_t kernel_y = 0; kernel_y < kernel_height; kernel_y++) {
for (size_t output_x = 0; output_x < output_width; output_x++) {
for (size_t kernel_x = 0; kernel_x < kernel_width; kernel_x++) {
const size_t index = DW_CONV_3D_INDEX(
output_z,
output_y,
output_x,
kernel_z,
kernel_y,
kernel_x);
indirection_buffer[index] = zero;
}
}
}
}
}
}
}
}
}
void pytorch_qnnp_indirection_init_deconv2d(
pytorch_qnnp_operator_t op,
size_t output_tile_size,
size_t tiled_output_size) {
const void** indirection_buffer = op->indirection_buffer;
const void* input = op->input;
const size_t input_pixel_stride = op->input_pixel_stride;
const void* zero = op->zero_pointer;
const size_t groups = op->groups;
const size_t group_input_channels = op->group_input_channels;
const size_t batch_size = op->batch_size;
const size_t input_height = op->input_height;
const size_t input_width = op->input_width;
const size_t output_height = op->output_height;
const size_t output_width = op->output_width;
const size_t kernel_height = op->kernel_height;
const size_t kernel_width = op->kernel_width;
const size_t stride_height = op->stride_height;
const size_t stride_width = op->stride_width;
const size_t dilation_height = op->dilation_height;
const size_t dilation_width = op->dilation_width;
const size_t input_padding_height = op->input_padding_height;
const size_t input_padding_width = op->input_padding_width;
const size_t output_size = output_height * output_width;
const size_t kernel_size = kernel_height * kernel_width;
for (size_t group = 0; group < groups; group++) {
for (size_t image = 0; image < batch_size; image++) {
for (size_t output_tile_start = 0; output_tile_start < tiled_output_size;
output_tile_start += output_tile_size) {
for (size_t output_tile_offset = 0;
output_tile_offset < output_tile_size;
output_tile_offset++) {
const size_t tiled_output_index =
output_tile_start + output_tile_offset;
const size_t output_index = min(tiled_output_index, output_size - 1);
const size_t output_y = output_index / output_width;
const size_t output_x = output_index % output_width;
for (size_t kernel_y = 0; kernel_y < kernel_height; kernel_y++) {
const size_t y =
output_y + input_padding_height - kernel_y * dilation_height;
const size_t input_y = y / stride_height;
for (size_t kernel_x = 0; kernel_x < kernel_width; kernel_x++) {
const size_t x =
output_x + input_padding_width - kernel_x * dilation_width;
const size_t input_x = x / stride_width;
const size_t index = (group * batch_size + image) *
tiled_output_size * kernel_size +
output_tile_start * kernel_size +
(kernel_y * kernel_width + kernel_x) * output_tile_size +
output_tile_offset;
if (input_y * stride_height == y && input_y < input_height &&
input_x * stride_width == x && input_x < input_width) {
indirection_buffer[index] = (char*)input +
((image * input_height + input_y) * input_width + input_x) *
input_pixel_stride +
group * group_input_channels;
} else {
indirection_buffer[index] = zero;
}
}
}
}
}
}
}
}
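/*
 * Illustrative sketch (not part of QNNPACK) of the transposed-convolution tap
 * mapping used above: an output coordinate maps back to a real input pixel
 * only when (output + padding - kernel * dilation) is divisible by the stride
 * and the quotient lands inside the input; otherwise the tap is redirected to
 * the zero buffer. For example, with stride 2, padding 1 and dilation 1,
 * output_y = 3 with kernel_y = 0 gives y = 4 and input_y = 2 (valid), while
 * output_y = 4 gives y = 5, which is not divisible by 2, so the tap reads zero.
 */
static inline int example_deconv_tap_maps_to_input(
    size_t output_coord,
    size_t padding,
    size_t kernel_coord,
    size_t dilation,
    size_t stride,
    size_t input_size,
    size_t* input_coord_out) {
  const size_t y = output_coord + padding - kernel_coord * dilation;
  const size_t input_coord = y / stride;
  if (input_coord * stride == y && input_coord < input_size) {
    *input_coord_out = input_coord;
    return 1;
  }
  return 0; /* the corresponding indirection entry points at the zero buffer */
}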
void pytorch_qnnp_indirection_init_maxpool2d(
pytorch_qnnp_operator_t op,
size_t batch_start) {
const void** indirection_buffer = op->indirection_buffer;
const void* input = op->input;
const size_t input_pixel_stride = op->input_pixel_stride;
const size_t batch_size = op->batch_size;
const size_t input_height = op->input_height;
const size_t input_width = op->input_width;
const size_t output_height = op->output_height;
const size_t output_width = op->output_width;
const size_t pooling_height = op->kernel_height;
const size_t pooling_width = op->kernel_width;
const size_t stride_height = op->stride_height;
const size_t stride_width = op->stride_width;
const size_t dilation_height = op->dilation_height;
const size_t dilation_width = op->dilation_width;
const size_t input_padding_height = op->input_padding_height;
const size_t input_padding_width = op->input_padding_width;
const size_t step_height = op->step_height;
const size_t step_width = op->step_width;
for (size_t image = batch_start; image < batch_size; image++) {
for (size_t output_y = 0; output_y < output_height; output_y++) {
for (size_t pooling_y = 0; pooling_y < pooling_height; pooling_y++) {
const size_t input_y =
doz(output_y * stride_height + pooling_y * dilation_height,
input_padding_height);
const size_t clamped_input_y = min(input_y, input_height - 1);
for (size_t output_x = 0; output_x < output_width; output_x++) {
for (size_t pooling_x = 0; pooling_x < pooling_width; pooling_x++) {
const size_t input_x =
doz(output_x * stride_width + pooling_x * dilation_width,
input_padding_width);
const size_t clamped_input_x = min(input_x, input_width - 1);
const size_t index =
(image * output_height + output_y) * step_height +
output_x * step_width * pooling_height +
pooling_x * pooling_height + pooling_y;
indirection_buffer[index] = (char*)input +
((image * input_height + clamped_input_y) * input_width +
clamped_input_x) *
input_pixel_stride;
}
}
}
}
}
}
void pytorch_qnnp_indirection_set_step_dimensions(pytorch_qnnp_operator_t op) {
const size_t original_kernel_depth = op->kernel_depth;
const size_t kernel_depth =
(original_kernel_depth != 0) ? original_kernel_depth : 1;
const size_t kernel_height = op->kernel_height;
const size_t kernel_width = op->kernel_width;
const size_t kernel_size = kernel_depth * kernel_height * kernel_width;
const size_t output_height = op->output_height;
const size_t output_width = op->output_width;
size_t step_width = 0;
switch (op->ukernel_type) {
case pytorch_qnnp_ukernel_type_dwconv:
step_width = op->dilation_width == 1 ? op->stride_width : kernel_width;
break;
case pytorch_qnnp_ukernel_type_average_pooling:
step_width = min(op->stride_width, kernel_width);
break;
case pytorch_qnnp_ukernel_type_max_pooling:
step_width = op->dilation_width > 1 ? kernel_width
: min(op->stride_width, kernel_width);
break;
default:
PYTORCH_QNNP_UNREACHABLE;
}
const size_t step_height = kernel_size +
(output_width - 1) * step_width * kernel_height * kernel_depth;
const size_t step_depth = step_height * output_height;
op->step_depth = step_depth;
op->step_height = step_height;
op->step_width = step_width;
}
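/*
 * Worked example (illustrative, not part of the library) matching the
 * depthwise-convolution comment earlier in this file: a 3x3 kernel with
 * stride 2 and dilation 1 over a width-3 output row gives step_width = 2
 * (the stride), kernel_size = 9 (kernel_depth defaults to 1), and
 *   step_height = 9 + (3 - 1) * 2 * 3 * 1 = 21
 * pointers per output row, i.e. the 21-pointer count derived above.
 */
static inline size_t example_dwconv_step_height(
    size_t kernel_size, /* kernel_depth * kernel_height * kernel_width */
    size_t output_width,
    size_t step_width,
    size_t kernel_height,
    size_t kernel_depth) {
  return kernel_size +
      (output_width - 1) * step_width * kernel_height * kernel_depth;
}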
| 21,849 | 43.320487 | 86 | c | null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/init.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#ifdef _MSC_VER
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <cpuinfo.h>
#include <pytorch_qnnpack.h>
#include <qnnpack/log.h>
#include <qnnpack/params.h>
#include <qnnpack/q8avgpool.h>
#include <qnnpack/q8conv.h>
#include <qnnpack/q8dwconv.h>
#include <qnnpack/q8gavgpool.h>
#include <qnnpack/q8gemm.h>
#include <qnnpack/q8gemm_sparse.h>
#include <qnnpack/q8vadd.h>
#include <qnnpack/u8clamp.h>
#include <qnnpack/u8lut32norm.h>
#include <qnnpack/u8maxpool.h>
#include <qnnpack/u8rmax.h>
#include <qnnpack/x8lut.h>
#include <qnnpack/x8zip.h>
#ifdef _MSC_VER
static INIT_ONCE init_guard;
BOOL CALLBACK pytorch_qnnp_init_win(PINIT_ONCE InitOnce, PVOID Parameter, PVOID* lpContex);
#else
static pthread_once_t init_guard = PTHREAD_ONCE_INIT;
#endif
struct pytorch_qnnp_parameters pytorch_qnnp_params = {.initialized = false};
static void init(void) {
#if CPUINFO_ARCH_ARM
if (!cpuinfo_has_arm_neon()) {
pytorch_qnnp_log_error(
"QNNPACK initialization failed: NEON is not supported");
return;
}
pytorch_qnnp_params.q8conv = (struct pytorch_q8conv_parameters){
.gemm = pytorch_q8gemm_ukernel_4x8__aarch32_neon,
.conv = pytorch_q8conv_ukernel_4x8__aarch32_neon,
.gemm_dq = pytorch_q8gemm_dq_ukernel_4x8__aarch32_neon,
.mr = 4,
.nr = 8,
.kr = 1,
};
pytorch_qnnp_params.q8gemm_sparse_c1x4 = (struct pytorch_q8gemm_sparse_parameters){
.gemm_dq = NULL,
.packedA_w32_gemm_dq = pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w32__aarch32_neon,
.packedA_w16_gemm_dq = pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w16__aarch32_neon,
.packedA_w8_gemm_dq = pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w8__aarch32_neon,
.packA = pytorch_q8gemm_sparse_packA_ukernel_4x4__aarch32_neon,
.mr = 4,
.nr = 8,
.kr = 4,
.log2_mr = 2,
.log2_row_block_size = 0,
.row_block_size = 1,
.col_block_size = 4,
};
pytorch_qnnp_params.q8gemm_sparse_c8x1 = (struct pytorch_q8gemm_sparse_parameters){
.gemm_dq = NULL,
.packedA_w32_gemm_dq = pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w32__aarch32_neon,
.packedA_w16_gemm_dq = pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w16__aarch32_neon,
.packedA_w8_gemm_dq = pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w8__aarch32_neon,
.packA = pytorch_q8gemm_sparse_packA_ukernel_4x4__aarch32_neon,
.mr = 4,
.nr = 8,
      .kr = 4, // kr is really 1 but we set it to 4 because we reuse the 4x4 prepacking kernel
.log2_mr = 2,
.log2_row_block_size = 3,
.row_block_size = 8,
.col_block_size = 1,
};
#if !PYTORCH_QNNPACK_RUNTIME_QUANTIZATION
pytorch_qnnp_params.q8conv_xzp = (struct pytorch_q8conv_xzp_parameters){
.gemm = pytorch_q8gemm_xzp_ukernel_4x8c2__aarch32_neon,
.mr = 4,
.nr = 8,
.kr = 2,
.kc = 8,
.kthreshold = SIZE_MAX,
};
/* setup xzp threshold based on measurements */
switch (cpuinfo_get_core(0)->uarch) {
case cpuinfo_uarch_cortex_a72:
pytorch_qnnp_params.q8conv_xzp.kthreshold = 64;
break;
case cpuinfo_uarch_cortex_a73:
pytorch_qnnp_params.q8conv_xzp.kthreshold = 256;
break;
case cpuinfo_uarch_cortex_a75:
pytorch_qnnp_params.q8conv_xzp.kthreshold = 32;
break;
case cpuinfo_uarch_cortex_a76:
pytorch_qnnp_params.q8conv_xzp.kthreshold = 16;
break;
default:
break;
}
#else
pytorch_qnnp_params.q8conv_xzp = (struct pytorch_q8conv_xzp_parameters){
.kthreshold = SIZE_MAX,
};
#endif
pytorch_qnnp_params.q8dw9 = (struct pytorch_q8dwconv2d_up_parameters){
.updw = pytorch_q8dwconv_ukernel_up8x9__aarch32_neon,
.updw_per_channel =
pytorch_q8dwconv_ukernel_up8x9_per_channel__aarch32_neon,
.cr = 8,
};
pytorch_qnnp_params.q8dw25 = (struct pytorch_q8dwconv2d_mp_parameters){
.mpdw = pytorch_q8dwconv_ukernel_mp8x25__neon,
.mpdw_per_channel = pytorch_q8dwconv_ukernel_mp8x25_per_channel__neon,
.cr = 8,
};
pytorch_qnnp_params.q8dw27 = (struct pytorch_q8dwconv3d_mp_parameters){
.mpdw = pytorch_q8dwconv_ukernel_mp8x27__neon,
.cr = 8,
};
pytorch_qnnp_params.q8sum_rows = (struct pytorch_q8sum_rows_parameters){
.sum_rows = pytorch_q8sumrows_ukernel_4x__neon,
.m = 4,
};
pytorch_qnnp_params.q8vadd = pytorch_q8vadd_ukernel__neon;
pytorch_qnnp_params.q8gavgpool = (struct pytorch_q8gavgpool_parameters){
.ltnr = pytorch_q8gavgpool_ukernel_up8xm__neon,
.genr_lemr = pytorch_q8gavgpool_ukernel_up8x7__neon,
.genr_gtmr = pytorch_q8gavgpool_ukernel_mp8x7p7q__neon,
.mr = 7,
.nr = 8,
};
pytorch_qnnp_params.q8avgpool = (struct pytorch_q8avgpool_parameters){
.ltkr = pytorch_q8avgpool_ukernel_up8xm__neon,
.gekr_lemr = pytorch_q8avgpool_ukernel_up8x9__neon,
.gekr_gtmr = pytorch_q8avgpool_ukernel_mp8x9p8q__neon,
.mr = 9,
.qr = 8,
.kr = 8,
};
pytorch_qnnp_params.u8maxpool = (struct pytorch_u8maxpool_parameters){
.ltkr = pytorch_u8maxpool_ukernel_sub16__neon,
.gekr = pytorch_u8maxpool_ukernel_16x9p8q__neon,
.mr = 9,
.qr = 8,
.kr = 16,
};
pytorch_qnnp_params.x8zip = (struct pytorch_x8zip_parameters){
.x2 = pytorch_qnnp_x8zip_x2__neon,
.x3 = pytorch_qnnp_x8zip_x3__neon,
.x4 = pytorch_qnnp_x8zip_x4__neon,
.xm = pytorch_qnnp_x8zip_xm__neon,
};
pytorch_qnnp_params.u8clamp = pytorch_u8clamp_ukernel__neon;
pytorch_qnnp_params.u8rmax = pytorch_u8rmax_ukernel__neon;
pytorch_qnnp_params.u8lut32norm = pytorch_u8lut32norm_ukernel__scalar;
pytorch_qnnp_params.x8lut = pytorch_x8lut_ukernel__scalar;
#elif CPUINFO_ARCH_ARM64
pytorch_qnnp_params.q8gemm_sparse_c1x4 = (struct pytorch_q8gemm_sparse_parameters){
.gemm_dq = NULL,
.packedA_w32_gemm_dq = pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w32__aarch64_neon,
.packedA_w16_gemm_dq = pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w16__aarch64_neon,
.packedA_w8_gemm_dq = pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w8__aarch64_neon,
.packA = pytorch_q8gemm_sparse_packA_ukernel_8x4__aarch64_neon,
.mr = 8,
.nr = 8,
.kr = 4,
.log2_mr = 3,
.log2_row_block_size = 0,
.row_block_size = 1,
.col_block_size = 4,
};
pytorch_qnnp_params.q8gemm_sparse_c8x1 = (struct pytorch_q8gemm_sparse_parameters){
.gemm_dq = NULL,
.packedA_w32_gemm_dq = pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w32__aarch64_neon,
.packedA_w16_gemm_dq = pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w16__aarch64_neon,
.packedA_w8_gemm_dq = pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w8__aarch64_neon,
.packA = pytorch_q8gemm_sparse_packA_ukernel_8x4__aarch64_neon,
.mr = 8,
.nr = 8,
      .kr = 4, // kr is really 1 but we set it to 4 because we reuse the 4x4 prepacking kernel
.log2_mr = 3,
.log2_row_block_size = 3,
.row_block_size = 8,
.col_block_size = 1,
};
pytorch_qnnp_params.q8conv = (struct pytorch_q8conv_parameters){
.gemm = pytorch_q8gemm_ukernel_8x8__aarch64_neon,
.conv = pytorch_q8conv_ukernel_8x8__aarch64_neon,
.gemm_dq = pytorch_q8gemm_dq_ukernel_8x8__aarch64_neon,
.mr = 8,
.nr = 8,
.kr = 1,
};
pytorch_qnnp_params.q8conv_xzp = (struct pytorch_q8conv_xzp_parameters){
.kthreshold = SIZE_MAX,
};
pytorch_qnnp_params.q8dw9 = (struct pytorch_q8dwconv2d_up_parameters){
.updw = pytorch_q8dwconv_ukernel_up8x9__neon,
.updw_per_channel = pytorch_q8dwconv_ukernel_up8x9_per_channel__neon,
.cr = 8,
};
pytorch_qnnp_params.q8dw25 = (struct pytorch_q8dwconv2d_mp_parameters){
.mpdw = pytorch_q8dwconv_ukernel_mp8x25__neon,
.mpdw_per_channel = pytorch_q8dwconv_ukernel_mp8x25_per_channel__neon,
.cr = 8,
};
pytorch_qnnp_params.q8dw27 = (struct pytorch_q8dwconv3d_mp_parameters){
.mpdw = pytorch_q8dwconv_ukernel_mp8x27__neon,
.cr = 8,
};
pytorch_qnnp_params.q8vadd = pytorch_q8vadd_ukernel__neon;
pytorch_qnnp_params.q8gavgpool = (struct pytorch_q8gavgpool_parameters){
.ltnr = pytorch_q8gavgpool_ukernel_up8xm__neon,
.genr_lemr = pytorch_q8gavgpool_ukernel_up8x7__neon,
.genr_gtmr = pytorch_q8gavgpool_ukernel_mp8x7p7q__neon,
.mr = 7,
.nr = 8,
};
pytorch_qnnp_params.q8avgpool = (struct pytorch_q8avgpool_parameters){
.ltkr = pytorch_q8avgpool_ukernel_up8xm__neon,
.gekr_lemr = pytorch_q8avgpool_ukernel_up8x9__neon,
.gekr_gtmr = pytorch_q8avgpool_ukernel_mp8x9p8q__neon,
.mr = 9,
.qr = 8,
.kr = 8,
};
pytorch_qnnp_params.u8maxpool = (struct pytorch_u8maxpool_parameters){
.ltkr = pytorch_u8maxpool_ukernel_sub16__neon,
.gekr = pytorch_u8maxpool_ukernel_16x9p8q__neon,
.mr = 9,
.qr = 8,
.kr = 16,
};
pytorch_qnnp_params.x8zip = (struct pytorch_x8zip_parameters){
.x2 = pytorch_qnnp_x8zip_x2__neon,
.x3 = pytorch_qnnp_x8zip_x3__neon,
.x4 = pytorch_qnnp_x8zip_x4__neon,
.xm = pytorch_qnnp_x8zip_xm__neon,
};
pytorch_qnnp_params.u8clamp = pytorch_u8clamp_ukernel__neon;
pytorch_qnnp_params.u8rmax = pytorch_u8rmax_ukernel__neon;
pytorch_qnnp_params.u8lut32norm = pytorch_u8lut32norm_ukernel__scalar;
pytorch_qnnp_params.x8lut = pytorch_x8lut_ukernel__scalar;
#elif CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
if (!cpuinfo_has_x86_sse2()) {
pytorch_qnnp_log_error(
"QNNPACK initialization failed: SSE2 is not supported");
return;
}
pytorch_qnnp_params.q8conv = (struct pytorch_q8conv_parameters){
.gemm = pytorch_q8gemm_ukernel_4x4c2__sse2,
.conv = pytorch_q8conv_ukernel_4x4c2__sse2,
.gemm_dq = pytorch_q8gemm_dq_ukernel_4x4c2__sse2,
.mr = 4,
.nr = 4,
.kr = 2,
};
pytorch_qnnp_params.q8gemm_sparse_c1x4 = (struct pytorch_q8gemm_sparse_parameters){
.gemm_dq = NULL,
.packedA_w32_gemm_dq = pytorch_q8gemm_dq_sparse_1x4_ukernel_8x4_packedA_w32__sse2,
.packedA_w16_gemm_dq = pytorch_q8gemm_dq_sparse_1x4_ukernel_8x4_packedA_w16__sse2,
.packedA_w8_gemm_dq = pytorch_q8gemm_dq_sparse_1x4_ukernel_8x4_packedA_w8__sse2,
.packA = pytorch_q8gemm_sparse_packA_ukernel_8x4__sse2,
.mr = 8,
.nr = 4,
.kr = 4,
.log2_mr = 3,
.log2_row_block_size = 0,
.row_block_size = 1,
.col_block_size = 4,
};
pytorch_qnnp_params.q8gemm_sparse_c8x1 = (struct pytorch_q8gemm_sparse_parameters){
.gemm_dq = NULL,
.packedA_w32_gemm_dq = NULL,
.packedA_w16_gemm_dq = NULL,
.packedA_w8_gemm_dq = NULL,
.packA = NULL,
.mr = 4,
.nr = 8,
.kr = 1,
.log2_mr = 2,
.log2_row_block_size = 3,
.row_block_size = 8,
.col_block_size = 1,
};
pytorch_qnnp_params.q8conv_xzp = (struct pytorch_q8conv_xzp_parameters){
.kthreshold = SIZE_MAX,
};
pytorch_qnnp_params.q8dw9 = (struct pytorch_q8dwconv2d_up_parameters){
.updw = pytorch_q8dwconv_ukernel_up8x9__sse2,
.updw_per_channel = pytorch_q8dwconv_ukernel_up8x9_per_channel__sse2,
.cr = 8,
};
pytorch_qnnp_params.q8dw25 = (struct pytorch_q8dwconv2d_mp_parameters){
.mpdw = pytorch_q8dwconv_ukernel_mp8x25__sse2,
.mpdw_per_channel = pytorch_q8dwconv_ukernel_mp8x25_per_channel__sse2,
.cr = 8,
};
pytorch_qnnp_params.q8dw27 = (struct pytorch_q8dwconv3d_mp_parameters){
.mpdw = pytorch_q8dwconv_ukernel_mp8x27__sse2,
.cr = 8,
};
pytorch_qnnp_params.q8vadd = pytorch_q8vadd_ukernel__sse2;
pytorch_qnnp_params.q8gavgpool = (struct pytorch_q8gavgpool_parameters){
.ltnr = pytorch_q8gavgpool_ukernel_up8xm__sse2,
.genr_lemr = pytorch_q8gavgpool_ukernel_up8x7__sse2,
.genr_gtmr = pytorch_q8gavgpool_ukernel_mp8x7p7q__sse2,
.mr = 7,
.nr = 8,
};
pytorch_qnnp_params.q8avgpool = (struct pytorch_q8avgpool_parameters){
.ltkr = pytorch_q8avgpool_ukernel_up8xm__sse2,
.gekr_lemr = pytorch_q8avgpool_ukernel_up8x9__sse2,
.gekr_gtmr = pytorch_q8avgpool_ukernel_mp8x9p8q__sse2,
.mr = 9,
.qr = 8,
.kr = 8,
};
pytorch_qnnp_params.u8maxpool = (struct pytorch_u8maxpool_parameters){
.ltkr = pytorch_u8maxpool_ukernel_sub16__sse2,
.gekr = pytorch_u8maxpool_ukernel_16x9p8q__sse2,
.mr = 9,
.qr = 8,
.kr = 16,
};
pytorch_qnnp_params.x8zip = (struct pytorch_x8zip_parameters){
.x2 = pytorch_qnnp_x8zip_x2__sse2,
.x3 = pytorch_qnnp_x8zip_x3__sse2,
.x4 = pytorch_qnnp_x8zip_x4__sse2,
.xm = pytorch_qnnp_x8zip_xm__sse2,
};
pytorch_qnnp_params.u8clamp = pytorch_u8clamp_ukernel__sse2;
pytorch_qnnp_params.u8rmax = pytorch_u8rmax_ukernel__sse2;
pytorch_qnnp_params.u8lut32norm = pytorch_u8lut32norm_ukernel__scalar;
pytorch_qnnp_params.x8lut = pytorch_x8lut_ukernel__scalar;
#else
#error "Unsupported architecture"
#endif
pytorch_qnnp_params.initialized = true;
}
enum pytorch_qnnp_status pytorch_qnnp_initialize(void) {
if (!cpuinfo_initialize()) {
return pytorch_qnnp_status_out_of_memory;
}
#ifdef _MSC_VER
InitOnceExecuteOnce(&init_guard, pytorch_qnnp_init_win, NULL, NULL);
#else
pthread_once(&init_guard, &init);
#endif
if (pytorch_qnnp_params.initialized) {
return pytorch_qnnp_status_success;
} else {
return pytorch_qnnp_status_unsupported_hardware;
}
}
enum pytorch_qnnp_status pytorch_qnnp_deinitialize(void) {
cpuinfo_deinitialize();
return pytorch_qnnp_status_success;
}
#ifdef _MSC_VER
BOOL CALLBACK pytorch_qnnp_init_win(PINIT_ONCE InitOnce, PVOID Parameter, PVOID* lpContex) {
init();
return TRUE;
}
#endif
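/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * initializes QNNPACK once before creating any operators and deinitializes it
 * when done; both entry points are defined above.
 */
static enum pytorch_qnnp_status example_qnnpack_lifecycle(void) {
  const enum pytorch_qnnp_status status = pytorch_qnnp_initialize();
  if (status != pytorch_qnnp_status_success) {
    return status; /* e.g. pytorch_qnnp_status_unsupported_hardware */
  }
  /* ... create, setup, and run operators here ... */
  return pytorch_qnnp_deinitialize();
}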
| 13,994 | 35.350649 | 96 | c | null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/leaky-relu.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <pytorch_qnnpack.h>
#include <qnnpack/log.h>
#include <qnnpack/operator.h>
enum pytorch_qnnp_status pytorch_qnnp_create_leaky_relu_nc_q8(
size_t channels,
float negative_slope,
uint8_t input_zero_point,
float input_scale,
uint8_t output_zero_point,
float output_scale,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
pytorch_qnnp_operator_t* leaky_relu_out) {
pytorch_qnnp_operator_t leaky_relu_op = NULL;
enum pytorch_qnnp_status status = pytorch_qnnp_status_uninitialized;
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_create_leaky_relu_nc_q8 failed because QNNPACK is not properly initialized");
goto error;
}
status = pytorch_qnnp_status_invalid_parameter;
if (channels == 0) {
pytorch_qnnp_log_error(
"failed to create Leaky ReLU operator with %zu channels: number of channels must be non-zero",
channels);
goto error;
}
if (negative_slope <= 0.0f || !isnormal(negative_slope)) {
pytorch_qnnp_log_error(
"failed to create Leaky ReLU operator with %.7g negative slope: slope must be finite and positive",
negative_slope);
goto error;
}
if (negative_slope > 1.0f) {
pytorch_qnnp_log_error(
"failed to create Leaky ReLU operator with %.7g negative slope: slope must not exceed 1.0",
negative_slope);
goto error;
}
if (input_scale <= 0.0f || !isnormal(input_scale)) {
pytorch_qnnp_log_error(
"failed to create Leaky ReLU operator with %.7g input scale: scale must be finite and positive",
input_scale);
goto error;
}
if (output_scale <= 0.0f || !isnormal(output_scale)) {
pytorch_qnnp_log_error(
"failed to create Leaky ReLU operator with %.7g output scale: scale must be finite and positive",
output_scale);
goto error;
}
if (output_min >= output_max) {
pytorch_qnnp_log_error(
"failed to create Leaky ReLU operator with [%" PRIu8 ", %" PRIu8
"] output range: range min must be below range max",
output_min,
output_max);
goto error;
}
status = pytorch_qnnp_status_unsupported_parameter;
const float input_output_scale = input_scale / output_scale;
if (input_output_scale < 0x1.0p-8f || input_output_scale >= 0x1.0p+8f) {
pytorch_qnnp_log_error(
"failed to create Leaky ReLU operator with %.7g input-to-output scale ratio: "
"scale ratio must be in [2**-8, 2**8) range",
input_output_scale);
goto error;
}
status = pytorch_qnnp_status_out_of_memory;
leaky_relu_op = calloc(1, sizeof(struct pytorch_qnnp_operator));
if (leaky_relu_op == NULL) {
pytorch_qnnp_log_error(
"failed to allocate %zu bytes for pytorch_qnnp_operator structure",
sizeof(struct pytorch_qnnp_operator));
goto error;
}
leaky_relu_op->lookup_table = malloc(256 * sizeof(uint8_t));
if (leaky_relu_op->lookup_table == NULL) {
pytorch_qnnp_log_error(
"failed to allocate 256 bytes for Leaky ReLU lookup table");
goto error;
}
uint8_t* lookup_table = leaky_relu_op->lookup_table;
const float scaled_min_less_zero_point =
(float)((int32_t)output_min - (int32_t)output_zero_point);
const float scaled_max_less_zero_point =
(float)((int32_t)output_max - (int32_t)output_zero_point);
for (int32_t i = 0; i < 256; i++) {
const float x =
input_output_scale * (float)(i - (int32_t)(uint32_t)input_zero_point);
float y = x < 0.0f ? x * negative_slope : x;
if (y < scaled_min_less_zero_point) {
y = scaled_min_less_zero_point;
}
if (y > scaled_max_less_zero_point) {
y = scaled_max_less_zero_point;
}
lookup_table[(uint32_t)i] = (uint8_t)(lrintf(y) + (long)output_zero_point);
}
leaky_relu_op->channels = channels;
leaky_relu_op->ukernel_type = pytorch_qnnp_ukernel_type_lut;
leaky_relu_op->format = pytorch_qnnp_format_quint8;
*leaky_relu_out = leaky_relu_op;
return pytorch_qnnp_status_success;
error:
pytorch_qnnp_delete_operator(leaky_relu_op);
return status;
}
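/*
 * Minimal sketch (illustrative, not part of QNNPACK) of how one lookup-table
 * entry above is derived, omitting the [output_min, output_max] clamp. For
 * example, with input_scale = 0.5, input_zero_point = 128, output_scale =
 * 0.25, output_zero_point = 10 and negative_slope = 0.01, quantized input
 * i = 100 gives x = 2.0 * (100 - 128) = -56, y = -0.56, and an entry of
 * lrintf(-0.56) + 10 = 9.
 */
static inline uint8_t example_leaky_relu_entry(
    int32_t i,
    float input_scale,
    int32_t input_zero_point,
    float output_scale,
    int32_t output_zero_point,
    float negative_slope) {
  const float x = (input_scale / output_scale) * (float)(i - input_zero_point);
  const float y = x < 0.0f ? x * negative_slope : x;
  return (uint8_t)(lrintf(y) + (long)output_zero_point);
}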
enum pytorch_qnnp_status pytorch_qnnp_setup_leaky_relu_nc_q8(
pytorch_qnnp_operator_t leaky_relu,
size_t batch_size,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride) {
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_setup_leaky_relu_nc_q8 failed because QNNPACK is not properly initialized");
return pytorch_qnnp_status_uninitialized;
}
if (batch_size == 0) {
leaky_relu->batch_size = 0;
return pytorch_qnnp_status_success;
}
leaky_relu->batch_size = batch_size;
leaky_relu->input = input;
leaky_relu->input_pixel_stride = input_stride;
leaky_relu->output = output;
leaky_relu->output_pixel_stride = output_stride;
return pytorch_qnnp_status_success;
}
| 5,225 | 29.741176 | 107 | c | null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/max-pooling.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <math.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <pytorch_qnnpack.h>
#include <qnnpack/common.h>
#include <qnnpack/indirection.h>
#include <qnnpack/log.h>
#include <qnnpack/math.h>
#include <qnnpack/operator.h>
#include <qnnpack/params.h>
static inline size_t compute_output_dimension(
size_t padded_input_dimension,
size_t kernel_dimension,
size_t dilation_dimension,
size_t stride_dimension) {
const size_t effective_kernel_dimension =
(kernel_dimension - 1) * dilation_dimension + 1;
return (padded_input_dimension - effective_kernel_dimension) /
stride_dimension +
1;
}
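/*
 * Worked example (illustrative): a width-7 input with 1 pixel of padding on
 * each side, a width-3 kernel, dilation 1 and stride 2 yields
 *   compute_output_dimension(7 + 2 * 1, 3, 1, 2)
 *     = (9 - ((3 - 1) * 1 + 1)) / 2 + 1
 *     = 4
 * output pixels along that dimension.
 */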
enum pytorch_qnnp_status pytorch_qnnp_create_max_pooling2d_nhwc_u8(
uint32_t input_padding_height,
uint32_t input_padding_width,
uint32_t pooling_height,
uint32_t pooling_width,
uint32_t stride_height,
uint32_t stride_width,
uint32_t dilation_height,
uint32_t dilation_width,
size_t channels,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
pytorch_qnnp_operator_t* max_pooling_out) {
pytorch_qnnp_operator_t max_pooling = NULL;
enum pytorch_qnnp_status status = pytorch_qnnp_status_uninitialized;
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_create_max_pooling2d_nhwc_u8 failed because QNNPACK is not properly initialized");
goto error;
}
status = pytorch_qnnp_status_invalid_parameter;
const uint32_t pooling_size = pooling_height * pooling_width;
if (pooling_size == 0) {
pytorch_qnnp_log_error(
"failed to create max pooling with %" PRIu32 "x%" PRIu32
" pooling size: "
"pooling size dimensions must be non-zero",
pooling_width,
pooling_height);
goto error;
}
if (pooling_size == 1) {
pytorch_qnnp_log_error(
"failed to create max pooling with 1 pooling element: "
"1x1 pooling is meaningless");
goto error;
}
if (stride_height == 0 || stride_width == 0) {
pytorch_qnnp_log_error(
"failed to create max pooling with %" PRIu32 "x%" PRIu32
" stride: "
"stride dimensions must be non-zero",
stride_width,
stride_height);
goto error;
}
if (dilation_height == 0 || dilation_width == 0) {
pytorch_qnnp_log_error(
"failed to create max pooling with %" PRIu32 "x%" PRIu32
" dilation: "
"dilation dimensions must be non-zero",
dilation_width,
dilation_height);
goto error;
}
if (channels == 0) {
pytorch_qnnp_log_error(
"failed to create max pooling with %zu channels: "
"number of channels must be non-zero",
channels);
goto error;
}
status = pytorch_qnnp_status_out_of_memory;
max_pooling = calloc(1, sizeof(struct pytorch_qnnp_operator));
if (max_pooling == NULL) {
pytorch_qnnp_log_error(
"failed to allocate %zu bytes for pytorch_qnnp_operator structure",
sizeof(struct pytorch_qnnp_operator));
goto error;
}
max_pooling->input_padding_height = input_padding_height;
max_pooling->input_padding_width = input_padding_width;
max_pooling->kernel_height = pooling_height;
max_pooling->kernel_width = pooling_width;
max_pooling->stride_height = stride_height;
max_pooling->stride_width = stride_width;
max_pooling->dilation_height = dilation_height;
max_pooling->dilation_width = dilation_width;
max_pooling->channels = channels;
max_pooling->u8_clamping_params =
pytorch_qnnp_compute_u8_clamping_params(output_min, output_max);
max_pooling->ukernel_type = pytorch_qnnp_ukernel_type_max_pooling;
max_pooling->format = pytorch_qnnp_format_quint8;
*max_pooling_out = max_pooling;
return pytorch_qnnp_status_success;
error:
pytorch_qnnp_delete_operator(max_pooling);
return status;
}
enum pytorch_qnnp_status pytorch_qnnp_setup_max_pooling2d_nhwc_u8(
pytorch_qnnp_operator_t max_pooling,
size_t batch_size,
size_t input_height,
size_t input_width,
const uint8_t* input,
size_t input_pixel_stride,
uint8_t* output,
size_t output_pixel_stride,
pthreadpool_t threadpool) {
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_setup_max_pooling2d_nhwc_u8 failed because QNNPACK is not properly initialized");
return pytorch_qnnp_status_uninitialized;
}
if (batch_size == 0) {
max_pooling->batch_size = 0;
return pytorch_qnnp_status_success;
}
if (input_width == 0 || input_height == 0) {
pytorch_qnnp_log_error(
"failed to setup max pooling with %zux%zu input: input dimensions must be non-zero",
input_width,
input_height);
return pytorch_qnnp_status_invalid_parameter;
}
max_pooling->batch_size = batch_size;
max_pooling->input_height = input_height;
max_pooling->input_width = input_width;
max_pooling->input = input;
max_pooling->input_pixel_stride = input_pixel_stride;
max_pooling->output_height = compute_output_dimension(
input_height + max_pooling->input_padding_height * 2,
max_pooling->kernel_height,
max_pooling->dilation_height,
max_pooling->stride_height);
max_pooling->output_width = compute_output_dimension(
input_width + max_pooling->input_padding_width * 2,
max_pooling->kernel_width,
max_pooling->dilation_width,
max_pooling->stride_width);
max_pooling->output = output;
max_pooling->output_pixel_stride = output_pixel_stride;
size_t valid_batch_size = 0;
if (input == max_pooling->last_input &&
input_height == max_pooling->last_input_height &&
input_width == max_pooling->last_input_width) {
valid_batch_size = max_pooling->valid_batch_size;
if (batch_size <= valid_batch_size) {
return pytorch_qnnp_status_success;
}
}
/* Micro-kernel may read up to (mr - 1) elements after the end of indirection
* buffer */
const uint32_t mr = pytorch_qnnp_params.u8maxpool.mr;
pytorch_qnnp_indirection_set_step_dimensions(max_pooling);
const size_t indirection_buffer_size = sizeof(void*) *
((mr - 1) +
batch_size * max_pooling->output_height * max_pooling->step_height);
const void** indirection_buffer = (const void**)realloc(
max_pooling->indirection_buffer, indirection_buffer_size);
if (indirection_buffer == NULL) {
pytorch_qnnp_log_error(
"failed to allocate %zu bytes for indirection buffer",
indirection_buffer_size);
return pytorch_qnnp_status_out_of_memory;
}
max_pooling->indirection_buffer = indirection_buffer;
pytorch_qnnp_indirection_init_maxpool2d(max_pooling, valid_batch_size);
max_pooling->last_input = input;
max_pooling->last_input_height = input_height;
max_pooling->last_input_width = input_width;
max_pooling->valid_batch_size = max(valid_batch_size, batch_size);
return pytorch_qnnp_status_success;
}
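/*
 * Worked sizing example (illustrative): for the 7x5 input, 3x3 pooling,
 * stride-2 case described in indirection.c, the output is 3x2, step_width = 2,
 * step_height = 9 + (3 - 1) * 2 * 3 = 21, and with mr = 9 the indirection
 * buffer for batch_size = 1 holds (9 - 1) + 1 * 2 * 21 = 50 pointers, i.e.
 * 400 bytes when sizeof(void*) == 8.
 */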
| 7,200 | 30.445415 | 104 | c | null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/operator-delete.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <stdlib.h>
#include <pytorch_qnnpack.h>
#include <qnnpack/operator.h>
enum pytorch_qnnp_status pytorch_qnnp_delete_operator(
pytorch_qnnp_operator_t op) {
if (op == NULL) {
return pytorch_qnnp_status_invalid_parameter;
}
free(op->indirection_buffer);
free(op->packed_weights);
free(op->a_sum);
free(op->zero_buffer);
free(op->lookup_table);
free(op);
return pytorch_qnnp_status_success;
}
| 646 | 22.107143 | 72 | c | null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/sigmoid.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <pytorch_qnnpack.h>
#include <qnnpack/log.h>
#include <qnnpack/operator.h>
enum pytorch_qnnp_status pytorch_qnnp_create_sigmoid_nc_q8(
size_t channels,
uint8_t input_zero_point,
float input_scale,
uint8_t output_zero_point,
float output_scale,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
pytorch_qnnp_operator_t* sigmoid_out) {
pytorch_qnnp_operator_t sigmoid_op = NULL;
enum pytorch_qnnp_status status = pytorch_qnnp_status_uninitialized;
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_create_sigmoid_nc_q8 failed because QNNPACK is not properly initialized");
goto error;
}
status = pytorch_qnnp_status_invalid_parameter;
if (channels == 0) {
pytorch_qnnp_log_error(
"failed to create Sigmoid operator with %zu channels: number of channels must be non-zero",
channels);
goto error;
}
if (input_scale <= 0.0f || !isnormal(input_scale)) {
pytorch_qnnp_log_error(
"failed to create Sigmoid operator with %.7g input scale: scale must be finite and positive",
input_scale);
goto error;
}
if (output_scale <= 0.0f || !isnormal(output_scale)) {
pytorch_qnnp_log_error(
"failed to create Sigmoid operator with %.7g output scale: scale must be finite and positive",
output_scale);
goto error;
}
if (output_min >= output_max) {
pytorch_qnnp_log_error(
"failed to create Sigmoid operator with [%" PRIu8 ", %" PRIu8
"] output range: range min must be below range max",
output_min,
output_max);
goto error;
}
status = pytorch_qnnp_status_unsupported_parameter;
if (output_scale != 0x1.0p-8f) {
pytorch_qnnp_log_error(
"failed to create Sigmoid operator with %.7g output scale: only output scale of 1/256 is supported",
output_scale);
goto error;
}
if (output_zero_point != 0) {
pytorch_qnnp_log_error(
"failed to create Sigmoid operator with %" PRIu8
" output zero point: only output zero point of 0 is supported",
output_zero_point);
goto error;
}
status = pytorch_qnnp_status_out_of_memory;
sigmoid_op = calloc(1, sizeof(struct pytorch_qnnp_operator));
if (sigmoid_op == NULL) {
pytorch_qnnp_log_error(
"failed to allocate %zu bytes for pytorch_qnnp_operator structure",
sizeof(struct pytorch_qnnp_operator));
goto error;
}
sigmoid_op->lookup_table = malloc(256 * sizeof(uint8_t));
if (sigmoid_op->lookup_table == NULL) {
pytorch_qnnp_log_error(
"failed to allocate 256 bytes for Sigmoid lookup table");
goto error;
}
uint8_t* lookup_table = sigmoid_op->lookup_table;
const float scaled_min = (float)(int32_t)output_min;
const float scaled_max = (float)(int32_t)output_max;
for (int32_t i = 0; i < 256; i++) {
const float x =
input_scale * (float)(i - (int32_t)(uint32_t)input_zero_point);
/* Scale sigmoid(x) by 1 / output scale = 256.0 */
float scaled_sigmoid_x = 256.0f / (1.0f + expf(-x));
if (scaled_sigmoid_x < scaled_min) {
scaled_sigmoid_x = scaled_min;
}
if (scaled_sigmoid_x > scaled_max) {
scaled_sigmoid_x = scaled_max;
}
lookup_table[(uint32_t)i] = (uint8_t)lrintf(scaled_sigmoid_x);
}
sigmoid_op->channels = channels;
sigmoid_op->ukernel_type = pytorch_qnnp_ukernel_type_lut;
sigmoid_op->format = pytorch_qnnp_format_quint8;
*sigmoid_out = sigmoid_op;
return pytorch_qnnp_status_success;
error:
pytorch_qnnp_delete_operator(sigmoid_op);
return status;
}
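/*
 * Worked example (illustrative): since the output scale is fixed to 1/256 and
 * the output zero point to 0, the table above stores lrintf(256 / (1 + exp(-x)))
 * clamped to [output_min, output_max]. With input_zero_point = 128 and
 * input_scale = 0.5, the entry for i = 128 is x = 0, sigmoid(0) = 0.5, so
 * lookup_table[128] = 128; for i = 0, x = -64 and the entry rounds to 0
 * (assuming output_min = 0).
 */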
enum pytorch_qnnp_status pytorch_qnnp_setup_sigmoid_nc_q8(
pytorch_qnnp_operator_t sigmoid,
size_t batch_size,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride) {
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_setup_sigmoid_nc_q8 failed because QNNPACK is not properly initialized");
return pytorch_qnnp_status_uninitialized;
}
if (batch_size == 0) {
sigmoid->batch_size = 0;
return pytorch_qnnp_status_success;
}
sigmoid->batch_size = batch_size;
sigmoid->input = input;
sigmoid->input_pixel_stride = input_stride;
sigmoid->output = output;
sigmoid->output_pixel_stride = output_stride;
return pytorch_qnnp_status_success;
}
| 4,694 | 28.34375 | 108 | c | null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/softargmax.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <pytorch_qnnpack.h>
#include <qnnpack/log.h>
#include <qnnpack/operator.h>
enum pytorch_qnnp_status pytorch_qnnp_create_softargmax_nc_q8(
size_t channels,
float input_scale,
uint8_t output_zero_point,
float output_scale,
uint32_t flags,
pytorch_qnnp_operator_t* softargmax_out) {
pytorch_qnnp_operator_t softargmax_op = NULL;
enum pytorch_qnnp_status status = pytorch_qnnp_status_uninitialized;
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_create_softargmax_nc_q8 failed because QNNPACK is not properly initialized");
goto error;
}
status = pytorch_qnnp_status_invalid_parameter;
if (channels == 0) {
pytorch_qnnp_log_error(
"failed to create Soft ArgMax operator with %zu channels: number of channels must be non-zero",
channels);
goto error;
}
if (input_scale <= 0.0f || !isnormal(input_scale)) {
pytorch_qnnp_log_error(
"failed to create Soft ArgMax operator with %.7g input scale: scale must be finite and positive",
input_scale);
goto error;
}
if (output_scale <= 0.0f || !isnormal(output_scale)) {
pytorch_qnnp_log_error(
"failed to create Soft ArgMax operator with %.7g output scale: scale must be finite and positive",
output_scale);
goto error;
}
status = pytorch_qnnp_status_unsupported_parameter;
if (output_scale != 0x1.0p-8f) {
pytorch_qnnp_log_error(
"failed to create Soft ArgMax operator with %.7g output scale: only output scale of 1/256 is supported",
output_scale);
goto error;
}
if (output_zero_point != 0) {
pytorch_qnnp_log_error(
"failed to create Soft ArgMax operator with %" PRIu8
" output zero point: only output zero point of 0 is supported",
output_zero_point);
goto error;
}
status = pytorch_qnnp_status_out_of_memory;
softargmax_op = calloc(1, sizeof(struct pytorch_qnnp_operator));
if (softargmax_op == NULL) {
pytorch_qnnp_log_error(
"failed to allocate %zu bytes for pytorch_qnnp_operator structure",
sizeof(struct pytorch_qnnp_operator));
goto error;
}
softargmax_op->lookup_table = malloc(256 * sizeof(uint32_t));
if (softargmax_op->lookup_table == NULL) {
pytorch_qnnp_log_error(
"failed to allocate 256 bytes for Soft ArgMax lookup table");
goto error;
}
uint32_t* lookup_table = softargmax_op->lookup_table;
const double qscale =
fmin(((double)UINT32_MAX) / (double)channels, 8388607.0);
for (int32_t i = 0; i < 256; i++) {
const double scaled_exp_xi =
qscale * exp((double)(i - 255) * (double)input_scale);
lookup_table[(uint32_t)i] = (uint32_t)lrint(scaled_exp_xi);
}
softargmax_op->channels = channels;
softargmax_op->ukernel_type = pytorch_qnnp_ukernel_type_softargmax;
softargmax_op->format = pytorch_qnnp_format_quint8;
*softargmax_out = softargmax_op;
return pytorch_qnnp_status_success;
error:
pytorch_qnnp_delete_operator(softargmax_op);
return status;
}
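/*
 * Note on the qscale choice above (illustrative reasoning): each table entry
 * holds qscale * exp((i - 255) * input_scale), so the largest entry (i = 255)
 * is qscale itself, and a sum over `channels` entries cannot overflow a
 * uint32_t because qscale <= UINT32_MAX / channels. The additional 8388607.0
 * (2^23 - 1) cap presumably keeps individual entries within the precision of
 * downstream arithmetic. For example, with channels = 256,
 * qscale = fmin(UINT32_MAX / 256.0, 8388607.0) = 8388607.
 */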
enum pytorch_qnnp_status pytorch_qnnp_setup_softargmax_nc_q8(
pytorch_qnnp_operator_t softargmax,
size_t batch_size,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride) {
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_setup_softargmax_nc_q8 failed because QNNPACK is not properly initialized");
return pytorch_qnnp_status_uninitialized;
}
if (batch_size == 0) {
softargmax->batch_size = 0;
return pytorch_qnnp_status_success;
}
softargmax->batch_size = batch_size;
softargmax->input = input;
softargmax->input_pixel_stride = input_stride;
softargmax->output = output;
softargmax->output_pixel_stride = output_stride;
return pytorch_qnnp_status_success;
}
| 4,165 | 28.757143 | 112 | c | null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/tanh.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <pytorch_qnnpack.h>
#include <qnnpack/log.h>
#include <qnnpack/operator.h>
enum pytorch_qnnp_status pytorch_qnnp_create_tanh_nc_q8(
size_t channels,
uint8_t input_zero_point,
float input_scale,
uint8_t output_zero_point,
float output_scale,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
pytorch_qnnp_operator_t* tanh_out) {
pytorch_qnnp_operator_t tanh_op = NULL;
enum pytorch_qnnp_status status = pytorch_qnnp_status_uninitialized;
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_create_tanh_nc_q8 failed because QNNPACK is not properly initialized");
goto error;
}
status = pytorch_qnnp_status_invalid_parameter;
if (channels == 0) {
pytorch_qnnp_log_error(
"failed to create TanH operator with %zu channels: number of channels must be non-zero",
channels);
goto error;
}
if (input_scale <= 0.0f || !isnormal(input_scale)) {
pytorch_qnnp_log_error(
"failed to create TanH operator with %.7g input scale: scale must be finite and positive",
input_scale);
goto error;
}
if (output_scale <= 0.0f || !isnormal(output_scale)) {
pytorch_qnnp_log_error(
"failed to create TanH operator with %.7g output scale: scale must be finite and positive",
output_scale);
goto error;
}
if (output_min >= output_max) {
pytorch_qnnp_log_error(
"failed to create TanH operator with [%" PRIu8 ", %" PRIu8
"] output range: range min must be below range max",
output_min,
output_max);
goto error;
}
status = pytorch_qnnp_status_unsupported_parameter;
if (output_scale != 0x2.0p-8f) { // [-1, 1] range in 8 bits = 2.0 / 256
pytorch_qnnp_log_error(
"failed to create TanH operator with %.7g output scale: only output scale of 2/256 is supported",
output_scale);
goto error;
}
if (output_zero_point != 128) {
pytorch_qnnp_log_error(
"failed to create TanH operator with %" PRIu8
" output zero point: only output zero point of 128 is supported",
output_zero_point);
goto error;
}
status = pytorch_qnnp_status_out_of_memory;
tanh_op = calloc(1, sizeof(struct pytorch_qnnp_operator));
if (tanh_op == NULL) {
pytorch_qnnp_log_error(
"failed to allocate %zu bytes for pytorch_qnnp_operator structure",
sizeof(struct pytorch_qnnp_operator));
goto error;
}
tanh_op->lookup_table = malloc(256 * sizeof(uint8_t));
if (tanh_op->lookup_table == NULL) {
pytorch_qnnp_log_error(
"failed to allocate 256 bytes for TanH lookup table");
goto error;
}
uint8_t* lookup_table = tanh_op->lookup_table;
const float scaled_min = (float)(int32_t)output_min;
const float scaled_max = (float)(int32_t)output_max;
for (int32_t i = 0; i < 256; i++) {
const float x =
input_scale * (float)(i - (int32_t)(uint32_t)input_zero_point);
    /* Scale tanh(x) by 1 / output scale = 128.0.
       Also offset the scaled value by the output zero point (128), since the
       output is an unsigned 8-bit quantized value. */
float scaled_tanh_x = 128.0f * tanhf(x) + 128.0f;
if (scaled_tanh_x < scaled_min) {
scaled_tanh_x = scaled_min;
}
if (scaled_tanh_x > scaled_max) {
scaled_tanh_x = scaled_max;
}
lookup_table[(uint32_t)i] = (uint8_t)lrintf(scaled_tanh_x);
}
tanh_op->channels = channels;
tanh_op->ukernel_type = pytorch_qnnp_ukernel_type_lut;
tanh_op->format = pytorch_qnnp_format_quint8;
*tanh_out = tanh_op;
return pytorch_qnnp_status_success;
error:
pytorch_qnnp_delete_operator(tanh_op);
return status;
}
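/*
 * Worked example (illustrative): with the required output scale of 2/256 and
 * output zero point of 128, the table above stores lrintf(128 * tanhf(x) + 128)
 * clamped to [output_min, output_max]. With input_zero_point = 128, the entry
 * for i = 128 is x = 0, tanh(0) = 0, so lookup_table[128] = 128; large positive
 * x saturates toward output_max and large negative x toward output_min.
 */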
enum pytorch_qnnp_status pytorch_qnnp_setup_tanh_nc_q8(
pytorch_qnnp_operator_t tanh,
size_t batch_size,
const uint8_t* input,
size_t input_stride,
uint8_t* output,
size_t output_stride) {
if (!pytorch_qnnp_params.initialized) {
pytorch_qnnp_log_error(
"pytorch_qnnp_setup_tanh_nc_q8 failed because QNNPACK is not properly initialized");
return pytorch_qnnp_status_uninitialized;
}
if (batch_size == 0) {
tanh->batch_size = 0;
return pytorch_qnnp_status_success;
}
tanh->batch_size = batch_size;
tanh->input = input;
tanh->input_pixel_stride = input_stride;
tanh->output = output;
tanh->output_pixel_stride = output_stride;
return pytorch_qnnp_status_success;
}
| 4,708 | 28.067901 | 105 | c | null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/hgemm/8x8-neonfp16arith.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <arm_neon.h>
#include <qnnpack/hgemm.h>
void pytorch_hgemm_ukernel_8x8__neonfp16arith(
size_t mr,
size_t nr,
size_t k,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t c_stride,
const struct pytorch_qnnp_fp16_clamping_params
clamping_params[restrict static 1]) {
float16x8_t vacc0x01234567 = vld1q_f16(w);
w = (void*)((uintptr_t)w + sizeof(float16x8_t));
float16x8_t vacc1x01234567 = vacc0x01234567;
float16x8_t vacc2x01234567 = vacc0x01234567;
float16x8_t vacc3x01234567 = vacc0x01234567;
float16x8_t vacc4x01234567 = vacc0x01234567;
float16x8_t vacc5x01234567 = vacc0x01234567;
float16x8_t vacc6x01234567 = vacc0x01234567;
float16x8_t vacc7x01234567 = vacc0x01234567;
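  /*
   * Rows of A beyond mr alias the last valid row so the kernel can always
   * operate on 8 rows; the corresponding rows of C are aliased the same way
   * further below, and the duplicated results simply overwrite each other.
   */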
const __fp16* a0 = a;
const __fp16* a1 = (const __fp16*)((uintptr_t)a0 + a_stride);
if (mr < 2) {
a1 = a0;
}
const __fp16* a2 = (const __fp16*)((uintptr_t)a1 + a_stride);
if (mr <= 2) {
a2 = a1;
}
const __fp16* a3 = (const __fp16*)((uintptr_t)a2 + a_stride);
if (mr < 4) {
a3 = a2;
}
const __fp16* a4 = (const __fp16*)((uintptr_t)a3 + a_stride);
if (mr <= 4) {
a4 = a3;
}
const __fp16* a5 = (const __fp16*)((uintptr_t)a4 + a_stride);
if (mr < 6) {
a5 = a4;
}
const __fp16* a6 = (const __fp16*)((uintptr_t)a5 + a_stride);
if (mr <= 6) {
a6 = a5;
}
const __fp16* a7 = (const __fp16*)((uintptr_t)a6 + a_stride);
if (mr != 8) {
a7 = a6;
}
for (; k >= 4; k -= 4) {
const float16x4_t va0 = vld1_f16(a0);
a0 += 4;
const float16x4_t va1 = vld1_f16(a1);
a1 += 4;
const float16x4_t va2 = vld1_f16(a2);
a2 += 4;
const float16x4_t va3 = vld1_f16(a3);
a3 += 4;
const float16x4_t va4 = vld1_f16(a4);
a4 += 4;
const float16x4_t va5 = vld1_f16(a5);
a5 += 4;
const float16x4_t va6 = vld1_f16(a6);
a6 += 4;
const float16x4_t va7 = vld1_f16(a7);
a7 += 4;
{
const float16x8_t vb01234567 = vld1q_f16(w);
w = (void*)((uintptr_t)w + sizeof(float16x8_t));
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567, va0, 0);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567, va1, 0);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567, va2, 0);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567, va3, 0);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567, va4, 0);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567, va5, 0);
vacc6x01234567 = vmlaq_lane_f16(vacc6x01234567, vb01234567, va6, 0);
vacc7x01234567 = vmlaq_lane_f16(vacc7x01234567, vb01234567, va7, 0);
}
{
const float16x8_t vb01234567 = vld1q_f16(w);
w = (void*)((uintptr_t)w + sizeof(float16x8_t));
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567, va0, 1);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567, va1, 1);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567, va2, 1);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567, va3, 1);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567, va4, 1);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567, va5, 1);
vacc6x01234567 = vmlaq_lane_f16(vacc6x01234567, vb01234567, va6, 1);
vacc7x01234567 = vmlaq_lane_f16(vacc7x01234567, vb01234567, va7, 1);
}
{
const float16x8_t vb01234567 = vld1q_f16(w);
w = (void*)((uintptr_t)w + sizeof(float16x8_t));
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567, va0, 2);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567, va1, 2);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567, va2, 2);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567, va3, 2);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567, va4, 2);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567, va5, 2);
vacc6x01234567 = vmlaq_lane_f16(vacc6x01234567, vb01234567, va6, 2);
vacc7x01234567 = vmlaq_lane_f16(vacc7x01234567, vb01234567, va7, 2);
}
{
const float16x8_t vb01234567 = vld1q_f16(w);
w = (void*)((uintptr_t)w + sizeof(float16x8_t));
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567, va0, 3);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567, va1, 3);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567, va2, 3);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567, va3, 3);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567, va4, 3);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567, va5, 3);
vacc6x01234567 = vmlaq_lane_f16(vacc6x01234567, vb01234567, va6, 3);
vacc7x01234567 = vmlaq_lane_f16(vacc7x01234567, vb01234567, va7, 3);
}
}
if (k != 0) {
const size_t a_predecrement = 4 - k;
const int64x1_t va_shift = vmov_n_s64(-16 * a_predecrement);
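    /*
     * The remaining k (< 4) columns of each A row are handled by loading one
     * full float16x4_t that ends at the last valid element and shifting the
     * leading, already-processed lanes out of the vector, so only the valid
     * values feed the multiply-accumulates below.
     */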
const float16x4_t va0 = vreinterpret_f16_u64(vshl_u64(
vreinterpret_u64_f16(vld1_f16(a0 - a_predecrement)), va_shift));
const float16x4_t va1 = vreinterpret_f16_u64(vshl_u64(
vreinterpret_u64_f16(vld1_f16(a1 - a_predecrement)), va_shift));
const float16x4_t va2 = vreinterpret_f16_u64(vshl_u64(
vreinterpret_u64_f16(vld1_f16(a2 - a_predecrement)), va_shift));
const float16x4_t va3 = vreinterpret_f16_u64(vshl_u64(
vreinterpret_u64_f16(vld1_f16(a3 - a_predecrement)), va_shift));
const float16x4_t va4 = vreinterpret_f16_u64(vshl_u64(
vreinterpret_u64_f16(vld1_f16(a4 - a_predecrement)), va_shift));
const float16x4_t va5 = vreinterpret_f16_u64(vshl_u64(
vreinterpret_u64_f16(vld1_f16(a5 - a_predecrement)), va_shift));
const float16x4_t va6 = vreinterpret_f16_u64(vshl_u64(
vreinterpret_u64_f16(vld1_f16(a6 - a_predecrement)), va_shift));
const float16x4_t va7 = vreinterpret_f16_u64(vshl_u64(
vreinterpret_u64_f16(vld1_f16(a7 - a_predecrement)), va_shift));
{
const float16x8_t vb01234567 = vld1q_f16(w);
w = (void*)((uintptr_t)w + sizeof(float16x8_t));
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567, va0, 0);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567, va1, 0);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567, va2, 0);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567, va3, 0);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567, va4, 0);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567, va5, 0);
vacc6x01234567 = vmlaq_lane_f16(vacc6x01234567, vb01234567, va6, 0);
vacc7x01234567 = vmlaq_lane_f16(vacc7x01234567, vb01234567, va7, 0);
}
if (k >= 2) {
const float16x8_t vb01234567 = vld1q_f16(w);
w = (void*)((uintptr_t)w + sizeof(float16x8_t));
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567, va0, 1);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567, va1, 1);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567, va2, 1);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567, va3, 1);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567, va4, 1);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567, va5, 1);
vacc6x01234567 = vmlaq_lane_f16(vacc6x01234567, vb01234567, va6, 1);
vacc7x01234567 = vmlaq_lane_f16(vacc7x01234567, vb01234567, va7, 1);
if (k > 2) {
const float16x8_t vb01234567 = vld1q_f16(w);
w = (void*)((uintptr_t)w + sizeof(float16x8_t));
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567, va0, 2);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567, va1, 2);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567, va2, 2);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567, va3, 2);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567, va4, 2);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567, va5, 2);
vacc6x01234567 = vmlaq_lane_f16(vacc6x01234567, vb01234567, va6, 2);
vacc7x01234567 = vmlaq_lane_f16(vacc7x01234567, vb01234567, va7, 2);
if (k >= 4) {
const float16x8_t vb01234567 = vld1q_f16(w);
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567, va0, 3);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567, va1, 3);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567, va2, 3);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567, va3, 3);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567, va4, 3);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567, va5, 3);
vacc6x01234567 = vmlaq_lane_f16(vacc6x01234567, vb01234567, va6, 3);
vacc7x01234567 = vmlaq_lane_f16(vacc7x01234567, vb01234567, va7, 3);
}
}
}
}
const float16x8_t vscale =
vld1q_dup_f16((const __fp16*)&clamping_params->scale);
vacc0x01234567 = vmulq_f16(vacc0x01234567, vscale);
vacc1x01234567 = vmulq_f16(vacc1x01234567, vscale);
vacc2x01234567 = vmulq_f16(vacc2x01234567, vscale);
vacc3x01234567 = vmulq_f16(vacc3x01234567, vscale);
vacc4x01234567 = vmulq_f16(vacc4x01234567, vscale);
vacc5x01234567 = vmulq_f16(vacc5x01234567, vscale);
vacc6x01234567 = vmulq_f16(vacc6x01234567, vscale);
vacc7x01234567 = vmulq_f16(vacc7x01234567, vscale);
const float16x8_t vmax = vld1q_dup_f16((const __fp16*)&clamping_params->max);
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
vacc4x01234567 = vminq_f16(vacc4x01234567, vmax);
vacc5x01234567 = vminq_f16(vacc5x01234567, vmax);
vacc6x01234567 = vminq_f16(vacc6x01234567, vmax);
vacc7x01234567 = vminq_f16(vacc7x01234567, vmax);
const float16x8_t vmin = vld1q_dup_f16((const __fp16*)&clamping_params->min);
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
vacc4x01234567 = vmaxq_f16(vacc4x01234567, vmin);
vacc5x01234567 = vmaxq_f16(vacc5x01234567, vmin);
vacc6x01234567 = vmaxq_f16(vacc6x01234567, vmin);
vacc7x01234567 = vmaxq_f16(vacc7x01234567, vmin);
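  /*
   * Output row pointers: rows beyond the actual row count mr alias the
   * previous row, so the unconditional stores below rewrite valid memory with
   * duplicate data instead of running past the output tile.
   */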
__fp16* c0 = c;
__fp16* c1 = (__fp16*)((uintptr_t)c0 + c_stride);
if (mr < 2) {
c1 = c0;
}
__fp16* c2 = (__fp16*)((uintptr_t)c1 + c_stride);
if (mr <= 2) {
c2 = c1;
}
__fp16* c3 = (__fp16*)((uintptr_t)c2 + c_stride);
if (mr < 4) {
c3 = c2;
}
__fp16* c4 = (__fp16*)((uintptr_t)c3 + c_stride);
if (mr <= 4) {
c4 = c3;
}
__fp16* c5 = (__fp16*)((uintptr_t)c4 + c_stride);
if (mr < 6) {
c5 = c4;
}
__fp16* c6 = (__fp16*)((uintptr_t)c5 + c_stride);
if (mr <= 6) {
c6 = c5;
}
__fp16* c7 = (__fp16*)((uintptr_t)c6 + c_stride);
if (mr != 8) {
c7 = c6;
}
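  /*
   * Column stores: write all 8 columns at once when nr == 8; otherwise emit
   * 4-, 2- and 1-wide pieces, rotating the accumulators with vextq_f16 after
   * each partial store so the next piece sits in the low lanes.
   */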
if (nr == 8) {
vst1q_f16(c0, vacc0x01234567);
vst1q_f16(c1, vacc1x01234567);
vst1q_f16(c2, vacc2x01234567);
vst1q_f16(c3, vacc3x01234567);
vst1q_f16(c4, vacc4x01234567);
vst1q_f16(c5, vacc5x01234567);
vst1q_f16(c6, vacc6x01234567);
vst1q_f16(c7, vacc7x01234567);
} else {
if (nr & 4) {
vst1_f16(c0, vget_low_f16(vacc0x01234567));
c0 += 4;
vst1_f16(c1, vget_low_f16(vacc1x01234567));
c1 += 4;
vst1_f16(c2, vget_low_f16(vacc2x01234567));
c2 += 4;
vst1_f16(c3, vget_low_f16(vacc3x01234567));
c3 += 4;
vst1_f16(c4, vget_low_f16(vacc4x01234567));
c4 += 4;
vst1_f16(c5, vget_low_f16(vacc5x01234567));
c5 += 4;
vst1_f16(c6, vget_low_f16(vacc6x01234567));
c6 += 4;
vst1_f16(c7, vget_low_f16(vacc7x01234567));
c7 += 4;
vacc0x01234567 = vextq_f16(vacc0x01234567, vacc0x01234567, 4);
vacc1x01234567 = vextq_f16(vacc1x01234567, vacc1x01234567, 4);
vacc2x01234567 = vextq_f16(vacc2x01234567, vacc2x01234567, 4);
vacc3x01234567 = vextq_f16(vacc3x01234567, vacc3x01234567, 4);
vacc4x01234567 = vextq_f16(vacc4x01234567, vacc4x01234567, 4);
vacc5x01234567 = vextq_f16(vacc5x01234567, vacc5x01234567, 4);
vacc6x01234567 = vextq_f16(vacc6x01234567, vacc6x01234567, 4);
vacc7x01234567 = vextq_f16(vacc7x01234567, vacc7x01234567, 4);
}
if (nr & 2) {
vst1_lane_u32(
__builtin_assume_aligned(c0, 1),
vreinterpret_u32_f16(vget_low_f16(vacc0x01234567)),
0);
c0 += 2;
vst1_lane_u32(
__builtin_assume_aligned(c1, 1),
vreinterpret_u32_f16(vget_low_f16(vacc1x01234567)),
0);
c1 += 2;
vst1_lane_u32(
__builtin_assume_aligned(c2, 1),
vreinterpret_u32_f16(vget_low_f16(vacc2x01234567)),
0);
c2 += 2;
vst1_lane_u32(
__builtin_assume_aligned(c3, 1),
vreinterpret_u32_f16(vget_low_f16(vacc3x01234567)),
0);
c3 += 2;
vst1_lane_u32(
__builtin_assume_aligned(c4, 1),
vreinterpret_u32_f16(vget_low_f16(vacc4x01234567)),
0);
c4 += 2;
vst1_lane_u32(
__builtin_assume_aligned(c5, 1),
vreinterpret_u32_f16(vget_low_f16(vacc5x01234567)),
0);
c5 += 2;
vst1_lane_u32(
__builtin_assume_aligned(c6, 1),
vreinterpret_u32_f16(vget_low_f16(vacc6x01234567)),
0);
c6 += 2;
vst1_lane_u32(
__builtin_assume_aligned(c7, 1),
vreinterpret_u32_f16(vget_low_f16(vacc7x01234567)),
0);
c7 += 2;
vacc0x01234567 = vextq_f16(vacc0x01234567, vacc0x01234567, 2);
vacc1x01234567 = vextq_f16(vacc1x01234567, vacc1x01234567, 2);
vacc2x01234567 = vextq_f16(vacc2x01234567, vacc2x01234567, 2);
vacc3x01234567 = vextq_f16(vacc3x01234567, vacc3x01234567, 2);
vacc4x01234567 = vextq_f16(vacc4x01234567, vacc4x01234567, 2);
vacc5x01234567 = vextq_f16(vacc5x01234567, vacc5x01234567, 2);
vacc6x01234567 = vextq_f16(vacc6x01234567, vacc6x01234567, 2);
vacc7x01234567 = vextq_f16(vacc7x01234567, vacc7x01234567, 2);
}
if (nr & 1) {
vst1q_lane_f16(c0, vacc0x01234567, 0);
vst1q_lane_f16(c1, vacc1x01234567, 0);
vst1q_lane_f16(c2, vacc2x01234567, 0);
vst1q_lane_f16(c3, vacc3x01234567, 0);
vst1q_lane_f16(c4, vacc4x01234567, 0);
vst1q_lane_f16(c5, vacc5x01234567, 0);
vst1q_lane_f16(c6, vacc6x01234567, 0);
vst1q_lane_f16(c7, vacc7x01234567, 0);
}
}
}
| 14,917 | 39.210243 | 79 | c |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8avgpool/mp8x9p8q-neon.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <arm_neon.h>
#include <qnnpack/q8avgpool.h>
void pytorch_q8avgpool_ukernel_mp8x9p8q__neon(
size_t n,
size_t ks,
size_t kc,
const uint8_t** input,
const uint8_t* zero,
int32_t* buffer,
uint8_t* output,
size_t input_increment,
size_t output_increment,
const union pytorch_qnnp_avgpool_quantization_params
quantization_params[restrict static 1]) {
assert(n != 0);
assert(ks > 9);
assert(kc >= 8);
const int32x4_t vbias = vld1q_dup_s32(&quantization_params->neon.bias);
const float32x4_t vscale = vdupq_n_f32(quantization_params->neon.scale);
#if defined(__aarch64__)
const int16x8_t voutput_zero_point =
vld1q_dup_s16(&quantization_params->neon.output_zero_point);
const uint8x8_t voutput_min =
vld1_dup_u8(&quantization_params->neon.output_min);
const uint8x8_t voutput_max =
vld1_dup_u8(&quantization_params->neon.output_max);
#else
const float32x4_t vfmin = vdupq_n_f32(quantization_params->neon.vfmin);
const float32x4_t vfmax = vdupq_n_f32(quantization_params->neon.vfmax);
const float32x4_t vfmagic = vdupq_n_f32(quantization_params->neon.vfmagic);
const int32x4_t vimagic = vdupq_n_s32(quantization_params->neon.vimagic);
#endif
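  /*
   * Multi-pass average pooling (ks > 9): the first pass accumulates 9 input
   * rows plus the bias into the int32 buffer, each intermediate pass adds 8
   * more rows, and the final pass adds the remaining rows and requantizes.
   * On AArch64 the float accumulators are rounded with vcvtnq_s32_f32; on
   * 32-bit ARM the magic-number trick is used instead: clamp to
   * [vfmin, vfmax], add vfmagic so the value's integer part lands in the
   * mantissa, then subtract vimagic from the raw bit pattern.
   */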
do {
{
const uint8_t* i0 = *input++;
const uint8_t* i1 = *input++;
const uint8_t* i2 = *input++;
const uint8_t* i3 = *input++;
const uint8_t* i4 = *input++;
const uint8_t* i5 = *input++;
const uint8_t* i6 = *input++;
const uint8_t* i7 = *input++;
const uint8_t* i8 = *input++;
size_t k = kc;
int32_t* acc = buffer;
while (k >= 8) {
const uint8x8_t vi0 = vld1_u8(i0);
i0 += 8;
const uint8x8_t vi1 = vld1_u8(i1);
i1 += 8;
const uint8x8_t vi2 = vld1_u8(i2);
i2 += 8;
const uint8x8_t vi3 = vld1_u8(i3);
i3 += 8;
const uint8x8_t vi4 = vld1_u8(i4);
i4 += 8;
const uint8x8_t vi5 = vld1_u8(i5);
i5 += 8;
const uint8x8_t vi6 = vld1_u8(i6);
i6 += 8;
const uint8x8_t vi7 = vld1_u8(i7);
i7 += 8;
const uint8x8_t vi8 = vld1_u8(i8);
i8 += 8;
const uint16x8_t vsum018 = vaddw_u8(vaddl_u8(vi0, vi1), vi8);
const uint16x8_t vsum23 = vaddl_u8(vi2, vi3);
const uint16x8_t vsum45 = vaddl_u8(vi4, vi5);
const uint16x8_t vsum67 = vaddl_u8(vi6, vi7);
const uint16x8_t vsum2345 = vaddq_u16(vsum23, vsum45);
const uint16x8_t vsum01678 = vaddq_u16(vsum018, vsum67);
const uint16x8_t vsum = vaddq_u16(vsum2345, vsum01678);
const int32x4_t vacc_lo =
vaddw_s16(vbias, vreinterpret_s16_u16(vget_low_u16(vsum)));
const int32x4_t vacc_hi =
vaddw_s16(vbias, vreinterpret_s16_u16(vget_high_u16(vsum)));
vst1q_s32(acc, vacc_lo);
acc += 4;
vst1q_s32(acc, vacc_hi);
acc += 4;
k -= 8;
}
if (k != 0) {
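        /*
         * Tail of the channel loop: step the row pointers back by (8 - k)
         * bytes, load a full 8-byte vector, and shift it right by 8 * (8 - k)
         * bits (negative vshl count) so only the k not-yet-processed bytes
         * remain in the low lanes.
         */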
const size_t address_increment = k - 8;
i0 = (const uint8_t*)((uintptr_t)i0 + address_increment);
i1 = (const uint8_t*)((uintptr_t)i1 + address_increment);
i2 = (const uint8_t*)((uintptr_t)i2 + address_increment);
i3 = (const uint8_t*)((uintptr_t)i3 + address_increment);
i4 = (const uint8_t*)((uintptr_t)i4 + address_increment);
i5 = (const uint8_t*)((uintptr_t)i5 + address_increment);
i6 = (const uint8_t*)((uintptr_t)i6 + address_increment);
i7 = (const uint8_t*)((uintptr_t)i7 + address_increment);
i8 = (const uint8_t*)((uintptr_t)i8 + address_increment);
const int64x1_t vshift = vmov_n_s64(8 * address_increment);
const uint8x8_t vi0 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i0)), vshift));
const uint8x8_t vi1 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i1)), vshift));
const uint8x8_t vi2 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i2)), vshift));
const uint8x8_t vi3 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i3)), vshift));
const uint8x8_t vi4 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i4)), vshift));
const uint8x8_t vi5 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i5)), vshift));
const uint8x8_t vi6 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i6)), vshift));
const uint8x8_t vi7 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i7)), vshift));
const uint8x8_t vi8 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i8)), vshift));
const uint16x8_t vsum018 = vaddw_u8(vaddl_u8(vi0, vi1), vi8);
const uint16x8_t vsum23 = vaddl_u8(vi2, vi3);
const uint16x8_t vsum45 = vaddl_u8(vi4, vi5);
const uint16x8_t vsum67 = vaddl_u8(vi6, vi7);
const uint16x8_t vsum2345 = vaddq_u16(vsum23, vsum45);
const uint16x8_t vsum01678 = vaddq_u16(vsum018, vsum67);
const uint16x8_t vsum = vaddq_u16(vsum2345, vsum01678);
const int32x4_t vacc_lo =
vaddw_s16(vbias, vreinterpret_s16_u16(vget_low_u16(vsum)));
const int32x4_t vacc_hi =
vaddw_s16(vbias, vreinterpret_s16_u16(vget_high_u16(vsum)));
vst1q_s32(acc, vacc_lo);
acc += 4;
vst1q_s32(acc, vacc_hi);
}
}
size_t m = ks;
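    /*
     * Intermediate passes: while more than 8 pooling rows remain, add the next
     * 8 rows into the int32 accumulator buffer without requantizing.
     */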
for (m -= 9; m > 8; m -= 8) {
const uint8_t* i0 = *input++;
const uint8_t* i1 = *input++;
const uint8_t* i2 = *input++;
const uint8_t* i3 = *input++;
const uint8_t* i4 = *input++;
const uint8_t* i5 = *input++;
const uint8_t* i6 = *input++;
const uint8_t* i7 = *input++;
size_t k = kc;
int32_t* acc = buffer;
while (k >= 8) {
const uint8x8_t vi0 = vld1_u8(i0);
i0 += 8;
const uint8x8_t vi1 = vld1_u8(i1);
i1 += 8;
const uint8x8_t vi2 = vld1_u8(i2);
i2 += 8;
const uint8x8_t vi3 = vld1_u8(i3);
i3 += 8;
const uint8x8_t vi4 = vld1_u8(i4);
i4 += 8;
const uint8x8_t vi5 = vld1_u8(i5);
i5 += 8;
const uint8x8_t vi6 = vld1_u8(i6);
i6 += 8;
const uint8x8_t vi7 = vld1_u8(i7);
i7 += 8;
int32x4_t vacc_lo = vld1q_s32(acc);
int32x4_t vacc_hi = vld1q_s32(acc + 4);
const uint16x8_t vsum01 = vaddl_u8(vi0, vi1);
const uint16x8_t vsum23 = vaddl_u8(vi2, vi3);
const uint16x8_t vsum45 = vaddl_u8(vi4, vi5);
const uint16x8_t vsum67 = vaddl_u8(vi6, vi7);
const uint16x8_t vsum0123 = vaddq_u16(vsum01, vsum23);
const uint16x8_t vsum4567 = vaddq_u16(vsum45, vsum67);
const uint16x8_t vsum = vaddq_u16(vsum0123, vsum4567);
vacc_lo = vaddw_s16(vacc_lo, vreinterpret_s16_u16(vget_low_u16(vsum)));
vacc_hi = vaddw_s16(vacc_hi, vreinterpret_s16_u16(vget_high_u16(vsum)));
vst1q_s32(acc, vacc_lo);
acc += 4;
vst1q_s32(acc, vacc_hi);
acc += 4;
k -= 8;
}
if (k != 0) {
const size_t address_increment = k - 8;
i0 = (const uint8_t*)((uintptr_t)i0 + address_increment);
i1 = (const uint8_t*)((uintptr_t)i1 + address_increment);
i2 = (const uint8_t*)((uintptr_t)i2 + address_increment);
i3 = (const uint8_t*)((uintptr_t)i3 + address_increment);
i4 = (const uint8_t*)((uintptr_t)i4 + address_increment);
i5 = (const uint8_t*)((uintptr_t)i5 + address_increment);
i6 = (const uint8_t*)((uintptr_t)i6 + address_increment);
i7 = (const uint8_t*)((uintptr_t)i7 + address_increment);
const int64x1_t vshift = vmov_n_s64(8 * address_increment);
const uint8x8_t vi0 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i0)), vshift));
const uint8x8_t vi1 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i1)), vshift));
const uint8x8_t vi2 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i2)), vshift));
const uint8x8_t vi3 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i3)), vshift));
const uint8x8_t vi4 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i4)), vshift));
const uint8x8_t vi5 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i5)), vshift));
const uint8x8_t vi6 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i6)), vshift));
const uint8x8_t vi7 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i7)), vshift));
int32x4_t vacc_lo = vld1q_s32(acc);
int32x4_t vacc_hi = vld1q_s32(acc + 4);
const uint16x8_t vsum01 = vaddl_u8(vi0, vi1);
const uint16x8_t vsum23 = vaddl_u8(vi2, vi3);
const uint16x8_t vsum45 = vaddl_u8(vi4, vi5);
const uint16x8_t vsum67 = vaddl_u8(vi6, vi7);
const uint16x8_t vsum0123 = vaddq_u16(vsum01, vsum23);
const uint16x8_t vsum4567 = vaddq_u16(vsum45, vsum67);
const uint16x8_t vsum = vaddq_u16(vsum0123, vsum4567);
vacc_lo = vaddw_s16(vacc_lo, vreinterpret_s16_u16(vget_low_u16(vsum)));
vacc_hi = vaddw_s16(vacc_hi, vreinterpret_s16_u16(vget_high_u16(vsum)));
vst1q_s32(acc, vacc_lo);
acc += 4;
vst1q_s32(acc, vacc_hi);
}
}
{
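      /*
       * Final pass: accumulate the last 1-8 rows (unused row pointers are
       * redirected to the zero buffer), then scale, round and saturate the
       * result to uint8. The output zero point is added explicitly on AArch64
       * and is presumably folded into vfmagic/vimagic on 32-bit ARM.
       */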
const uint8_t* i0 = input[0];
const uint8_t* i1 = input[1];
const uint8_t* i2 = input[2];
const uint8_t* i3 = input[3];
const uint8_t* i4 = input[4];
const uint8_t* i5 = input[5];
const uint8_t* i6 = input[6];
const uint8_t* i7 = input[7];
input = (const uint8_t**)((uintptr_t)input + input_increment);
if (m < 2) {
i1 = zero;
}
if (m <= 2) {
i2 = zero;
}
if (m < 4) {
i3 = zero;
}
if (m <= 4) {
i4 = zero;
}
if (m < 6) {
i5 = zero;
}
if (m <= 6) {
i6 = zero;
}
if (m != 8) {
i7 = zero;
}
size_t k = kc;
int32_t* acc = buffer;
while (k >= 8) {
const uint8x8_t vi0 = vld1_u8(i0);
i0 += 8;
const uint8x8_t vi1 = vld1_u8(i1);
i1 += 8;
const uint8x8_t vi2 = vld1_u8(i2);
i2 += 8;
const uint8x8_t vi3 = vld1_u8(i3);
i3 += 8;
const uint8x8_t vi4 = vld1_u8(i4);
i4 += 8;
const uint8x8_t vi5 = vld1_u8(i5);
i5 += 8;
const uint8x8_t vi6 = vld1_u8(i6);
i6 += 8;
const uint8x8_t vi7 = vld1_u8(i7);
i7 += 8;
int32x4_t vacc_lo = vld1q_s32(acc);
acc += 4;
int32x4_t vacc_hi = vld1q_s32(acc);
acc += 4;
const int16x8_t vsum01 = vreinterpretq_s16_u16(vaddl_u8(vi0, vi1));
const int16x8_t vsum23 = vreinterpretq_s16_u16(vaddl_u8(vi2, vi3));
const int16x8_t vsum45 = vreinterpretq_s16_u16(vaddl_u8(vi4, vi5));
const int16x8_t vsum67 = vreinterpretq_s16_u16(vaddl_u8(vi6, vi7));
const int16x8_t vsum0123 = vaddq_s16(vsum01, vsum23);
const int16x8_t vsum4567 = vaddq_s16(vsum45, vsum67);
const int16x8_t vsum = vaddq_s16(vsum0123, vsum4567);
vacc_lo = vaddw_s16(vacc_lo, vget_low_s16(vsum));
vacc_hi = vaddw_s16(vacc_hi, vget_high_s16(vsum));
float32x4_t vacc_lo_f = vcvtq_f32_s32(vacc_lo);
float32x4_t vacc_hi_f = vcvtq_f32_s32(vacc_hi);
vacc_lo_f = vmulq_f32(vacc_lo_f, vscale);
vacc_hi_f = vmulq_f32(vacc_hi_f, vscale);
#if defined(__aarch64__)
vacc_lo = vcvtnq_s32_f32(vacc_lo_f);
vacc_hi = vcvtnq_s32_f32(vacc_hi_f);
const int16x8_t vacc = vqaddq_s16(
vqmovn_high_s32(vqmovn_s32(vacc_lo), vacc_hi), voutput_zero_point);
uint8x8_t vout = vqmovun_s16(vacc);
vout = vmax_u8(vout, voutput_min);
vout = vmin_u8(vout, voutput_max);
#else
vacc_lo_f = vminq_f32(vmaxq_f32(vacc_lo_f, vfmin), vfmax);
vacc_hi_f = vminq_f32(vmaxq_f32(vacc_hi_f, vfmin), vfmax);
vacc_lo = vsubq_s32(
vreinterpretq_s32_f32(vaddq_f32(vacc_lo_f, vfmagic)), vimagic);
vacc_hi = vsubq_s32(
vreinterpretq_s32_f32(vaddq_f32(vacc_hi_f, vfmagic)), vimagic);
const int16x8_t vacc =
vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
uint8x8_t vout = vqmovun_s16(vacc);
#endif
vst1_u8(output, vout);
output += 8;
k -= 8;
}
if (k != 0) {
const size_t address_increment = k - 8;
i0 = (const uint8_t*)((uintptr_t)i0 + address_increment);
i1 = (const uint8_t*)((uintptr_t)i1 + address_increment);
i2 = (const uint8_t*)((uintptr_t)i2 + address_increment);
i3 = (const uint8_t*)((uintptr_t)i3 + address_increment);
i4 = (const uint8_t*)((uintptr_t)i4 + address_increment);
i5 = (const uint8_t*)((uintptr_t)i5 + address_increment);
i6 = (const uint8_t*)((uintptr_t)i6 + address_increment);
i7 = (const uint8_t*)((uintptr_t)i7 + address_increment);
const int64x1_t vshift = vmov_n_s64(8 * address_increment);
const uint8x8_t vi0 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i0)), vshift));
const uint8x8_t vi1 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i1)), vshift));
const uint8x8_t vi2 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i2)), vshift));
const uint8x8_t vi3 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i3)), vshift));
const uint8x8_t vi4 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i4)), vshift));
const uint8x8_t vi5 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i5)), vshift));
const uint8x8_t vi6 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i6)), vshift));
const uint8x8_t vi7 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i7)), vshift));
int32x4_t vacc_lo = vld1q_s32(acc);
acc += 4;
int32x4_t vacc_hi = vld1q_s32(acc);
const int16x8_t vsum01 = vreinterpretq_s16_u16(vaddl_u8(vi0, vi1));
const int16x8_t vsum23 = vreinterpretq_s16_u16(vaddl_u8(vi2, vi3));
const int16x8_t vsum45 = vreinterpretq_s16_u16(vaddl_u8(vi4, vi5));
const int16x8_t vsum67 = vreinterpretq_s16_u16(vaddl_u8(vi6, vi7));
const int16x8_t vsum0123 = vaddq_s16(vsum01, vsum23);
const int16x8_t vsum4567 = vaddq_s16(vsum45, vsum67);
const int16x8_t vsum = vaddq_s16(vsum0123, vsum4567);
vacc_lo = vaddw_s16(vacc_lo, vget_low_s16(vsum));
vacc_hi = vaddw_s16(vacc_hi, vget_high_s16(vsum));
float32x4_t vacc_lo_f = vcvtq_f32_s32(vacc_lo);
float32x4_t vacc_hi_f = vcvtq_f32_s32(vacc_hi);
vacc_lo_f = vmulq_f32(vacc_lo_f, vscale);
vacc_hi_f = vmulq_f32(vacc_hi_f, vscale);
#if defined(__aarch64__)
vacc_lo = vcvtnq_s32_f32(vacc_lo_f);
vacc_hi = vcvtnq_s32_f32(vacc_hi_f);
const int16x8_t vacc = vqaddq_s16(
vqmovn_high_s32(vqmovn_s32(vacc_lo), vacc_hi), voutput_zero_point);
uint8x8_t vout = vqmovun_s16(vacc);
vout = vmax_u8(vout, voutput_min);
vout = vmin_u8(vout, voutput_max);
#else
vacc_lo_f = vminq_f32(vmaxq_f32(vacc_lo_f, vfmin), vfmax);
vacc_hi_f = vminq_f32(vmaxq_f32(vacc_hi_f, vfmin), vfmax);
vacc_lo = vsubq_s32(
vreinterpretq_s32_f32(vaddq_f32(vacc_lo_f, vfmagic)), vimagic);
vacc_hi = vsubq_s32(
vreinterpretq_s32_f32(vaddq_f32(vacc_hi_f, vfmagic)), vimagic);
const int16x8_t vacc =
vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
uint8x8_t vout = vqmovun_s16(vacc);
#endif
if (k & 4) {
vst1_lane_u32(
__builtin_assume_aligned(output, 1),
vreinterpret_u32_u8(vout),
0);
output += 4;
vout = vext_u8(vout, vout, 4);
}
if (k & 2) {
vst1_lane_u16(
__builtin_assume_aligned(output, 1),
vreinterpret_u16_u8(vout),
0);
output += 2;
vout = vext_u8(vout, vout, 2);
}
if (k & 1) {
vst1_lane_u8(output, vout, 0);
output += 1;
}
}
}
output = (uint8_t*)((uintptr_t)output + output_increment);
} while (--n != 0);
}
| 16,937 | 36.556541 | 80 | c |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8avgpool/mp8x9p8q-sse2.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <emmintrin.h>
#include <qnnpack/q8avgpool.h>
void pytorch_q8avgpool_ukernel_mp8x9p8q__sse2(
size_t n,
size_t ks,
size_t kc,
const uint8_t** input,
const uint8_t* zero,
int32_t* buffer,
uint8_t* output,
size_t input_increment,
size_t output_increment,
const union pytorch_qnnp_avgpool_quantization_params
quantization_params[RESTRICT_STATIC 1]) {
assert(n != 0);
assert(ks > 9);
assert(kc >= 8);
const __m128i vbias =
_mm_load_si128((const __m128i*)&quantization_params->sse2.bias);
const __m128i vzero = _mm_setzero_si128();
const __m128 vscale = _mm_loadu_ps(quantization_params->sse2.scale);
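  /*
   * SSE2 variant of the multi-pass kernel: the same buffered accumulation as
   * the NEON version, requantized by converting to float, multiplying by the
   * scale, rounding back with _mm_cvtps_epi32, adding the output zero point,
   * and packing with saturation before the final [output_min, output_max]
   * clamp.
   */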
do {
{
const uint8_t* i0 = *input++;
const uint8_t* i1 = *input++;
const uint8_t* i2 = *input++;
const uint8_t* i3 = *input++;
const uint8_t* i4 = *input++;
const uint8_t* i5 = *input++;
const uint8_t* i6 = *input++;
const uint8_t* i7 = *input++;
const uint8_t* i8 = *input++;
size_t k = kc;
int32_t* acc = buffer;
while (k >= 8) {
const __m128i vi0 = _mm_loadl_epi64((const __m128i*)i0);
i0 += 8;
const __m128i vi1 = _mm_loadl_epi64((const __m128i*)i1);
i1 += 8;
const __m128i vi2 = _mm_loadl_epi64((const __m128i*)i2);
i2 += 8;
const __m128i vi3 = _mm_loadl_epi64((const __m128i*)i3);
i3 += 8;
const __m128i vi4 = _mm_loadl_epi64((const __m128i*)i4);
i4 += 8;
const __m128i vi5 = _mm_loadl_epi64((const __m128i*)i5);
i5 += 8;
const __m128i vi6 = _mm_loadl_epi64((const __m128i*)i6);
i6 += 8;
const __m128i vi7 = _mm_loadl_epi64((const __m128i*)i7);
i7 += 8;
const __m128i vi8 = _mm_loadl_epi64((const __m128i*)i8);
i8 += 8;
const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);
const __m128i vxi7 = _mm_unpacklo_epi8(vi7, vzero);
const __m128i vxi8 = _mm_unpacklo_epi8(vi8, vzero);
const __m128i vsum018 = _mm_add_epi16(_mm_add_epi16(vxi0, vxi1), vxi8);
const __m128i vsum23 = _mm_add_epi16(vxi2, vxi3);
const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
const __m128i vsum67 = _mm_add_epi16(vxi6, vxi7);
const __m128i vsum2345 = _mm_add_epi16(vsum23, vsum45);
const __m128i vsum01678 = _mm_add_epi16(vsum018, vsum67);
const __m128i vsum = _mm_add_epi16(vsum2345, vsum01678);
const __m128i vacc_lo =
_mm_add_epi32(vbias, _mm_unpacklo_epi16(vsum, vzero));
const __m128i vacc_hi =
_mm_add_epi32(vbias, _mm_unpackhi_epi16(vsum, vzero));
_mm_store_si128((__m128i*)acc, vacc_lo);
_mm_store_si128((__m128i*)acc + 1, vacc_hi);
acc += 8;
k -= 8;
}
if (k != 0) {
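        /*
         * Tail of the channel loop: move the row pointers back by (8 - k)
         * bytes, load a full 8-byte vector, and shift it right by
         * 8 * (8 - k) bits so only the k remaining bytes survive.
         */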
const size_t address_decrement = 8 - k;
i0 = (const uint8_t*)((uintptr_t)i0 - address_decrement);
i1 = (const uint8_t*)((uintptr_t)i1 - address_decrement);
i2 = (const uint8_t*)((uintptr_t)i2 - address_decrement);
i3 = (const uint8_t*)((uintptr_t)i3 - address_decrement);
i4 = (const uint8_t*)((uintptr_t)i4 - address_decrement);
i5 = (const uint8_t*)((uintptr_t)i5 - address_decrement);
i6 = (const uint8_t*)((uintptr_t)i6 - address_decrement);
i7 = (const uint8_t*)((uintptr_t)i7 - address_decrement);
i8 = (const uint8_t*)((uintptr_t)i8 - address_decrement);
const __m128i vshift = _mm_cvtsi32_si128(8 * address_decrement);
const __m128i vi0 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i0), vshift);
const __m128i vi1 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i1), vshift);
const __m128i vi2 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i2), vshift);
const __m128i vi3 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i3), vshift);
const __m128i vi4 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i4), vshift);
const __m128i vi5 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i5), vshift);
const __m128i vi6 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i6), vshift);
const __m128i vi7 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i7), vshift);
const __m128i vi8 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i8), vshift);
const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);
const __m128i vxi7 = _mm_unpacklo_epi8(vi7, vzero);
const __m128i vxi8 = _mm_unpacklo_epi8(vi8, vzero);
const __m128i vsum018 = _mm_add_epi16(_mm_add_epi16(vxi0, vxi1), vxi8);
const __m128i vsum23 = _mm_add_epi16(vxi2, vxi3);
const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
const __m128i vsum67 = _mm_add_epi16(vxi6, vxi7);
const __m128i vsum2345 = _mm_add_epi16(vsum23, vsum45);
const __m128i vsum01678 = _mm_add_epi16(vsum018, vsum67);
const __m128i vsum = _mm_add_epi16(vsum2345, vsum01678);
const __m128i vacc_lo =
_mm_add_epi32(vbias, _mm_unpacklo_epi16(vsum, vzero));
const __m128i vacc_hi =
_mm_add_epi32(vbias, _mm_unpackhi_epi16(vsum, vzero));
_mm_store_si128((__m128i*)acc, vacc_lo);
_mm_store_si128((__m128i*)acc + 1, vacc_hi);
}
}
size_t m = ks;
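    /*
     * Intermediate passes: while more than 8 pooling rows remain, accumulate
     * the next 8 rows into the int32 buffer without requantizing.
     */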
for (m -= 9; m > 8; m -= 8) {
const uint8_t* i0 = *input++;
const uint8_t* i1 = *input++;
const uint8_t* i2 = *input++;
const uint8_t* i3 = *input++;
const uint8_t* i4 = *input++;
const uint8_t* i5 = *input++;
const uint8_t* i6 = *input++;
const uint8_t* i7 = *input++;
size_t k = kc;
int32_t* acc = buffer;
while (k >= 8) {
const __m128i vi0 = _mm_loadl_epi64((const __m128i*)i0);
i0 += 8;
const __m128i vi1 = _mm_loadl_epi64((const __m128i*)i1);
i1 += 8;
const __m128i vi2 = _mm_loadl_epi64((const __m128i*)i2);
i2 += 8;
const __m128i vi3 = _mm_loadl_epi64((const __m128i*)i3);
i3 += 8;
const __m128i vi4 = _mm_loadl_epi64((const __m128i*)i4);
i4 += 8;
const __m128i vi5 = _mm_loadl_epi64((const __m128i*)i5);
i5 += 8;
const __m128i vi6 = _mm_loadl_epi64((const __m128i*)i6);
i6 += 8;
const __m128i vi7 = _mm_loadl_epi64((const __m128i*)i7);
i7 += 8;
__m128i vacc_lo = _mm_load_si128((const __m128i*)acc);
__m128i vacc_hi = _mm_load_si128((const __m128i*)acc + 1);
const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);
const __m128i vxi7 = _mm_unpacklo_epi8(vi7, vzero);
const __m128i vsum01 = _mm_add_epi16(vxi0, vxi1);
const __m128i vsum23 = _mm_add_epi16(vxi2, vxi3);
const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
const __m128i vsum67 = _mm_add_epi16(vxi6, vxi7);
const __m128i vsum0123 = _mm_add_epi16(vsum01, vsum23);
const __m128i vsum4567 = _mm_add_epi16(vsum45, vsum67);
const __m128i vsum = _mm_add_epi16(vsum0123, vsum4567);
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vsum, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vsum, vzero));
_mm_store_si128((__m128i*)acc, vacc_lo);
_mm_store_si128((__m128i*)acc + 1, vacc_hi);
acc += 8;
k -= 8;
}
if (k != 0) {
const size_t address_decrement = 8 - k;
i0 = (const uint8_t*)((uintptr_t)i0 - address_decrement);
i1 = (const uint8_t*)((uintptr_t)i1 - address_decrement);
i2 = (const uint8_t*)((uintptr_t)i2 - address_decrement);
i3 = (const uint8_t*)((uintptr_t)i3 - address_decrement);
i4 = (const uint8_t*)((uintptr_t)i4 - address_decrement);
i5 = (const uint8_t*)((uintptr_t)i5 - address_decrement);
i6 = (const uint8_t*)((uintptr_t)i6 - address_decrement);
i7 = (const uint8_t*)((uintptr_t)i7 - address_decrement);
const __m128i vshift = _mm_cvtsi32_si128(8 * address_decrement);
const __m128i vi0 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i0), vshift);
const __m128i vi1 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i1), vshift);
const __m128i vi2 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i2), vshift);
const __m128i vi3 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i3), vshift);
const __m128i vi4 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i4), vshift);
const __m128i vi5 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i5), vshift);
const __m128i vi6 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i6), vshift);
const __m128i vi7 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i7), vshift);
__m128i vacc_lo = _mm_load_si128((const __m128i*)acc);
__m128i vacc_hi = _mm_load_si128((const __m128i*)acc + 1);
const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);
const __m128i vxi7 = _mm_unpacklo_epi8(vi7, vzero);
const __m128i vsum01 = _mm_add_epi16(vxi0, vxi1);
const __m128i vsum23 = _mm_add_epi16(vxi2, vxi3);
const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
const __m128i vsum67 = _mm_add_epi16(vxi6, vxi7);
const __m128i vsum0123 = _mm_add_epi16(vsum01, vsum23);
const __m128i vsum4567 = _mm_add_epi16(vsum45, vsum67);
const __m128i vsum = _mm_add_epi16(vsum0123, vsum4567);
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vsum, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vsum, vzero));
_mm_store_si128((__m128i*)acc, vacc_lo);
_mm_store_si128((__m128i*)acc + 1, vacc_hi);
}
}
{
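      /*
       * Final pass: accumulate the last 1-8 rows (missing rows read from the
       * zero buffer), then rescale, round, add the output zero point and
       * saturate/clamp the result to uint8.
       */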
const uint8_t* i0 = input[0];
const uint8_t* i1 = input[1];
const uint8_t* i2 = input[2];
const uint8_t* i3 = input[3];
const uint8_t* i4 = input[4];
const uint8_t* i5 = input[5];
const uint8_t* i6 = input[6];
const uint8_t* i7 = input[7];
input = (const uint8_t**)((uintptr_t)input + input_increment);
if (m < 2) {
i1 = zero;
}
if (m <= 2) {
i2 = zero;
}
if (m < 4) {
i3 = zero;
}
if (m <= 4) {
i4 = zero;
}
if (m < 6) {
i5 = zero;
}
if (m <= 6) {
i6 = zero;
}
if (m != 8) {
i7 = zero;
}
size_t k = kc;
int32_t* acc = buffer;
while (k >= 8) {
const __m128i vi0 = _mm_loadl_epi64((const __m128i*)i0);
i0 += 8;
const __m128i vi1 = _mm_loadl_epi64((const __m128i*)i1);
i1 += 8;
const __m128i vi2 = _mm_loadl_epi64((const __m128i*)i2);
i2 += 8;
const __m128i vi3 = _mm_loadl_epi64((const __m128i*)i3);
i3 += 8;
const __m128i vi4 = _mm_loadl_epi64((const __m128i*)i4);
i4 += 8;
const __m128i vi5 = _mm_loadl_epi64((const __m128i*)i5);
i5 += 8;
const __m128i vi6 = _mm_loadl_epi64((const __m128i*)i6);
i6 += 8;
const __m128i vi7 = _mm_loadl_epi64((const __m128i*)i7);
i7 += 8;
__m128i vacc_lo = _mm_load_si128((const __m128i*)acc);
__m128i vacc_hi = _mm_load_si128((const __m128i*)acc + 1);
acc += 8;
const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);
const __m128i vxi7 = _mm_unpacklo_epi8(vi7, vzero);
const __m128i vsum01 = _mm_add_epi16(vxi0, vxi1);
const __m128i vsum23 = _mm_add_epi16(vxi2, vxi3);
const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
const __m128i vsum67 = _mm_add_epi16(vxi6, vxi7);
const __m128i vsum0123 = _mm_add_epi16(vsum01, vsum23);
const __m128i vsum4567 = _mm_add_epi16(vsum45, vsum67);
const __m128i vsum = _mm_add_epi16(vsum0123, vsum4567);
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vsum, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vsum, vzero));
const __m128 vacc_lo_f = _mm_mul_ps(_mm_cvtepi32_ps(vacc_lo), vscale);
const __m128 vacc_hi_f = _mm_mul_ps(_mm_cvtepi32_ps(vacc_hi), vscale);
const __m128i vscaled_lo = _mm_cvtps_epi32(vacc_lo_f);
const __m128i vscaled_hi = _mm_cvtps_epi32(vacc_hi_f);
__m128i vout = _mm_packs_epi32(vscaled_lo, vscaled_hi);
vout = _mm_adds_epi16(
vout,
_mm_load_si128(
(const __m128i*)&quantization_params->sse2.output_zero_point));
vout = _mm_packus_epi16(vout, vout);
vout = _mm_min_epu8(
vout,
_mm_load_si128(
(const __m128i*)&quantization_params->sse2.output_max));
vout = _mm_max_epu8(
vout,
_mm_load_si128(
(const __m128i*)&quantization_params->sse2.output_min));
_mm_storel_epi64((__m128i*)output, vout);
output += 8;
k -= 8;
}
if (k != 0) {
const size_t address_decrement = 8 - k;
i0 = (const uint8_t*)((uintptr_t)i0 - address_decrement);
i1 = (const uint8_t*)((uintptr_t)i1 - address_decrement);
i2 = (const uint8_t*)((uintptr_t)i2 - address_decrement);
i3 = (const uint8_t*)((uintptr_t)i3 - address_decrement);
i4 = (const uint8_t*)((uintptr_t)i4 - address_decrement);
i5 = (const uint8_t*)((uintptr_t)i5 - address_decrement);
i6 = (const uint8_t*)((uintptr_t)i6 - address_decrement);
i7 = (const uint8_t*)((uintptr_t)i7 - address_decrement);
const __m128i vshift = _mm_cvtsi32_si128(8 * address_decrement);
const __m128i vi0 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i0), vshift);
const __m128i vi1 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i1), vshift);
const __m128i vi2 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i2), vshift);
const __m128i vi3 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i3), vshift);
const __m128i vi4 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i4), vshift);
const __m128i vi5 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i5), vshift);
const __m128i vi6 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i6), vshift);
const __m128i vi7 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i7), vshift);
__m128i vacc_lo = _mm_load_si128((const __m128i*)acc);
__m128i vacc_hi = _mm_load_si128((const __m128i*)acc + 1);
const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);
const __m128i vxi7 = _mm_unpacklo_epi8(vi7, vzero);
const __m128i vsum01 = _mm_add_epi16(vxi0, vxi1);
const __m128i vsum23 = _mm_add_epi16(vxi2, vxi3);
const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
const __m128i vsum67 = _mm_add_epi16(vxi6, vxi7);
const __m128i vsum0123 = _mm_add_epi16(vsum01, vsum23);
const __m128i vsum4567 = _mm_add_epi16(vsum45, vsum67);
const __m128i vsum = _mm_add_epi16(vsum0123, vsum4567);
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vsum, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vsum, vzero));
const __m128 vacc_lo_f = _mm_mul_ps(_mm_cvtepi32_ps(vacc_lo), vscale);
const __m128 vacc_hi_f = _mm_mul_ps(_mm_cvtepi32_ps(vacc_hi), vscale);
const __m128i vscaled_lo = _mm_cvtps_epi32(vacc_lo_f);
const __m128i vscaled_hi = _mm_cvtps_epi32(vacc_hi_f);
__m128i vout = _mm_packs_epi32(vscaled_lo, vscaled_hi);
vout = _mm_adds_epi16(
vout,
_mm_load_si128(
(const __m128i*)&quantization_params->sse2.output_zero_point));
vout = _mm_packus_epi16(vout, vout);
vout = _mm_min_epu8(
vout,
_mm_load_si128(
(const __m128i*)&quantization_params->sse2.output_max));
vout = _mm_max_epu8(
vout,
_mm_load_si128(
(const __m128i*)&quantization_params->sse2.output_min));
if (k & 4) {
*((uint32_t*)output) = (uint32_t)_mm_cvtsi128_si32(vout);
output += 4;
vout = _mm_srli_epi64(vout, 32);
}
if (k & 2) {
*((uint16_t*)output) = (uint16_t)_mm_extract_epi16(vout, 0);
output += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (k & 1) {
*((uint8_t*)output) = (uint8_t)_mm_cvtsi128_si32(vout);
output += 1;
}
}
}
output = (uint8_t*)((uintptr_t)output + output_increment);
} while (--n != 0);
}
| 19,158 | 39.591102 | 79 | c |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8avgpool/up8x9-neon.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <arm_neon.h>
#include <qnnpack/q8avgpool.h>
void pytorch_q8avgpool_ukernel_up8x9__neon(
size_t n,
size_t ks,
size_t kc,
const uint8_t** input,
const uint8_t* zero,
uint8_t* output,
size_t input_increment,
size_t output_increment,
const union pytorch_qnnp_avgpool_quantization_params
quantization_params[restrict static 1]) {
assert(n != 0);
assert(ks <= 9);
assert(kc >= 8);
const int32x4_t vbias = vld1q_dup_s32(&quantization_params->neon.bias);
const float32x4_t vscale =
vdupq_n_f32(quantization_params->neon.scale);
#if defined(__aarch64__)
const int16x8_t voutput_zero_point =
vld1q_dup_s16(&quantization_params->neon.output_zero_point);
const uint8x8_t voutput_min =
vld1_dup_u8(&quantization_params->neon.output_min);
const uint8x8_t voutput_max =
vld1_dup_u8(&quantization_params->neon.output_max);
#else
const float32x4_t vfmin = vdupq_n_f32(quantization_params->neon.vfmin);
const float32x4_t vfmax = vdupq_n_f32(quantization_params->neon.vfmax);
const float32x4_t vfmagic = vdupq_n_f32(quantization_params->neon.vfmagic);
const int32x4_t vimagic = vdupq_n_s32(quantization_params->neon.vimagic);
#endif
do {
const uint8_t* i0 = input[0];
const uint8_t* i1 = input[1];
const uint8_t* i2 = input[2];
const uint8_t* i3 = input[3];
const uint8_t* i4 = input[4];
const uint8_t* i5 = input[5];
const uint8_t* i6 = input[6];
const uint8_t* i7 = input[7];
const uint8_t* i8 = input[8];
input = (const uint8_t**)((uintptr_t)input + input_increment);
if (ks < 2) {
i1 = zero;
}
if (ks <= 2) {
i2 = zero;
}
if (ks < 4) {
i3 = zero;
}
if (ks <= 4) {
i4 = zero;
}
if (ks < 6) {
i5 = zero;
}
if (ks <= 6) {
i6 = zero;
}
if (ks < 8) {
i7 = zero;
}
if (ks <= 8) {
i8 = zero;
}
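    /*
     * Single-pass kernel: ks <= 9, so the whole pooling window is accumulated
     * in one sweep. Row pointers past ks read from the zero buffer and add
     * nothing to the sums.
     */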
size_t k = kc;
while (k >= 8) {
const uint8x8_t vi0 = vld1_u8(i0);
i0 += 8;
const uint8x8_t vi1 = vld1_u8(i1);
i1 += 8;
const uint8x8_t vi2 = vld1_u8(i2);
i2 += 8;
const uint8x8_t vi3 = vld1_u8(i3);
i3 += 8;
const uint8x8_t vi4 = vld1_u8(i4);
i4 += 8;
const uint8x8_t vi5 = vld1_u8(i5);
i5 += 8;
const uint8x8_t vi6 = vld1_u8(i6);
i6 += 8;
const uint8x8_t vi7 = vld1_u8(i7);
i7 += 8;
const uint8x8_t vi8 = vld1_u8(i8);
i8 += 8;
const uint16x8_t vsum018 = vaddw_u8(vaddl_u8(vi0, vi1), vi8);
const uint16x8_t vsum23 = vaddl_u8(vi2, vi3);
const uint16x8_t vsum45 = vaddl_u8(vi4, vi5);
const uint16x8_t vsum67 = vaddl_u8(vi6, vi7);
const uint16x8_t vsum2345 = vaddq_u16(vsum23, vsum45);
const uint16x8_t vsum01678 = vaddq_u16(vsum018, vsum67);
const uint16x8_t vsum = vaddq_u16(vsum2345, vsum01678);
int32x4_t vacc_lo =
vaddw_s16(vbias, vreinterpret_s16_u16(vget_low_u16(vsum)));
int32x4_t vacc_hi =
vaddw_s16(vbias, vreinterpret_s16_u16(vget_high_u16(vsum)));
float32x4_t vacc_lo_f = vcvtq_f32_s32(vacc_lo);
float32x4_t vacc_hi_f = vcvtq_f32_s32(vacc_hi);
vacc_lo_f = vmulq_f32(vacc_lo_f, vscale);
vacc_hi_f = vmulq_f32(vacc_hi_f, vscale);
#if defined(__aarch64__)
vacc_lo = vcvtnq_s32_f32(vacc_lo_f);
vacc_hi = vcvtnq_s32_f32(vacc_hi_f);
const int16x8_t vacc = vqaddq_s16(
vqmovn_high_s32(vqmovn_s32(vacc_lo), vacc_hi), voutput_zero_point);
uint8x8_t vout = vqmovun_s16(vacc);
vout = vmax_u8(vout, voutput_min);
vout = vmin_u8(vout, voutput_max);
#else
vacc_lo_f = vminq_f32(vmaxq_f32(vacc_lo_f, vfmin), vfmax);
vacc_hi_f = vminq_f32(vmaxq_f32(vacc_hi_f, vfmin), vfmax);
vacc_lo = vsubq_s32(
vreinterpretq_s32_f32(vaddq_f32(vacc_lo_f, vfmagic)), vimagic);
vacc_hi = vsubq_s32(
vreinterpretq_s32_f32(vaddq_f32(vacc_hi_f, vfmagic)), vimagic);
const int16x8_t vacc =
vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
uint8x8_t vout = vqmovun_s16(vacc);
#endif
vst1_u8(output, vout);
output += 8;
k -= 8;
}
if (k != 0) {
const size_t address_increment = k - 8;
i0 = (const uint8_t*)((uintptr_t)i0 + address_increment);
i1 = (const uint8_t*)((uintptr_t)i1 + address_increment);
i2 = (const uint8_t*)((uintptr_t)i2 + address_increment);
i3 = (const uint8_t*)((uintptr_t)i3 + address_increment);
i4 = (const uint8_t*)((uintptr_t)i4 + address_increment);
i5 = (const uint8_t*)((uintptr_t)i5 + address_increment);
i6 = (const uint8_t*)((uintptr_t)i6 + address_increment);
i7 = (const uint8_t*)((uintptr_t)i7 + address_increment);
i8 = (const uint8_t*)((uintptr_t)i8 + address_increment);
const int64x1_t vshift = vmov_n_s64(8 * address_increment);
const uint8x8_t vi0 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i0)), vshift));
const uint8x8_t vi1 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i1)), vshift));
const uint8x8_t vi2 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i2)), vshift));
const uint8x8_t vi3 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i3)), vshift));
const uint8x8_t vi4 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i4)), vshift));
const uint8x8_t vi5 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i5)), vshift));
const uint8x8_t vi6 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i6)), vshift));
const uint8x8_t vi7 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i7)), vshift));
const uint8x8_t vi8 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(i8)), vshift));
const uint16x8_t vsum018 = vaddw_u8(vaddl_u8(vi0, vi1), vi8);
const uint16x8_t vsum23 = vaddl_u8(vi2, vi3);
const uint16x8_t vsum45 = vaddl_u8(vi4, vi5);
const uint16x8_t vsum67 = vaddl_u8(vi6, vi7);
const uint16x8_t vsum2345 = vaddq_u16(vsum23, vsum45);
const uint16x8_t vsum01678 = vaddq_u16(vsum018, vsum67);
const uint16x8_t vsum = vaddq_u16(vsum2345, vsum01678);
int32x4_t vacc_lo =
vaddw_s16(vbias, vreinterpret_s16_u16(vget_low_u16(vsum)));
int32x4_t vacc_hi =
vaddw_s16(vbias, vreinterpret_s16_u16(vget_high_u16(vsum)));
float32x4_t vacc_lo_f = vcvtq_f32_s32(vacc_lo);
float32x4_t vacc_hi_f = vcvtq_f32_s32(vacc_hi);
vacc_lo_f = vmulq_f32(vacc_lo_f, vscale);
vacc_hi_f = vmulq_f32(vacc_hi_f, vscale);
#if defined(__aarch64__)
vacc_lo = vcvtnq_s32_f32(vacc_lo_f);
vacc_hi = vcvtnq_s32_f32(vacc_hi_f);
const int16x8_t vacc = vqaddq_s16(
vqmovn_high_s32(vqmovn_s32(vacc_lo), vacc_hi), voutput_zero_point);
uint8x8_t vout = vqmovun_s16(vacc);
vout = vmax_u8(vout, voutput_min);
vout = vmin_u8(vout, voutput_max);
#else
vacc_lo_f = vminq_f32(vmaxq_f32(vacc_lo_f, vfmin), vfmax);
vacc_hi_f = vminq_f32(vmaxq_f32(vacc_hi_f, vfmin), vfmax);
vacc_lo = vsubq_s32(
vreinterpretq_s32_f32(vaddq_f32(vacc_lo_f, vfmagic)), vimagic);
vacc_hi = vsubq_s32(
vreinterpretq_s32_f32(vaddq_f32(vacc_hi_f, vfmagic)), vimagic);
const int16x8_t vacc =
vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
uint8x8_t vout = vqmovun_s16(vacc);
#endif
if (k & 4) {
vst1_lane_u32(
__builtin_assume_aligned(output, 1), vreinterpret_u32_u8(vout), 0);
output += 4;
vout = vext_u8(vout, vout, 4);
}
if (k & 2) {
vst1_lane_u16(
__builtin_assume_aligned(output, 1), vreinterpret_u16_u8(vout), 0);
output += 2;
vout = vext_u8(vout, vout, 2);
}
if (k & 1) {
vst1_lane_u8(output, vout, 0);
output += 1;
}
}
output = (uint8_t*)((uintptr_t)output + output_increment);
} while (--n != 0);
}
| 8,362 | 33.415638 | 79 | c |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8avgpool/up8x9-sse2.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <emmintrin.h>
#include <qnnpack/q8avgpool.h>
void pytorch_q8avgpool_ukernel_up8x9__sse2(
size_t n,
size_t ks,
size_t kc,
const uint8_t** input,
const uint8_t* zero,
uint8_t* output,
size_t input_increment,
size_t output_increment,
const union pytorch_qnnp_avgpool_quantization_params
quantization_params[RESTRICT_STATIC 1]) {
assert(n != 0);
assert(ks <= 9);
assert(kc >= 8);
const __m128i vbias =
_mm_load_si128((const __m128i*)&quantization_params->sse2.bias);
const __m128i vzero = _mm_setzero_si128();
const __m128 vscale = _mm_loadu_ps(quantization_params->sse2.scale);
do {
const uint8_t* i0 = input[0];
const uint8_t* i1 = input[1];
const uint8_t* i2 = input[2];
const uint8_t* i3 = input[3];
const uint8_t* i4 = input[4];
const uint8_t* i5 = input[5];
const uint8_t* i6 = input[6];
const uint8_t* i7 = input[7];
const uint8_t* i8 = input[8];
input = (const uint8_t**)((uintptr_t)input + input_increment);
if (ks < 2) {
i1 = zero;
}
if (ks <= 2) {
i2 = zero;
}
if (ks < 4) {
i3 = zero;
}
if (ks <= 4) {
i4 = zero;
}
if (ks < 6) {
i5 = zero;
}
if (ks <= 6) {
i6 = zero;
}
if (ks < 8) {
i7 = zero;
}
if (ks <= 8) {
i8 = zero;
}
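    /*
     * Single-pass SSE2 kernel: ks <= 9, so the whole pooling window is summed
     * in one sweep; row pointers past ks read from the zero buffer.
     */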
size_t k = kc;
while (k >= 8) {
const __m128i vi0 = _mm_loadl_epi64((const __m128i*)i0);
i0 += 8;
const __m128i vi1 = _mm_loadl_epi64((const __m128i*)i1);
i1 += 8;
const __m128i vi2 = _mm_loadl_epi64((const __m128i*)i2);
i2 += 8;
const __m128i vi3 = _mm_loadl_epi64((const __m128i*)i3);
i3 += 8;
const __m128i vi4 = _mm_loadl_epi64((const __m128i*)i4);
i4 += 8;
const __m128i vi5 = _mm_loadl_epi64((const __m128i*)i5);
i5 += 8;
const __m128i vi6 = _mm_loadl_epi64((const __m128i*)i6);
i6 += 8;
const __m128i vi7 = _mm_loadl_epi64((const __m128i*)i7);
i7 += 8;
const __m128i vi8 = _mm_loadl_epi64((const __m128i*)i8);
i8 += 8;
const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);
const __m128i vxi7 = _mm_unpacklo_epi8(vi7, vzero);
const __m128i vxi8 = _mm_unpacklo_epi8(vi8, vzero);
const __m128i vsum018 = _mm_add_epi16(_mm_add_epi16(vxi0, vxi1), vxi8);
const __m128i vsum23 = _mm_add_epi16(vxi2, vxi3);
const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
const __m128i vsum67 = _mm_add_epi16(vxi6, vxi7);
const __m128i vsum2345 = _mm_add_epi16(vsum23, vsum45);
const __m128i vsum01678 = _mm_add_epi16(vsum018, vsum67);
const __m128i vsum = _mm_add_epi16(vsum2345, vsum01678);
const __m128i vacc_lo =
_mm_add_epi32(vbias, _mm_unpacklo_epi16(vsum, vzero));
const __m128i vacc_hi =
_mm_add_epi32(vbias, _mm_unpackhi_epi16(vsum, vzero));
const __m128 vacc_lo_f = _mm_mul_ps(_mm_cvtepi32_ps(vacc_lo), vscale);
const __m128 vacc_hi_f = _mm_mul_ps(_mm_cvtepi32_ps(vacc_hi), vscale);
const __m128i vscaled_lo = _mm_cvtps_epi32(vacc_lo_f);
const __m128i vscaled_hi = _mm_cvtps_epi32(vacc_hi_f);
__m128i vout = _mm_packs_epi32(vscaled_lo, vscaled_hi);
vout = _mm_adds_epi16(
vout,
_mm_load_si128(
(const __m128i*)&quantization_params->sse2.output_zero_point));
vout = _mm_packus_epi16(vout, vout);
vout = _mm_min_epu8(
vout,
_mm_load_si128(
(const __m128i*)&quantization_params->sse2.output_max));
vout = _mm_max_epu8(
vout,
_mm_load_si128(
(const __m128i*)&quantization_params->sse2.output_min));
_mm_storel_epi64((__m128i*)output, vout);
output += 8;
k -= 8;
}
if (k != 0) {
const size_t address_decrement = 8 - k;
i0 = (const uint8_t*)((uintptr_t)i0 - address_decrement);
i1 = (const uint8_t*)((uintptr_t)i1 - address_decrement);
i2 = (const uint8_t*)((uintptr_t)i2 - address_decrement);
i3 = (const uint8_t*)((uintptr_t)i3 - address_decrement);
i4 = (const uint8_t*)((uintptr_t)i4 - address_decrement);
i5 = (const uint8_t*)((uintptr_t)i5 - address_decrement);
i6 = (const uint8_t*)((uintptr_t)i6 - address_decrement);
i7 = (const uint8_t*)((uintptr_t)i7 - address_decrement);
i8 = (const uint8_t*)((uintptr_t)i8 - address_decrement);
const __m128i vshift = _mm_cvtsi32_si128(8 * address_decrement);
const __m128i vi0 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i0), vshift);
const __m128i vi1 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i1), vshift);
const __m128i vi2 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i2), vshift);
const __m128i vi3 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i3), vshift);
const __m128i vi4 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i4), vshift);
const __m128i vi5 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i5), vshift);
const __m128i vi6 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i6), vshift);
const __m128i vi7 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i7), vshift);
const __m128i vi8 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i8), vshift);
const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);
const __m128i vxi7 = _mm_unpacklo_epi8(vi7, vzero);
const __m128i vxi8 = _mm_unpacklo_epi8(vi8, vzero);
const __m128i vsum018 = _mm_add_epi16(_mm_add_epi16(vxi0, vxi1), vxi8);
const __m128i vsum23 = _mm_add_epi16(vxi2, vxi3);
const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
const __m128i vsum67 = _mm_add_epi16(vxi6, vxi7);
const __m128i vsum2345 = _mm_add_epi16(vsum23, vsum45);
const __m128i vsum01678 = _mm_add_epi16(vsum018, vsum67);
const __m128i vsum = _mm_add_epi16(vsum2345, vsum01678);
const __m128i vacc_lo =
_mm_add_epi32(vbias, _mm_unpacklo_epi16(vsum, vzero));
const __m128i vacc_hi =
_mm_add_epi32(vbias, _mm_unpackhi_epi16(vsum, vzero));
const __m128 vacc_lo_f = _mm_mul_ps(_mm_cvtepi32_ps(vacc_lo), vscale);
const __m128 vacc_hi_f = _mm_mul_ps(_mm_cvtepi32_ps(vacc_hi), vscale);
const __m128i vscaled_lo = _mm_cvtps_epi32(vacc_lo_f);
const __m128i vscaled_hi = _mm_cvtps_epi32(vacc_hi_f);
__m128i vout = _mm_packs_epi32(vscaled_lo, vscaled_hi);
vout = _mm_adds_epi16(
vout,
_mm_load_si128(
(const __m128i*)&quantization_params->sse2.output_zero_point));
vout = _mm_packus_epi16(vout, vout);
vout = _mm_min_epu8(
vout,
_mm_load_si128(
(const __m128i*)&quantization_params->sse2.output_max));
vout = _mm_max_epu8(
vout,
_mm_load_si128(
(const __m128i*)&quantization_params->sse2.output_min));
if (k & 4) {
*((uint32_t*)output) = (uint32_t)_mm_cvtsi128_si32(vout);
output += 4;
vout = _mm_srli_epi64(vout, 32);
}
if (k & 2) {
*((uint16_t*)output) = (uint16_t)_mm_extract_epi16(vout, 0);
output += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (k & 1) {
*((uint8_t*)output) = (uint8_t)_mm_cvtsi128_si32(vout);
output += 1;
}
}
output = (uint8_t*)((uintptr_t)output + output_increment);
} while (--n != 0);
}
| 8,457 | 34.687764 | 77 | c |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8avgpool/up8xm-neon.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <arm_neon.h>
#include <qnnpack/q8avgpool.h>
void pytorch_q8avgpool_ukernel_up8xm__neon(
size_t n,
size_t ks,
size_t kc,
const uint8_t** input,
const uint8_t* zero,
uint8_t* output,
size_t input_increment,
size_t output_increment,
const union pytorch_qnnp_avgpool_quantization_params
quantization_params[restrict static 1]) {
assert(n != 0);
assert(ks != 0);
assert(kc < 8);
const int32x4_t vbias = vld1q_dup_s32(&quantization_params->neon.bias);
const float32x4_t vscale =
vdupq_n_f32(quantization_params->neon.scale);
const int16x8_t voutput_zero_point =
vld1q_dup_s16(&quantization_params->neon.output_zero_point);
const uint8x8_t voutput_min =
vld1_dup_u8(&quantization_params->neon.output_min);
const uint8x8_t voutput_max =
vld1_dup_u8(&quantization_params->neon.output_max);
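  /*
   * Small-channel variant (kc < 8): the kc bytes of each row are assembled
   * into one uint8x8_t with lane loads selected by the bits of kc, summed in
   * int32, and requantized once per output pixel.
   */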
do {
int32x4_t vacc_lo = vbias;
int32x4_t vacc_hi = vbias;
const uint8_t** next_input =
(const uint8_t**)((uintptr_t)input + input_increment);
size_t m = ks;
do {
const uint8_t* i = *input++;
i += kc;
uint8x8_t vi = vmov_n_u8(0);
if (kc & 1) {
i -= 1;
vi = vld1_lane_u8(i, vi, 0);
}
if (kc & 2) {
vi = vext_u8(vi, vi, 6);
i -= 2;
vi = vreinterpret_u8_u16(vld1_lane_u16(
__builtin_assume_aligned(i, 1), vreinterpret_u16_u8(vi), 0));
}
if (kc & 4) {
vi = vext_u8(vi, vi, 4);
i -= 4;
vi = vreinterpret_u8_u32(vld1_lane_u32(
__builtin_assume_aligned(i, 1), vreinterpret_u32_u8(vi), 0));
}
const uint16x8_t vxi = vmovl_u8(vi);
vacc_lo = vaddw_s16(vacc_lo, vreinterpret_s16_u16(vget_low_u16(vxi)));
vacc_hi = vaddw_s16(vacc_hi, vreinterpret_s16_u16(vget_high_u16(vxi)));
} while (--m != 0);
input = next_input;
float32x4_t vacc_lo_f = vcvtq_f32_s32(vacc_lo);
float32x4_t vacc_hi_f = vcvtq_f32_s32(vacc_hi);
vacc_lo_f = vmulq_f32(vacc_lo_f, vscale);
vacc_hi_f = vmulq_f32(vacc_hi_f, vscale);
#if defined(__aarch64__)
vacc_lo = vcvtnq_s32_f32(vacc_lo_f);
vacc_hi = vcvtnq_s32_f32(vacc_hi_f);
const int16x8_t vacc = vqaddq_s16(
vqmovn_high_s32(vqmovn_s32(vacc_lo), vacc_hi), voutput_zero_point);
uint8x8_t vout = vqmovun_s16(vacc);
vout = vmax_u8(vout, voutput_min);
vout = vmin_u8(vout, voutput_max);
#else
const float32x4_t vfmin = vdupq_n_f32(quantization_params->neon.vfmin);
const float32x4_t vfmax = vdupq_n_f32(quantization_params->neon.vfmax);
const float32x4_t vfmagic = vdupq_n_f32(quantization_params->neon.vfmagic);
const int32x4_t vimagic = vdupq_n_s32(quantization_params->neon.vimagic);
vacc_lo_f = vminq_f32(vmaxq_f32(vacc_lo_f, vfmin), vfmax);
vacc_hi_f = vminq_f32(vmaxq_f32(vacc_hi_f, vfmin), vfmax);
vacc_lo = vsubq_s32(
vreinterpretq_s32_f32(vaddq_f32(vacc_lo_f, vfmagic)), vimagic);
vacc_hi = vsubq_s32(
vreinterpretq_s32_f32(vaddq_f32(vacc_hi_f, vfmagic)), vimagic);
const int16x8_t vacc =
vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
uint8x8_t vout = vqmovun_s16(vacc);
#endif
if (kc & 4) {
vst1_lane_u32(
__builtin_assume_aligned(output, 1), vreinterpret_u32_u8(vout), 0);
output += 4;
vout = vext_u8(vout, vout, 4);
}
if (kc & 2) {
vst1_lane_u16(
__builtin_assume_aligned(output, 1), vreinterpret_u16_u8(vout), 0);
output += 2;
vout = vext_u8(vout, vout, 2);
}
if (kc & 1) {
vst1_lane_u8(output, vout, 0);
output += 1;
}
output = (uint8_t*)((uintptr_t)output + output_increment);
} while (--n != 0);
}
| 3,965 | 30.47619 | 79 | c |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8avgpool/up8xm-sse2.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <emmintrin.h>
#include <qnnpack/q8avgpool.h>
void pytorch_q8avgpool_ukernel_up8xm__sse2(
size_t n,
size_t ks,
size_t kc,
const uint8_t** input,
const uint8_t* zero,
uint8_t* output,
size_t input_increment,
size_t output_increment,
const union pytorch_qnnp_avgpool_quantization_params
quantization_params[RESTRICT_STATIC 1]) {
assert(n != 0);
assert(ks != 0);
assert(kc < 8);
const __m128i vbias =
_mm_load_si128((const __m128i*)&quantization_params->sse2.bias);
const __m128i vzero = _mm_setzero_si128();
const __m128 vscale = _mm_loadu_ps(quantization_params->sse2.scale);
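  /*
   * Small-channel variant (kc < 8): the kc bytes of each row are gathered with
   * partial loads selected by the bits of kc, summed into int32 lanes, and
   * requantized once per output pixel.
   */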
do {
const uint8_t** next_input =
(const uint8_t**)((uintptr_t)input + input_increment);
__m128i vacc_lo = vbias;
__m128i vacc_hi = vbias;
size_t m = ks;
do {
const uint8_t* i = *input++;
i += kc;
__m128i vi = _mm_setzero_si128();
if (kc & 1) {
i -= 1;
vi = _mm_cvtsi32_si128((int)(uint32_t)*i);
}
if (kc & 2) {
vi = _mm_slli_epi32(vi, 16);
i -= 2;
vi = _mm_insert_epi16(vi, *((const uint16_t*)i), 0);
}
if (kc & 4) {
i -= 4;
vi = _mm_unpacklo_epi32(
_mm_cvtsi32_si128((int)*((const uint32_t*)i)), vi);
}
const __m128i vxi = _mm_unpacklo_epi8(vi, vzero);
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi, vzero));
} while (--m != 0);
input = next_input;
const __m128 vacc_lo_f = _mm_mul_ps(_mm_cvtepi32_ps(vacc_lo), vscale);
const __m128 vacc_hi_f = _mm_mul_ps(_mm_cvtepi32_ps(vacc_hi), vscale);
const __m128i vscaled_lo = _mm_cvtps_epi32(vacc_lo_f);
const __m128i vscaled_hi = _mm_cvtps_epi32(vacc_hi_f);
__m128i vout = _mm_packs_epi32(vscaled_lo, vscaled_hi);
vout = _mm_adds_epi16(
vout,
_mm_load_si128(
(const __m128i*)quantization_params->sse2.output_zero_point));
vout = _mm_packus_epi16(vout, vout);
vout = _mm_min_epu8(
vout,
_mm_load_si128((const __m128i*)quantization_params->sse2.output_max));
vout = _mm_max_epu8(
vout,
_mm_load_si128((const __m128i*)quantization_params->sse2.output_min));
if (kc & 4) {
*((uint32_t*)output) = (uint32_t)_mm_cvtsi128_si32(vout);
output += 4;
vout = _mm_srli_epi64(vout, 32);
}
if (kc & 2) {
*((uint16_t*)output) = (uint16_t)_mm_extract_epi16(vout, 0);
output += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (kc & 1) {
*((uint8_t*)output) = (uint8_t)_mm_cvtsi128_si32(vout);
output += 1;
}
output = (uint8_t*)((uintptr_t)output + output_increment);
} while (--n != 0);
}
| 3,032 | 28.446602 | 78 | c |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8conv/4x4c2-sse2.c |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <immintrin.h>
#include <qnnpack/q8conv.h>
#include <requantization/runtime-sse2.h>
void pytorch_q8conv_ukernel_4x4c2__sse2(
size_t mr,
size_t nr,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t c_stride,
size_t output_channel_index,
const union pytorch_qnnp_conv_quantization_params
quantization_params[RESTRICT_STATIC 1]) {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*)w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
__m128i vacc3x0123 = vacc0x0123;
w = (const void*)((uintptr_t)w + 16);
const __m128i va_zero_point = _mm_load_si128(
(const __m128i*)quantization_params->sse2.input_zero_point);
const int16_t vb_zero_point_0 =
quantization_params->sse2.kernel_zero_points[output_channel_index];
const int16_t vb_zero_point_1 =
quantization_params->sse2.kernel_zero_points[output_channel_index + 1];
const int16_t vb_zero_point_2 =
quantization_params->sse2.kernel_zero_points[output_channel_index + 2];
const int16_t vb_zero_point_3 =
quantization_params->sse2.kernel_zero_points[output_channel_index + 3];
const __m128i vb_zero_point = _mm_set_epi16(vb_zero_point_3,
vb_zero_point_3,
vb_zero_point_2,
vb_zero_point_2,
vb_zero_point_1,
vb_zero_point_1,
vb_zero_point_0,
vb_zero_point_0
);
const __m128i vzero = _mm_setzero_si128();
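  /*
   * Per-channel quantization: the kernel zero points of the four output
   * channels are broadcast pairwise, matching the c2 packing in which each
   * output channel occupies two adjacent 16-bit lanes, so they can be
   * subtracted from the unpacked weights before the _mm_madd_epi16 dot
   * products below.
   */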
do {
const uint8_t* restrict a0 = *a++;
const uint8_t* restrict a1 = *a++;
const uint8_t* restrict a2 = *a++;
const uint8_t* restrict a3 = *a++;
size_t k = kc;
for (; k >= 8; k -= 8) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*)a0);
const __m128i vxa0 =
sub_zero_point(_mm_unpacklo_epi8(va0, vzero), va_zero_point);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*)a1);
const __m128i vxa1 =
sub_zero_point(_mm_unpacklo_epi8(va1, vzero), va_zero_point);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*)a2);
const __m128i vxa2 =
sub_zero_point(_mm_unpacklo_epi8(va2, vzero), va_zero_point);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*)a3);
const __m128i vxa3 =
sub_zero_point(_mm_unpacklo_epi8(va3, vzero), va_zero_point);
a3 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*)w);
const __m128i vxb0 =
_mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(
vacc0x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(
vacc1x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(
vacc2x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc3x0123 = _mm_add_epi32(
vacc3x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 8));
const __m128i vxb1 =
_mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(
vacc0x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(
vacc1x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(
vacc2x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc3x0123 = _mm_add_epi32(
vacc3x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 16));
const __m128i vxb2 =
_mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(
vacc0x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(
vacc1x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(
vacc2x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc3x0123 = _mm_add_epi32(
vacc3x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 24));
const __m128i vxb3 =
_mm_sub_epi16(_mm_unpacklo_epi8(vb3, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(
vacc0x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(
vacc1x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc2x0123 = _mm_add_epi32(
vacc2x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc3x0123 = _mm_add_epi32(
vacc3x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (void*)((uintptr_t)w + 32);
}
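    /* Remainder: handle the final 1-7 input channels. Each row is re-read
       8 bytes behind its current position and shifted right so the load never
       runs past the end of the row. */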
if (k != 0) {
const size_t a_predecrement = 8 - k;
const __m128i va_shift = _mm_cvtsi32_si128(8 * a_predecrement);
const __m128i va0 = _mm_srl_epi64(
_mm_loadl_epi64((const __m128i*)(a0 - a_predecrement)), va_shift);
const __m128i vxa0 =
sub_zero_point(_mm_unpacklo_epi8(va0, vzero), va_zero_point);
const __m128i va1 = _mm_srl_epi64(
_mm_loadl_epi64((const __m128i*)(a1 - a_predecrement)), va_shift);
const __m128i vxa1 =
sub_zero_point(_mm_unpacklo_epi8(va1, vzero), va_zero_point);
const __m128i va2 = _mm_srl_epi64(
_mm_loadl_epi64((const __m128i*)(a2 - a_predecrement)), va_shift);
const __m128i vxa2 =
sub_zero_point(_mm_unpacklo_epi8(va2, vzero), va_zero_point);
const __m128i va3 = _mm_srl_epi64(
_mm_loadl_epi64((const __m128i*)(a3 - a_predecrement)), va_shift);
const __m128i vxa3 =
sub_zero_point(_mm_unpacklo_epi8(va3, vzero), va_zero_point);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*)w);
const __m128i vxb0 =
_mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
w = (void*)((uintptr_t)w + 8);
vacc0x0123 = _mm_add_epi32(
vacc0x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(
vacc1x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(
vacc2x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc3x0123 = _mm_add_epi32(
vacc3x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*)w);
const __m128i vxb1 =
_mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
w = (void*)((uintptr_t)w + 8);
vacc0x0123 = _mm_add_epi32(
vacc0x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(
vacc1x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(
vacc2x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc3x0123 = _mm_add_epi32(
vacc3x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*)w);
const __m128i vxb2 =
_mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
w = (void*)((uintptr_t)w + 8);
vacc0x0123 = _mm_add_epi32(
vacc0x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(
vacc1x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(
vacc2x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc3x0123 = _mm_add_epi32(
vacc3x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
if (k > 6) {
const __m128i vb3 = _mm_loadl_epi64((const __m128i*)w);
const __m128i vxb3 =
_mm_sub_epi16(_mm_unpacklo_epi8(vb3, vzero), vb_zero_point);
w = (void*)((uintptr_t)w + 8);
vacc0x0123 = _mm_add_epi32(
vacc0x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(
vacc1x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc2x0123 = _mm_add_epi32(
vacc2x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc3x0123 = _mm_add_epi32(
vacc3x0123,
_mm_madd_epi16(
_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
}
}
}
}
} while (--ks != 0);
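  /* Requantization: scale each int32 accumulator by its per-output-channel
     float requantization scale and round back to int32. */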
const __m128 vmultiplier =
_mm_loadu_ps(&quantization_params->sse2.requantization_scales
[output_channel_index]);
vacc0x0123 = _mm_cvtps_epi32(
_mm_mul_ps(
_mm_cvtepi32_ps(vacc0x0123),
vmultiplier
)
);
vacc1x0123 = _mm_cvtps_epi32(
_mm_mul_ps(
_mm_cvtepi32_ps(vacc1x0123),
vmultiplier
)
);
vacc2x0123 = _mm_cvtps_epi32(
_mm_mul_ps(
_mm_cvtepi32_ps(vacc2x0123),
vmultiplier
)
);
vacc3x0123 = _mm_cvtps_epi32(
_mm_mul_ps(
_mm_cvtepi32_ps(vacc3x0123),
vmultiplier
)
);
const __m128i voutput_zero_point = _mm_load_si128(
(const __m128i*)quantization_params->sse2.output_zero_point);
const __m128i vacc01x0123 = _mm_adds_epi16(
_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
const __m128i vacc23x0123 = _mm_adds_epi16(
_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc23x0123);
vout = _mm_min_epu8(
vout,
_mm_load_si128((const __m128i*)quantization_params->sse2.output_max));
vout = _mm_max_epu8(
vout,
_mm_load_si128((const __m128i*)quantization_params->sse2.output_min));
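  /* Output row pointers: rows beyond mr alias the previous row so the stores
     below stay in bounds for partial tiles. */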
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*)((uintptr_t)c0 + c_stride);
if (mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*)((uintptr_t)c1 + c_stride);
if (mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*)((uintptr_t)c2 + c_stride);
if (mr != 4) {
c3 = c2;
}
if (nr == 4) {
*((uint32_t*)c0) = (uint32_t)_mm_cvtsi128_si32(vout);
*((uint32_t*)c1) = (uint32_t)_mm_cvtsi128_si32(_mm_srli_epi64(vout, 32));
*((uint32_t*)c2) =
(uint32_t)_mm_cvtsi128_si32(_mm_unpackhi_epi32(vout, vout));
*((uint32_t*)c3) = (uint32_t)_mm_cvtsi128_si32(_mm_srli_si128(vout, 12));
} else {
typedef PYTORCH_QNNP_UNALIGNED uint16_t unaligned_uint16_t;
if (nr >= 2) {
*((unaligned_uint16_t*)c0) = (uint16_t)_mm_extract_epi16(vout, 0);
c0 += 2;
*((unaligned_uint16_t*)c1) = (uint16_t)_mm_extract_epi16(vout, 2);
c1 += 2;
*((unaligned_uint16_t*)c2) = (uint16_t)_mm_extract_epi16(vout, 4);
c2 += 2;
*((unaligned_uint16_t*)c3) = (uint16_t)_mm_extract_epi16(vout, 6);
c3 += 2;
vout = _mm_srli_epi32(vout, 16);
nr -= 2;
}
if (nr != 0) {
*((uint8_t*)c0) = (uint8_t)_mm_cvtsi128_si32(vout);
*((uint8_t*)c1) = (uint8_t)_mm_extract_epi16(vout, 2);
*((uint8_t*)c2) = (uint8_t)_mm_extract_epi16(vout, 4);
*((uint8_t*)c3) = (uint8_t)_mm_extract_epi16(vout, 6);
}
}
}
file_length: 13,622 | avg_line_length: 36.736842 | max_line_length: 79 | extension_type: c
repo: null | file: pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8dwconv/mp8x27-sse2.c
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <immintrin.h>
#include <math.h>
#include <qnnpack/q8dwconv.h>
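/*
 * mp8x27 depthwise convolution ukernel (27 taps, i.e. a 3x3x3 kernel).
 * Despite the __sse2 suffix, the arithmetic below is scalar per channel; the
 * packed weight layout (groups of 8 channels, bias stored with the first
 * yz slice) is documented in the comment inside the inner loop.
 */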
void pytorch_q8dwconv_ukernel_mp8x27__sse2(
size_t channels,
size_t output_height,
size_t output_width,
const uint8_t** input,
const void* weights,
int32_t* outacc32,
uint8_t* output,
size_t input_row_stride,
size_t input_col_stride,
size_t output_increment,
const union pytorch_qnnp_conv_quantization_params
quantization_params[RESTRICT_STATIC 1]) {
const int16_t input_zero_point =
quantization_params->sse2.input_zero_point[0];
const uint8_t* kernel_zero_points =
quantization_params->sse2.kernel_zero_points;
const float* requantization_scales =
quantization_params->sse2.requantization_scales;
const int16_t output_zero_point =
quantization_params->sse2.output_zero_point[0];
const uint8_t output_min = quantization_params->sse2.output_min[0];
const uint8_t output_max = quantization_params->sse2.output_max[0];
union {
const uint8_t* as_uint8_ptr;
const int32_t* as_int32_ptr;
} weights_ptr = {weights};
const size_t cr_block = 8;
const size_t kernel_depth = 3;
const size_t kernel_height = 3;
const size_t kernel_width = 3;
const size_t num_groups = ((channels - 1) / cr_block) + 1;
const size_t yz_block = kernel_depth * kernel_height;
const size_t yz_bias_size = (cr_block * sizeof(int32_t));
const size_t yz_weight_size = yz_block * cr_block;
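  /* Packed weight layout constants: channels are grouped in blocks of 8
     (cr_block); for each group, the x == 0 yz slice is preceded by the 8
     int32 bias values (yz_bias_size bytes). */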
for (size_t output_y = 0; output_y < output_height; output_y++) {
const uint8_t** input_row_start = input;
for (size_t output_x = 0; output_x < output_width; output_x++) {
for (size_t c = 0; c < channels; c++) {
int32_t accumulator =
(weights_ptr.as_int32_ptr +
((c / cr_block) * (yz_bias_size + yz_weight_size) /
sizeof(int32_t)))[c % cr_block];
for (int x = 0; x < kernel_width; x++) {
for (int y = 0; y < kernel_height; y++) {
for (int z = 0; z < kernel_depth; z++) {
int32_t input_val =
(int32_t)(input
[z + kernel_depth * y +
kernel_depth * kernel_height * x][c]);
/*
* The weights are set up as follows
* (where Wzyx means the weight for kernel position Z=z, Y=y, X=x, and cn means
* channel n)
*
* x = 0 (first yz slice) region:
* 0_______________32______________40______________48 96______________104
* | BIAS | W000 | W100 | | W220 |
* | c0 | ... | c8 | c0 | ... | c8 | c0 | ... | c8 | ... | c0 | ... | c8 |
* ----------------------------------------------- ---------------
* (4 bytes x 8) (1 byte x 8) (1 byte x 8) (1 byte x 8)
*
* 104_____________136_____________144_____________152 200_____________208
* | BIAS | W000 | W100 | | W220 |
* | c8 | ... | c15| c8 | ... | c15| c8 | ... | c15| ... | c8 | ... | c15|
* ----------------------------------------------- ---------------
*
* ... Repeat the above arrangement over all chunks of 8 channels, then ...
*
* x = 1 (second yz slice) region:
* +0_______________+8_____________+16 +64_____________+72
* | W001 | W101 | | W221 |
* | c0 | ... | c7 | c0 | ... | c7 | ... | c0 | ... | c7 |
* ------------------------------- ---------------
* +72_____________+80____________+88 +136____________+144
* | W001 | W101 | | W221 |
* | c8 | ... | c15| c8 | ... | c15| ... | c8 | ... | c15|
* ------------------------------- ---------------
*
* ... Repeat the above arrangement over all chunks of 8 channels, then ...
*
* x = 2 (third yz slice) region:
* +0_______________+8_____________+16 +64_____________+72
* | W002 | W102 | | W222 |
* | c0 | ... | c7 | c0 | ... | c7 | ... | c0 | ... | c7 |
* ------------------------------- ---------------
* +72____________+80____________+88 +136____________+144
* | W002 | W102 | | W222 |
* | c8 | ... | c15| c8 | ... | c15| ... | c8 | ... | c15|
* ------------------------------- ---------------
*
* ... Repeat the above arrangement over all chunks of 8 channels
*/
size_t yz_slice_advance_per_group = 0; // Get to yz slice
size_t channel_chunk_advance = 0; // Get to 8-channel chunk
size_t bias_advance = 0; // Get past bias
if (x == 0) {
channel_chunk_advance = yz_bias_size + yz_weight_size;
bias_advance = yz_bias_size;
} else {
yz_slice_advance_per_group = yz_bias_size + x * yz_weight_size;
channel_chunk_advance = yz_weight_size;
}
const size_t yz_position_advance =
((kernel_depth * y + z) * cr_block); // Get to y and z
const uint8_t* w_zyxc_ptr = weights_ptr.as_uint8_ptr +
yz_slice_advance_per_group * num_groups +
channel_chunk_advance * (c / cr_block) +
bias_advance +
yz_position_advance;
int32_t w = (int32_t)(w_zyxc_ptr[c % cr_block]);
int32_t kernel_zero_point =
(int32_t)(kernel_zero_points[c % channels]);
accumulator +=
(w - kernel_zero_point) * (input_val - input_zero_point);
}
}
}
// Requantization
// 1) Convert to float and multiply by scale
double scaled_accumulator =
accumulator * ((double)(requantization_scales[c]));
// 2) Cast to int
int32_t int_accumulator = (int32_t)(nearbyint(scaled_accumulator));
// 3) Add zero point
int32_t shifted_accumulator = int_accumulator + output_zero_point;
// 4) Clip to [output_min, output_max]
if (shifted_accumulator > output_max) {
shifted_accumulator = output_max;
} else if (shifted_accumulator < output_min) {
shifted_accumulator = output_min;
}
output[c] = (uint8_t)(shifted_accumulator);
}
input = (const uint8_t**)((uint8_t*)input + input_col_stride);
output += channels + output_increment;
}
input = (const uint8_t**)((uint8_t*)input_row_start + input_row_stride);
}
}
file_length: 6,672 | avg_line_length: 40.70625 | max_line_length: 79 | extension_type: c
repo: null | file: pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8dwconv/up8x9-sse2-per-channel.c
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <immintrin.h>
#include <qnnpack/q8dwconv.h>
#include <requantization/runtime-sse2.h>
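/*
 * up8x9 per-channel depthwise convolution ukernel: 9 taps (3x3 kernel),
 * 8 channels processed per loop iteration, with per-output-channel kernel
 * zero points and requantization scales.
 */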
void pytorch_q8dwconv_ukernel_up8x9_per_channel__sse2(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
size_t input_stride,
size_t output_increment,
const union pytorch_qnnp_conv_quantization_params
quantization_params[RESTRICT_STATIC 1]) {
const __m128i va_zero_point = _mm_load_si128(
(const __m128i*)quantization_params->sse2.input_zero_point);
const __m128i vzero = _mm_setzero_si128();
do {
const uint8_t* i0 = input[0];
const uint8_t* i1 = input[1];
const uint8_t* i2 = input[2];
const uint8_t* i3 = input[3];
const uint8_t* i4 = input[4];
const uint8_t* i5 = input[5];
const uint8_t* i6 = input[6];
const uint8_t* i7 = input[7];
const uint8_t* i8 = input[8];
input = (const uint8_t**)((uintptr_t)input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 8; c -= 8) {
__m128i vacc_lo = _mm_loadu_si128((const __m128i*)w);
__m128i vacc_hi = _mm_loadu_si128((const __m128i*)((uintptr_t)w + 16));
const __m128i vkernel_zero_point = _mm_loadl_epi64(
(const __m128i*)
&quantization_params->sse2.kernel_zero_points[channels - c]);
const __m128i vi0 = _mm_loadl_epi64((const __m128i*)i0);
i0 += 8;
const __m128i vxi0 =
sub_zero_point(_mm_unpacklo_epi8(vi0, vzero), va_zero_point);
const __m128i vk0 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 32));
const __m128i vxk0 =
_mm_sub_epi16(
_mm_unpacklo_epi8(vk0, vzero),
_mm_unpacklo_epi8(vkernel_zero_point, vzero));
const __m128i vprod0_odd = _mm_mullo_epi16(vxi0, vxk0);
const __m128i vprod0_even = _mm_mulhi_epi16(vxi0, vxk0);
vacc_lo =
_mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod0_odd, vprod0_even));
vacc_hi =
_mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod0_odd, vprod0_even));
const __m128i vi1 = _mm_loadl_epi64((const __m128i*)i1);
i1 += 8;
const __m128i vxi1 =
sub_zero_point(_mm_unpacklo_epi8(vi1, vzero), va_zero_point);
const __m128i vk1 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 40));
const __m128i vxk1 =
_mm_sub_epi16(
_mm_unpacklo_epi8(vk1, vzero),
_mm_unpacklo_epi8(vkernel_zero_point, vzero));
const __m128i vprod1_odd = _mm_mullo_epi16(vxi1, vxk1);
const __m128i vprod1_even = _mm_mulhi_epi16(vxi1, vxk1);
vacc_lo =
_mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod1_odd, vprod1_even));
vacc_hi =
_mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod1_odd, vprod1_even));
const __m128i vi2 = _mm_loadl_epi64((const __m128i*)i2);
i2 += 8;
const __m128i vxi2 =
sub_zero_point(_mm_unpacklo_epi8(vi2, vzero), va_zero_point);
const __m128i vk2 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 48));
const __m128i vxk2 =
_mm_sub_epi16(
_mm_unpacklo_epi8(vk2, vzero),
_mm_unpacklo_epi8(vkernel_zero_point, vzero));
const __m128i vprod2_odd = _mm_mullo_epi16(vxi2, vxk2);
const __m128i vprod2_even = _mm_mulhi_epi16(vxi2, vxk2);
vacc_lo =
_mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod2_odd, vprod2_even));
vacc_hi =
_mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod2_odd, vprod2_even));
const __m128i vi3 = _mm_loadl_epi64((const __m128i*)i3);
i3 += 8;
const __m128i vxi3 =
sub_zero_point(_mm_unpacklo_epi8(vi3, vzero), va_zero_point);
const __m128i vk3 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 56));
const __m128i vxk3 =
_mm_sub_epi16(
_mm_unpacklo_epi8(vk3, vzero),
_mm_unpacklo_epi8(vkernel_zero_point, vzero));
const __m128i vprod3_odd = _mm_mullo_epi16(vxi3, vxk3);
const __m128i vprod3_even = _mm_mulhi_epi16(vxi3, vxk3);
vacc_lo =
_mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod3_odd, vprod3_even));
vacc_hi =
_mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod3_odd, vprod3_even));
const __m128i vi4 = _mm_loadl_epi64((const __m128i*)i4);
i4 += 8;
const __m128i vxi4 =
sub_zero_point(_mm_unpacklo_epi8(vi4, vzero), va_zero_point);
const __m128i vk4 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 64));
const __m128i vxk4 =
_mm_sub_epi16(
_mm_unpacklo_epi8(vk4, vzero),
_mm_unpacklo_epi8(vkernel_zero_point, vzero));
const __m128i vprod4_odd = _mm_mullo_epi16(vxi4, vxk4);
const __m128i vprod4_even = _mm_mulhi_epi16(vxi4, vxk4);
vacc_lo =
_mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod4_odd, vprod4_even));
vacc_hi =
_mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod4_odd, vprod4_even));
const __m128i vi5 = _mm_loadl_epi64((const __m128i*)i5);
i5 += 8;
const __m128i vxi5 =
sub_zero_point(_mm_unpacklo_epi8(vi5, vzero), va_zero_point);
const __m128i vk5 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 72));
const __m128i vxk5 =
_mm_sub_epi16(
_mm_unpacklo_epi8(vk5, vzero),
_mm_unpacklo_epi8(vkernel_zero_point, vzero));
const __m128i vprod5_odd = _mm_mullo_epi16(vxi5, vxk5);
const __m128i vprod5_even = _mm_mulhi_epi16(vxi5, vxk5);
vacc_lo =
_mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod5_odd, vprod5_even));
vacc_hi =
_mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod5_odd, vprod5_even));
const __m128i vi6 = _mm_loadl_epi64((const __m128i*)i6);
i6 += 8;
const __m128i vxi6 =
sub_zero_point(_mm_unpacklo_epi8(vi6, vzero), va_zero_point);
const __m128i vk6 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 80));
const __m128i vxk6 =
_mm_sub_epi16(
_mm_unpacklo_epi8(vk6, vzero),
_mm_unpacklo_epi8(vkernel_zero_point, vzero));
const __m128i vprod6_odd = _mm_mullo_epi16(vxi6, vxk6);
const __m128i vprod6_even = _mm_mulhi_epi16(vxi6, vxk6);
vacc_lo =
_mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod6_odd, vprod6_even));
vacc_hi =
_mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod6_odd, vprod6_even));
const __m128i vi7 = _mm_loadl_epi64((const __m128i*)i7);
i7 += 8;
const __m128i vxi7 =
sub_zero_point(_mm_unpacklo_epi8(vi7, vzero), va_zero_point);
const __m128i vk7 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 88));
const __m128i vxk7 =
_mm_sub_epi16(
_mm_unpacklo_epi8(vk7, vzero),
_mm_unpacklo_epi8(vkernel_zero_point, vzero));
const __m128i vprod7_odd = _mm_mullo_epi16(vxi7, vxk7);
const __m128i vprod7_even = _mm_mulhi_epi16(vxi7, vxk7);
vacc_lo =
_mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod7_odd, vprod7_even));
vacc_hi =
_mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod7_odd, vprod7_even));
const __m128i vi8 = _mm_loadl_epi64((const __m128i*)i8);
i8 += 8;
const __m128i vxi8 =
sub_zero_point(_mm_unpacklo_epi8(vi8, vzero), va_zero_point);
const __m128i vk8 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 96));
const __m128i vxk8 =
_mm_sub_epi16(
_mm_unpacklo_epi8(vk8, vzero),
_mm_unpacklo_epi8(vkernel_zero_point, vzero));
const __m128i vprod8_odd = _mm_mullo_epi16(vxi8, vxk8);
const __m128i vprod8_even = _mm_mulhi_epi16(vxi8, vxk8);
vacc_lo =
_mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod8_odd, vprod8_even));
vacc_hi =
_mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod8_odd, vprod8_even));
w = (void*)((uintptr_t)w + 104);
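      /* Per-channel requantization: load the 8 float scales for this channel
         group and scale the int32 accumulators before packing. */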
const __m128 vmultiplier_lo =
_mm_loadu_ps(&quantization_params->sse2.requantization_scales[channels - c]);
const __m128 vmultiplier_hi =
_mm_loadu_ps(&quantization_params->sse2.requantization_scales[channels - c + 4]);
vacc_lo = _mm_cvtps_epi32(
_mm_mul_ps(
_mm_cvtepi32_ps(vacc_lo),
vmultiplier_lo
)
);
vacc_hi = _mm_cvtps_epi32(
_mm_mul_ps(
_mm_cvtepi32_ps(vacc_hi),
vmultiplier_hi
)
);
const __m128i voutput_zero_point = _mm_load_si128(
(const __m128i*)quantization_params->sse2.output_zero_point);
__m128i vout =
_mm_adds_epi16(_mm_packs_epi32(vacc_lo, vacc_hi), voutput_zero_point);
vout = _mm_packus_epi16(vout, vout);
vout = _mm_min_epu8(
vout,
_mm_load_si128((const __m128i*)quantization_params->sse2.output_max));
vout = _mm_max_epu8(
vout,
_mm_load_si128((const __m128i*)quantization_params->sse2.output_min));
_mm_storel_epi64((__m128i*)output, vout);
output += 8;
}
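    /* Channel tail: process the final 1-7 channels using shifted loads and
       4/2/1-byte partial stores. */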
if (c != 0) {
const size_t i_predecrement = 8 - c;
const __m128i vi_shift = _mm_cvtsi32_si128(8 * i_predecrement);
const __m128i vkernel_zero_point = _mm_loadl_epi64(
(const __m128i*)
&quantization_params->sse2.kernel_zero_points[channels - c]);
i0 -= i_predecrement;
i1 -= i_predecrement;
i2 -= i_predecrement;
i3 -= i_predecrement;
i4 -= i_predecrement;
i5 -= i_predecrement;
i6 -= i_predecrement;
i7 -= i_predecrement;
i8 -= i_predecrement;
__m128i vacc_lo = _mm_loadu_si128((const __m128i*)w);
__m128i vacc_hi = _mm_loadu_si128((const __m128i*)((uintptr_t)w + 16));
const __m128i vi0 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i0), vi_shift);
const __m128i vxi0 =
sub_zero_point(_mm_unpacklo_epi8(vi0, vzero), va_zero_point);
const __m128i vk0 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 32));
const __m128i vxk0 =
_mm_sub_epi16(
_mm_unpacklo_epi8(vk0, vzero),
_mm_unpacklo_epi8(vkernel_zero_point, vzero));
const __m128i vprod0_odd = _mm_mullo_epi16(vxi0, vxk0);
const __m128i vprod0_even = _mm_mulhi_epi16(vxi0, vxk0);
vacc_lo =
_mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod0_odd, vprod0_even));
vacc_hi =
_mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod0_odd, vprod0_even));
const __m128i vi1 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i1), vi_shift);
const __m128i vxi1 =
sub_zero_point(_mm_unpacklo_epi8(vi1, vzero), va_zero_point);
const __m128i vk1 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 40));
const __m128i vxk1 =
_mm_sub_epi16(
_mm_unpacklo_epi8(vk1, vzero),
_mm_unpacklo_epi8(vkernel_zero_point, vzero));
const __m128i vprod1_odd = _mm_mullo_epi16(vxi1, vxk1);
const __m128i vprod1_even = _mm_mulhi_epi16(vxi1, vxk1);
vacc_lo =
_mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod1_odd, vprod1_even));
vacc_hi =
_mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod1_odd, vprod1_even));
const __m128i vi2 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i2), vi_shift);
const __m128i vxi2 =
sub_zero_point(_mm_unpacklo_epi8(vi2, vzero), va_zero_point);
const __m128i vk2 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 48));
const __m128i vxk2 =
_mm_sub_epi16(
_mm_unpacklo_epi8(vk2, vzero),
_mm_unpacklo_epi8(vkernel_zero_point, vzero));
const __m128i vprod2_odd = _mm_mullo_epi16(vxi2, vxk2);
const __m128i vprod2_even = _mm_mulhi_epi16(vxi2, vxk2);
vacc_lo =
_mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod2_odd, vprod2_even));
vacc_hi =
_mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod2_odd, vprod2_even));
const __m128i vi3 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i3), vi_shift);
const __m128i vxi3 =
sub_zero_point(_mm_unpacklo_epi8(vi3, vzero), va_zero_point);
const __m128i vk3 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 56));
const __m128i vxk3 =
_mm_sub_epi16(
_mm_unpacklo_epi8(vk3, vzero),
_mm_unpacklo_epi8(vkernel_zero_point, vzero));
const __m128i vprod3_odd = _mm_mullo_epi16(vxi3, vxk3);
const __m128i vprod3_even = _mm_mulhi_epi16(vxi3, vxk3);
vacc_lo =
_mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod3_odd, vprod3_even));
vacc_hi =
_mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod3_odd, vprod3_even));
const __m128i vi4 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i4), vi_shift);
const __m128i vxi4 =
sub_zero_point(_mm_unpacklo_epi8(vi4, vzero), va_zero_point);
const __m128i vk4 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 64));
const __m128i vxk4 =
_mm_sub_epi16(
_mm_unpacklo_epi8(vk4, vzero),
_mm_unpacklo_epi8(vkernel_zero_point, vzero));
const __m128i vprod4_odd = _mm_mullo_epi16(vxi4, vxk4);
const __m128i vprod4_even = _mm_mulhi_epi16(vxi4, vxk4);
vacc_lo =
_mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod4_odd, vprod4_even));
vacc_hi =
_mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod4_odd, vprod4_even));
const __m128i vi5 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i5), vi_shift);
const __m128i vxi5 =
sub_zero_point(_mm_unpacklo_epi8(vi5, vzero), va_zero_point);
const __m128i vk5 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 72));
const __m128i vxk5 =
_mm_sub_epi16(
_mm_unpacklo_epi8(vk5, vzero),
_mm_unpacklo_epi8(vkernel_zero_point, vzero));
const __m128i vprod5_odd = _mm_mullo_epi16(vxi5, vxk5);
const __m128i vprod5_even = _mm_mulhi_epi16(vxi5, vxk5);
vacc_lo =
_mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod5_odd, vprod5_even));
vacc_hi =
_mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod5_odd, vprod5_even));
const __m128i vi6 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i6), vi_shift);
const __m128i vxi6 =
sub_zero_point(_mm_unpacklo_epi8(vi6, vzero), va_zero_point);
const __m128i vk6 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 80));
const __m128i vxk6 =
_mm_sub_epi16(
_mm_unpacklo_epi8(vk6, vzero),
_mm_unpacklo_epi8(vkernel_zero_point, vzero));
const __m128i vprod6_odd = _mm_mullo_epi16(vxi6, vxk6);
const __m128i vprod6_even = _mm_mulhi_epi16(vxi6, vxk6);
vacc_lo =
_mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod6_odd, vprod6_even));
vacc_hi =
_mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod6_odd, vprod6_even));
const __m128i vi7 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i7), vi_shift);
const __m128i vxi7 =
sub_zero_point(_mm_unpacklo_epi8(vi7, vzero), va_zero_point);
const __m128i vk7 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 88));
const __m128i vxk7 =
_mm_sub_epi16(
_mm_unpacklo_epi8(vk7, vzero),
_mm_unpacklo_epi8(vkernel_zero_point, vzero));
const __m128i vprod7_odd = _mm_mullo_epi16(vxi7, vxk7);
const __m128i vprod7_even = _mm_mulhi_epi16(vxi7, vxk7);
vacc_lo =
_mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod7_odd, vprod7_even));
vacc_hi =
_mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod7_odd, vprod7_even));
const __m128i vi8 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i8), vi_shift);
const __m128i vxi8 =
sub_zero_point(_mm_unpacklo_epi8(vi8, vzero), va_zero_point);
const __m128i vk8 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 96));
const __m128i vxk8 =
_mm_sub_epi16(
_mm_unpacklo_epi8(vk8, vzero),
_mm_unpacklo_epi8(vkernel_zero_point, vzero));
const __m128i vprod8_odd = _mm_mullo_epi16(vxi8, vxk8);
const __m128i vprod8_even = _mm_mulhi_epi16(vxi8, vxk8);
vacc_lo =
_mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod8_odd, vprod8_even));
vacc_hi =
_mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod8_odd, vprod8_even));
const __m128 vmultiplier_lo =
_mm_loadu_ps(&quantization_params->sse2.requantization_scales[channels - c]);
const __m128 vmultiplier_hi =
_mm_loadu_ps(&quantization_params->sse2.requantization_scales[channels - c + 4]);
vacc_lo = _mm_cvtps_epi32(
_mm_mul_ps(
_mm_cvtepi32_ps(vacc_lo),
vmultiplier_lo
)
);
vacc_hi = _mm_cvtps_epi32(
_mm_mul_ps(
_mm_cvtepi32_ps(vacc_hi),
vmultiplier_hi
)
);
const __m128i voutput_zero_point = _mm_load_si128(
(const __m128i*)quantization_params->sse2.output_zero_point);
__m128i vout =
_mm_adds_epi16(_mm_packs_epi32(vacc_lo, vacc_hi), voutput_zero_point);
vout = _mm_packus_epi16(vout, vout);
vout = _mm_min_epu8(
vout,
_mm_load_si128((const __m128i*)quantization_params->sse2.output_max));
vout = _mm_max_epu8(
vout,
_mm_load_si128((const __m128i*)quantization_params->sse2.output_min));
if (c & 4) {
*((uint32_t*)output) = (uint32_t)_mm_cvtsi128_si32(vout);
output += 4;
vout = _mm_srli_epi64(vout, 32);
}
if (c & 2) {
*((uint16_t*)output) = (uint16_t)_mm_extract_epi16(vout, 0);
output += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (c & 1) {
*((uint8_t*)output) = (uint8_t)_mm_cvtsi128_si32(vout);
output += 1;
}
}
output = (uint8_t*)((uintptr_t)output + output_increment);
} while (--output_width != 0);
}
file_length: 18,569 | avg_line_length: 41.108844 | max_line_length: 91 | extension_type: c